1
2
3
4
5
6
7
8
9
10
11#include <linux/clk.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/slab.h>
17#include <linux/init.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h>
20#include <linux/dma-mapping.h>
21#include <linux/platform_device.h>
22#include <linux/phy.h>
23
24#include <mach/board.h>
25#include <mach/cpu.h>
26
27#include "macb.h"
28
/* Size of one RX DMA buffer; a frame larger than this spans several
 * descriptors ("fragments") that macb_rx_frame() reassembles. */
#define RX_BUFFER_SIZE 128
#define RX_RING_SIZE 512
#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE)

/* Bytes reserved at the skb head so the IP header ends up word-aligned. */
#define RX_OFFSET 2

#define TX_RING_SIZE 128
#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1)
#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE)

/* Number of TX descriptors deliberately left unused (ring minus the
 * configurable "pending" budget). */
#define TX_RING_GAP(bp) \
	(TX_RING_SIZE - (bp)->tx_pending)
/* Free TX descriptors between head (producer) and tail (consumer),
 * honouring the ring wrap-around. */
#define TX_BUFFS_AVAIL(bp) \
	(((bp)->tx_tail <= (bp)->tx_head) ? \
	 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \
	 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
/* Ring index increment; sizes are powers of two so masking wraps. */
#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1))

#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1))

/* Wake the TX queue once this many descriptors are free again. */
#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4)

/* Interrupt causes handled by the NAPI RX path. */
#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
			   | MACB_BIT(ISR_ROVR))
55
/*
 * Program dev->dev_addr into the hardware specific-address registers:
 * SA1B holds the low 4 bytes, SA1T the top 2.
 */
static void __macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	/* NOTE(review): cpu_to_le32/16 on a host-order load of the address
	 * bytes looks byte-order sensitive; presumably only little-endian
	 * platforms use this device — confirm on BE. */
	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_writel(bp, SA1T, top);
}
66
/*
 * Read the MAC address the bootloader (possibly) left in SA1B/SA1T.
 * If it is not a valid unicast address, fall back to a random one.
 */
static void __init macb_get_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;
	u8 addr[6];

	bottom = macb_readl(bp, SA1B);
	top = macb_readl(bp, SA1T);

	/* Registers hold the address little-endian: byte 0 in SA1B bits 7:0 */
	addr[0] = bottom & 0xff;
	addr[1] = (bottom >> 8) & 0xff;
	addr[2] = (bottom >> 16) & 0xff;
	addr[3] = (bottom >> 24) & 0xff;
	addr[4] = top & 0xff;
	addr[5] = (top >> 8) & 0xff;

	if (is_valid_ether_addr(addr)) {
		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
	} else {
		dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
		random_ether_addr(bp->dev->dev_addr);
	}
}
90
/*
 * MDIO read: issue a clause-22 read frame via the MAN register and
 * busy-wait for the PHY-management logic to go idle, then return the
 * 16-bit data field.
 */
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer (no timeout; MDIO is expected to finish) */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}
110
/*
 * MDIO write: issue a clause-22 write frame carrying @value and
 * busy-wait until the management interface is idle again.
 */
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
129
/* The MACB management interface needs no bus-level reset; stub for phylib. */
static int macb_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
134
/*
 * phylib adjust_link callback: mirror the PHY's negotiated speed/duplex
 * into NCFGR and cache link state in struct macb.  Register updates are
 * done under bp->lock; the printk happens after the lock is dropped.
 */
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		/* Reprogram NCFGR only when speed or duplex changed */
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);

			macb_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		/* Link went down: invalidate the cached speed/duplex so the
		 * next link-up always reprograms NCFGR */
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full":"Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}
187
188
189static int macb_mii_probe(struct net_device *dev)
190{
191 struct macb *bp = netdev_priv(dev);
192 struct phy_device *phydev = NULL;
193 struct eth_platform_data *pdata;
194 int phy_addr;
195
196
197 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
198 if (bp->mii_bus->phy_map[phy_addr]) {
199 phydev = bp->mii_bus->phy_map[phy_addr];
200 break;
201 }
202 }
203
204 if (!phydev) {
205 printk (KERN_ERR "%s: no PHY found\n", dev->name);
206 return -1;
207 }
208
209 pdata = bp->pdev->dev.platform_data;
210
211
212
213 if (pdata && pdata->is_rmii) {
214 phydev = phy_connect(dev, dev_name(&phydev->dev),
215 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII);
216 } else {
217 phydev = phy_connect(dev, dev_name(&phydev->dev),
218 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
219 }
220
221 if (IS_ERR(phydev)) {
222 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
223 return PTR_ERR(phydev);
224 }
225
226
227 phydev->supported &= PHY_BASIC_FEATURES;
228
229 phydev->advertising = phydev->supported;
230
231 bp->link = 0;
232 bp->speed = 0;
233 bp->duplex = -1;
234 bp->phy_dev = phydev;
235
236 return 0;
237}
238
239static int macb_mii_init(struct macb *bp)
240{
241 struct eth_platform_data *pdata;
242 int err = -ENXIO, i;
243
244
245 macb_writel(bp, NCR, MACB_BIT(MPE));
246
247 bp->mii_bus = mdiobus_alloc();
248 if (bp->mii_bus == NULL) {
249 err = -ENOMEM;
250 goto err_out;
251 }
252
253 bp->mii_bus->name = "MACB_mii_bus";
254 bp->mii_bus->read = &macb_mdio_read;
255 bp->mii_bus->write = &macb_mdio_write;
256 bp->mii_bus->reset = &macb_mdio_reset;
257 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
258 bp->mii_bus->priv = bp;
259 bp->mii_bus->parent = &bp->dev->dev;
260 pdata = bp->pdev->dev.platform_data;
261
262 if (pdata)
263 bp->mii_bus->phy_mask = pdata->phy_mask;
264
265 bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
266 if (!bp->mii_bus->irq) {
267 err = -ENOMEM;
268 goto err_out_free_mdiobus;
269 }
270
271 for (i = 0; i < PHY_MAX_ADDR; i++)
272 bp->mii_bus->irq[i] = PHY_POLL;
273
274 platform_set_drvdata(bp->dev, bp->mii_bus);
275
276 if (mdiobus_register(bp->mii_bus))
277 goto err_out_free_mdio_irq;
278
279 if (macb_mii_probe(bp->dev) != 0) {
280 goto err_out_unregister_bus;
281 }
282
283 return 0;
284
285err_out_unregister_bus:
286 mdiobus_unregister(bp->mii_bus);
287err_out_free_mdio_irq:
288 kfree(bp->mii_bus->irq);
289err_out_free_mdiobus:
290 mdiobus_free(bp->mii_bus);
291err_out:
292 return err;
293}
294
/*
 * Accumulate the hardware statistics counters into bp->hw_stats.
 * Relies on the hw_stats fields being laid out in exactly the same
 * order as the counter registers from MACB_PFR to MACB_TPF — the
 * WARN_ON checks that correspondence.  Counters clear on read, so the
 * values are added, not assigned.
 */
static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.rx_pause_frames;
	u32 *end = &bp->hw_stats.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for(; p < end; p++, reg++)
		*p += __raw_readl(reg);
}
306
/*
 * Reap completed TX descriptors and recover from TX error conditions.
 * Called from the interrupt handler with bp->lock held.
 */
static void macb_tx(struct macb *bp)
{
	unsigned int tail;
	unsigned int head;
	u32 status;

	/* TSR is write-one-to-clear: ack exactly the bits we observed */
	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
		(unsigned long)status);

	if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
		int i;
		printk(KERN_ERR "%s: TX %s, resetting buffers\n",
			bp->dev->name, status & MACB_BIT(UND) ?
			"underrun" : "retry limit exceeded");

		/* Transmitter may still be active: disable it while we
		 * rewrite the descriptor ring */
		if (status & MACB_BIT(TGO))
			macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));

		head = bp->tx_head;

		/* Hand every descriptor back to software */
		for (i = 0; i < TX_RING_SIZE; i++)
			bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);

		/* Free any skbs that were queued but never completed */
		for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
			struct ring_info *rp = &bp->tx_skb[tail];
			struct sk_buff *skb = rp->skb;

			BUG_ON(skb == NULL);

			/* order descriptor reads before touching the skb */
			rmb();

			dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
				DMA_TO_DEVICE);
			rp->skb = NULL;
			dev_kfree_skb_irq(skb);
		}

		bp->tx_head = bp->tx_tail = 0;

		/* Re-enable the transmitter if we turned it off above */
		if (status & MACB_BIT(TGO))
			macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
	}

	if (!(status & MACB_BIT(COMP)))
		/*
		 * No transmit-complete bit: nothing to reap this time
		 * (the interrupt may have been for an error handled above).
		 */
		return;

	/* Walk tail..head reaping descriptors the hardware handed back */
	head = bp->tx_head;
	for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
		struct ring_info *rp = &bp->tx_skb[tail];
		struct sk_buff *skb = rp->skb;
		u32 bufstat;

		BUG_ON(skb == NULL);

		rmb();
		bufstat = bp->tx_ring[tail].ctrl;

		/* Stop at the first descriptor still owned by hardware */
		if (!(bufstat & MACB_BIT(TX_USED)))
			break;

		dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
			tail, skb->data);
		dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
				 DMA_TO_DEVICE);
		bp->stats.tx_packets++;
		bp->stats.tx_bytes += skb->len;
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_tail = tail;
	/* Restart the queue once enough descriptors are free again */
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);
}
394
/*
 * Reassemble one received frame from the descriptors first_frag..last_frag
 * (inclusive, wrapping) into a freshly allocated skb and push it up the
 * stack.  Returns 0 on success, 1 if the frame was dropped for lack of
 * memory.  In both cases the descriptors are handed back to hardware.
 */
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset = 0;
	struct sk_buff *skb;

	/* Total frame length lives in the last fragment's descriptor */
	len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);

	dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		first_frag, last_frag, len);

	skb = dev_alloc_skb(len + RX_OFFSET);
	if (!skb) {
		bp->stats.rx_dropped++;
		/* Drop the frame: give all its descriptors back to HW */
		for (frag = first_frag; ; frag = NEXT_RX(frag)) {
			bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}
		wmb();
		return 1;
	}

	/* Align the IP header (RX_OFFSET = 2) */
	skb_reserve(skb, RX_OFFSET);
	skb->ip_summed = CHECKSUM_NONE;
	skb_put(skb, len);

	/* Copy each fragment's payload; the final fragment may be short */
	for (frag = first_frag; ; frag = NEXT_RX(frag)) {
		unsigned int frag_len = RX_BUFFER_SIZE;

		if (offset + frag_len > len) {
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       (bp->rx_buffers +
						(RX_BUFFER_SIZE * frag)),
					       frag_len);
		offset += RX_BUFFER_SIZE;
		/* Fragment copied: return the descriptor to hardware */
		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
		wmb();

		if (frag == last_frag)
			break;
	}

	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += len;
	dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
		skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
453
454
455static void discard_partial_frame(struct macb *bp, unsigned int begin,
456 unsigned int end)
457{
458 unsigned int frag;
459
460 for (frag = begin; frag != end; frag = NEXT_RX(frag))
461 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
462 wmb();
463
464
465
466
467
468
469}
470
/*
 * NAPI receive worker: scan the RX ring from bp->rx_tail, assembling
 * SOF..EOF fragment runs into frames, until either the budget is spent
 * or we hit a descriptor still owned by hardware.  Returns the number
 * of frames delivered.
 */
static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail = bp->rx_tail;
	int first_frag = -1;

	for (; budget > 0; tail = NEXT_RX(tail)) {
		u32 addr, ctrl;

		/* read descriptor fields after the USED bit check below */
		rmb();
		addr = bp->rx_ring[tail].addr;
		ctrl = bp->rx_ring[tail].ctrl;

		/* Hardware still owns this descriptor: nothing more to do */
		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			/* A new start-of-frame while one was in progress
			 * means the previous frame was truncated: drop it */
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;
			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	/* Resume next time at the start of any partially-seen frame */
	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}
513
/*
 * NAPI poll callback: ack RSR, receive up to @budget frames, and when
 * done re-enable the RX interrupt sources that macb_interrupt() masked.
 */
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	/* RSR is write-one-to-clear: ack what we read */
	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;

	dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
		(unsigned long)status, budget);

	work_done = macb_rx(bp, budget);
	if (work_done < budget)
		napi_complete(napi);

	/*
	 * NOTE(review): interrupts are re-enabled unconditionally, even
	 * when the budget was exhausted and napi_complete() was not
	 * called — presumably a benign extra IRQ, but worth confirming.
	 */
	macb_writel(bp, IER, MACB_RX_INT_FLAGS);

	return work_done;
}
542
/*
 * Top-half interrupt handler.  Loops re-reading ISR (read-to-clear)
 * until no causes remain: RX work is deferred to NAPI (with the RX
 * interrupt sources masked until the poll finishes), TX completion and
 * TX errors are handled inline, and HRESP bus errors are only logged.
 */
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *bp = netdev_priv(dev);
	u32 status;

	status = macb_readl(bp, ISR);

	/* Shared IRQ line: not ours if no cause bits are set */
	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close: mask everything */
		if (unlikely(!netif_running(dev))) {
			macb_writel(bp, IDR, ~0UL);
			break;
		}

		if (status & MACB_RX_INT_FLAGS) {
			if (napi_schedule_prep(&bp->napi)) {
				/*
				 * Mask RX interrupts; macb_poll() will
				 * re-enable them once it is done.
				 */
				macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
				dev_dbg(&bp->pdev->dev,
					"scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
			      MACB_BIT(ISR_RLE)))
			macb_tx(bp);

		/*
		 * Link-change and stats-overflow bits, if any, are
		 * intentionally ignored here; phylib handles the link.
		 */
		if (status & MACB_BIT(HRESP)) {
			/* AHB error during a DMA access; nothing we can
			 * recover here, just report it */
			printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
			       dev->name);
		}

		/* ISR clears on read; loop until no causes are pending */
		status = macb_readl(bp, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
601
602#ifdef CONFIG_NET_POLL_CONTROLLER
603
604
605
606
/*
 * Netpoll hook: invoke the interrupt handler with local interrupts
 * disabled so netconsole & co. can drain the device without real IRQs.
 */
static void macb_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	macb_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
615#endif
616
/*
 * Queue one skb for transmission: map it for DMA, fill the next TX
 * descriptor (each frame uses exactly one descriptor — TX_LAST is always
 * set), and kick the transmitter.  Stops the queue when the ring fills.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring was unexpectedly
 * full while the queue was awake.
 */
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	dma_addr_t mapping;
	unsigned int len, entry;
	u32 ctrl;
	unsigned long flags;

#ifdef DEBUG
	int i;
	dev_dbg(&bp->pdev->dev,
		"start_xmit: len %u head %p data %p tail %p end %p\n",
		skb->len, skb->head, skb->data,
		skb_tail_pointer(skb), skb_end_pointer(skb));
	dev_dbg(&bp->pdev->dev,
		"data:");
	for (i = 0; i < 16; i++)
		printk(" %02x", (unsigned int)skb->data[i]);
	printk("\n");
#endif

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This should never happen: the queue is stopped before the ring
	 * runs dry (see below), so a full ring here is a driver bug */
	if (TX_BUFFS_AVAIL(bp) < 1) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		dev_err(&bp->pdev->dev,
			"BUG! Tx Ring full when queue awake!\n");
		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
			bp->tx_head, bp->tx_tail);
		return NETDEV_TX_BUSY;
	}

	entry = bp->tx_head;
	dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 len, DMA_TO_DEVICE);
	/* Remember skb and mapping so macb_tx() can unmap and free them */
	bp->tx_skb[entry].skb = skb;
	bp->tx_skb[entry].mapping = mapping;
	dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
		skb->data, (unsigned long)mapping);

	ctrl = MACB_BF(TX_FRMLEN, len);
	ctrl |= MACB_BIT(TX_LAST);
	if (entry == (TX_RING_SIZE - 1))
		ctrl |= MACB_BIT(TX_WRAP);

	/* Writing ctrl (clearing TX_USED) hands the descriptor to HW,
	 * so the address must be written first; wmb() orders both before
	 * the TSTART kick below */
	bp->tx_ring[entry].addr = mapping;
	bp->tx_ring[entry].ctrl = ctrl;
	wmb();

	entry = NEXT_TX(entry);
	bp->tx_head = entry;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	/* Out of descriptors: stop the queue until macb_tx() reaps some */
	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&bp->lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
684
685static void macb_free_consistent(struct macb *bp)
686{
687 if (bp->tx_skb) {
688 kfree(bp->tx_skb);
689 bp->tx_skb = NULL;
690 }
691 if (bp->rx_ring) {
692 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
693 bp->rx_ring, bp->rx_ring_dma);
694 bp->rx_ring = NULL;
695 }
696 if (bp->tx_ring) {
697 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
698 bp->tx_ring, bp->tx_ring_dma);
699 bp->tx_ring = NULL;
700 }
701 if (bp->rx_buffers) {
702 dma_free_coherent(&bp->pdev->dev,
703 RX_RING_SIZE * RX_BUFFER_SIZE,
704 bp->rx_buffers, bp->rx_buffers_dma);
705 bp->rx_buffers = NULL;
706 }
707}
708
/*
 * Allocate the TX bookkeeping array plus the DMA-coherent RX ring,
 * TX ring and RX data buffers.  On any failure everything already
 * allocated is released via macb_free_consistent().
 * Returns 0 or -ENOMEM.
 */
static int macb_alloc_consistent(struct macb *bp)
{
	int size;

	/* Per-descriptor skb/mapping bookkeeping (CPU memory only) */
	size = TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_skb = kmalloc(size, GFP_KERNEL);
	if (!bp->tx_skb)
		goto out_err;

	size = RX_RING_BYTES;
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	dev_dbg(&bp->pdev->dev,
		"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	size = TX_RING_BYTES;
	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->tx_ring_dma, GFP_KERNEL);
	if (!bp->tx_ring)
		goto out_err;
	dev_dbg(&bp->pdev->dev,
		"Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
		size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);

	/* One contiguous slab of RX_RING_SIZE fixed-size data buffers */
	size = RX_RING_SIZE * RX_BUFFER_SIZE;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;
	dev_dbg(&bp->pdev->dev,
		"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}
751
752static void macb_init_rings(struct macb *bp)
753{
754 int i;
755 dma_addr_t addr;
756
757 addr = bp->rx_buffers_dma;
758 for (i = 0; i < RX_RING_SIZE; i++) {
759 bp->rx_ring[i].addr = addr;
760 bp->rx_ring[i].ctrl = 0;
761 addr += RX_BUFFER_SIZE;
762 }
763 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
764
765 for (i = 0; i < TX_RING_SIZE; i++) {
766 bp->tx_ring[i].addr = 0;
767 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
768 }
769 bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
770
771 bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
772}
773
/*
 * Put the controller into a quiescent, known state: stop RX/TX, clear
 * statistics, ack all status bits and mask all interrupts.
 */
static void macb_reset_hw(struct macb *bp)
{
	/* Make sure any pending descriptor writes reach memory before we
	 * touch the control registers */
	wmb();

	/* Disable RX and TX (and the management port along with
	 * everything else in NCR) */
	macb_writel(bp, NCR, 0);

	/* Clear the statistics counters */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags (write-one-to-clear) */
	macb_writel(bp, TSR, ~0UL);
	macb_writel(bp, RSR, ~0UL);

	/* Disable all interrupts; the ISR read discards anything pending */
	macb_writel(bp, IDR, ~0UL);
	macb_readl(bp, ISR);
}
796
/*
 * Bring the controller up: reset, program the MAC address, configure
 * NCFGR (preserving only the MDC clock divider), point the DMA engines
 * at the rings, enable RX/TX and unmask the interrupts we handle.
 */
static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	__macb_set_hwaddr(bp);

	/* Keep the clock-divider field set up by macb_probe() */
	config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
	config |= MACB_BIT(PAE);	/* honour pause frames */
	config |= MACB_BIT(DRFCS);	/* strip the FCS on receive */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* copy all frames */
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* no broadcast */
	macb_writel(bp, NCFGR, config);

	/* Tell the DMA engines where the descriptor rings live */
	macb_writel(bp, RBQP, bp->rx_ring_dma);
	macb_writel(bp, TBQP, bp->tx_ring_dma);

	/* Enable RX, TX and the management port */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));

	/* Enable the interrupt causes we actually handle */
	macb_writel(bp, IER, (MACB_BIT(RCOMP)
			      | MACB_BIT(RXUBR)
			      | MACB_BIT(ISR_TUND)
			      | MACB_BIT(ISR_RLE)
			      | MACB_BIT(TXERR)
			      | MACB_BIT(TCOMP)
			      | MACB_BIT(ISR_ROVR)
			      | MACB_BIT(HRESP)));
}
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866static inline int hash_bit_value(int bitnr, __u8 *addr)
867{
868 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
869 return 1;
870 return 0;
871}
872
873
874
875
876static int hash_get_index(__u8 *addr)
877{
878 int i, j, bitval;
879 int hash_index = 0;
880
881 for (j = 0; j < 6; j++) {
882 for (i = 0, bitval = 0; i < 8; i++)
883 bitval ^= hash_bit_value(i*6 + j, addr);
884
885 hash_index |= (bitval << j);
886 }
887
888 return hash_index;
889}
890
891
892
893
/*
 * Program the 64-bit multicast hash filter (HRB/HRT) from the device's
 * multicast list: one hash bit per hash_get_index() value.
 */
static void macb_sethashtable(struct net_device *dev)
{
	struct dev_mc_list *curr;
	unsigned long mc_filter[2];
	unsigned int i, bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = mc_filter[1] = 0;

	curr = dev->mc_list;
	for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
		/* defensive: list shorter than mc_count claims */
		if (!curr) break;

		bitnr = hash_get_index(curr->dmi_addr);
		/* bit 5 selects HRB vs HRT, bits 4:0 the bit within */
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_writel(bp, HRB, mc_filter[0]);
	macb_writel(bp, HRT, mc_filter[1]);
}
914
915
916
917
918static void macb_set_rx_mode(struct net_device *dev)
919{
920 unsigned long cfg;
921 struct macb *bp = netdev_priv(dev);
922
923 cfg = macb_readl(bp, NCFGR);
924
925 if (dev->flags & IFF_PROMISC)
926
927 cfg |= MACB_BIT(CAF);
928 else if (dev->flags & (~IFF_PROMISC))
929
930 cfg &= ~MACB_BIT(CAF);
931
932 if (dev->flags & IFF_ALLMULTI) {
933
934 macb_writel(bp, HRB, -1);
935 macb_writel(bp, HRT, -1);
936 cfg |= MACB_BIT(NCFGR_MTI);
937 } else if (dev->mc_count > 0) {
938
939 macb_sethashtable(dev);
940 cfg |= MACB_BIT(NCFGR_MTI);
941 } else if (dev->flags & (~IFF_ALLMULTI)) {
942
943 macb_writel(bp, HRB, 0);
944 macb_writel(bp, HRT, 0);
945 cfg &= ~MACB_BIT(NCFGR_MTI);
946 }
947
948 macb_writel(bp, NCFGR, cfg);
949}
950
/*
 * ndo_open: allocate DMA resources, initialise rings and hardware,
 * start the PHY and the transmit queue.
 */
static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	int err;

	dev_dbg(&bp->pdev->dev, "open\n");

	/* if the phy is not yet registered, retry later */
	if (!bp->phy_dev)
		return -EAGAIN;

	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	err = macb_alloc_consistent(bp);
	if (err) {
		printk(KERN_ERR
		       "%s: Unable to allocate DMA memory (error %d)\n",
		       dev->name, err);
		return err;
	}

	napi_enable(&bp->napi);

	macb_init_rings(bp);
	macb_init_hw(bp);

	/* schedule a link state check */
	phy_start(bp->phy_dev);

	netif_start_queue(dev);

	return 0;
}
985
/*
 * ndo_stop: quiesce the queue, NAPI and PHY, reset the hardware under
 * bp->lock, then release the DMA resources.
 */
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}
1006
/*
 * ndo_get_stats: refresh the hardware counters, then derive the
 * aggregate net_device_stats error fields from them.
 */
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->sqe_test_errors +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;

	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;

	/* rx/tx_packets and rx/tx_bytes are kept in bp->stats directly */
	return nstat;
}
1049
1050static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1051{
1052 struct macb *bp = netdev_priv(dev);
1053 struct phy_device *phydev = bp->phy_dev;
1054
1055 if (!phydev)
1056 return -ENODEV;
1057
1058 return phy_ethtool_gset(phydev, cmd);
1059}
1060
1061static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1062{
1063 struct macb *bp = netdev_priv(dev);
1064 struct phy_device *phydev = bp->phy_dev;
1065
1066 if (!phydev)
1067 return -ENODEV;
1068
1069 return phy_ethtool_sset(phydev, cmd);
1070}
1071
1072static void macb_get_drvinfo(struct net_device *dev,
1073 struct ethtool_drvinfo *info)
1074{
1075 struct macb *bp = netdev_priv(dev);
1076
1077 strcpy(info->driver, bp->pdev->dev.driver->name);
1078 strcpy(info->version, "$Revision: 1.14 $");
1079 strcpy(info->bus_info, dev_name(&bp->pdev->dev));
1080}
1081
/* ethtool operations supported by this driver */
static const struct ethtool_ops macb_ethtool_ops = {
	.get_settings		= macb_get_settings,
	.set_settings		= macb_set_settings,
	.get_drvinfo		= macb_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};
1088
/*
 * ndo_do_ioctl: forward MII ioctls to phylib.  Requires the interface
 * to be up and a PHY to be attached.
 */
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
1102
/* net_device operations; generic eth_* helpers cover MTU/MAC handling */
static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_multicast_list	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
};
1117
/*
 * Platform probe: map registers, set up clocks, request the IRQ,
 * derive the MDC clock divider from the peripheral clock, configure
 * MII/RMII mode, register the net_device and bring up the MDIO bus.
 * Resources are unwound in reverse order on the goto-error ladder.
 */
static int __init macb_probe(struct platform_device *pdev)
{
	struct eth_platform_data *pdata;
	struct resource *regs;
	struct net_device *dev;
	struct macb *bp;
	struct phy_device *phydev;
	unsigned long pclk_hz;
	u32 config;
	int err = -ENXIO;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "no mmio resource defined\n");
		goto err_out;
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
		goto err_out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* no hardware offloads supported; kept as a placeholder */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	spin_lock_init(&bp->lock);

	/* AT91 exposes a single "macb_clk"; other SoCs split pclk/hclk */
#if defined(CONFIG_ARCH_AT91)
	bp->pclk = clk_get(&pdev->dev, "macb_clk");
	if (IS_ERR(bp->pclk)) {
		dev_err(&pdev->dev, "failed to get macb_clk\n");
		goto err_out_free_dev;
	}
	clk_enable(bp->pclk);
#else
	bp->pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(bp->pclk)) {
		dev_err(&pdev->dev, "failed to get pclk\n");
		goto err_out_free_dev;
	}
	bp->hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(bp->hclk)) {
		dev_err(&pdev->dev, "failed to get hclk\n");
		goto err_out_put_pclk;
	}

	clk_enable(bp->pclk);
	clk_enable(bp->hclk);
#endif

	bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
	if (!bp->regs) {
		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_disable_clocks;
	}

	/* NOTE(review): dev->name is still the "eth%d" template here;
	 * the IRQ shows up under that name until register_netdev runs */
	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, macb_interrupt, IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Unable to request IRQ %d (error %d)\n",
		       dev->name, dev->irq, err);
		goto err_out_iounmap;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);
	dev->ethtool_ops = &macb_ethtool_ops;

	dev->base_addr = regs->start;

	/* Pick the MDC clock divider so MDC stays within spec for the
	 * given peripheral clock rate */
	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);
	macb_writel(bp, NCFGR, config);

	macb_get_hwaddr(bp);
	pdata = pdev->dev.platform_data;

	/* Select MII vs RMII pin usage; the register layout differs
	 * between AT91 and other (AVR32) implementations */
	if (pdata && pdata->is_rmii)
#if defined(CONFIG_ARCH_AT91)
		macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
#else
		macb_writel(bp, USRIO, 0);
#endif
	else
#if defined(CONFIG_ARCH_AT91)
		macb_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
		macb_writel(bp, USRIO, MACB_BIT(MII));
#endif

	bp->tx_pending = DEF_TX_RING_PENDING;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	if (macb_mii_init(bp) != 0) {
		goto err_out_unregister_netdev;
	}

	platform_set_drvdata(pdev, dev);

	printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n",
	       dev->name, dev->base_addr, dev->irq, dev->dev_addr);

	phydev = bp->phy_dev;
	printk(KERN_INFO "%s: attached PHY driver [%s] "
		"(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
		phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_iounmap:
	iounmap(bp->regs);
err_out_disable_clocks:
#ifndef CONFIG_ARCH_AT91
	clk_disable(bp->hclk);
	clk_put(bp->hclk);
#endif
	clk_disable(bp->pclk);
#ifndef CONFIG_ARCH_AT91
err_out_put_pclk:
#endif
	clk_put(bp->pclk);
err_out_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}
1273
/*
 * Platform remove: tear everything down in the reverse order of probe —
 * PHY, MDIO bus, netdev, IRQ, register mapping, clocks, net_device.
 */
static int __exit macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
#ifndef CONFIG_ARCH_AT91
		clk_disable(bp->hclk);
		clk_put(bp->hclk);
#endif
		clk_disable(bp->pclk);
		clk_put(bp->pclk);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}
1303
1304#ifdef CONFIG_PM
/*
 * PM suspend: detach the interface and gate the clocks.  Ring and
 * register state is rebuilt on the next open, not preserved here.
 */
static int macb_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_device_detach(netdev);

#ifndef CONFIG_ARCH_AT91
	clk_disable(bp->hclk);
#endif
	clk_disable(bp->pclk);

	return 0;
}
1319
/* PM resume: ungate the clocks and re-attach the interface. */
static int macb_resume(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	clk_enable(bp->pclk);
#ifndef CONFIG_ARCH_AT91
	clk_enable(bp->hclk);
#endif

	netif_device_attach(netdev);

	return 0;
}
1334#else
1335#define macb_suspend NULL
1336#define macb_resume NULL
1337#endif
1338
/* No .probe here: macb_probe is passed to platform_driver_probe(),
 * which allows it to live in __init memory. */
static struct platform_driver macb_driver = {
	.remove		= __exit_p(macb_remove),
	.suspend	= macb_suspend,
	.resume		= macb_resume,
	.driver		= {
		.name		= "macb",
		.owner	= THIS_MODULE,
	},
};
1348
/* Module init: register the driver and probe any already-present device. */
static int __init macb_init(void)
{
	return platform_driver_probe(&macb_driver, macb_probe);
}
1353
/* Module exit: unregister the platform driver (triggers macb_remove). */
static void __exit macb_exit(void)
{
	platform_driver_unregister(&macb_driver);
}
1358
1359module_init(macb_init);
1360module_exit(macb_exit);
1361
1362MODULE_LICENSE("GPL");
1363MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
1364MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
1365MODULE_ALIAS("platform:macb");
1366