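/*
 * au1000_eth: Au1xxx on-chip Ethernet driver.
 *
 * Author: Pete Popov <ppopov@embeddedalley.com>
 */
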
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/processor.h>

#include <au1000.h>
#include <au1xxx_eth.h>
#include <prom.h>

#include "au1000_eth.h"

#ifdef AU1000_ETH_DEBUG
static int au1000_debug = 5;
#else
static int au1000_debug = 3;
#endif

#define AU1000_DEF_MSG_ENABLE	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK)

#define DRV_NAME	"au1000_eth"
#define DRV_AUTHOR	"Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC	"Au1xxx on-chip Ethernet driver"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

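/*
 * MAC control and status register offsets and their bit definitions.
 */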
#define MAC_CONTROL 0x0
# define MAC_RX_ENABLE (1 << 2)
# define MAC_TX_ENABLE (1 << 3)
# define MAC_DEF_CHECK (1 << 5)
# define MAC_SET_BL(X) (((X) & 0x3) << 6)
# define MAC_AUTO_PAD (1 << 8)
# define MAC_DISABLE_RETRY (1 << 10)
# define MAC_DISABLE_BCAST (1 << 11)
# define MAC_LATE_COL (1 << 12)
# define MAC_HASH_MODE (1 << 13)
# define MAC_HASH_ONLY (1 << 15)
# define MAC_PASS_ALL (1 << 16)
# define MAC_INVERSE_FILTER (1 << 17)
# define MAC_PROMISCUOUS (1 << 18)
# define MAC_PASS_ALL_MULTI (1 << 19)
# define MAC_FULL_DUPLEX (1 << 20)
# define MAC_NORMAL_MODE 0
# define MAC_INT_LOOPBACK (1 << 21)
# define MAC_EXT_LOOPBACK (1 << 22)
# define MAC_DISABLE_RX_OWN (1 << 23)
# define MAC_BIG_ENDIAN (1 << 30)
# define MAC_RX_ALL (1 << 31)
#define MAC_ADDRESS_HIGH 0x4
#define MAC_ADDRESS_LOW 0x8
#define MAC_MCAST_HIGH 0xC
#define MAC_MCAST_LOW 0x10
#define MAC_MII_CNTRL 0x14
# define MAC_MII_BUSY (1 << 0)
# define MAC_MII_READ 0
# define MAC_MII_WRITE (1 << 1)
# define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6)
# define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11)
#define MAC_MII_DATA 0x18
#define MAC_FLOW_CNTRL 0x1C
# define MAC_FLOW_CNTRL_BUSY (1 << 0)
# define MAC_FLOW_CNTRL_ENABLE (1 << 1)
# define MAC_PASS_CONTROL (1 << 2)
# define MAC_SET_PAUSE(X) (((X) & 0xffff) << 16)
#define MAC_VLAN1_TAG 0x20
#define MAC_VLAN2_TAG 0x24

/* MAC enable register bits (written to aup->enable) */
# define MAC_EN_CLOCK_ENABLE (1 << 0)
# define MAC_EN_RESET0 (1 << 1)
# define MAC_EN_TOSS (0 << 2)
# define MAC_EN_CACHEABLE (1 << 3)
# define MAC_EN_RESET1 (1 << 4)
# define MAC_EN_RESET2 (1 << 5)
# define MAC_DMA_RESET (1 << 6)

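/* Transmit DMA buffer descriptors: status bits, address and length registers */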
#define MAC_TX_BUFF0_STATUS 0x0
# define TX_FRAME_ABORTED (1 << 0)
# define TX_JAB_TIMEOUT (1 << 1)
# define TX_NO_CARRIER (1 << 2)
# define TX_LOSS_CARRIER (1 << 3)
# define TX_EXC_DEF (1 << 4)
# define TX_LATE_COLL_ABORT (1 << 5)
# define TX_EXC_COLL (1 << 6)
# define TX_UNDERRUN (1 << 7)
# define TX_DEFERRED (1 << 8)
# define TX_LATE_COLL (1 << 9)
# define TX_COLL_CNT_MASK (0xF << 10)
# define TX_PKT_RETRY (1 << 31)
#define MAC_TX_BUFF0_ADDR 0x4
# define TX_DMA_ENABLE (1 << 0)
# define TX_T_DONE (1 << 1)
# define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
#define MAC_TX_BUFF0_LEN 0x8
#define MAC_TX_BUFF1_STATUS 0x10
#define MAC_TX_BUFF1_ADDR 0x14
#define MAC_TX_BUFF1_LEN 0x18
#define MAC_TX_BUFF2_STATUS 0x20
#define MAC_TX_BUFF2_ADDR 0x24
#define MAC_TX_BUFF2_LEN 0x28
#define MAC_TX_BUFF3_STATUS 0x30
#define MAC_TX_BUFF3_ADDR 0x34
#define MAC_TX_BUFF3_LEN 0x38

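/* Receive DMA buffer descriptors: status bits and address registers */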
#define MAC_RX_BUFF0_STATUS 0x0
# define RX_FRAME_LEN_MASK 0x3fff
# define RX_WDOG_TIMER (1 << 14)
# define RX_RUNT (1 << 15)
# define RX_OVERLEN (1 << 16)
# define RX_COLL (1 << 17)
# define RX_ETHER (1 << 18)
# define RX_MII_ERROR (1 << 19)
# define RX_DRIBBLING (1 << 20)
# define RX_CRC_ERROR (1 << 21)
# define RX_VLAN1 (1 << 22)
# define RX_VLAN2 (1 << 23)
# define RX_LEN_ERROR (1 << 24)
# define RX_CNTRL_FRAME (1 << 25)
# define RX_U_CNTRL_FRAME (1 << 26)
# define RX_MCAST_FRAME (1 << 27)
# define RX_BCAST_FRAME (1 << 28)
# define RX_FILTER_FAIL (1 << 29)
# define RX_PACKET_FILTER (1 << 30)
# define RX_MISSED_FRAME (1 << 31)

# define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN | \
		   RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
		   RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
#define MAC_RX_BUFF0_ADDR 0x4
# define RX_DMA_ENABLE (1 << 0)
# define RX_T_DONE (1 << 1)
# define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
# define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0)
#define MAC_RX_BUFF1_STATUS 0x10
#define MAC_RX_BUFF1_ADDR 0x14
#define MAC_RX_BUFF2_STATUS 0x20
#define MAC_RX_BUFF2_ADDR 0x24
#define MAC_RX_BUFF3_STATUS 0x30
#define MAC_RX_BUFF3_ADDR 0x34

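/*
 * Enable the MAC: turn on its clock and, on first enable (or when
 * force_reset is set), run the reset sequence for the MAC block.
 */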
static void au1000_enable_mac(struct net_device *dev, int force_reset)
{
	unsigned long flags;
	struct au1000_private *aup = netdev_priv(dev);

	spin_lock_irqsave(&aup->lock, flags);

	if (force_reset || (!aup->mac_enabled)) {
		writel(MAC_EN_CLOCK_ENABLE, aup->enable);
		wmb();
		mdelay(2);
		writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
				| MAC_EN_CLOCK_ENABLE), aup->enable);
		wmb();
		mdelay(2);

		aup->mac_enabled = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);
}

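/*
 * MII/MDIO access helpers. These wait for the MII interface to become
 * idle before issuing a transaction.
 */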
static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 *const mii_control_reg = &aup->mac->mii_control;
	u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "read_MII busy timeout!!\n");
			return -1;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;

	writel(mii_control, mii_control_reg);

	timedout = 20;
	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "mdio_read busy timeout!!\n");
			return -1;
		}
	}
	return readl(mii_data_reg);
}

static void au1000_mdio_write(struct net_device *dev, int phy_addr,
			      int reg, u16 value)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 *const mii_control_reg = &aup->mac->mii_control;
	u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "mdio_write busy timeout!!\n");
			return;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;

	writel(value, mii_data_reg);
	writel(mii_control, mii_control_reg);
}

static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct net_device *const dev = bus->priv;

	/* make sure the MAC associated with this mii_bus is enabled */
	au1000_enable_mac(dev, 0);

	return au1000_mdio_read(dev, phy_addr, regnum);
}

static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
				u16 value)
{
	struct net_device *const dev = bus->priv;

	/* make sure the MAC associated with this mii_bus is enabled */
	au1000_enable_mac(dev, 0);

	au1000_mdio_write(dev, phy_addr, regnum, value);
	return 0;
}

static int au1000_mdiobus_reset(struct mii_bus *bus)
{
	struct net_device *const dev = bus->priv;

	/* make sure the MAC associated with this mii_bus is enabled */
	au1000_enable_mac(dev, 0);

	return 0;
}

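/* Stop the MAC: clear the receive and transmit enable bits. */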
static void au1000_hard_stop(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, drv, dev, "hard stop\n");

	reg = readl(&aup->mac->control);
	reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
	writel(reg, &aup->mac->control);
	wmb();
	mdelay(10);
}

static void au1000_enable_rx_tx(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, hw, dev, "enable_rx_tx\n");

	reg = readl(&aup->mac->control);
	reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
	writel(reg, &aup->mac->control);
	wmb();
	mdelay(10);
}

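/*
 * phylib link-change callback: mirror the PHY's speed/duplex/link state
 * into the MAC control register and log state changes.
 */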
static void
au1000_adjust_link(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	u32 reg;

	int status_change = 0;

	BUG_ON(!phydev);

	spin_lock_irqsave(&aup->lock, flags);

	if (phydev->link && (aup->old_speed != phydev->speed)) {
		/* speed changed */
		switch (phydev->speed) {
		case SPEED_10:
		case SPEED_100:
			break;
		default:
			netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
				    phydev->speed);
			break;
		}

		aup->old_speed = phydev->speed;

		status_change = 1;
	}

	if (phydev->link && (aup->old_duplex != phydev->duplex)) {
		/* duplex mode changed: stop the MAC, flip the duplex
		 * bits in the control register, then start it again
		 */
		au1000_hard_stop(dev);

		reg = readl(&aup->mac->control);
		if (DUPLEX_FULL == phydev->duplex) {
			reg |= MAC_FULL_DUPLEX;
			reg &= ~MAC_DISABLE_RX_OWN;
		} else {
			reg &= ~MAC_FULL_DUPLEX;
			reg |= MAC_DISABLE_RX_OWN;
		}
		writel(reg, &aup->mac->control);
		wmb();
		mdelay(1);

		au1000_enable_rx_tx(dev);
		aup->old_duplex = phydev->duplex;

		status_change = 1;
	}

	if (phydev->link != aup->old_link) {
		/* link state changed */
		if (!phydev->link) {
			/* link went down */
			aup->old_speed = 0;
			aup->old_duplex = -1;
		}

		aup->old_link = phydev->link;
		status_change = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);

	if (status_change) {
		if (phydev->link)
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			netdev_info(dev, "link down\n");
	}
}

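/*
 * Find and attach the PHY for this MAC. With a static platform
 * configuration the PHY address comes from platform data; otherwise the
 * MII bus is scanned for a usable PHY.
 */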
static int au1000_mii_probe(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;

	if (aup->phy_static_config) {
		BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);

		if (aup->phy_addr)
			phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
		else
			netdev_info(dev, "using PHY-less setup\n");
		return 0;
	}

	/* find the first (lowest address) PHY on the MII bus */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
		if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
			phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
			if (!aup->phy_search_highest_addr)
				/* break out with the first one found */
				break;
		}

	if (aup->phy1_search_mac0) {
		/* try harder to find a PHY */
		if (!phydev && (aup->mac_id == 1)) {
			/* no PHY found on MAC1's bus, maybe it is wired
			 * to MAC0's MII bus instead
			 */
			dev_info(&dev->dev, ": no PHY found on MAC1, "
				 "let's see if it's attached to MAC0...\n");

			/* find the first (lowest address) non-attached
			 * PHY on the MAC0 MII bus
			 */
			for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
				struct phy_device *const tmp_phydev =
					mdiobus_get_phy(aup->mii_bus,
							phy_addr);

				if (aup->mac_id == 1)
					break;

				/* no PHY here... */
				if (!tmp_phydev)
					continue;

				/* already attached to another netdev */
				if (tmp_phydev->attached_dev)
					continue;

				phydev = tmp_phydev;
				break; /* found it */
			}
		}
	}

	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -1;
	}

	/* we should now have a usable, unattached phydev */
	BUG_ON(phydev->attached_dev);

	phydev = phy_connect(dev, phydev_name(phydev),
			     &au1000_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phy_set_max_speed(phydev, SPEED_100);

	aup->old_link = 0;
	aup->old_speed = 0;
	aup->old_duplex = -1;

	phy_attached_info(phydev);

	return 0;
}

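/*
 * Buffer management: each DMA descriptor gets a fixed-size data buffer
 * taken from a simple free list of struct db_dest entries.
 */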
static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
{
	struct db_dest *pDB;

	pDB = aup->pDBfree;

	if (pDB)
		aup->pDBfree = pDB->pnext;

	return pDB;
}

void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
	struct db_dest *pDBfree = aup->pDBfree;

	if (pDBfree)
		pDBfree->pnext = pDB;
	aup->pDBfree = pDB;
}

static void au1000_reset_mac_unlocked(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	int i;

	au1000_hard_stop(dev);

	writel(MAC_EN_CLOCK_ENABLE, aup->enable);
	wmb();
	mdelay(2);
	writel(0, aup->enable);
	wmb();
	mdelay(2);

	aup->tx_full = 0;
	for (i = 0; i < NUM_RX_DMA; i++) {
		/* clear the DMA enable/done control bits */
		aup->rx_dma_ring[i]->buff_stat &= ~0xf;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		/* clear the DMA enable/done control bits */
		aup->tx_dma_ring[i]->buff_stat &= ~0xf;
	}

	aup->mac_enabled = 0;
}

static void au1000_reset_mac(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	unsigned long flags;

	netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
		  (unsigned)aup);

	spin_lock_irqsave(&aup->lock, flags);

	au1000_reset_mac_unlocked(dev);

	spin_unlock_irqrestore(&aup->lock, flags);
}

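/*
 * Point the software descriptor ring entries at the hardware DMA buffer
 * registers inside the MACDMA block (TX at the base, RX at offset 0x100).
 */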
static void
au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
{
	int i;

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i] = (struct rx_dma *)
			(tx_base + 0x100 + sizeof(struct rx_dma) * i);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		aup->tx_dma_ring[i] = (struct tx_dma *)
			(tx_base + sizeof(struct tx_dma) * i);
	}
}

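/* ethtool operations */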
static void
au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct au1000_private *aup = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
		 aup->mac_id);
}

static void au1000_set_msglevel(struct net_device *dev, u32 value)
{
	struct au1000_private *aup = netdev_priv(dev);

	aup->msg_enable = value;
}

static u32 au1000_get_msglevel(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);

	return aup->msg_enable;
}

static const struct ethtool_ops au1000_ethtool_ops = {
	.get_drvinfo = au1000_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = au1000_get_msglevel,
	.set_msglevel = au1000_set_msglevel,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

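/*
 * Initialize the MAC: program the station address, hand the receive
 * descriptors back to the DMA engine, and set up the control register
 * (duplex, endianness) according to the current PHY state.
 */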
static int au1000_init(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	unsigned long flags;
	int i;
	u32 control;

	netif_dbg(aup, hw, dev, "au1000_init\n");

	/* bring the device out of reset */
	au1000_enable_mac(dev, 1);

	spin_lock_irqsave(&aup->lock, flags);

	writel(0, &aup->mac->control);
	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
	aup->tx_tail = aup->tx_head;
	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;

	/* program the station MAC address */
	writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
	       &aup->mac->mac_addr_high);
	writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
	       dev->dev_addr[1]<<8 | dev->dev_addr[0],
	       &aup->mac->mac_addr_low);

	/* hand the receive descriptors to the DMA engine */
	for (i = 0; i < NUM_RX_DMA; i++)
		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;

	wmb();

	control = MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	control |= MAC_BIG_ENDIAN;
#endif
	if (dev->phydev) {
		if (dev->phydev->link && (DUPLEX_FULL == dev->phydev->duplex))
			control |= MAC_FULL_DUPLEX;
		else
			control |= MAC_DISABLE_RX_OWN;
	} else { /* PHY-less operation, assume full duplex */
		control |= MAC_FULL_DUPLEX;
	}

	writel(control, &aup->mac->control);
	writel(0x8100, &aup->mac->vlan1_tag); /* 802.1Q VLAN ethertype */
	wmb();

	spin_unlock_irqrestore(&aup->lock, flags);
	return 0;
}

static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
{
	struct net_device_stats *ps = &dev->stats;

	ps->rx_packets++;
	if (status & RX_MCAST_FRAME)
		ps->multicast++;

	if (status & RX_ERROR) {
		ps->rx_errors++;
		if (status & RX_MISSED_FRAME)
			ps->rx_missed_errors++;
		if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
			ps->rx_length_errors++;
		if (status & RX_CRC_ERROR)
			ps->rx_crc_errors++;
		if (status & RX_COLL)
			ps->collisions++;
	} else
		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}

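/*
 * Receive path: for every descriptor the DMA engine has completed, copy
 * the frame into a freshly allocated skb and pass it up the stack, then
 * hand the buffer back to the hardware.
 */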
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_dma *prxd;
	u32 buff_stat, status;
	struct db_dest *pDB;
	u32 frmlen;

	netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		au1000_update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {
			/* good frame */
			frmlen = (status & RX_FRAME_LEN_MASK);
			frmlen -= 4; /* strip the FCS */
			skb = netdev_alloc_skb(dev, frmlen + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 2); /* align the IP header */
			skb_copy_to_linear_data(skb,
						(unsigned char *)pDB->vaddr, frmlen);
			skb_put(skb, frmlen);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb); /* pass the packet up the stack */
		} else {
			if (au1000_debug > 4) {
				pr_err("rx_error(s):");
				if (status & RX_MISSED_FRAME)
					pr_cont(" miss");
				if (status & RX_WDOG_TIMER)
					pr_cont(" wdog");
				if (status & RX_RUNT)
					pr_cont(" runt");
				if (status & RX_OVERLEN)
					pr_cont(" overlen");
				if (status & RX_COLL)
					pr_cont(" coll");
				if (status & RX_MII_ERROR)
					pr_cont(" mii error");
				if (status & RX_CRC_ERROR)
					pr_cont(" crc error");
				if (status & RX_LEN_ERROR)
					pr_cont(" len error");
				if (status & RX_U_CNTRL_FRAME)
					pr_cont(" u control frame");
				pr_cont("\n");
			}
		}
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
		wmb();

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
	}
	return 0;
}

static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
	struct net_device_stats *ps = &dev->stats;

	if (status & TX_FRAME_ABORTED) {
		if (!dev->phydev || (DUPLEX_FULL == dev->phydev->duplex)) {
			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
				/* in full duplex, only jabber timeouts and
				 * underruns are counted as errors
				 */
				ps->tx_errors++;
				ps->tx_aborted_errors++;
			}
		} else {
			ps->tx_errors++;
			ps->tx_aborted_errors++;
			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
				ps->tx_carrier_errors++;
		}
	}
}

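/*
 * Reclaim transmit descriptors the MAC has finished with: record their
 * status, clear the done bit, and wake the queue if it had been stopped
 * because the ring was full.
 */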
static void au1000_tx_ack(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct tx_dma *ptxd;

	ptxd = aup->tx_dma_ring[aup->tx_tail];

	while (ptxd->buff_stat & TX_T_DONE) {
		au1000_update_tx_stats(dev, ptxd->status);
		ptxd->buff_stat &= ~TX_T_DONE;
		ptxd->len = 0;
		wmb();

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
		ptxd = aup->tx_dma_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}
}

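/* MAC interrupt handler: service the receive ring, then reclaim
 * completed transmit descriptors.
 */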
static irqreturn_t au1000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	au1000_rx(dev);
	au1000_tx_ack(dev);
	return IRQ_RETVAL(1);
}

static int au1000_open(struct net_device *dev)
{
	int retval;
	struct au1000_private *aup = netdev_priv(dev);

	netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);

	retval = request_irq(dev->irq, au1000_interrupt, 0,
			     dev->name, dev);
	if (retval) {
		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
		return retval;
	}

	retval = au1000_init(dev);
	if (retval) {
		netdev_err(dev, "error in au1000_init\n");
		free_irq(dev->irq, dev);
		return retval;
	}

	if (dev->phydev)
		phy_start(dev->phydev);

	netif_start_queue(dev);

	netif_dbg(aup, drv, dev, "open: Initialization done.\n");

	return 0;
}

static int au1000_close(struct net_device *dev)
{
	unsigned long flags;
	struct au1000_private *const aup = netdev_priv(dev);

	netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);

	if (dev->phydev)
		phy_stop(dev->phydev);

	spin_lock_irqsave(&aup->lock, flags);

	au1000_reset_mac_unlocked(dev);

	netif_stop_queue(dev);

	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&aup->lock, flags);

	return 0;
}

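/*
 * Transmit a packet: copy the skb into the next free DMA buffer, pad it
 * to the minimum frame size if needed, and hand the descriptor to the
 * MAC. Returns NETDEV_TX_BUSY if the ring is full.
 */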
static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct net_device_stats *ps = &dev->stats;
	struct tx_dma *ptxd;
	u32 buff_stat;
	struct db_dest *pDB;
	int i;

	netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
		  (unsigned)aup, skb->len,
		  skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* the descriptor is still owned by the DMA engine:
		 * the ring is full
		 */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	} else if (buff_stat & TX_T_DONE) {
		au1000_update_tx_stats(dev, ptxd->status);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
	if (skb->len < ETH_ZLEN) {
		/* zero-pad short frames to the minimum Ethernet length */
		for (i = skb->len; i < ETH_ZLEN; i++)
			((char *)pDB->vaddr)[i] = 0;

		ptxd->len = ETH_ZLEN;
	} else
		ptxd->len = skb->len;

	ps->tx_packets++;
	ps->tx_bytes += ptxd->len;

	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	wmb();
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	return NETDEV_TX_OK;
}

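/*
 * Transmit watchdog: the queue has stalled, so reset and reinitialize
 * the MAC and restart the queue.
 */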
static void au1000_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
	au1000_reset_mac(dev);
	au1000_init(dev);
	netif_trans_update(dev);
	netif_wake_queue(dev);
}

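/*
 * Program the receive filter: promiscuous mode, pass-all-multicast, or a
 * CRC-based multicast hash built from the device's multicast list.
 */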
static void au1000_multicast_list(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
	reg = readl(&aup->mac->control);
	if (dev->flags & IFF_PROMISC) {
		reg |= MAC_PROMISCUOUS;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
		reg |= MAC_PASS_ALL_MULTI;
		reg &= ~MAC_PROMISCUOUS;
		netdev_info(dev, "Pass all multicast\n");
	} else {
		struct netdev_hw_addr *ha;
		u32 mc_filter[2];

		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev)
			set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
				(long *)mc_filter);
		writel(mc_filter[1], &aup->mac->multi_hash_high);
		writel(mc_filter[0], &aup->mac->multi_hash_low);
		reg &= ~MAC_PROMISCUOUS;
		reg |= MAC_HASH_MODE;
	}
	writel(reg, &aup->mac->control);
}

static const struct net_device_ops au1000_netdev_ops = {
	.ndo_open = au1000_open,
	.ndo_stop = au1000_close,
	.ndo_start_xmit = au1000_tx,
	.ndo_set_rx_mode = au1000_multicast_list,
	.ndo_do_ioctl = phy_do_ioctl_running,
	.ndo_tx_timeout = au1000_tx_timeout,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

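/*
 * Probe: claim the register resources, map them, allocate the netdev and
 * DMA buffers, set up the MDIO bus and PHY, and register the device.
 */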
static int au1000_probe(struct platform_device *pdev)
{
	struct au1000_private *aup = NULL;
	struct au1000_eth_platform_data *pd;
	struct net_device *dev = NULL;
	struct db_dest *pDB, *pDBfree;
	int irq, i, err = 0;
	struct resource *base, *macen, *macdma;

	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!base) {
		dev_err(&pdev->dev, "failed to retrieve base register\n");
		err = -ENODEV;
		goto out;
	}

	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!macen) {
		dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
		err = -ENODEV;
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto out;
	}

	macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!macdma) {
		dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
		err = -ENODEV;
		goto out;
	}

	if (!request_mem_region(base->start, resource_size(base),
				pdev->name)) {
		dev_err(&pdev->dev, "failed to request memory region for base registers\n");
		err = -ENXIO;
		goto out;
	}

	if (!request_mem_region(macen->start, resource_size(macen),
				pdev->name)) {
		dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
		err = -ENXIO;
		goto err_request;
	}

	if (!request_mem_region(macdma->start, resource_size(macdma),
				pdev->name)) {
		dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
		err = -ENXIO;
		goto err_macdma;
	}

	dev = alloc_etherdev(sizeof(struct au1000_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	platform_set_drvdata(pdev, dev);
	aup = netdev_priv(dev);

	spin_lock_init(&aup->lock);
	aup->msg_enable = (au1000_debug < 4 ?
				AU1000_DEF_MSG_ENABLE : au1000_debug);

	/* allocate the data buffers for both the TX and RX rings */
	aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
					     (NUM_TX_BUFFS + NUM_RX_BUFFS),
					     &aup->dma_addr, 0);
	if (!aup->vaddr) {
		dev_err(&pdev->dev, "failed to allocate data buffers\n");
		err = -ENOMEM;
		goto err_vaddr;
	}

	/* aup->mac is the base address of the MAC's registers */
	aup->mac = (struct mac_reg *)
			ioremap(base->start, resource_size(base));
	if (!aup->mac) {
		dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
		err = -ENXIO;
		goto err_remap1;
	}

	/* aup->enable is the address of the MAC enable register */
	aup->enable = (u32 *)ioremap(macen->start,
				     resource_size(macen));
	if (!aup->enable) {
		dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
		err = -ENXIO;
		goto err_remap2;
	}
	aup->mac_id = pdev->id;

	aup->macdma = ioremap(macdma->start, resource_size(macdma));
	if (!aup->macdma) {
		dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
		err = -ENXIO;
		goto err_remap3;
	}

	au1000_setup_hw_rings(aup, aup->macdma);

	writel(0, aup->enable);
	aup->mac_enabled = 0;

	pd = dev_get_platdata(&pdev->dev);
	if (!pd) {
		dev_info(&pdev->dev, "no platform_data passed,"
			 " PHY search on MAC0\n");
		aup->phy1_search_mac0 = 1;
	} else {
		if (is_valid_ether_addr(pd->mac)) {
			memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
		} else {
			/* no valid MAC supplied, use a random one */
			eth_hw_addr_random(dev);
		}

		aup->phy_static_config = pd->phy_static_config;
		aup->phy_search_highest_addr = pd->phy_search_highest_addr;
		aup->phy1_search_mac0 = pd->phy1_search_mac0;
		aup->phy_addr = pd->phy_addr;
		aup->phy_busid = pd->phy_busid;
		aup->phy_irq = pd->phy_irq;
	}

	if (aup->phy_busid > 0) {
		dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
		err = -ENODEV;
		goto err_mdiobus_alloc;
	}

	aup->mii_bus = mdiobus_alloc();
	if (aup->mii_bus == NULL) {
		dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
		err = -ENOMEM;
		goto err_mdiobus_alloc;
	}

	aup->mii_bus->priv = dev;
	aup->mii_bus->read = au1000_mdiobus_read;
	aup->mii_bus->write = au1000_mdiobus_write;
	aup->mii_bus->reset = au1000_mdiobus_reset;
	aup->mii_bus->name = "au1000_eth_mii";
	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, aup->mac_id);

	/* if known, pass the PHY IRQ to the MDIO bus */
	if (aup->phy_static_config)
		if (aup->phy_irq && aup->phy_busid == aup->mac_id)
			aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;

	err = mdiobus_register(aup->mii_bus);
	if (err) {
		dev_err(&pdev->dev, "failed to register MDIO bus\n");
		goto err_mdiobus_reg;
	}

	err = au1000_mii_probe(dev);
	if (err != 0)
		goto err_out;

	pDBfree = NULL;

	/* build the free list of data buffer descriptors */
	pDB = aup->db;
	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	/* attach a data buffer to every RX and TX descriptor */
	err = -ENODEV;
	for (i = 0; i < NUM_RX_DMA; i++) {
		pDB = au1000_GetFreeDB(aup);
		if (!pDB)
			goto err_out;

		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->rx_db_inuse[i] = pDB;
	}

	for (i = 0; i < NUM_TX_DMA; i++) {
		pDB = au1000_GetFreeDB(aup);
		if (!pDB)
			goto err_out;

		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->tx_dma_ring[i]->len = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	dev->base_addr = base->start;
	dev->irq = irq;
	dev->netdev_ops = &au1000_netdev_ops;
	dev->ethtool_ops = &au1000_ethtool_ops;
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	/* put the MAC into reset so au1000_init() starts from a known state */
	au1000_reset_mac(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_err(dev, "Cannot register net device, aborting.\n");
		goto err_out;
	}

	netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
		    (unsigned long)base->start, irq);

	return 0;

err_out:
	if (aup->mii_bus != NULL)
		mdiobus_unregister(aup->mii_bus);

	/* at this point aup and the register mappings are valid, so the
	 * MAC can be reset and the descriptor buffers released
	 */
	au1000_reset_mac(dev);

	for (i = 0; i < NUM_RX_DMA; i++) {
		if (aup->rx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		if (aup->tx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
	}
err_mdiobus_reg:
	mdiobus_free(aup->mii_bus);
err_mdiobus_alloc:
	iounmap(aup->macdma);
err_remap3:
	iounmap(aup->enable);
err_remap2:
	iounmap(aup->mac);
err_remap1:
	dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
			  (void *)aup->vaddr, aup->dma_addr);
err_vaddr:
	free_netdev(dev);
err_alloc:
	release_mem_region(macdma->start, resource_size(macdma));
err_macdma:
	release_mem_region(macen->start, resource_size(macen));
err_request:
	release_mem_region(base->start, resource_size(base));
out:
	return err;
}

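/* Undo everything au1000_probe() set up. */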
static int au1000_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct au1000_private *aup = netdev_priv(dev);
	int i;
	struct resource *base, *macen;

	unregister_netdev(dev);
	mdiobus_unregister(aup->mii_bus);
	mdiobus_free(aup->mii_bus);

	for (i = 0; i < NUM_RX_DMA; i++)
		if (aup->rx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);

	for (i = 0; i < NUM_TX_DMA; i++)
		if (aup->tx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);

	dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
			  (void *)aup->vaddr, aup->dma_addr);

	iounmap(aup->macdma);
	iounmap(aup->mac);
	iounmap(aup->enable);

	base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	release_mem_region(base->start, resource_size(base));

	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(base->start, resource_size(base));

	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	release_mem_region(macen->start, resource_size(macen));

	free_netdev(dev);

	return 0;
}

static struct platform_driver au1000_eth_driver = {
	.probe = au1000_probe,
	.remove = au1000_remove,
	.driver = {
		.name = "au1000-eth",
	},
};

module_platform_driver(au1000_eth_driver);

MODULE_ALIAS("platform:au1000-eth");