/*
 * Alchemy Au1xxx on-chip Ethernet driver
 *
 * Author: Pete Popov <ppopov@embeddedalley.com>
 *
 * Licensed under the terms of the GNU General Public License (GPL).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/processor.h>

#include <au1000.h>
#include <au1xxx_eth.h>
#include <prom.h>

#include "au1000_eth.h"

#ifdef AU1000_ETH_DEBUG
static int au1000_debug = 5;
#else
static int au1000_debug = 3;
#endif

#define AU1000_DEF_MSG_ENABLE	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK)

#define DRV_NAME	"au1000_eth"
#define DRV_VERSION	"1.7"
#define DRV_AUTHOR	"Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC	"Au1xxx on-chip Ethernet driver"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* MAC control/status register offsets and bit definitions */
#define MAC_CONTROL		0x0
# define MAC_RX_ENABLE		(1 << 2)
# define MAC_TX_ENABLE		(1 << 3)
# define MAC_DEF_CHECK		(1 << 5)
# define MAC_SET_BL(X)		(((X) & 0x3) << 6)
# define MAC_AUTO_PAD		(1 << 8)
# define MAC_DISABLE_RETRY	(1 << 10)
# define MAC_DISABLE_BCAST	(1 << 11)
# define MAC_LATE_COL		(1 << 12)
# define MAC_HASH_MODE		(1 << 13)
# define MAC_HASH_ONLY		(1 << 15)
# define MAC_PASS_ALL		(1 << 16)
# define MAC_INVERSE_FILTER	(1 << 17)
# define MAC_PROMISCUOUS	(1 << 18)
# define MAC_PASS_ALL_MULTI	(1 << 19)
# define MAC_FULL_DUPLEX	(1 << 20)
# define MAC_NORMAL_MODE	0
# define MAC_INT_LOOPBACK	(1 << 21)
# define MAC_EXT_LOOPBACK	(1 << 22)
# define MAC_DISABLE_RX_OWN	(1 << 23)
# define MAC_BIG_ENDIAN		(1 << 30)
# define MAC_RX_ALL		(1 << 31)
#define MAC_ADDRESS_HIGH	0x4
#define MAC_ADDRESS_LOW		0x8
#define MAC_MCAST_HIGH		0xC
#define MAC_MCAST_LOW		0x10
#define MAC_MII_CNTRL		0x14
# define MAC_MII_BUSY		(1 << 0)
# define MAC_MII_READ		0
# define MAC_MII_WRITE		(1 << 1)
# define MAC_SET_MII_SELECT_REG(X)	(((X) & 0x1f) << 6)
# define MAC_SET_MII_SELECT_PHY(X)	(((X) & 0x1f) << 11)
#define MAC_MII_DATA		0x18
#define MAC_FLOW_CNTRL		0x1C
# define MAC_FLOW_CNTRL_BUSY	(1 << 0)
# define MAC_FLOW_CNTRL_ENABLE	(1 << 1)
# define MAC_PASS_CONTROL	(1 << 2)
# define MAC_SET_PAUSE(X)	(((X) & 0xffff) << 16)
#define MAC_VLAN1_TAG		0x20
#define MAC_VLAN2_TAG		0x24

/* MAC enable register bit definitions */
# define MAC_EN_CLOCK_ENABLE	(1 << 0)
# define MAC_EN_RESET0		(1 << 1)
# define MAC_EN_TOSS		(0 << 2)
# define MAC_EN_CACHEABLE	(1 << 3)
# define MAC_EN_RESET1		(1 << 4)
# define MAC_EN_RESET2		(1 << 5)
# define MAC_DMA_RESET		(1 << 6)

/* Transmit buffer DMA register offsets and bit definitions */
#define MAC_TX_BUFF0_STATUS	0x0
# define TX_FRAME_ABORTED	(1 << 0)
# define TX_JAB_TIMEOUT		(1 << 1)
# define TX_NO_CARRIER		(1 << 2)
# define TX_LOSS_CARRIER	(1 << 3)
# define TX_EXC_DEF		(1 << 4)
# define TX_LATE_COLL_ABORT	(1 << 5)
# define TX_EXC_COLL		(1 << 6)
# define TX_UNDERRUN		(1 << 7)
# define TX_DEFERRED		(1 << 8)
# define TX_LATE_COLL		(1 << 9)
# define TX_COLL_CNT_MASK	(0xF << 10)
# define TX_PKT_RETRY		(1 << 31)
#define MAC_TX_BUFF0_ADDR	0x4
# define TX_DMA_ENABLE		(1 << 0)
# define TX_T_DONE		(1 << 1)
# define TX_GET_DMA_BUFFER(X)	(((X) >> 2) & 0x3)
#define MAC_TX_BUFF0_LEN	0x8
#define MAC_TX_BUFF1_STATUS	0x10
#define MAC_TX_BUFF1_ADDR	0x14
#define MAC_TX_BUFF1_LEN	0x18
#define MAC_TX_BUFF2_STATUS	0x20
#define MAC_TX_BUFF2_ADDR	0x24
#define MAC_TX_BUFF2_LEN	0x28
#define MAC_TX_BUFF3_STATUS	0x30
#define MAC_TX_BUFF3_ADDR	0x34
#define MAC_TX_BUFF3_LEN	0x38

/* Receive buffer DMA register offsets and bit definitions */
#define MAC_RX_BUFF0_STATUS	0x0
# define RX_FRAME_LEN_MASK	0x3fff
# define RX_WDOG_TIMER		(1 << 14)
# define RX_RUNT		(1 << 15)
# define RX_OVERLEN		(1 << 16)
# define RX_COLL		(1 << 17)
# define RX_ETHER		(1 << 18)
# define RX_MII_ERROR		(1 << 19)
# define RX_DRIBBLING		(1 << 20)
# define RX_CRC_ERROR		(1 << 21)
# define RX_VLAN1		(1 << 22)
# define RX_VLAN2		(1 << 23)
# define RX_LEN_ERROR		(1 << 24)
# define RX_CNTRL_FRAME		(1 << 25)
# define RX_U_CNTRL_FRAME	(1 << 26)
# define RX_MCAST_FRAME		(1 << 27)
# define RX_BCAST_FRAME		(1 << 28)
# define RX_FILTER_FAIL		(1 << 29)
# define RX_PACKET_FILTER	(1 << 30)
# define RX_MISSED_FRAME	(1 << 31)

# define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN |  \
		   RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
		   RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
#define MAC_RX_BUFF0_ADDR	0x4
# define RX_DMA_ENABLE		(1 << 0)
# define RX_T_DONE		(1 << 1)
# define RX_GET_DMA_BUFFER(X)	(((X) >> 2) & 0x3)
# define RX_SET_BUFF_ADDR(X)	((X) & 0xffffffc0)
#define MAC_RX_BUFF1_STATUS	0x10
#define MAC_RX_BUFF1_ADDR	0x14
#define MAC_RX_BUFF2_STATUS	0x20
#define MAC_RX_BUFF2_ADDR	0x24
#define MAC_RX_BUFF3_STATUS	0x30
#define MAC_RX_BUFF3_ADDR	0x34

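/*
 * au1000_enable_mac - ungate the MAC clock and take the MAC out of reset.
 * With force_reset == 0 this is a no-op once the MAC is already enabled.
 */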
static void au1000_enable_mac(struct net_device *dev, int force_reset)
{
	unsigned long flags;
	struct au1000_private *aup = netdev_priv(dev);

	spin_lock_irqsave(&aup->lock, flags);

	if (force_reset || (!aup->mac_enabled)) {
		writel(MAC_EN_CLOCK_ENABLE, aup->enable);
		wmb();
		mdelay(2);
		writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
				| MAC_EN_CLOCK_ENABLE), aup->enable);
		wmb();
		mdelay(2);

		aup->mac_enabled = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);
}

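/*
 * MII access helpers: poll the MII control register until the busy bit
 * clears (bounded retries), then issue the read or write command.
 */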
static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 *const mii_control_reg = &aup->mac->mii_control;
	u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "read_MII busy timeout!!\n");
			return -1;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;

	writel(mii_control, mii_control_reg);

	timedout = 20;
	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "mdio_read busy timeout!!\n");
			return -1;
		}
	}
	return readl(mii_data_reg);
}

static void au1000_mdio_write(struct net_device *dev, int phy_addr,
			      int reg, u16 value)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 *const mii_control_reg = &aup->mac->mii_control;
	u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "mdio_write busy timeout!!\n");
			return;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;

	writel(value, mii_data_reg);
	writel(mii_control, mii_control_reg);
}

static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct net_device *const dev = bus->priv;

	/* make sure the MAC associated with this mii_bus is enabled */
	au1000_enable_mac(dev, 0);

	return au1000_mdio_read(dev, phy_addr, regnum);
}

static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
				u16 value)
{
	struct net_device *const dev = bus->priv;

	/* make sure the MAC associated with this mii_bus is enabled */
	au1000_enable_mac(dev, 0);

	au1000_mdio_write(dev, phy_addr, regnum, value);
	return 0;
}

static int au1000_mdiobus_reset(struct mii_bus *bus)
{
	struct net_device *const dev = bus->priv;

	/* make sure the MAC associated with this mii_bus is enabled */
	au1000_enable_mac(dev, 0);

	return 0;
}

static void au1000_hard_stop(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, drv, dev, "hard stop\n");

	reg = readl(&aup->mac->control);
	reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
	writel(reg, &aup->mac->control);
	wmb();
	mdelay(10);
}

static void au1000_enable_rx_tx(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, hw, dev, "enable_rx_tx\n");

	reg = readl(&aup->mac->control);
	reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
	writel(reg, &aup->mac->control);
	wmb();
	mdelay(10);
}

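/*
 * au1000_adjust_link - phylib callback run when the PHY reports a change.
 * Updates the MAC duplex setting and logs speed/duplex/link transitions.
 */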
static void
au1000_adjust_link(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct phy_device *phydev = aup->phy_dev;
	unsigned long flags;
	u32 reg;

	int status_change = 0;

	BUG_ON(!aup->phy_dev);

	spin_lock_irqsave(&aup->lock, flags);

	if (phydev->link && (aup->old_speed != phydev->speed)) {
		/* speed changed */

		switch (phydev->speed) {
		case SPEED_10:
		case SPEED_100:
			break;
		default:
			netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
					phydev->speed);
			break;
		}

		aup->old_speed = phydev->speed;

		status_change = 1;
	}

	if (phydev->link && (aup->old_duplex != phydev->duplex)) {
		/* duplex mode changed */

		/* switching duplex mode requires disabling rx and tx first */
		au1000_hard_stop(dev);

		reg = readl(&aup->mac->control);
		if (DUPLEX_FULL == phydev->duplex) {
			reg |= MAC_FULL_DUPLEX;
			reg &= ~MAC_DISABLE_RX_OWN;
		} else {
			reg &= ~MAC_FULL_DUPLEX;
			reg |= MAC_DISABLE_RX_OWN;
		}
		writel(reg, &aup->mac->control);
		wmb();
		mdelay(1);

		au1000_enable_rx_tx(dev);
		aup->old_duplex = phydev->duplex;

		status_change = 1;
	}

	if (phydev->link != aup->old_link) {
		/* link state changed */

		if (!phydev->link) {
			/* link went down */
			aup->old_speed = 0;
			aup->old_duplex = -1;
		}

		aup->old_link = phydev->link;
		status_change = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);

	if (status_change) {
		if (phydev->link)
			netdev_info(dev, "link up (%d/%s)\n",
					phydev->speed,
					DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			netdev_info(dev, "link down\n");
	}
}

static int au1000_mii_probe(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;

	if (aup->phy_static_config) {
		BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);

		if (aup->phy_addr)
			phydev = aup->mii_bus->phy_map[aup->phy_addr];
		else
			netdev_info(dev, "using PHY-less setup\n");
		return 0;
	}

	/* find the first (lowest address) PHY
	 * on the current MAC's MII bus
	 */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
		if (aup->mii_bus->phy_map[phy_addr]) {
			phydev = aup->mii_bus->phy_map[phy_addr];
			if (!aup->phy_search_highest_addr)
				/* break out with first one found */
				break;
		}

	if (aup->phy1_search_mac0) {
		/* try harder to find a PHY */
		if (!phydev && (aup->mac_id == 1)) {
			/* no PHY found on MAC1, maybe we have a dual PHY? */
			dev_info(&dev->dev, ": no PHY found on MAC1, "
				"let's see if it's attached to MAC0...\n");

			/* find the first (lowest address) non-attached
			 * PHY on the MAC0 MII bus
			 */
			for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
				struct phy_device *const tmp_phydev =
						aup->mii_bus->phy_map[phy_addr];

				if (aup->mac_id == 1)
					break;

				/* no PHY at this address */
				if (!tmp_phydev)
					continue;

				/* already claimed by MAC0 */
				if (tmp_phydev->attached_dev)
					continue;

				phydev = tmp_phydev;
				break; /* found it */
			}
		}
	}

	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -1;
	}

	/* now we should have a proper phydev to attach to */
	BUG_ON(phydev->attached_dev);

	phydev = phy_connect(dev, dev_name(&phydev->dev),
			     &au1000_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;

	aup->old_link = 0;
	aup->old_speed = 0;
	aup->old_duplex = -1;
	aup->phy_dev = phydev;

	netdev_info(dev, "attached PHY driver [%s] "
			"(mii_bus:phy_addr=%s, irq=%d)\n",
			phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

	return 0;
}

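/*
 * Buffer descriptor helpers: au1000_GetFreeDB/au1000_ReleaseDB manage a
 * simple free list of db_dest entries, each holding the virtual and DMA
 * address of one packet buffer.
 */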
static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
{
	struct db_dest *pDB;
	pDB = aup->pDBfree;

	if (pDB)
		aup->pDBfree = pDB->pnext;

	return pDB;
}

void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
	struct db_dest *pDBfree = aup->pDBfree;
	if (pDBfree)
		pDBfree->pnext = pDB;
	aup->pDBfree = pDB;
}

static void au1000_reset_mac_unlocked(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	int i;

	au1000_hard_stop(dev);

	writel(MAC_EN_CLOCK_ENABLE, aup->enable);
	wmb();
	mdelay(2);
	writel(0, aup->enable);
	wmb();
	mdelay(2);

	aup->tx_full = 0;
	for (i = 0; i < NUM_RX_DMA; i++) {
		/* reset control bits */
		aup->rx_dma_ring[i]->buff_stat &= ~0xf;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		/* reset control bits */
		aup->tx_dma_ring[i]->buff_stat &= ~0xf;
	}

	aup->mac_enabled = 0;
}

static void au1000_reset_mac(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	unsigned long flags;

	netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
			(unsigned)aup);

	spin_lock_irqsave(&aup->lock, flags);

	au1000_reset_mac_unlocked(dev);

	spin_unlock_irqrestore(&aup->lock, flags);
}

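/*
 * au1000_setup_hw_rings - point the rx/tx "ring" entries at the MACDMA
 * register blocks; the descriptors live in the hardware, not in memory.
 */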
static void
au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
{
	int i;

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i] = (struct rx_dma *)
			(tx_base + 0x100 + sizeof(struct rx_dma) * i);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		aup->tx_dma_ring[i] = (struct tx_dma *)
			(tx_base + sizeof(struct tx_dma) * i);
	}
}

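/* ethtool operations */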
static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct au1000_private *aup = netdev_priv(dev);

	if (aup->phy_dev)
		return phy_ethtool_gset(aup->phy_dev, cmd);

	return -EINVAL;
}

static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct au1000_private *aup = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (aup->phy_dev)
		return phy_ethtool_sset(aup->phy_dev, cmd);

	return -EINVAL;
}

static void
au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct au1000_private *aup = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
		 aup->mac_id);
	info->regdump_len = 0;
}

static void au1000_set_msglevel(struct net_device *dev, u32 value)
{
	struct au1000_private *aup = netdev_priv(dev);
	aup->msg_enable = value;
}

static u32 au1000_get_msglevel(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	return aup->msg_enable;
}

static const struct ethtool_ops au1000_ethtool_ops = {
	.get_settings = au1000_get_settings,
	.set_settings = au1000_set_settings,
	.get_drvinfo = au1000_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = au1000_get_msglevel,
	.set_msglevel = au1000_set_msglevel,
};

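/*
 * au1000_init - (re)enable the MAC, program the station address, enable
 * the rx DMA buffers and set up the control register for the current
 * PHY duplex state.
 */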
static int au1000_init(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	unsigned long flags;
	int i;
	u32 control;

	netif_dbg(aup, hw, dev, "au1000_init\n");

	/* bring the device out of reset */
	au1000_enable_mac(dev, 1);

	spin_lock_irqsave(&aup->lock, flags);

	writel(0, &aup->mac->control);
	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
	aup->tx_tail = aup->tx_head;
	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;

	writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
					&aup->mac->mac_addr_high);
	writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
		dev->dev_addr[1]<<8 | dev->dev_addr[0],
					&aup->mac->mac_addr_low);

	for (i = 0; i < NUM_RX_DMA; i++)
		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;

	wmb();

	control = MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	control |= MAC_BIG_ENDIAN;
#endif
	if (aup->phy_dev) {
		if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
			control |= MAC_FULL_DUPLEX;
		else
			control |= MAC_DISABLE_RX_OWN;
	} else { /* PHY-less operation, assume full duplex */
		control |= MAC_FULL_DUPLEX;
	}

	writel(control, &aup->mac->control);
	writel(0x8100, &aup->mac->vlan1_tag); /* 802.1Q VLAN ethertype */
	wmb();

	spin_unlock_irqrestore(&aup->lock, flags);
	return 0;
}

static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
{
	struct net_device_stats *ps = &dev->stats;

	ps->rx_packets++;
	if (status & RX_MCAST_FRAME)
		ps->multicast++;

	if (status & RX_ERROR) {
		ps->rx_errors++;
		if (status & RX_MISSED_FRAME)
			ps->rx_missed_errors++;
		if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
			ps->rx_length_errors++;
		if (status & RX_CRC_ERROR)
			ps->rx_crc_errors++;
		if (status & RX_COLL)
			ps->collisions++;
	} else
		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}

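/*
 * au1000_rx - service the receive ring: hand good frames to the stack,
 * count errors, and give each buffer back to the DMA engine.
 */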
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_dma *prxd;
	u32 buff_stat, status;
	struct db_dest *pDB;
	u32 frmlen;

	netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		au1000_update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {
			/* good frame: copy it into a fresh skb and pass it up */
			frmlen = (status & RX_FRAME_LEN_MASK);
			frmlen -= 4; /* strip the FCS */
			skb = netdev_alloc_skb(dev, frmlen + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 2); /* align the IP header */
			skb_copy_to_linear_data(skb,
				(unsigned char *)pDB->vaddr, frmlen);
			skb_put(skb, frmlen);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		} else {
			if (au1000_debug > 4) {
				pr_err("rx_error(s):");
				if (status & RX_MISSED_FRAME)
					pr_cont(" miss");
				if (status & RX_WDOG_TIMER)
					pr_cont(" wdog");
				if (status & RX_RUNT)
					pr_cont(" runt");
				if (status & RX_OVERLEN)
					pr_cont(" overlen");
				if (status & RX_COLL)
					pr_cont(" coll");
				if (status & RX_MII_ERROR)
					pr_cont(" mii error");
				if (status & RX_CRC_ERROR)
					pr_cont(" crc error");
				if (status & RX_LEN_ERROR)
					pr_cont(" len error");
				if (status & RX_U_CNTRL_FRAME)
					pr_cont(" u control frame");
				pr_cont("\n");
			}
		}
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
		wmb();

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
	}
	return 0;
}

static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct net_device_stats *ps = &dev->stats;

	if (status & TX_FRAME_ABORTED) {
		if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
				/* in full duplex, only jabber timeouts and
				 * underruns count as aborted-frame errors
				 */
				ps->tx_errors++;
				ps->tx_aborted_errors++;
			}
		} else {
			ps->tx_errors++;
			ps->tx_aborted_errors++;
			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
				ps->tx_carrier_errors++;
		}
	}
}

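/*
 * au1000_tx_ack - reclaim transmit buffers that the MAC has finished
 * with, update statistics and wake the queue if it was stopped.
 */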
static void au1000_tx_ack(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct tx_dma *ptxd;

	ptxd = aup->tx_dma_ring[aup->tx_tail];

	while (ptxd->buff_stat & TX_T_DONE) {
		au1000_update_tx_stats(dev, ptxd->status);
		ptxd->buff_stat &= ~TX_T_DONE;
		ptxd->len = 0;
		wmb();

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
		ptxd = aup->tx_dma_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}
}

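/* interrupt handler: services both the receive and the transmit side */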
static irqreturn_t au1000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	/* handle RX before TX to reduce the chance of an RX overrun */
	au1000_rx(dev);
	au1000_tx_ack(dev);
	return IRQ_RETVAL(1);
}

static int au1000_open(struct net_device *dev)
{
	int retval;
	struct au1000_private *aup = netdev_priv(dev);

	netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);

	retval = request_irq(dev->irq, au1000_interrupt, 0,
				dev->name, dev);
	if (retval) {
		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
		return retval;
	}

	retval = au1000_init(dev);
	if (retval) {
		netdev_err(dev, "error in au1000_init\n");
		free_irq(dev->irq, dev);
		return retval;
	}

	if (aup->phy_dev) {
		/* kick the PHY state machine so it rechecks the link */
		aup->phy_dev->state = PHY_CHANGELINK;
		phy_start(aup->phy_dev);
	}

	netif_start_queue(dev);

	netif_dbg(aup, drv, dev, "open: Initialization done.\n");

	return 0;
}

static int au1000_close(struct net_device *dev)
{
	unsigned long flags;
	struct au1000_private *const aup = netdev_priv(dev);

	netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);

	if (aup->phy_dev)
		phy_stop(aup->phy_dev);

	spin_lock_irqsave(&aup->lock, flags);

	au1000_reset_mac_unlocked(dev);

	/* stop the device */
	netif_stop_queue(dev);

	/* disable the interrupt */
	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&aup->lock, flags);

	return 0;
}

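/*
 * au1000_tx - queue a frame for transmission. The frame is copied into a
 * pre-allocated DMA buffer; if no buffer is free the queue is stopped
 * and NETDEV_TX_BUSY is returned.
 */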
static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct net_device_stats *ps = &dev->stats;
	struct tx_dma *ptxd;
	u32 buff_stat;
	struct db_dest *pDB;
	int i;

	netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
				(unsigned)aup, skb->len,
				skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* the transmitter still owns this buffer: stop the queue */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	} else if (buff_stat & TX_T_DONE) {
		au1000_update_tx_stats(dev, ptxd->status);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
	if (skb->len < ETH_ZLEN) {
		/* pad short frames to the minimum Ethernet frame size */
		for (i = skb->len; i < ETH_ZLEN; i++)
			((char *)pDB->vaddr)[i] = 0;

		ptxd->len = ETH_ZLEN;
	} else
		ptxd->len = skb->len;

	ps->tx_packets++;
	ps->tx_bytes += ptxd->len;

	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	wmb();
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	return NETDEV_TX_OK;
}

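/*
 * au1000_tx_timeout - the transmitter appears hung: reset and
 * reinitialize the MAC, then restart the queue.
 */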
static void au1000_tx_timeout(struct net_device *dev)
{
	netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
	au1000_reset_mac(dev);
	au1000_init(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static void au1000_multicast_list(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
	reg = readl(&aup->mac->control);
	if (dev->flags & IFF_PROMISC) {		/* set promiscuous mode */
		reg |= MAC_PROMISCUOUS;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
		reg |= MAC_PASS_ALL_MULTI;
		reg &= ~MAC_PROMISCUOUS;
		netdev_info(dev, "Pass all multicast\n");
	} else {
		struct netdev_hw_addr *ha;
		u32 mc_filter[2];	/* multicast hash filter */

		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev)
			set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
					(long *)mc_filter);
		writel(mc_filter[1], &aup->mac->multi_hash_high);
		writel(mc_filter[0], &aup->mac->multi_hash_low);
		reg &= ~MAC_PROMISCUOUS;
		reg |= MAC_HASH_MODE;
	}
	writel(reg, &aup->mac->control);
}

static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct au1000_private *aup = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!aup->phy_dev)
		return -EINVAL; /* PHY not controllable */

	return phy_mii_ioctl(aup->phy_dev, rq, cmd);
}

static const struct net_device_ops au1000_netdev_ops = {
	.ndo_open		= au1000_open,
	.ndo_stop		= au1000_close,
	.ndo_start_xmit		= au1000_tx,
	.ndo_set_rx_mode	= au1000_multicast_list,
	.ndo_do_ioctl		= au1000_ioctl,
	.ndo_tx_timeout		= au1000_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

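/*
 * au1000_probe - platform device probe: map the MAC, MAC-enable and
 * MACDMA register regions, allocate packet buffers, register the MDIO
 * bus, probe the PHY and register the net_device.
 */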
static int au1000_probe(struct platform_device *pdev)
{
	struct au1000_private *aup = NULL;
	struct au1000_eth_platform_data *pd;
	struct net_device *dev = NULL;
	struct db_dest *pDB, *pDBfree;
	int irq, i, err = 0;
	struct resource *base, *macen, *macdma;

	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!base) {
		dev_err(&pdev->dev, "failed to retrieve base register\n");
		err = -ENODEV;
		goto out;
	}

	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!macen) {
		dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
		err = -ENODEV;
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to retrieve IRQ\n");
		err = -ENODEV;
		goto out;
	}

	macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!macdma) {
		dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
		err = -ENODEV;
		goto out;
	}

	if (!request_mem_region(base->start, resource_size(base),
							pdev->name)) {
		dev_err(&pdev->dev, "failed to request memory region for base registers\n");
		err = -ENXIO;
		goto out;
	}

	if (!request_mem_region(macen->start, resource_size(macen),
							pdev->name)) {
		dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
		err = -ENXIO;
		goto err_request;
	}

	if (!request_mem_region(macdma->start, resource_size(macdma),
							pdev->name)) {
		dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
		err = -ENXIO;
		goto err_macdma;
	}

	dev = alloc_etherdev(sizeof(struct au1000_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	platform_set_drvdata(pdev, dev);
	aup = netdev_priv(dev);

	spin_lock_init(&aup->lock);
	aup->msg_enable = (au1000_debug < 4 ?
				AU1000_DEF_MSG_ENABLE : au1000_debug);

	/* allocate the rx/tx packet data buffers */
	aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
						(NUM_TX_BUFFS + NUM_RX_BUFFS),
						&aup->dma_addr, 0);
	if (!aup->vaddr) {
		dev_err(&pdev->dev, "failed to allocate data buffers\n");
		err = -ENOMEM;
		goto err_vaddr;
	}

	/* aup->mac is the base address of the MAC's registers */
	aup->mac = (struct mac_reg *)
			ioremap_nocache(base->start, resource_size(base));
	if (!aup->mac) {
		dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
		err = -ENXIO;
		goto err_remap1;
	}

	/* aup->enable is the MAC enable/reset register */
	aup->enable = (u32 *)ioremap_nocache(macen->start,
						resource_size(macen));
	if (!aup->enable) {
		dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
		err = -ENXIO;
		goto err_remap2;
	}
	aup->mac_id = pdev->id;

	aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
	if (!aup->macdma) {
		dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
		err = -ENXIO;
		goto err_remap3;
	}

	au1000_setup_hw_rings(aup, aup->macdma);

	writel(0, aup->enable);
	aup->mac_enabled = 0;

	pd = dev_get_platdata(&pdev->dev);
	if (!pd) {
		dev_info(&pdev->dev, "no platform_data passed,"
					" PHY search on MAC0\n");
		aup->phy1_search_mac0 = 1;
	} else {
		if (is_valid_ether_addr(pd->mac)) {
			memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
		} else {
			/* no valid MAC address provided, use a random one */
			eth_hw_addr_random(dev);
		}

		aup->phy_static_config = pd->phy_static_config;
		aup->phy_search_highest_addr = pd->phy_search_highest_addr;
		aup->phy1_search_mac0 = pd->phy1_search_mac0;
		aup->phy_addr = pd->phy_addr;
		aup->phy_busid = pd->phy_busid;
		aup->phy_irq = pd->phy_irq;
	}

	if (aup->phy_busid > 0) {
		dev_err(&pdev->dev, "MAC0-associated PHY attached to the 2nd MAC's MII bus is not supported yet\n");
		err = -ENODEV;
		goto err_mdiobus_alloc;
	}

	aup->mii_bus = mdiobus_alloc();
	if (aup->mii_bus == NULL) {
		dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
		err = -ENOMEM;
		goto err_mdiobus_alloc;
	}

	aup->mii_bus->priv = dev;
	aup->mii_bus->read = au1000_mdiobus_read;
	aup->mii_bus->write = au1000_mdiobus_write;
	aup->mii_bus->reset = au1000_mdiobus_reset;
	aup->mii_bus->name = "au1000_eth_mii";
	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, aup->mac_id);
	aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (aup->mii_bus->irq == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < PHY_MAX_ADDR; ++i)
		aup->mii_bus->irq[i] = PHY_POLL;

	/* if the PHY interrupt is known, use it instead of polling */
	if (aup->phy_static_config)
		if (aup->phy_irq && aup->phy_busid == aup->mac_id)
			aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;

	err = mdiobus_register(aup->mii_bus);
	if (err) {
		dev_err(&pdev->dev, "failed to register MDIO bus\n");
		goto err_mdiobus_reg;
	}

	err = au1000_mii_probe(dev);
	if (err != 0)
		goto err_out;

	pDBfree = NULL;
	/* set up the buffer descriptor free list */
	pDB = aup->db;
	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	err = -ENODEV;
	for (i = 0; i < NUM_RX_DMA; i++) {
		pDB = au1000_GetFreeDB(aup);
		if (!pDB)
			goto err_out;

		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->rx_db_inuse[i] = pDB;
	}

	err = -ENODEV;
	for (i = 0; i < NUM_TX_DMA; i++) {
		pDB = au1000_GetFreeDB(aup);
		if (!pDB)
			goto err_out;

		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->tx_dma_ring[i]->len = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	dev->base_addr = base->start;
	dev->irq = irq;
	dev->netdev_ops = &au1000_netdev_ops;
	dev->ethtool_ops = &au1000_ethtool_ops;
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	/*
	 * The boot code may have left the MAC running;
	 * reset it so the driver starts from a clean state.
	 */
	au1000_reset_mac(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_err(dev, "Cannot register net device, aborting.\n");
		goto err_out;
	}

	netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
			(unsigned long)base->start, irq);

	pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);

	return 0;

err_out:
	if (aup->mii_bus != NULL)
		mdiobus_unregister(aup->mii_bus);

	/* dev and the register mappings are valid here; undo what was
	 * set up above, including any partially built DMA rings.
	 */
	au1000_reset_mac(dev);

	for (i = 0; i < NUM_RX_DMA; i++) {
		if (aup->rx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		if (aup->tx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
	}
err_mdiobus_reg:
	mdiobus_free(aup->mii_bus);
err_mdiobus_alloc:
	iounmap(aup->macdma);
err_remap3:
	iounmap(aup->enable);
err_remap2:
	iounmap(aup->mac);
err_remap1:
	dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
			(void *)aup->vaddr, aup->dma_addr);
err_vaddr:
	free_netdev(dev);
err_alloc:
	release_mem_region(macdma->start, resource_size(macdma));
err_macdma:
	release_mem_region(macen->start, resource_size(macen));
err_request:
	release_mem_region(base->start, resource_size(base));
out:
	return err;
}

static int au1000_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct au1000_private *aup = netdev_priv(dev);
	int i;
	struct resource *base, *macen;

	unregister_netdev(dev);
	mdiobus_unregister(aup->mii_bus);
	mdiobus_free(aup->mii_bus);

	for (i = 0; i < NUM_RX_DMA; i++)
		if (aup->rx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);

	for (i = 0; i < NUM_TX_DMA; i++)
		if (aup->tx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);

	dma_free_noncoherent(NULL, MAX_BUF_SIZE *
			(NUM_TX_BUFFS + NUM_RX_BUFFS),
			(void *)aup->vaddr, aup->dma_addr);

	iounmap(aup->macdma);
	iounmap(aup->mac);
	iounmap(aup->enable);

	base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	release_mem_region(base->start, resource_size(base));

	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(base->start, resource_size(base));

	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	release_mem_region(macen->start, resource_size(macen));

	free_netdev(dev);

	return 0;
}

static struct platform_driver au1000_eth_driver = {
	.probe		= au1000_probe,
	.remove		= au1000_remove,
	.driver = {
		.name	= "au1000-eth",
	},
};

module_platform_driver(au1000_eth_driver);

MODULE_ALIAS("platform:au1000-eth");