/* sunhme.c: Sparc HME (Happy Meal Ethernet) 10/100baseT Ethernet driver.
 *
 * Copyright (C) David S. Miller (davem@davemloft.net)
 */
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/fcntl.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/in.h>
24#include <linux/slab.h>
25#include <linux/string.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ethtool.h>
29#include <linux/mii.h>
30#include <linux/crc32.h>
31#include <linux/random.h>
32#include <linux/errno.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/mm.h>
37#include <linux/bitops.h>
38#include <linux/dma-mapping.h>
39
40#include <asm/io.h>
41#include <asm/dma.h>
42#include <asm/byteorder.h>
43
44#ifdef CONFIG_SPARC
45#include <linux/of.h>
46#include <linux/of_device.h>
47#include <asm/idprom.h>
48#include <asm/openprom.h>
49#include <asm/oplib.h>
50#include <asm/prom.h>
51#include <asm/auxio.h>
52#endif
53#include <linux/uaccess.h>
54
55#include <asm/pgtable.h>
56#include <asm/irq.h>
57
58#ifdef CONFIG_PCI
59#include <linux/pci.h>
60#endif
61
62#include "sunhme.h"
63
64#define DRV_NAME "sunhme"
65#define DRV_VERSION "3.10"
66#define DRV_RELDATE "August 26, 2008"
67#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
68
69static char version[] =
70 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
71
72MODULE_VERSION(DRV_VERSION);
73MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun Happy Meal Ethernet (HME) 10/100baseT Ethernet driver");
75MODULE_LICENSE("GPL");
76
77static int macaddr[6];
78
79
80module_param_array(macaddr, int, NULL, 0);
81MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
82
83#ifdef CONFIG_SBUS
84static struct quattro *qfe_sbus_list;
85#endif
86
87#ifdef CONFIG_PCI
88static struct quattro *qfe_pci_list;
89#endif
90
91#undef HMEDEBUG
92#undef SXDEBUG
93#undef RXDEBUG
94#undef TXDEBUG
95#undef TXLOGGING
96
97#ifdef TXLOGGING
98struct hme_tx_logent {
99 unsigned int tstamp;
100 int tx_new, tx_old;
101 unsigned int action;
102#define TXLOG_ACTION_IRQ 0x01
103#define TXLOG_ACTION_TXMIT 0x02
104#define TXLOG_ACTION_TBUSY 0x04
105#define TXLOG_ACTION_NBUFS 0x08
106 unsigned int status;
107};
108#define TX_LOG_LEN 128
109static struct hme_tx_logent tx_log[TX_LOG_LEN];
110static int txlog_cur_entry;
111static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
112{
113 struct hme_tx_logent *tlp;
114 unsigned long flags;
115
116 local_irq_save(flags);
117 tlp = &tx_log[txlog_cur_entry];
118 tlp->tstamp = (unsigned int)jiffies;
119 tlp->tx_new = hp->tx_new;
120 tlp->tx_old = hp->tx_old;
121 tlp->action = a;
122 tlp->status = s;
123 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
124 local_irq_restore(flags);
125}
126static __inline__ void tx_dump_log(void)
127{
128 int i, this;
129
130 this = txlog_cur_entry;
131 for (i = 0; i < TX_LOG_LEN; i++) {
132 printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
133 tx_log[this].tstamp,
134 tx_log[this].tx_new, tx_log[this].tx_old,
135 tx_log[this].action, tx_log[this].status);
136 this = (this + 1) & (TX_LOG_LEN - 1);
137 }
138}
139static __inline__ void tx_dump_ring(struct happy_meal *hp)
140{
141 struct hmeal_init_block *hb = hp->happy_block;
142 struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
143 int i;
144
145 for (i = 0; i < TX_RING_SIZE; i+=4) {
146 printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
147 i, i + 4,
148 le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
149 le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
150 le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
151 le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
152 }
153}
154#else
155#define tx_add_log(hp, a, s) do { } while(0)
156#define tx_dump_log() do { } while(0)
157#define tx_dump_ring(hp) do { } while(0)
158#endif
159
160#ifdef HMEDEBUG
161#define HMD(x) printk x
162#else
163#define HMD(x)
164#endif
165
166
167
168#ifdef AUTO_SWITCH_DEBUG
169#define ASD(x) printk x
170#else
171#define ASD(x)
172#endif
173
174#define DEFAULT_IPG0 16
175#define DEFAULT_IPG1 8
176#define DEFAULT_IPG2 4
177#define DEFAULT_JAMSIZE 4
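/* NOTE: In the descriptor accessors below the buffer address member
 *	 must be written _first_; the chip must not see the updated
 *	 descriptor flags (ownership bit) until the address is valid.
 *	 The dma_wmb() between the two stores enforces that ordering.
 */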
186#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
187static void sbus_hme_write32(void __iomem *reg, u32 val)
188{
189 sbus_writel(val, reg);
190}
191
192static u32 sbus_hme_read32(void __iomem *reg)
193{
194 return sbus_readl(reg);
195}
196
197static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
198{
199 rxd->rx_addr = (__force hme32)addr;
200 dma_wmb();
201 rxd->rx_flags = (__force hme32)flags;
202}
203
204static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
205{
206 txd->tx_addr = (__force hme32)addr;
207 dma_wmb();
208 txd->tx_flags = (__force hme32)flags;
209}
210
211static u32 sbus_hme_read_desc32(hme32 *p)
212{
213 return (__force u32)*p;
214}
215
216static void pci_hme_write32(void __iomem *reg, u32 val)
217{
218 writel(val, reg);
219}
220
221static u32 pci_hme_read32(void __iomem *reg)
222{
223 return readl(reg);
224}
225
226static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
227{
228 rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
229 dma_wmb();
230 rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
231}
232
233static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
234{
235 txd->tx_addr = (__force hme32)cpu_to_le32(addr);
236 dma_wmb();
237 txd->tx_flags = (__force hme32)cpu_to_le32(flags);
238}
239
240static u32 pci_hme_read_desc32(hme32 *p)
241{
242 return le32_to_cpup((__le32 *)p);
243}
244
245#define hme_write32(__hp, __reg, __val) \
246 ((__hp)->write32((__reg), (__val)))
247#define hme_read32(__hp, __reg) \
248 ((__hp)->read32(__reg))
249#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
250 ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
251#define hme_write_txd(__hp, __txd, __flags, __addr) \
252 ((__hp)->write_txd((__txd), (__flags), (__addr)))
253#define hme_read_desc32(__hp, __p) \
254 ((__hp)->read_desc32(__p))
255#define hme_dma_map(__hp, __ptr, __size, __dir) \
256 ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir)))
257#define hme_dma_unmap(__hp, __addr, __size, __dir) \
258 ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir)))
259#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
260 ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)))
261#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
262 ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)))
263#else
264#ifdef CONFIG_SBUS
265
266#define hme_write32(__hp, __reg, __val) \
267 sbus_writel((__val), (__reg))
268#define hme_read32(__hp, __reg) \
269 sbus_readl(__reg)
270#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
271do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
272 dma_wmb(); \
273 (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
274} while(0)
275#define hme_write_txd(__hp, __txd, __flags, __addr) \
276do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
277 dma_wmb(); \
278 (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
279} while(0)
280#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
281#define hme_dma_map(__hp, __ptr, __size, __dir) \
282 dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
283#define hme_dma_unmap(__hp, __addr, __size, __dir) \
284 dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
	dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
	dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
289#else
290
291#define hme_write32(__hp, __reg, __val) \
292 writel((__val), (__reg))
293#define hme_read32(__hp, __reg) \
294 readl(__reg)
295#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
296do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
297 dma_wmb(); \
298 (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
299} while(0)
300#define hme_write_txd(__hp, __txd, __flags, __addr) \
301do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
302 dma_wmb(); \
303 (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
304} while(0)
305static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
306{
307 return le32_to_cpup((__le32 *)p);
308}
#define hme_dma_map(__hp, __ptr, __size, __dir) \
	dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
	dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
	dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
	dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
317#endif
318#endif
319
320
321
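/* Software MII (bit-bang) helpers: shift one bit out to, or sample one
 * bit in from, the transceiver's MDIO line by toggling the bit-bang
 * clock register.
 */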
322static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
323{
324 hme_write32(hp, tregs + TCVR_BBDATA, bit);
325 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
326 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
327}
328
329#if 0
330static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
331{
332 u32 ret;
333
334 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
335 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
336 ret = hme_read32(hp, tregs + TCVR_CFG);
337 if (internal)
338 ret &= TCV_CFG_MDIO0;
339 else
340 ret &= TCV_CFG_MDIO1;
341
342 return ret;
343}
344#endif
345
346static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
347{
348 u32 retval;
349
350 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
351 udelay(1);
352 retval = hme_read32(hp, tregs + TCVR_CFG);
353 if (internal)
354 retval &= TCV_CFG_MDIO0;
355 else
356 retval &= TCV_CFG_MDIO1;
357 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
358
359 return retval;
360}
361
362#define TCVR_FAILURE 0x80000000
363
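/* Read a transceiver register via bit-banged MII: drive the 32-bit
 * preamble, the start/read opcode, the 5-bit PHY and register
 * addresses, then tristate the data line and clock in the 16-bit
 * result.
 */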
364static int happy_meal_bb_read(struct happy_meal *hp,
365 void __iomem *tregs, int reg)
366{
367 u32 tmp;
368 int retval = 0;
369 int i;
370
371 ASD(("happy_meal_bb_read: reg=%d ", reg));
372
373
374 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
375
376
377 for (i = 0; i < 32; i++)
378 BB_PUT_BIT(hp, tregs, 1);
379
380
381 BB_PUT_BIT(hp, tregs, 0);
382 BB_PUT_BIT(hp, tregs, 1);
383 BB_PUT_BIT(hp, tregs, 1);
384 BB_PUT_BIT(hp, tregs, 0);
385
386
387 tmp = hp->paddr & 0xff;
388 for (i = 4; i >= 0; i--)
389 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
390
391
392 tmp = (reg & 0xff);
393 for (i = 4; i >= 0; i--)
394 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
395
396
397 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
398
399
400 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
401 for (i = 15; i >= 0; i--)
402 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
403 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
404 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
405 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
406 ASD(("value=%x\n", retval));
407 return retval;
408}
409
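/* Write a transceiver register via bit-banged MII: preamble, start and
 * write opcode, PHY and register addresses, turnaround, then the 16
 * data bits.
 */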
410static void happy_meal_bb_write(struct happy_meal *hp,
411 void __iomem *tregs, int reg,
412 unsigned short value)
413{
414 u32 tmp;
415 int i;
416
417 ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
418
419
420 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
421
422
423 for (i = 0; i < 32; i++)
424 BB_PUT_BIT(hp, tregs, 1);
425
426
427 BB_PUT_BIT(hp, tregs, 0);
428 BB_PUT_BIT(hp, tregs, 1);
429 BB_PUT_BIT(hp, tregs, 0);
430 BB_PUT_BIT(hp, tregs, 1);
431
432
433 tmp = (hp->paddr & 0xff);
434 for (i = 4; i >= 0; i--)
435 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
436
437
438 tmp = (reg & 0xff);
439 for (i = 4; i >= 0; i--)
440 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
441
442
443 BB_PUT_BIT(hp, tregs, 1);
444 BB_PUT_BIT(hp, tregs, 0);
445
446 for (i = 15; i >= 0; i--)
447 BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
448
449
450 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
451}
452
453#define TCVR_READ_TRIES 16
454
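/* Read an MII register, using the MIF frame register when frame mode
 * is enabled and falling back to bit-banging otherwise.  Returns
 * TCVR_FAILURE if no transceiver is present or the frame never
 * completes.
 */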
455static int happy_meal_tcvr_read(struct happy_meal *hp,
456 void __iomem *tregs, int reg)
457{
458 int tries = TCVR_READ_TRIES;
459 int retval;
460
461 ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
462 if (hp->tcvr_type == none) {
463 ASD(("no transceiver, value=TCVR_FAILURE\n"));
464 return TCVR_FAILURE;
465 }
466
467 if (!(hp->happy_flags & HFLAG_FENABLE)) {
468 ASD(("doing bit bang\n"));
469 return happy_meal_bb_read(hp, tregs, reg);
470 }
471
472 hme_write32(hp, tregs + TCVR_FRAME,
473 (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
474 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
475 udelay(20);
476 if (!tries) {
477 printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
478 return TCVR_FAILURE;
479 }
480 retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
481 ASD(("value=%04x\n", retval));
482 return retval;
483}
484
485#define TCVR_WRITE_TRIES 16
486
487static void happy_meal_tcvr_write(struct happy_meal *hp,
488 void __iomem *tregs, int reg,
489 unsigned short value)
490{
491 int tries = TCVR_WRITE_TRIES;
492
493 ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
494
495
496 if (!(hp->happy_flags & HFLAG_FENABLE)) {
497 happy_meal_bb_write(hp, tregs, reg, value);
498 return;
499 }
500
501
502 hme_write32(hp, tregs + TCVR_FRAME,
503 (FRAME_WRITE | (hp->paddr << 23) |
504 ((reg & 0xff) << 18) | (value & 0xffff)));
505 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
506 udelay(20);
507
508
509 if (!tries)
510 printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
511
512
513}
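/* Link management.  When auto-negotiation gets us nowhere the forced
 * modes are stepped through by hand: first drop full duplex, then drop
 * from 100Mbit to 10Mbit.  Returns -1 once there is nothing left to
 * try.
 */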
547static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
548{
549 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
550
551
552
553
554 if (hp->sw_bmcr & BMCR_FULLDPLX) {
555 hp->sw_bmcr &= ~(BMCR_FULLDPLX);
556 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
557 return 0;
558 }
559
560
561 if (hp->sw_bmcr & BMCR_SPEED100) {
562 hp->sw_bmcr &= ~(BMCR_SPEED100);
563 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
564 return 0;
565 }
566
567
568 return -1;
569}
570
571static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
572{
573 printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
574 if (hp->tcvr_type == external)
575 printk("external ");
576 else
577 printk("internal ");
578 printk("transceiver at ");
579 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
580 if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
581 if (hp->sw_lpa & LPA_100FULL)
582 printk("100Mb/s, Full Duplex.\n");
583 else
584 printk("100Mb/s, Half Duplex.\n");
585 } else {
586 if (hp->sw_lpa & LPA_10FULL)
587 printk("10Mb/s, Full Duplex.\n");
588 else
589 printk("10Mb/s, Half Duplex.\n");
590 }
591}
592
593static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
594{
595 printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
596 if (hp->tcvr_type == external)
597 printk("external ");
598 else
599 printk("internal ");
600 printk("transceiver at ");
601 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
602 if (hp->sw_bmcr & BMCR_SPEED100)
603 printk("100Mb/s, ");
604 else
605 printk("10Mb/s, ");
606 if (hp->sw_bmcr & BMCR_FULLDPLX)
607 printk("Full Duplex.\n");
608 else
609 printk("Half Duplex.\n");
610}
611
612static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
613{
614 int full;
615
616
617
618
619 if (hp->timer_state == arbwait) {
620 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
621 if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
622 goto no_response;
623 if (hp->sw_lpa & LPA_100FULL)
624 full = 1;
625 else if (hp->sw_lpa & LPA_100HALF)
626 full = 0;
627 else if (hp->sw_lpa & LPA_10FULL)
628 full = 1;
629 else
630 full = 0;
631 } else {
632
633 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
634 if (hp->sw_bmcr & BMCR_FULLDPLX)
635 full = 1;
636 else
637 full = 0;
638 }
639
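	/* The transmitter must be disabled before the duplex bit can be
	 * changed: clear the enable bit, wait for it to drain, flip
	 * BIGMAC_TXCFG_FULLDPLX as needed, then re-enable.
	 */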
648 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
649 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
650 ~(BIGMAC_TXCFG_ENABLE));
651 while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
652 barrier();
653 if (full) {
654 hp->happy_flags |= HFLAG_FULL;
655 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
656 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
657 BIGMAC_TXCFG_FULLDPLX);
658 } else {
659 hp->happy_flags &= ~(HFLAG_FULL);
660 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
661 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
662 ~(BIGMAC_TXCFG_FULLDPLX));
663 }
664 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
665 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
666 BIGMAC_TXCFG_ENABLE);
667 return 0;
668no_response:
669 return 1;
670}
671
672static int happy_meal_init(struct happy_meal *hp);
673
674static int is_lucent_phy(struct happy_meal *hp)
675{
676 void __iomem *tregs = hp->tcvregs;
677 unsigned short mr2, mr3;
678 int ret = 0;
679
680 mr2 = happy_meal_tcvr_read(hp, tregs, 2);
681 mr3 = happy_meal_tcvr_read(hp, tregs, 3);
682 if ((mr2 & 0xffff) == 0x0180 &&
683 ((mr3 & 0xffff) >> 10) == 0x1d)
684 ret = 1;
685
686 return ret;
687}
688
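/* Periodic link timer.  The state machine walks through arbwait
 * (waiting for auto-negotiation to complete), lupwait (waiting for the
 * link to come up after negotiation), ltrywait (stepping through forced
 * modes) and finally asleep once a usable link has been found.
 */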
689static void happy_meal_timer(struct timer_list *t)
690{
691 struct happy_meal *hp = from_timer(hp, t, happy_timer);
692 void __iomem *tregs = hp->tcvregs;
693 int restart_timer = 0;
694
695 spin_lock_irq(&hp->happy_lock);
696
697 hp->timer_ticks++;
698 switch(hp->timer_state) {
699 case arbwait:
700
701
702
703 if (hp->timer_ticks >= 10) {
704
705 do_force_mode:
706 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
707 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
708 hp->dev->name);
709 hp->sw_bmcr = BMCR_SPEED100;
710 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
711
712 if (!is_lucent_phy(hp)) {
713
714
715
716
717 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
718 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
719 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
720 }
721 hp->timer_state = ltrywait;
722 hp->timer_ticks = 0;
723 restart_timer = 1;
724 } else {
725
726 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
727 if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
728 int ret;
729
730
731 ret = set_happy_link_modes(hp, tregs);
732 if (ret) {
733
734
735
736
737
738
739 goto do_force_mode;
740 }
741
742
743 hp->timer_state = lupwait;
744 restart_timer = 1;
745 } else {
746 restart_timer = 1;
747 }
748 }
749 break;
750
751 case lupwait:
752
753
754
755
756
757 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
758 if (hp->sw_bmsr & BMSR_LSTATUS) {
759
760
761
762 display_link_mode(hp, tregs);
763 hp->timer_state = asleep;
764 restart_timer = 0;
765 } else {
766 if (hp->timer_ticks >= 10) {
767 printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
768 "not completely up.\n", hp->dev->name);
769 hp->timer_ticks = 0;
770 restart_timer = 1;
771 } else {
772 restart_timer = 1;
773 }
774 }
775 break;
776
777 case ltrywait:
778
779
780
781
782
783 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
784 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
785 if (hp->timer_ticks == 1) {
786 if (!is_lucent_phy(hp)) {
787
788
789
790 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
791 happy_meal_tcvr_write(hp, tregs,
792 DP83840_CSCONFIG, hp->sw_csconfig);
793 }
794 restart_timer = 1;
795 break;
796 }
797 if (hp->timer_ticks == 2) {
798 if (!is_lucent_phy(hp)) {
799 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
800 happy_meal_tcvr_write(hp, tregs,
801 DP83840_CSCONFIG, hp->sw_csconfig);
802 }
803 restart_timer = 1;
804 break;
805 }
806 if (hp->sw_bmsr & BMSR_LSTATUS) {
807
808 display_forced_link_mode(hp, tregs);
809 set_happy_link_modes(hp, tregs);
810 hp->timer_state = asleep;
811 restart_timer = 0;
812 } else {
813 if (hp->timer_ticks >= 4) {
814 int ret;
815
816 ret = try_next_permutation(hp, tregs);
817 if (ret == -1) {
818
819
820
821
822
823 printk(KERN_NOTICE "%s: Link down, cable problem?\n",
824 hp->dev->name);
825
826 ret = happy_meal_init(hp);
827 if (ret) {
828
829 printk(KERN_ERR "%s: Error, cannot re-init the "
830 "Happy Meal.\n", hp->dev->name);
831 }
832 goto out;
833 }
834 if (!is_lucent_phy(hp)) {
835 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
836 DP83840_CSCONFIG);
837 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
838 happy_meal_tcvr_write(hp, tregs,
839 DP83840_CSCONFIG, hp->sw_csconfig);
840 }
841 hp->timer_ticks = 0;
842 restart_timer = 1;
843 } else {
844 restart_timer = 1;
845 }
846 }
847 break;
848
849 case asleep:
850 default:
851
852 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
853 hp->dev->name);
854 restart_timer = 0;
855 hp->timer_ticks = 0;
856 hp->timer_state = asleep;
857 break;
858 }
859
860 if (restart_timer) {
861 hp->happy_timer.expires = jiffies + ((12 * HZ)/10);
862 add_timer(&hp->happy_timer);
863 }
864
865out:
866 spin_unlock_irq(&hp->happy_lock);
867}
868
869#define TX_RESET_TRIES 32
870#define RX_RESET_TRIES 32
871
872
873static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
874{
875 int tries = TX_RESET_TRIES;
876
877 HMD(("happy_meal_tx_reset: reset, "));
878
879
880 hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
881 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
882 udelay(20);
883
884
885 if (!tries)
886 printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
887
888
889 HMD(("done\n"));
890}
891
892
893static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
894{
895 int tries = RX_RESET_TRIES;
896
897 HMD(("happy_meal_rx_reset: reset, "));
898
899
900 hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
901 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
902 udelay(20);
903
904
905 if (!tries)
906 printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
907
908
909 HMD(("done\n"));
910}
911
912#define STOP_TRIES 16
913
914
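/* Full software reset of the Happy Meal; spins until the chip reports
 * the reset as complete.
 */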
915static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
916{
917 int tries = STOP_TRIES;
918
919 HMD(("happy_meal_stop: reset, "));
920
921
922 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
923 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
924 udelay(20);
925
926
927 if (!tries)
928 printk(KERN_ERR "happy meal: Fry guys.");
929
930
931 HMD(("done\n"));
932}
933
934
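/* Fold the chip's hardware error counters into the netdev statistics
 * and clear them.
 */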
935static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
936{
937 struct net_device_stats *stats = &hp->dev->stats;
938
939 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
940 hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
941
942 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
943 hme_write32(hp, bregs + BMAC_UNALECTR, 0);
944
945 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
946 hme_write32(hp, bregs + BMAC_GLECTR, 0);
947
948 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
949
950 stats->collisions +=
951 (hme_read32(hp, bregs + BMAC_EXCTR) +
952 hme_read32(hp, bregs + BMAC_LTCTR));
953 hme_write32(hp, bregs + BMAC_EXCTR, 0);
954 hme_write32(hp, bregs + BMAC_LTCTR, 0);
955}
956
957
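/* Stop MIF polling: mask the transceiver interrupts and clear the
 * poll-enable bit, then give the chip a moment to settle.
 */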
958static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
959{
960 ASD(("happy_meal_poll_stop: "));
961
962
963 if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
964 (HFLAG_POLLENABLE | HFLAG_POLL)) {
965 HMD(("not polling, return\n"));
966 return;
967 }
968
969
970 ASD(("were polling, mif ints off, "));
971 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
972
973
974 ASD(("polling off, "));
975 hme_write32(hp, tregs + TCVR_CFG,
976 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
977
978
979 hp->happy_flags &= ~(HFLAG_POLL);
980
981
982 udelay(200);
983 ASD(("done\n"));
984}
985
986
987
988
989#define TCVR_RESET_TRIES 16
990#define TCVR_UNISOLATE_TRIES 32
991
992
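/* Reset and (re)select the transceiver: isolate whichever PHY is not
 * in use, issue a BMCR reset and wait for it to self-clear, then
 * un-isolate the selected PHY.  Non-Lucent PHYs also get the DP83840
 * filter-bypass bit set.
 */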
993static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
994{
995 u32 tconfig;
996 int result, tries = TCVR_RESET_TRIES;
997
998 tconfig = hme_read32(hp, tregs + TCVR_CFG);
	ASD(("happy_meal_tcvr_reset: tcfg<%08x> ", tconfig));
1000 if (hp->tcvr_type == external) {
1001 ASD(("external<"));
1002 hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
1003 hp->tcvr_type = internal;
1004 hp->paddr = TCV_PADDR_ITX;
1005 ASD(("ISOLATE,"));
1006 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1007 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1008 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1009 if (result == TCVR_FAILURE) {
1010 ASD(("phyread_fail>\n"));
1011 return -1;
1012 }
1013 ASD(("phyread_ok,PSELECT>"));
1014 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1015 hp->tcvr_type = external;
1016 hp->paddr = TCV_PADDR_ETX;
1017 } else {
1018 if (tconfig & TCV_CFG_MDIO1) {
1019 ASD(("internal<PSELECT,"));
1020 hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
1021 ASD(("ISOLATE,"));
1022 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1023 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1024 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1025 if (result == TCVR_FAILURE) {
1026 ASD(("phyread_fail>\n"));
1027 return -1;
1028 }
1029 ASD(("phyread_ok,~PSELECT>"));
1030 hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
1031 hp->tcvr_type = internal;
1032 hp->paddr = TCV_PADDR_ITX;
1033 }
1034 }
1035
1036 ASD(("BMCR_RESET "));
1037 happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
1038
1039 while (--tries) {
1040 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1041 if (result == TCVR_FAILURE)
1042 return -1;
1043 hp->sw_bmcr = result;
1044 if (!(result & BMCR_RESET))
1045 break;
1046 udelay(20);
1047 }
1048 if (!tries) {
1049 ASD(("BMCR RESET FAILED!\n"));
1050 return -1;
1051 }
1052 ASD(("RESET_OK\n"));
1053
1054
1055 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1056 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1057 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1058 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1059
1060 ASD(("UNISOLATE"));
1061 hp->sw_bmcr &= ~(BMCR_ISOLATE);
1062 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1063
1064 tries = TCVR_UNISOLATE_TRIES;
1065 while (--tries) {
1066 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1067 if (result == TCVR_FAILURE)
1068 return -1;
1069 if (!(result & BMCR_ISOLATE))
1070 break;
1071 udelay(20);
1072 }
1073 if (!tries) {
1074 ASD((" FAILED!\n"));
1075 return -1;
1076 }
1077 ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
1078 if (!is_lucent_phy(hp)) {
1079 result = happy_meal_tcvr_read(hp, tregs,
1080 DP83840_CSCONFIG);
1081 happy_meal_tcvr_write(hp, tregs,
1082 DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
1083 }
1084 return 0;
1085}
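/* Decide whether the internal or the external transceiver is attached
 * by looking at the MDIO0/MDIO1 sense bits, and program the MIF
 * PSELECT bit (plus hp->paddr and hp->tcvr_type) to match.
 */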
1091static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
1092{
1093 unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
1094
1095 ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
1096 if (hp->happy_flags & HFLAG_POLL) {
1097
1098 ASD(("<polling> "));
1099 if (hp->tcvr_type == internal) {
1100 if (tconfig & TCV_CFG_MDIO1) {
1101 ASD(("<internal> <poll stop> "));
1102 happy_meal_poll_stop(hp, tregs);
1103 hp->paddr = TCV_PADDR_ETX;
1104 hp->tcvr_type = external;
1105 ASD(("<external>\n"));
1106 tconfig &= ~(TCV_CFG_PENABLE);
1107 tconfig |= TCV_CFG_PSELECT;
1108 hme_write32(hp, tregs + TCVR_CFG, tconfig);
1109 }
1110 } else {
1111 if (hp->tcvr_type == external) {
1112 ASD(("<external> "));
1113 if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
1114 ASD(("<poll stop> "));
1115 happy_meal_poll_stop(hp, tregs);
1116 hp->paddr = TCV_PADDR_ITX;
1117 hp->tcvr_type = internal;
1118 ASD(("<internal>\n"));
1119 hme_write32(hp, tregs + TCVR_CFG,
1120 hme_read32(hp, tregs + TCVR_CFG) &
1121 ~(TCV_CFG_PSELECT));
1122 }
1123 ASD(("\n"));
1124 } else {
1125 ASD(("<none>\n"));
1126 }
1127 }
1128 } else {
1129 u32 reread = hme_read32(hp, tregs + TCVR_CFG);
1130
1131
1132 ASD(("<not polling> "));
1133 if (reread & TCV_CFG_MDIO1) {
1134 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1135 hp->paddr = TCV_PADDR_ETX;
1136 hp->tcvr_type = external;
1137 ASD(("<external>\n"));
1138 } else {
1139 if (reread & TCV_CFG_MDIO0) {
1140 hme_write32(hp, tregs + TCVR_CFG,
1141 tconfig & ~(TCV_CFG_PSELECT));
1142 hp->paddr = TCV_PADDR_ITX;
1143 hp->tcvr_type = internal;
1144 ASD(("<internal>\n"));
1145 } else {
1146 printk(KERN_ERR "happy meal: Transceiver and a coke please.");
1147 hp->tcvr_type = none;
1148 ASD(("<none>\n"));
1149 }
1150 }
1151 }
1152}
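/* Free every receive and transmit buffer still held by the driver,
 * unmapping the DMA addresses recorded in the descriptors.  Transmit
 * entries may span several descriptors when the skb was fragmented.
 */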
1198static void happy_meal_clean_rings(struct happy_meal *hp)
1199{
1200 int i;
1201
1202 for (i = 0; i < RX_RING_SIZE; i++) {
1203 if (hp->rx_skbs[i] != NULL) {
1204 struct sk_buff *skb = hp->rx_skbs[i];
1205 struct happy_meal_rxd *rxd;
1206 u32 dma_addr;
1207
1208 rxd = &hp->happy_block->happy_meal_rxd[i];
1209 dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
1210 dma_unmap_single(hp->dma_dev, dma_addr,
1211 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1212 dev_kfree_skb_any(skb);
1213 hp->rx_skbs[i] = NULL;
1214 }
1215 }
1216
1217 for (i = 0; i < TX_RING_SIZE; i++) {
1218 if (hp->tx_skbs[i] != NULL) {
1219 struct sk_buff *skb = hp->tx_skbs[i];
1220 struct happy_meal_txd *txd;
1221 u32 dma_addr;
1222 int frag;
1223
1224 hp->tx_skbs[i] = NULL;
1225
1226 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1227 txd = &hp->happy_block->happy_meal_txd[i];
1228 dma_addr = hme_read_desc32(hp, &txd->tx_addr);
1229 if (!frag)
1230 dma_unmap_single(hp->dma_dev, dma_addr,
1231 (hme_read_desc32(hp, &txd->tx_flags)
1232 & TXFLAG_SIZE),
1233 DMA_TO_DEVICE);
1234 else
1235 dma_unmap_page(hp->dma_dev, dma_addr,
1236 (hme_read_desc32(hp, &txd->tx_flags)
1237 & TXFLAG_SIZE),
1238 DMA_TO_DEVICE);
1239
1240 if (frag != skb_shinfo(skb)->nr_frags)
1241 i++;
1242 }
1243
1244 dev_kfree_skb_any(skb);
1245 }
1246 }
1247}
1248
1249
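/* Allocate and map the receive buffers, handing each descriptor to the
 * chip with RXFLAG_OWN set, and zero out the transmit ring.
 */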
1250static void happy_meal_init_rings(struct happy_meal *hp)
1251{
1252 struct hmeal_init_block *hb = hp->happy_block;
1253 int i;
1254
1255 HMD(("happy_meal_init_rings: counters to zero, "));
1256 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1257
1258
1259 HMD(("clean, "));
1260 happy_meal_clean_rings(hp);
1261
1262
1263 HMD(("init rxring, "));
1264 for (i = 0; i < RX_RING_SIZE; i++) {
1265 struct sk_buff *skb;
1266 u32 mapping;
1267
1268 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1269 if (!skb) {
1270 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1271 continue;
1272 }
1273 hp->rx_skbs[i] = skb;
1274
1275
1276 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1277 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1278 DMA_FROM_DEVICE);
1279 if (dma_mapping_error(hp->dma_dev, mapping)) {
1280 dev_kfree_skb_any(skb);
1281 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1282 continue;
1283 }
1284 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1285 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1286 mapping);
1287 skb_reserve(skb, RX_OFFSET);
1288 }
1289
1290 HMD(("init txring, "));
1291 for (i = 0; i < TX_RING_SIZE; i++)
1292 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1293
1294 HMD(("done\n"));
1295}
1296
1297
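/* Start link bring-up: advertise whatever the PHY's BMSR says it can
 * do and restart auto-negotiation, or force speed/duplex when the
 * caller (or the PHY) rules auto-negotiation out.  The link timer is
 * armed to monitor the outcome.
 */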
1298static void
1299happy_meal_begin_auto_negotiation(struct happy_meal *hp,
1300 void __iomem *tregs,
1301 const struct ethtool_link_ksettings *ep)
1302{
1303 int timeout;
1304
1305
1306 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1307 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1308 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1309 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1310
1311
1312
1313 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1314 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1315
1316 if (hp->sw_bmsr & BMSR_10HALF)
1317 hp->sw_advertise |= (ADVERTISE_10HALF);
1318 else
1319 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1320
1321 if (hp->sw_bmsr & BMSR_10FULL)
1322 hp->sw_advertise |= (ADVERTISE_10FULL);
1323 else
1324 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1325 if (hp->sw_bmsr & BMSR_100HALF)
1326 hp->sw_advertise |= (ADVERTISE_100HALF);
1327 else
1328 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1329 if (hp->sw_bmsr & BMSR_100FULL)
1330 hp->sw_advertise |= (ADVERTISE_100FULL);
1331 else
1332 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1333 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1334
1335
1336
1337
1338
1339
1340
1341#ifdef AUTO_SWITCH_DEBUG
1342 ASD(("%s: Advertising [ ", hp->dev->name));
1343 if (hp->sw_advertise & ADVERTISE_10HALF)
1344 ASD(("10H "));
1345 if (hp->sw_advertise & ADVERTISE_10FULL)
1346 ASD(("10F "));
1347 if (hp->sw_advertise & ADVERTISE_100HALF)
1348 ASD(("100H "));
1349 if (hp->sw_advertise & ADVERTISE_100FULL)
1350 ASD(("100F "));
1351#endif
1352
1353
1354 hp->sw_bmcr |= BMCR_ANENABLE;
1355 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1356
1357
1358 hp->sw_bmcr |= BMCR_ANRESTART;
1359 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1360
1361
1362
1363 timeout = 64;
1364 while (--timeout) {
1365 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1366 if (!(hp->sw_bmcr & BMCR_ANRESTART))
1367 break;
1368 udelay(10);
1369 }
1370 if (!timeout) {
1371 printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
1372 "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
1373 printk(KERN_NOTICE "%s: Performing force link detection.\n",
1374 hp->dev->name);
1375 goto force_link;
1376 } else {
1377 hp->timer_state = arbwait;
1378 }
1379 } else {
1380force_link:
1381
1382
1383
1384
1385
1386
1387
1388
1389 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1390 hp->sw_bmcr = BMCR_SPEED100;
1391 } else {
1392 if (ep->base.speed == SPEED_100)
1393 hp->sw_bmcr = BMCR_SPEED100;
1394 else
1395 hp->sw_bmcr = 0;
1396 if (ep->base.duplex == DUPLEX_FULL)
1397 hp->sw_bmcr |= BMCR_FULLDPLX;
1398 }
1399 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1400
1401 if (!is_lucent_phy(hp)) {
1402
1403
1404
1405
1406 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
1407 DP83840_CSCONFIG);
1408 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
1409 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
1410 hp->sw_csconfig);
1411 }
1412 hp->timer_state = ltrywait;
1413 }
1414
1415 hp->timer_ticks = 0;
1416 hp->happy_timer.expires = jiffies + (12 * HZ)/10;
1417 add_timer(&hp->happy_timer);
1418}
1419
1420
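/* Called with hp->happy_lock held.
 *
 * Full chip (re)initialization: stop the chip, rebuild the rings,
 * reset the transceiver, program the MAC address, multicast hash table
 * and DMA burst sizes, enable the transmitter and receiver, then kick
 * off auto-negotiation.
 */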
1421static int happy_meal_init(struct happy_meal *hp)
1422{
1423 void __iomem *gregs = hp->gregs;
1424 void __iomem *etxregs = hp->etxregs;
1425 void __iomem *erxregs = hp->erxregs;
1426 void __iomem *bregs = hp->bigmacregs;
1427 void __iomem *tregs = hp->tcvregs;
1428 u32 regtmp, rxcfg;
1429 unsigned char *e = &hp->dev->dev_addr[0];
1430
1431
1432 del_timer(&hp->happy_timer);
1433
1434 HMD(("happy_meal_init: happy_flags[%08x] ",
1435 hp->happy_flags));
1436 if (!(hp->happy_flags & HFLAG_INIT)) {
1437 HMD(("set HFLAG_INIT, "));
1438 hp->happy_flags |= HFLAG_INIT;
1439 happy_meal_get_counters(hp, bregs);
1440 }
1441
1442
1443 HMD(("to happy_meal_poll_stop\n"));
1444 happy_meal_poll_stop(hp, tregs);
1445
1446
1447 HMD(("happy_meal_init: to happy_meal_stop\n"));
1448 happy_meal_stop(hp, gregs);
1449
1450
1451 HMD(("happy_meal_init: to happy_meal_init_rings\n"));
1452 happy_meal_init_rings(hp);
1453
1454
1455 HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
1456 hme_read32(hp, tregs + TCVR_IMASK)));
1457 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1458
1459
1460 if (hp->happy_flags & HFLAG_FENABLE) {
1461 HMD(("use frame old[%08x], ",
1462 hme_read32(hp, tregs + TCVR_CFG)));
1463 hme_write32(hp, tregs + TCVR_CFG,
1464 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1465 } else {
1466 HMD(("use bitbang old[%08x], ",
1467 hme_read32(hp, tregs + TCVR_CFG)));
1468 hme_write32(hp, tregs + TCVR_CFG,
1469 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1470 }
1471
1472
1473 HMD(("to happy_meal_transceiver_check\n"));
1474 happy_meal_transceiver_check(hp, tregs);
1475
1476
1477 HMD(("happy_meal_init: "));
1478 switch(hp->tcvr_type) {
1479 case none:
1480
1481 HMD(("AAIEEE no transceiver type, EAGAIN"));
1482 return -EAGAIN;
1483
1484 case internal:
1485
1486 HMD(("internal, using MII, "));
1487 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1488 break;
1489
1490 case external:
1491
1492 HMD(("external, disable MII, "));
1493 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1494 break;
1495 }
1496
1497 if (happy_meal_tcvr_reset(hp, tregs))
1498 return -EAGAIN;
1499
1500
1501 HMD(("tx/rx reset, "));
1502 happy_meal_tx_reset(hp, bregs);
1503 happy_meal_rx_reset(hp, bregs);
1504
1505
1506 HMD(("jsize/ipg1/ipg2, "));
1507 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1508 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1509 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1510
1511
1512 HMD(("rseed/macaddr, "));
1513
1514
1515 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1516
1517 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1518 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1519 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
1520
1521 HMD(("htable, "));
1522 if ((hp->dev->flags & IFF_ALLMULTI) ||
1523 (netdev_mc_count(hp->dev) > 64)) {
1524 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1525 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1526 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1527 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1528 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1529 u16 hash_table[4];
1530 struct netdev_hw_addr *ha;
1531 u32 crc;
1532
1533 memset(hash_table, 0, sizeof(hash_table));
1534 netdev_for_each_mc_addr(ha, hp->dev) {
1535 crc = ether_crc_le(6, ha->addr);
1536 crc >>= 26;
1537 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1538 }
1539 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1540 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1541 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1542 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1543 } else {
1544 hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1545 hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1546 hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1547 hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1548 }
1549
1550
1551 HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
1552 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1553 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
1554 hme_write32(hp, erxregs + ERX_RING,
1555 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1556 hme_write32(hp, etxregs + ETX_RING,
1557 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1558
1559
1560
1561
1562
1563
1564 if (hme_read32(hp, erxregs + ERX_RING) !=
1565 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1566 hme_write32(hp, erxregs + ERX_RING,
1567 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1568 | 0x4);
1569
1570
1571 HMD(("happy_meal_init: old[%08x] bursts<",
1572 hme_read32(hp, gregs + GREG_CFG)));
1573
1574#ifndef CONFIG_SPARC
1575
1576 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1577#else
1578 if ((hp->happy_bursts & DMA_BURST64) &&
1579 ((hp->happy_flags & HFLAG_PCI) != 0
1580#ifdef CONFIG_SBUS
1581 || sbus_can_burst64()
1582#endif
1583 || 0)) {
1584 u32 gcfg = GREG_CFG_BURST64;
1585
1586
1587
1588
1589
1590#ifdef CONFIG_SBUS
1591 if ((hp->happy_flags & HFLAG_PCI) == 0) {
1592 struct platform_device *op = hp->happy_dev;
1593 if (sbus_can_dma_64bit()) {
1594 sbus_set_sbus64(&op->dev,
1595 hp->happy_bursts);
1596 gcfg |= GREG_CFG_64BIT;
1597 }
1598 }
1599#endif
1600
1601 HMD(("64>"));
1602 hme_write32(hp, gregs + GREG_CFG, gcfg);
1603 } else if (hp->happy_bursts & DMA_BURST32) {
1604 HMD(("32>"));
1605 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1606 } else if (hp->happy_bursts & DMA_BURST16) {
1607 HMD(("16>"));
1608 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1609 } else {
1610 HMD(("XXX>"));
1611 hme_write32(hp, gregs + GREG_CFG, 0);
1612 }
1613#endif
1614
1615
1616 HMD((", enable global interrupts, "));
1617 hme_write32(hp, gregs + GREG_IMASK,
1618 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1619 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1620
1621
1622 HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
1623 hme_read32(hp, etxregs + ETX_RSIZE)));
1624 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1625
1626
1627 HMD(("tx dma enable old[%08x], ",
1628 hme_read32(hp, etxregs + ETX_CFG)));
1629 hme_write32(hp, etxregs + ETX_CFG,
1630 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
1631
1632
1633
1634
1635
1636
1637 HMD(("erx regs bug old[%08x]\n",
1638 hme_read32(hp, erxregs + ERX_CFG)));
1639 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1640 regtmp = hme_read32(hp, erxregs + ERX_CFG);
1641 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1642 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1643 printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
1644 printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
1645 ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1646
1647 }
1648
1649
1650 HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
1651 hme_read32(hp, bregs + BMAC_RXCFG)));
1652 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1653 if (hp->dev->flags & IFF_PROMISC)
1654 rxcfg |= BIGMAC_RXCFG_PMISC;
1655 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1656
1657
1658 udelay(10);
1659
1660
1661 HMD(("BIGMAC init, "));
1662 regtmp = 0;
1663 if (hp->happy_flags & HFLAG_FULL)
1664 regtmp |= BIGMAC_TXCFG_FULLDPLX;
1665
1666
1667
1668
1669 hme_write32(hp, bregs + BMAC_TXCFG, regtmp );
1670
1671
1672 hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1673
1674
1675 regtmp = BIGMAC_XCFG_ODENABLE;
1676
1677
1678 if (hp->happy_flags & HFLAG_LANCE)
1679 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1680
1681
1682 if (hp->tcvr_type == external)
1683 regtmp |= BIGMAC_XCFG_MIIDISAB;
1684
1685 HMD(("XIF config old[%08x], ",
1686 hme_read32(hp, bregs + BMAC_XIFCFG)));
1687 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1688
1689
1690 HMD(("tx old[%08x] and rx [%08x] ON!\n",
1691 hme_read32(hp, bregs + BMAC_TXCFG),
1692 hme_read32(hp, bregs + BMAC_RXCFG)));
1693
1694
1695 hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
1696 hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
1697
1698 hme_write32(hp, bregs + BMAC_TXCFG,
1699 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1700 hme_write32(hp, bregs + BMAC_RXCFG,
1701 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1702
1703
1704 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1705
1706
1707 return 0;
1708}
1709
1710
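/* Probe-time helper: bring the transceiver up just far enough to
 * program the auto-negotiation advertisement register, without
 * starting the chip proper.
 */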
1711static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1712{
1713 void __iomem *tregs = hp->tcvregs;
1714 void __iomem *bregs = hp->bigmacregs;
1715 void __iomem *gregs = hp->gregs;
1716
1717 happy_meal_stop(hp, gregs);
1718 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1719 if (hp->happy_flags & HFLAG_FENABLE)
1720 hme_write32(hp, tregs + TCVR_CFG,
1721 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1722 else
1723 hme_write32(hp, tregs + TCVR_CFG,
1724 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1725 happy_meal_transceiver_check(hp, tregs);
1726 switch(hp->tcvr_type) {
1727 case none:
1728 return;
1729 case internal:
1730 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1731 break;
1732 case external:
1733 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1734 break;
1735 }
1736 if (happy_meal_tcvr_reset(hp, tregs))
1737 return;
1738
1739
1740 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1741 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1742
1743
1744 if (hp->sw_bmsr & BMSR_10HALF)
1745 hp->sw_advertise |= (ADVERTISE_10HALF);
1746 else
1747 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1748
1749 if (hp->sw_bmsr & BMSR_10FULL)
1750 hp->sw_advertise |= (ADVERTISE_10FULL);
1751 else
1752 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1753 if (hp->sw_bmsr & BMSR_100HALF)
1754 hp->sw_advertise |= (ADVERTISE_100HALF);
1755 else
1756 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1757 if (hp->sw_bmsr & BMSR_100FULL)
1758 hp->sw_advertise |= (ADVERTISE_100FULL);
1759 else
1760 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1761
1762
1763 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1764}
1765
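/* Decode the error bits of the global status register.  Fatal
 * conditions re-initialize the chip; the function returns 1 in that
 * case so the interrupt handler can bail out early.
 */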
1771static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1772{
1773 int reset = 0;
1774
1775
1776 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1777 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1778 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1779 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1780 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1781 GREG_STAT_SLVPERR))
1782 printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
1783 hp->dev->name, status);
1784
1785 if (status & GREG_STAT_RFIFOVF) {
1786
1787
1788 printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
1789 }
1790
1791 if (status & GREG_STAT_STSTERR) {
1792
1793 printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
1794 reset = 1;
1795 }
1796
1797 if (status & GREG_STAT_TFIFO_UND) {
1798
1799 printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
1800 hp->dev->name);
1801 reset = 1;
1802 }
1803
1804 if (status & GREG_STAT_MAXPKTERR) {
1805
1806
1807
1808 printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
1809 reset = 1;
1810 }
1811
1812 if (status & GREG_STAT_NORXD) {
1813
1814
1815
1816
1817
1818 printk(KERN_INFO "%s: Happy Meal out of receive "
1819 "descriptors, packet dropped.\n",
1820 hp->dev->name);
1821 }
1822
1823 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1824
1825 printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
1826 if (status & GREG_STAT_RXERR)
1827 printk("GenericError ");
1828 if (status & GREG_STAT_RXPERR)
1829 printk("ParityError ");
1830 if (status & GREG_STAT_RXTERR)
1831 printk("RxTagBotch ");
1832 printk("]\n");
1833 reset = 1;
1834 }
1835
1836 if (status & GREG_STAT_EOPERR) {
1837
1838
1839
1840 printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
1841 hp->dev->name);
1842 reset = 1;
1843 }
1844
1845 if (status & GREG_STAT_MIFIRQ) {
1846
1847 printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
1848 }
1849
1850 if (status &
1851 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1852
1853 printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
1854 if (status & GREG_STAT_TXEACK)
1855 printk("GenericError ");
1856 if (status & GREG_STAT_TXLERR)
1857 printk("LateError ");
1858 if (status & GREG_STAT_TXPERR)
1859 printk("ParityError ");
1860 if (status & GREG_STAT_TXTERR)
1861 printk("TagBotch ");
1862 printk("]\n");
1863 reset = 1;
1864 }
1865
1866 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
1867
1868
1869
1870 printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
1871 hp->dev->name,
1872 (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1873 reset = 1;
1874 }
1875
1876 if (reset) {
1877 printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
1878 happy_meal_init(hp);
1879 return 1;
1880 }
1881 return 0;
1882}
1883
1884
1885static void happy_meal_mif_interrupt(struct happy_meal *hp)
1886{
1887 void __iomem *tregs = hp->tcvregs;
1888
1889 printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
1890 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1891 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
1892
1893
	if (hp->sw_lpa & LPA_100FULL) {
		printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.\n", hp->dev->name);
		hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
	} else if (hp->sw_lpa & LPA_100HALF) {
		printk(KERN_INFO "%s: Switching to 100Mbps at half duplex.\n", hp->dev->name);
		hp->sw_bmcr |= BMCR_SPEED100;
	} else if (hp->sw_lpa & LPA_10FULL) {
		printk(KERN_INFO "%s: Switching to 10Mbps at full duplex.\n", hp->dev->name);
		hp->sw_bmcr |= BMCR_FULLDPLX;
	} else {
		printk(KERN_INFO "%s: Using 10Mbps at half duplex.\n", hp->dev->name);
	}
1906 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1907
1908
1909 happy_meal_poll_stop(hp, tregs);
1910}
1911
1912#ifdef TXDEBUG
1913#define TXD(x) printk x
1914#else
1915#define TXD(x)
1916#endif
1917
1918
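/* Reclaim transmit descriptors the chip has finished with: unmap the
 * buffers, free the skbs, and wake the queue once enough ring entries
 * are available again.
 */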
1919static void happy_meal_tx(struct happy_meal *hp)
1920{
1921 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1922 struct happy_meal_txd *this;
1923 struct net_device *dev = hp->dev;
1924 int elem;
1925
1926 elem = hp->tx_old;
1927 TXD(("TX<"));
1928 while (elem != hp->tx_new) {
1929 struct sk_buff *skb;
1930 u32 flags, dma_addr, dma_len;
1931 int frag;
1932
1933 TXD(("[%d]", elem));
1934 this = &txbase[elem];
1935 flags = hme_read_desc32(hp, &this->tx_flags);
1936 if (flags & TXFLAG_OWN)
1937 break;
1938 skb = hp->tx_skbs[elem];
1939 if (skb_shinfo(skb)->nr_frags) {
1940 int last;
1941
1942 last = elem + skb_shinfo(skb)->nr_frags;
1943 last &= (TX_RING_SIZE - 1);
1944 flags = hme_read_desc32(hp, &txbase[last].tx_flags);
1945 if (flags & TXFLAG_OWN)
1946 break;
1947 }
1948 hp->tx_skbs[elem] = NULL;
1949 dev->stats.tx_bytes += skb->len;
1950
1951 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1952 dma_addr = hme_read_desc32(hp, &this->tx_addr);
1953 dma_len = hme_read_desc32(hp, &this->tx_flags);
1954
1955 dma_len &= TXFLAG_SIZE;
1956 if (!frag)
1957 dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1958 else
1959 dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1960
1961 elem = NEXT_TX(elem);
1962 this = &txbase[elem];
1963 }
1964
1965 dev_kfree_skb_irq(skb);
1966 dev->stats.tx_packets++;
1967 }
1968 hp->tx_old = elem;
1969 TXD((">"));
1970
1971 if (netif_queue_stopped(dev) &&
1972 TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
1973 netif_wake_queue(dev);
1974}
1975
1976#ifdef RXDEBUG
1977#define RXD(x) printk x
1978#else
1979#define RXD(x)
1980#endif
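/* Receive path.  Frames larger than RX_COPY_THRESHOLD get a fresh ring
 * buffer allocated and the full-sized original is passed up the stack;
 * smaller frames are copied into a tight skb so the big buffer can be
 * handed straight back to the chip.  On any failure the frame is
 * dropped and the existing buffer is recycled.
 */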
1991static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1992{
1993 struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
1994 struct happy_meal_rxd *this;
1995 int elem = hp->rx_new, drops = 0;
1996 u32 flags;
1997
1998 RXD(("RX<"));
1999 this = &rxbase[elem];
2000 while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
2001 struct sk_buff *skb;
2002 int len = flags >> 16;
2003 u16 csum = flags & RXFLAG_CSUM;
2004 u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
2005
2006 RXD(("[%d ", elem));
2007
2008
2009 if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
2010 RXD(("ERR(%08x)]", flags));
2011 dev->stats.rx_errors++;
2012 if (len < ETH_ZLEN)
2013 dev->stats.rx_length_errors++;
2014 if (len & (RXFLAG_OVERFLOW >> 16)) {
2015 dev->stats.rx_over_errors++;
2016 dev->stats.rx_fifo_errors++;
2017 }
2018
2019
2020 drop_it:
2021 dev->stats.rx_dropped++;
2022 hme_write_rxd(hp, this,
2023 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2024 dma_addr);
2025 goto next;
2026 }
2027 skb = hp->rx_skbs[elem];
2028 if (len > RX_COPY_THRESHOLD) {
2029 struct sk_buff *new_skb;
2030 u32 mapping;
2031
2032
2033 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
2034 if (new_skb == NULL) {
2035 drops++;
2036 goto drop_it;
2037 }
2038 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2039 mapping = dma_map_single(hp->dma_dev, new_skb->data,
2040 RX_BUF_ALLOC_SIZE,
2041 DMA_FROM_DEVICE);
2042 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2043 dev_kfree_skb_any(new_skb);
2044 drops++;
2045 goto drop_it;
2046 }
2047
2048 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2049 hp->rx_skbs[elem] = new_skb;
2050 hme_write_rxd(hp, this,
2051 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2052 mapping);
2053 skb_reserve(new_skb, RX_OFFSET);
2054
2055
2056 skb_trim(skb, len);
2057 } else {
2058 struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
2059
2060 if (copy_skb == NULL) {
2061 drops++;
2062 goto drop_it;
2063 }
2064
2065 skb_reserve(copy_skb, 2);
2066 skb_put(copy_skb, len);
2067 dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2068 skb_copy_from_linear_data(skb, copy_skb->data, len);
2069 dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2070
2071 hme_write_rxd(hp, this,
2072 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2073 dma_addr);
2074
2075 skb = copy_skb;
2076 }
2077
2078
2079 skb->csum = csum_unfold(~(__force __sum16)htons(csum));
2080 skb->ip_summed = CHECKSUM_COMPLETE;
2081
2082 RXD(("len=%d csum=%4x]", len, csum));
2083 skb->protocol = eth_type_trans(skb, dev);
2084 netif_rx(skb);
2085
2086 dev->stats.rx_packets++;
2087 dev->stats.rx_bytes += len;
2088 next:
2089 elem = NEXT_RX(elem);
2090 this = &rxbase[elem];
2091 }
2092 hp->rx_new = elem;
2093 if (drops)
2094 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
2095 RXD((">"));
2096}
2097
2098static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
2099{
2100 struct net_device *dev = dev_id;
2101 struct happy_meal *hp = netdev_priv(dev);
2102 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2103
2104 HMD(("happy_meal_interrupt: status=%08x ", happy_status));
2105
2106 spin_lock(&hp->happy_lock);
2107
2108 if (happy_status & GREG_STAT_ERRORS) {
2109 HMD(("ERRORS "));
2110 if (happy_meal_is_not_so_happy(hp, happy_status))
2111 goto out;
2112 }
2113
2114 if (happy_status & GREG_STAT_MIFIRQ) {
2115 HMD(("MIFIRQ "));
2116 happy_meal_mif_interrupt(hp);
2117 }
2118
2119 if (happy_status & GREG_STAT_TXALL) {
2120 HMD(("TXALL "));
2121 happy_meal_tx(hp);
2122 }
2123
2124 if (happy_status & GREG_STAT_RXTOHOST) {
2125 HMD(("RXTOHOST "));
2126 happy_meal_rx(hp, dev);
2127 }
2128
2129 HMD(("done\n"));
2130out:
2131 spin_unlock(&hp->happy_lock);
2132
2133 return IRQ_HANDLED;
2134}
2135
2136#ifdef CONFIG_SBUS
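/* The four HME controllers on a Quattro SBus card share one interrupt
 * line, so poll each unit and service whichever ones have status bits
 * set.
 */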
2137static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
2138{
2139 struct quattro *qp = (struct quattro *) cookie;
2140 int i;
2141
2142 for (i = 0; i < 4; i++) {
2143 struct net_device *dev = qp->happy_meals[i];
2144 struct happy_meal *hp = netdev_priv(dev);
2145 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2146
2147 HMD(("quattro_interrupt: status=%08x ", happy_status));
2148
2149 if (!(happy_status & (GREG_STAT_ERRORS |
2150 GREG_STAT_MIFIRQ |
2151 GREG_STAT_TXALL |
2152 GREG_STAT_RXTOHOST)))
2153 continue;
2154
2155 spin_lock(&hp->happy_lock);
2156
2157 if (happy_status & GREG_STAT_ERRORS) {
2158 HMD(("ERRORS "));
2159 if (happy_meal_is_not_so_happy(hp, happy_status))
2160 goto next;
2161 }
2162
2163 if (happy_status & GREG_STAT_MIFIRQ) {
2164 HMD(("MIFIRQ "));
2165 happy_meal_mif_interrupt(hp);
2166 }
2167
2168 if (happy_status & GREG_STAT_TXALL) {
2169 HMD(("TXALL "));
2170 happy_meal_tx(hp);
2171 }
2172
2173 if (happy_status & GREG_STAT_RXTOHOST) {
2174 HMD(("RXTOHOST "));
2175 happy_meal_rx(hp, dev);
2176 }
2177
2178 next:
2179 spin_unlock(&hp->happy_lock);
2180 }
2181 HMD(("done\n"));
2182
2183 return IRQ_HANDLED;
2184}
2185#endif
2186
2187static int happy_meal_open(struct net_device *dev)
2188{
2189 struct happy_meal *hp = netdev_priv(dev);
2190 int res;
2191
2192 HMD(("happy_meal_open: "));
2193
2194
2195
2196
2197 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2198 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2199 dev->name, dev);
2200 if (res) {
2201 HMD(("EAGAIN\n"));
2202 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
2203 hp->irq);
2204
2205 return -EAGAIN;
2206 }
2207 }
2208
2209 HMD(("to happy_meal_init\n"));
2210
2211 spin_lock_irq(&hp->happy_lock);
2212 res = happy_meal_init(hp);
2213 spin_unlock_irq(&hp->happy_lock);
2214
2215 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2216 free_irq(hp->irq, dev);
2217 return res;
2218}
2219
2220static int happy_meal_close(struct net_device *dev)
2221{
2222 struct happy_meal *hp = netdev_priv(dev);
2223
2224 spin_lock_irq(&hp->happy_lock);
2225 happy_meal_stop(hp, hp->gregs);
2226 happy_meal_clean_rings(hp);
2227
2228
2229 del_timer(&hp->happy_timer);
2230
2231 spin_unlock_irq(&hp->happy_lock);
2232
	/* On an SBUS Quattro port the IRQ is shared with the other
	 * three ports and is released at module unload, not here.
	 */
2237 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2238 free_irq(hp->irq, dev);
2239
2240 return 0;
2241}
2242
2243#ifdef SXDEBUG
2244#define SXD(x) printk x
2245#else
2246#define SXD(x)
2247#endif
2248
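/* Transmit watchdog: dump some diagnostic state, then fully
 * re-initialize the chip and wake the queue.
 */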
2249static void happy_meal_tx_timeout(struct net_device *dev)
2250{
2251 struct happy_meal *hp = netdev_priv(dev);
2252
2253 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2254 tx_dump_log();
2255 printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
2256 hme_read32(hp, hp->gregs + GREG_STAT),
2257 hme_read32(hp, hp->etxregs + ETX_CFG),
2258 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
2259
2260 spin_lock_irq(&hp->happy_lock);
2261 happy_meal_init(hp);
2262 spin_unlock_irq(&hp->happy_lock);
2263
2264 netif_wake_queue(dev);
2265}
2266
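/* Undo the DMA mappings of a partially built transmit chain when
 * mapping one of the later fragments fails: unmap the head buffer,
 * then every fragment descriptor written so far.
 */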
2267static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2268 u32 first_len, u32 first_entry, u32 entry)
2269{
2270 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2271
2272 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2273
2274 first_entry = NEXT_TX(first_entry);
2275 while (first_entry != entry) {
2276 struct happy_meal_txd *this = &txbase[first_entry];
2277 u32 addr, len;
2278
2279 addr = hme_read_desc32(hp, &this->tx_addr);
2280 len = hme_read_desc32(hp, &this->tx_flags);
2281 len &= TXFLAG_SIZE;
		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
		first_entry = NEXT_TX(first_entry);
	}
2284}
2285
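/* Queue a frame for transmission.  Linear skbs use a single SOP+EOP
 * descriptor; fragmented skbs get one descriptor per fragment, and the
 * head (SOP) descriptor is handed to the chip only after all fragment
 * descriptors are in place.
 */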
2286static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2287 struct net_device *dev)
2288{
2289 struct happy_meal *hp = netdev_priv(dev);
2290 int entry;
2291 u32 tx_flags;
2292
2293 tx_flags = TXFLAG_OWN;
2294 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2295 const u32 csum_start_off = skb_checksum_start_offset(skb);
2296 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
2297
2298 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
2299 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
2300 ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
2301 }
2302
2303 spin_lock_irq(&hp->happy_lock);
2304
2305 if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
2306 netif_stop_queue(dev);
2307 spin_unlock_irq(&hp->happy_lock);
2308 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
2309 dev->name);
2310 return NETDEV_TX_BUSY;
2311 }
2312
2313 entry = hp->tx_new;
	SXD(("SX<l[%d]e[%d]>", skb->len, entry));
2315 hp->tx_skbs[entry] = skb;
2316
2317 if (skb_shinfo(skb)->nr_frags == 0) {
2318 u32 mapping, len;
2319
2320 len = skb->len;
2321 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2322 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2323 goto out_dma_error;
2324 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2325 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2326 (tx_flags | (len & TXFLAG_SIZE)),
2327 mapping);
2328 entry = NEXT_TX(entry);
2329 } else {
2330 u32 first_len, first_mapping;
2331 int frag, first_entry = entry;
2332
		/* Write the first (SOP) descriptor last: the chip must not
		 * see a partially built chain with the OWN bit already set.
		 */
2336 first_len = skb_headlen(skb);
2337 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2338 DMA_TO_DEVICE);
2339 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2340 goto out_dma_error;
2341 entry = NEXT_TX(entry);
2342
2343 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
2344 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
2345 u32 len, mapping, this_txflags;
2346
2347 len = skb_frag_size(this_frag);
2348 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2349 0, len, DMA_TO_DEVICE);
2350 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2351 unmap_partial_tx_skb(hp, first_mapping, first_len,
2352 first_entry, entry);
2353 goto out_dma_error;
2354 }
2355 this_txflags = tx_flags;
2356 if (frag == skb_shinfo(skb)->nr_frags - 1)
2357 this_txflags |= TXFLAG_EOP;
2358 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2359 (this_txflags | (len & TXFLAG_SIZE)),
2360 mapping);
2361 entry = NEXT_TX(entry);
2362 }
2363 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
2364 (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
2365 first_mapping);
2366 }
2367
2368 hp->tx_new = entry;
2369
2370 if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
2371 netif_stop_queue(dev);
2372
	/* Poke the transmitter: tell the TX DMA engine there is new work. */
2374 hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
2375
2376 spin_unlock_irq(&hp->happy_lock);
2377
2378 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2379 return NETDEV_TX_OK;
2380
2381out_dma_error:
2382 hp->tx_skbs[hp->tx_new] = NULL;
2383 spin_unlock_irq(&hp->happy_lock);
2384
2385 dev_kfree_skb_any(skb);
2386 dev->stats.tx_dropped++;
2387 return NETDEV_TX_OK;
2388}
2389
2390static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2391{
2392 struct happy_meal *hp = netdev_priv(dev);
2393
2394 spin_lock_irq(&hp->happy_lock);
2395 happy_meal_get_counters(hp, hp->bigmacregs);
2396 spin_unlock_irq(&hp->happy_lock);
2397
2398 return &dev->stats;
2399}
2400
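/* Program the receive filter: open the 64-bit hash filter fully for
 * ALLMULTI or large multicast lists, enable promiscuous mode when
 * requested, otherwise hash each multicast address into the four
 * 16-bit hash table registers.
 */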
2401static void happy_meal_set_multicast(struct net_device *dev)
2402{
2403 struct happy_meal *hp = netdev_priv(dev);
2404 void __iomem *bregs = hp->bigmacregs;
2405 struct netdev_hw_addr *ha;
2406 u32 crc;
2407
2408 spin_lock_irq(&hp->happy_lock);
2409
2410 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2411 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2412 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2413 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2414 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2415 } else if (dev->flags & IFF_PROMISC) {
2416 hme_write32(hp, bregs + BMAC_RXCFG,
2417 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2418 } else {
2419 u16 hash_table[4];
2420
2421 memset(hash_table, 0, sizeof(hash_table));
2422 netdev_for_each_mc_addr(ha, dev) {
2423 crc = ether_crc_le(6, ha->addr);
2424 crc >>= 26;
2425 hash_table[crc >> 4] |= 1 << (crc & 0xf);
2426 }
2427 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2428 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2429 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2430 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2431 }
2432
2433 spin_unlock_irq(&hp->happy_lock);
2434}
2435
2436
2437static int hme_get_link_ksettings(struct net_device *dev,
2438 struct ethtool_link_ksettings *cmd)
2439{
2440 struct happy_meal *hp = netdev_priv(dev);
2441 u32 speed;
2442 u32 supported;
2443
2444 supported =
2445 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2446 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2447 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2448
	/* The port type and PHY address are fixed for this hardware. */
2450 cmd->base.port = PORT_TP;
2451 cmd->base.phy_address = 0;
2452
	/* Refresh the cached MII control and link partner ability registers. */
2454 spin_lock_irq(&hp->happy_lock);
2455 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2456 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2457 spin_unlock_irq(&hp->happy_lock);
2458
2459 if (hp->sw_bmcr & BMCR_ANENABLE) {
2460 cmd->base.autoneg = AUTONEG_ENABLE;
2461 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2462 SPEED_100 : SPEED_10);
2463 if (speed == SPEED_100)
2464 cmd->base.duplex =
2465 (hp->sw_lpa & (LPA_100FULL)) ?
2466 DUPLEX_FULL : DUPLEX_HALF;
2467 else
2468 cmd->base.duplex =
2469 (hp->sw_lpa & (LPA_10FULL)) ?
2470 DUPLEX_FULL : DUPLEX_HALF;
2471 } else {
2472 cmd->base.autoneg = AUTONEG_DISABLE;
2473 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2474 cmd->base.duplex =
2475 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2476 DUPLEX_FULL : DUPLEX_HALF;
2477 }
2478 cmd->base.speed = speed;
2479 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2480 supported);
2481
2482 return 0;
2483}
2484
2485static int hme_set_link_ksettings(struct net_device *dev,
2486 const struct ethtool_link_ksettings *cmd)
2487{
2488 struct happy_meal *hp = netdev_priv(dev);
2489
2490
2491 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2492 cmd->base.autoneg != AUTONEG_DISABLE)
2493 return -EINVAL;
2494 if (cmd->base.autoneg == AUTONEG_DISABLE &&
2495 ((cmd->base.speed != SPEED_100 &&
2496 cmd->base.speed != SPEED_10) ||
2497 (cmd->base.duplex != DUPLEX_HALF &&
2498 cmd->base.duplex != DUPLEX_FULL)))
2499 return -EINVAL;
2500
	/* Stop the link timer and restart negotiation with the new settings. */
2502 spin_lock_irq(&hp->happy_lock);
2503 del_timer(&hp->happy_timer);
2504 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2505 spin_unlock_irq(&hp->happy_lock);
2506
2507 return 0;
2508}
2509
2510static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2511{
2512 struct happy_meal *hp = netdev_priv(dev);
2513
2514 strlcpy(info->driver, "sunhme", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2516 if (hp->happy_flags & HFLAG_PCI) {
2517 struct pci_dev *pdev = hp->happy_dev;
2518 strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
2519 }
2520#ifdef CONFIG_SBUS
2521 else {
2522 const struct linux_prom_registers *regs;
2523 struct platform_device *op = hp->happy_dev;
2524 regs = of_get_property(op->dev.of_node, "regs", NULL);
2525 if (regs)
2526 snprintf(info->bus_info, sizeof(info->bus_info),
2527 "SBUS:%d",
2528 regs->which_io);
2529 }
2530#endif
2531}
2532
2533static u32 hme_get_link(struct net_device *dev)
2534{
2535 struct happy_meal *hp = netdev_priv(dev);
2536
2537 spin_lock_irq(&hp->happy_lock);
	hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
2539 spin_unlock_irq(&hp->happy_lock);
2540
2541 return hp->sw_bmsr & BMSR_LSTATUS;
2542}
2543
2544static const struct ethtool_ops hme_ethtool_ops = {
2545 .get_drvinfo = hme_get_drvinfo,
2546 .get_link = hme_get_link,
2547 .get_link_ksettings = hme_get_link_ksettings,
2548 .set_link_ksettings = hme_set_link_ksettings,
2549};
2550
2551static int hme_version_printed;
2552
2553#ifdef CONFIG_SBUS
2554
/* Find (or create) the quattro tracking structure for the SBUS
 * Quattro card this port sits on.  Returns NULL only when the
 * allocation fails.
 */
2559static struct quattro *quattro_sbus_find(struct platform_device *child)
2560{
2561 struct device *parent = child->dev.parent;
2562 struct platform_device *op;
2563 struct quattro *qp;
2564
2565 op = to_platform_device(parent);
2566 qp = platform_get_drvdata(op);
2567 if (qp)
2568 return qp;
2569
2570 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2571 if (qp != NULL) {
2572 int i;
2573
2574 for (i = 0; i < 4; i++)
2575 qp->happy_meals[i] = NULL;
2576
2577 qp->quattro_dev = child;
2578 qp->next = qfe_sbus_list;
2579 qfe_sbus_list = qp;
2580
2581 platform_set_drvdata(op, qp);
2582 }
2583 return qp;
2584}
2585
/* After probing, register the single shared IRQ for each SBUS
 * Quattro card whose four ports all probed successfully.
 */
2590static int __init quattro_sbus_register_irqs(void)
2591{
2592 struct quattro *qp;
2593
2594 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2595 struct platform_device *op = qp->quattro_dev;
2596 int err, qfe_slot, skip = 0;
2597
2598 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2599 if (!qp->happy_meals[qfe_slot])
2600 skip = 1;
2601 }
2602 if (skip)
2603 continue;
2604
2605 err = request_irq(op->archdata.irqs[0],
2606 quattro_sbus_interrupt,
2607 IRQF_SHARED, "Quattro",
2608 qp);
2609 if (err != 0) {
2610 printk(KERN_ERR "Quattro HME: IRQ registration "
2611 "error %d.\n", err);
2612 return err;
2613 }
2614 }
2615
2616 return 0;
2617}
2618
2619static void quattro_sbus_free_irqs(void)
2620{
2621 struct quattro *qp;
2622
2623 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2624 struct platform_device *op = qp->quattro_dev;
2625 int qfe_slot, skip = 0;
2626
2627 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2628 if (!qp->happy_meals[qfe_slot])
2629 skip = 1;
2630 }
2631 if (skip)
2632 continue;
2633
2634 free_irq(op->archdata.irqs[0], qp);
2635 }
2636}
2637#endif
2638
2639#ifdef CONFIG_PCI
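/* Find (or create) the quattro tracking structure for a PCI QFE card,
 * keyed on the PCI bridge the four HME functions sit behind.  Returns
 * NULL if there is no parent bridge or the allocation fails.
 */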
2640static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2641{
2642 struct pci_dev *bdev = pdev->bus->self;
2643 struct quattro *qp;
2644
2645 if (!bdev) return NULL;
2646 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2647 struct pci_dev *qpdev = qp->quattro_dev;
2648
2649 if (qpdev == bdev)
2650 return qp;
2651 }
2652 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2653 if (qp != NULL) {
2654 int i;
2655
2656 for (i = 0; i < 4; i++)
2657 qp->happy_meals[i] = NULL;
2658
2659 qp->quattro_dev = bdev;
2660 qp->next = qfe_pci_list;
2661 qfe_pci_list = qp;
2662
2663
2664 qp->nranges = 0;
2665 }
2666 return qp;
2667}
2668#endif
2669
2670static const struct net_device_ops hme_netdev_ops = {
2671 .ndo_open = happy_meal_open,
2672 .ndo_stop = happy_meal_close,
2673 .ndo_start_xmit = happy_meal_start_xmit,
2674 .ndo_tx_timeout = happy_meal_tx_timeout,
2675 .ndo_get_stats = happy_meal_get_stats,
2676 .ndo_set_rx_mode = happy_meal_set_multicast,
2677 .ndo_set_mac_address = eth_mac_addr,
2678 .ndo_validate_addr = eth_validate_addr,
2679};
2680
2681#ifdef CONFIG_SBUS
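/* Probe one SBUS HME (or QFE port): map the five register ranges,
 * pick a MAC address, allocate the descriptor block, and register
 * the network device.
 */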
2682static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2683{
2684 struct device_node *dp = op->dev.of_node, *sbus_dp;
2685 struct quattro *qp = NULL;
2686 struct happy_meal *hp;
2687 struct net_device *dev;
2688 int i, qfe_slot = -1;
2689 int err = -ENODEV;
2690
2691 sbus_dp = op->dev.parent->of_node;
2692
	/* Only accept devices that sit on a real SBUS/SBI bus. */
2694 if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
2695 return err;
2696
2697 if (is_qfe) {
2698 qp = quattro_sbus_find(op);
2699 if (qp == NULL)
2700 goto err_out;
2701 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2702 if (qp->happy_meals[qfe_slot] == NULL)
2703 break;
2704 if (qfe_slot == 4)
2705 goto err_out;
2706 }
2707
2708 err = -ENOMEM;
2709 dev = alloc_etherdev(sizeof(struct happy_meal));
2710 if (!dev)
2711 goto err_out;
2712 SET_NETDEV_DEV(dev, &op->dev);
2713
2714 if (hme_version_printed++ == 0)
2715 printk(KERN_INFO "%s", version);
2716
	/* Prefer a MAC address given via the "macaddr" module parameter;
	 * otherwise use the OpenPROM "local-mac-address" property (QFE
	 * ports) or fall back to the system IDPROM address.
	 */
2720 for (i = 0; i < 6; i++) {
2721 if (macaddr[i] != 0)
2722 break;
2723 }
2724 if (i < 6) {
2725 for (i = 0; i < 6; i++)
2726 dev->dev_addr[i] = macaddr[i];
2727 macaddr[5]++;
2728 } else {
2729 const unsigned char *addr;
2730 int len;
2731
2732 addr = of_get_property(dp, "local-mac-address", &len);
2733
2734 if (qfe_slot != -1 && addr && len == ETH_ALEN)
2735 memcpy(dev->dev_addr, addr, ETH_ALEN);
2736 else
2737 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
2738 }
2739
2740 hp = netdev_priv(dev);
2741
2742 hp->happy_dev = op;
2743 hp->dma_dev = &op->dev;
2744
2745 spin_lock_init(&hp->happy_lock);
2746
2747 err = -ENODEV;
2748 if (qp != NULL) {
2749 hp->qfe_parent = qp;
2750 hp->qfe_ent = qfe_slot;
2751 qp->happy_meals[qfe_slot] = dev;
2752 }
2753
2754 hp->gregs = of_ioremap(&op->resource[0], 0,
2755 GREG_REG_SIZE, "HME Global Regs");
2756 if (!hp->gregs) {
2757 printk(KERN_ERR "happymeal: Cannot map global registers.\n");
2758 goto err_out_free_netdev;
2759 }
2760
2761 hp->etxregs = of_ioremap(&op->resource[1], 0,
2762 ETX_REG_SIZE, "HME TX Regs");
2763 if (!hp->etxregs) {
2764 printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
2765 goto err_out_iounmap;
2766 }
2767
2768 hp->erxregs = of_ioremap(&op->resource[2], 0,
2769 ERX_REG_SIZE, "HME RX Regs");
2770 if (!hp->erxregs) {
2771 printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
2772 goto err_out_iounmap;
2773 }
2774
2775 hp->bigmacregs = of_ioremap(&op->resource[3], 0,
2776 BMAC_REG_SIZE, "HME BIGMAC Regs");
2777 if (!hp->bigmacregs) {
2778 printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
2779 goto err_out_iounmap;
2780 }
2781
2782 hp->tcvregs = of_ioremap(&op->resource[4], 0,
				 TCVR_REG_SIZE, "HME Transceiver Regs");
2784 if (!hp->tcvregs) {
2785 printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
2786 goto err_out_iounmap;
2787 }
2788
2789 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
2790 if (hp->hm_revision == 0xff)
2791 hp->hm_revision = 0xa0;
2792
2793
2794 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
2795 hp->happy_flags = HFLAG_20_21;
2796 else if (hp->hm_revision != 0xa0)
2797 hp->happy_flags = HFLAG_NOT_A0;
2798
2799 if (qp != NULL)
2800 hp->happy_flags |= HFLAG_QUATTRO;
2801
	/* DVMA burst sizes supported by the parent SBUS. */
2803 hp->happy_bursts = of_getintprop_default(sbus_dp,
2804 "burst-sizes", 0x00);
2805
2806 hp->happy_block = dma_alloc_coherent(hp->dma_dev,
2807 PAGE_SIZE,
2808 &hp->hblock_dvma,
2809 GFP_ATOMIC);
2810 err = -ENOMEM;
2811 if (!hp->happy_block)
2812 goto err_out_iounmap;
2813
	/* Force a link check the first time the interface is brought up. */
2815 hp->linkcheck = 0;
2816
	/* The link management timer starts out asleep. */
2818 hp->timer_state = asleep;
2819 hp->timer_ticks = 0;
2820
2821 timer_setup(&hp->happy_timer, happy_meal_timer, 0);
2822
2823 hp->dev = dev;
2824 dev->netdev_ops = &hme_netdev_ops;
2825 dev->watchdog_timeo = 5*HZ;
2826 dev->ethtool_ops = &hme_ethtool_ops;
2827
	/* The chip does scatter-gather and hardware checksumming. */
2829 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2830 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2831
2832 hp->irq = op->archdata.irqs[0];
2833
2834#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Hook up the SBUS register and descriptor accessors. */
2836 hp->read_desc32 = sbus_hme_read_desc32;
2837 hp->write_txd = sbus_hme_write_txd;
2838 hp->write_rxd = sbus_hme_write_rxd;
2839 hp->read32 = sbus_hme_read32;
2840 hp->write32 = sbus_hme_write32;
2841#endif
2842
	/* Latch the transceiver's initial autonegotiation advertisement
	 * before the device becomes visible to the rest of the stack.
	 */
2846 spin_lock_irq(&hp->happy_lock);
2847 happy_meal_set_initial_advertisement(hp);
2848 spin_unlock_irq(&hp->happy_lock);
2849
2850 err = register_netdev(hp->dev);
2851 if (err) {
2852 printk(KERN_ERR "happymeal: Cannot register net device, "
2853 "aborting.\n");
2854 goto err_out_free_coherent;
2855 }
2856
2857 platform_set_drvdata(op, hp);
2858
2859 if (qfe_slot != -1)
2860 printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
2861 dev->name, qfe_slot);
2862 else
2863 printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
2864 dev->name);
2865
2866 printk("%pM\n", dev->dev_addr);
2867
2868 return 0;
2869
2870err_out_free_coherent:
2871 dma_free_coherent(hp->dma_dev,
2872 PAGE_SIZE,
2873 hp->happy_block,
2874 hp->hblock_dvma);
2875
2876err_out_iounmap:
2877 if (hp->gregs)
2878 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
2879 if (hp->etxregs)
2880 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
2881 if (hp->erxregs)
2882 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
2883 if (hp->bigmacregs)
2884 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
2885 if (hp->tcvregs)
2886 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
2887
2888 if (qp)
2889 qp->happy_meals[qfe_slot] = NULL;
2890
2891err_out_free_netdev:
2892 free_netdev(dev);
2893
2894err_out:
2895 return err;
2896}
2897#endif
2898
2899#ifdef CONFIG_PCI
2900#ifndef CONFIG_SPARC
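/* Heuristic for non-sparc hosts: a QFE card is four HME functions
 * behind a DEC 21153 PCI-PCI bridge.
 */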
2901static int is_quattro_p(struct pci_dev *pdev)
2902{
2903 struct pci_dev *busdev = pdev->bus->self;
2904 struct pci_dev *this_pdev;
2905 int n_hmes;
2906
2907 if (busdev == NULL ||
2908 busdev->vendor != PCI_VENDOR_ID_DEC ||
2909 busdev->device != PCI_DEVICE_ID_DEC_21153)
2910 return 0;
2911
2912 n_hmes = 0;
2913 list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
2914 if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
2915 this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
2916 n_hmes++;
2917 }
2918
2919 if (n_hmes != 4)
2920 return 0;
2921
2922 return 1;
2923}
2924
2925
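/* Walk the expansion ROM image looking for a VPD entry whose "NA"
 * (network address) keyword carries a 6-byte MAC address, and copy
 * out the index-th one found.
 */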
2926static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
2927{
2928 int this_offset;
2929
2930 for (this_offset = 0x20; this_offset < len; this_offset++) {
2931 void __iomem *p = rom_base + this_offset;
2932
2933 if (readb(p + 0) != 0x90 ||
2934 readb(p + 1) != 0x00 ||
2935 readb(p + 2) != 0x09 ||
2936 readb(p + 3) != 0x4e ||
2937 readb(p + 4) != 0x41 ||
2938 readb(p + 5) != 0x06)
2939 continue;
2940
2941 this_offset += 6;
2942 p += 6;
2943
2944 if (index == 0) {
2945 int i;
2946
2947 for (i = 0; i < 6; i++)
2948 dev_addr[i] = readb(p + i);
2949 return 1;
2950 }
2951 index--;
2952 }
2953 return 0;
2954}
2955
2956static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
2957{
2958 size_t size;
2959 void __iomem *p = pci_map_rom(pdev, &size);
2960
2961 if (p) {
2962 int index = 0;
2963 int found;
2964
2965 if (is_quattro_p(pdev))
2966 index = PCI_SLOT(pdev->devfn);
2967
2968 found = readb(p) == 0x55 &&
2969 readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, min_t(int, size, 64 * 1024),
					     index, dev_addr);
2971 pci_unmap_rom(pdev, p);
2972 if (found)
2973 return;
2974 }
2975
2976
2977 dev_addr[0] = 0x08;
2978 dev_addr[1] = 0x00;
2979 dev_addr[2] = 0x20;
2980 get_random_bytes(&dev_addr[3], 3);
2981}
2982#endif
2983
2984static int happy_meal_pci_probe(struct pci_dev *pdev,
2985 const struct pci_device_id *ent)
2986{
2987 struct quattro *qp = NULL;
2988#ifdef CONFIG_SPARC
2989 struct device_node *dp;
2990#endif
2991 struct happy_meal *hp;
2992 struct net_device *dev;
2993 void __iomem *hpreg_base;
2994 unsigned long hpreg_res;
2995 int i, qfe_slot = -1;
2996 char prom_name[64];
2997 int err;
2998
	/* Work out the PROM name: from the OF node on sparc, guessed
	 * from the bus topology everywhere else.
	 */
3000#ifdef CONFIG_SPARC
3001 dp = pci_device_to_OF_node(pdev);
3002 strcpy(prom_name, dp->name);
3003#else
3004 if (is_quattro_p(pdev))
3005 strcpy(prom_name, "SUNW,qfe");
3006 else
3007 strcpy(prom_name, "SUNW,hme");
3008#endif
3009
3010 err = -ENODEV;
3011
3012 if (pci_enable_device(pdev))
3013 goto err_out;
3014 pci_set_master(pdev);
3015
3016 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
3017 qp = quattro_pci_find(pdev);
3018 if (qp == NULL)
3019 goto err_out;
3020 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
3021 if (qp->happy_meals[qfe_slot] == NULL)
3022 break;
3023 if (qfe_slot == 4)
3024 goto err_out;
3025 }
3026
3027 dev = alloc_etherdev(sizeof(struct happy_meal));
3028 err = -ENOMEM;
3029 if (!dev)
3030 goto err_out;
3031 SET_NETDEV_DEV(dev, &pdev->dev);
3032
3033 if (hme_version_printed++ == 0)
3034 printk(KERN_INFO "%s", version);
3035
3036 hp = netdev_priv(dev);
3037
3038 hp->happy_dev = pdev;
3039 hp->dma_dev = &pdev->dev;
3040
3041 spin_lock_init(&hp->happy_lock);
3042
3043 if (qp != NULL) {
3044 hp->qfe_parent = qp;
3045 hp->qfe_ent = qfe_slot;
3046 qp->happy_meals[qfe_slot] = dev;
3047 }
3048
3049 hpreg_res = pci_resource_start(pdev, 0);
3050 err = -ENODEV;
3051 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3052 printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
3053 goto err_out_clear_quattro;
3054 }
3055 if (pci_request_regions(pdev, DRV_NAME)) {
3056 printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
3057 "aborting.\n");
3058 goto err_out_clear_quattro;
3059 }
3060
	hpreg_base = ioremap(hpreg_res, 0x8000);
	if (!hpreg_base) {
		printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
		goto err_out_free_res;
	}
3065
3066 for (i = 0; i < 6; i++) {
3067 if (macaddr[i] != 0)
3068 break;
3069 }
3070 if (i < 6) {
3071 for (i = 0; i < 6; i++)
3072 dev->dev_addr[i] = macaddr[i];
3073 macaddr[5]++;
3074 } else {
3075#ifdef CONFIG_SPARC
3076 const unsigned char *addr;
3077 int len;
3078
3079 if (qfe_slot != -1 &&
3080 (addr = of_get_property(dp, "local-mac-address", &len))
3081 != NULL &&
3082 len == 6) {
3083 memcpy(dev->dev_addr, addr, ETH_ALEN);
3084 } else {
3085 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
3086 }
3087#else
3088 get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
3089#endif
3090 }
3091
	/* The register blocks sit at fixed offsets within BAR 0. */
3093 hp->gregs = (hpreg_base + 0x0000UL);
3094 hp->etxregs = (hpreg_base + 0x2000UL);
3095 hp->erxregs = (hpreg_base + 0x4000UL);
3096 hp->bigmacregs = (hpreg_base + 0x6000UL);
3097 hp->tcvregs = (hpreg_base + 0x7000UL);
3098
3099#ifdef CONFIG_SPARC
3100 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
3101 if (hp->hm_revision == 0xff)
3102 hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
3103#else
	/* No OF property to consult here; assume a 2.0/2.1 class part. */
3105 hp->hm_revision = 0x20;
3106#endif
3107
3108
3109 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
3110 hp->happy_flags = HFLAG_20_21;
3111 else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
3112 hp->happy_flags = HFLAG_NOT_A0;
3113
3114 if (qp != NULL)
3115 hp->happy_flags |= HFLAG_QUATTRO;
3116
	/* And mark this as the PCI flavour of the chip. */
3118 hp->happy_flags |= HFLAG_PCI;
3119
3120#ifdef CONFIG_SPARC
	/* Assume the PCI variant handles all DVMA burst sizes. */
3122 hp->happy_bursts = DMA_BURSTBITS;
3123#endif
3124
3125 hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3126 &hp->hblock_dvma, GFP_KERNEL);
	err = -ENOMEM;
3128 if (!hp->happy_block)
3129 goto err_out_iounmap;
3130
3131 hp->linkcheck = 0;
3132 hp->timer_state = asleep;
3133 hp->timer_ticks = 0;
3134
3135 timer_setup(&hp->happy_timer, happy_meal_timer, 0);
3136
3137 hp->irq = pdev->irq;
3138 hp->dev = dev;
3139 dev->netdev_ops = &hme_netdev_ops;
3140 dev->watchdog_timeo = 5*HZ;
3141 dev->ethtool_ops = &hme_ethtool_ops;
3142
	/* The chip does scatter-gather and hardware checksumming. */
3144 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
3145 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
3146
3147#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Hook up the PCI register and descriptor accessors. */
3149 hp->read_desc32 = pci_hme_read_desc32;
3150 hp->write_txd = pci_hme_write_txd;
3151 hp->write_rxd = pci_hme_write_rxd;
3152 hp->read32 = pci_hme_read32;
3153 hp->write32 = pci_hme_write32;
3154#endif
3155
	/* Latch the transceiver's initial autonegotiation advertisement
	 * before the device becomes visible to the rest of the stack.
	 */
3159 spin_lock_irq(&hp->happy_lock);
3160 happy_meal_set_initial_advertisement(hp);
3161 spin_unlock_irq(&hp->happy_lock);
3162
3163 err = register_netdev(hp->dev);
3164 if (err) {
3165 printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
3166 "aborting.\n");
3167 goto err_out_iounmap;
3168 }
3169
3170 pci_set_drvdata(pdev, hp);
3171
3172 if (!qfe_slot) {
3173 struct pci_dev *qpdev = qp->quattro_dev;
3174
3175 prom_name[0] = 0;
3176 if (!strncmp(dev->name, "eth", 3)) {
3177 int i = simple_strtoul(dev->name + 3, NULL, 10);
3178 sprintf(prom_name, "-%d", i + 3);
3179 }
3180 printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
3181 if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
3182 qpdev->device == PCI_DEVICE_ID_DEC_21153)
3183 printk("DEC 21153 PCI Bridge\n");
3184 else
3185 printk("unknown bridge %04x.%04x\n",
3186 qpdev->vendor, qpdev->device);
3187 }
3188
3189 if (qfe_slot != -1)
3190 printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
3191 dev->name, qfe_slot);
3192 else
3193 printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
3194 dev->name);
3195
3196 printk("%pM\n", dev->dev_addr);
3197
3198 return 0;
3199
3200err_out_iounmap:
3201 iounmap(hp->gregs);
3202
3203err_out_free_res:
3204 pci_release_regions(pdev);
3205
3206err_out_clear_quattro:
3207 if (qp != NULL)
3208 qp->happy_meals[qfe_slot] = NULL;
3209
3210 free_netdev(dev);
3211
3212err_out:
3213 return err;
3214}
3215
3216static void happy_meal_pci_remove(struct pci_dev *pdev)
3217{
3218 struct happy_meal *hp = pci_get_drvdata(pdev);
3219 struct net_device *net_dev = hp->dev;
3220
3221 unregister_netdev(net_dev);
3222
3223 dma_free_coherent(hp->dma_dev, PAGE_SIZE,
3224 hp->happy_block, hp->hblock_dvma);
3225 iounmap(hp->gregs);
3226 pci_release_regions(hp->happy_dev);
3227
3228 free_netdev(net_dev);
3229}
3230
3231static const struct pci_device_id happymeal_pci_ids[] = {
3232 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3233 { }
3234};
3235
3236MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
3237
3238static struct pci_driver hme_pci_driver = {
3239 .name = "hme",
3240 .id_table = happymeal_pci_ids,
3241 .probe = happy_meal_pci_probe,
3242 .remove = happy_meal_pci_remove,
3243};
3244
3245static int __init happy_meal_pci_init(void)
3246{
3247 return pci_register_driver(&hme_pci_driver);
3248}
3249
3250static void happy_meal_pci_exit(void)
3251{
3252 pci_unregister_driver(&hme_pci_driver);
3253
3254 while (qfe_pci_list) {
3255 struct quattro *qfe = qfe_pci_list;
3256 struct quattro *next = qfe->next;
3257
3258 kfree(qfe);
3259
3260 qfe_pci_list = next;
3261 }
3262}
3263
3264#endif
3265
3266#ifdef CONFIG_SBUS
3267static const struct of_device_id hme_sbus_match[];
3268static int hme_sbus_probe(struct platform_device *op)
3269{
3270 const struct of_device_id *match;
3271 struct device_node *dp = op->dev.of_node;
3272 const char *model = of_get_property(dp, "model", NULL);
3273 int is_qfe;
3274
3275 match = of_match_device(hme_sbus_match, &op->dev);
3276 if (!match)
3277 return -EINVAL;
3278 is_qfe = (match->data != NULL);
3279
3280 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
3281 is_qfe = 1;
3282
3283 return happy_meal_sbus_probe_one(op, is_qfe);
3284}
3285
3286static int hme_sbus_remove(struct platform_device *op)
3287{
3288 struct happy_meal *hp = platform_get_drvdata(op);
3289 struct net_device *net_dev = hp->dev;
3290
3291 unregister_netdev(net_dev);
3292
	/* Unmap all five register ranges and free the descriptor block. */
3295 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
3296 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
3297 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
3298 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
3299 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
3300 dma_free_coherent(hp->dma_dev,
3301 PAGE_SIZE,
3302 hp->happy_block,
3303 hp->hblock_dvma);
3304
3305 free_netdev(net_dev);
3306
3307 return 0;
3308}
3309
3310static const struct of_device_id hme_sbus_match[] = {
3311 {
3312 .name = "SUNW,hme",
3313 },
3314 {
3315 .name = "SUNW,qfe",
3316 .data = (void *) 1,
3317 },
3318 {
3319 .name = "qfe",
3320 .data = (void *) 1,
3321 },
3322 {},
3323};
3324
3325MODULE_DEVICE_TABLE(of, hme_sbus_match);
3326
3327static struct platform_driver hme_sbus_driver = {
3328 .driver = {
3329 .name = "hme",
3330 .of_match_table = hme_sbus_match,
3331 },
3332 .probe = hme_sbus_probe,
3333 .remove = hme_sbus_remove,
3334};
3335
3336static int __init happy_meal_sbus_init(void)
3337{
3338 int err;
3339
3340 err = platform_driver_register(&hme_sbus_driver);
3341 if (!err)
3342 err = quattro_sbus_register_irqs();
3343
3344 return err;
3345}
3346
3347static void happy_meal_sbus_exit(void)
3348{
3349 platform_driver_unregister(&hme_sbus_driver);
3350 quattro_sbus_free_irqs();
3351
3352 while (qfe_sbus_list) {
3353 struct quattro *qfe = qfe_sbus_list;
3354 struct quattro *next = qfe->next;
3355
3356 kfree(qfe);
3357
3358 qfe_sbus_list = next;
3359 }
3360}
3361#endif
3362
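/* Module entry point: register the SBUS platform driver (plus the
 * shared Quattro IRQs) and/or the PCI driver, depending on what the
 * kernel was built with.
 */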
3363static int __init happy_meal_probe(void)
3364{
3365 int err = 0;
3366
3367#ifdef CONFIG_SBUS
3368 err = happy_meal_sbus_init();
3369#endif
3370#ifdef CONFIG_PCI
3371 if (!err) {
3372 err = happy_meal_pci_init();
3373#ifdef CONFIG_SBUS
3374 if (err)
3375 happy_meal_sbus_exit();
3376#endif
3377 }
3378#endif
3379
3380 return err;
3381}
3382
3383
3384static void __exit happy_meal_exit(void)
3385{
3386#ifdef CONFIG_SBUS
3387 happy_meal_sbus_exit();
3388#endif
3389#ifdef CONFIG_PCI
3390 happy_meal_pci_exit();
3391#endif
3392}
3393
3394module_init(happy_meal_probe);
3395module_exit(happy_meal_exit);
3396