/* sunhme.c: Sun HappyMeal Ethernet (HME) 10/100baseT half/full duplex
 *	     auto-switching driver, for the SBUS and PCI variants of the
 *	     chip (including the four-port "Quattro"/QFE cards).
 *
 * Author: David S. Miller (davem@davemloft.net)
 */
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/fcntl.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/in.h>
24#include <linux/slab.h>
25#include <linux/string.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ethtool.h>
29#include <linux/mii.h>
30#include <linux/crc32.h>
31#include <linux/random.h>
32#include <linux/errno.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/mm.h>
37#include <linux/bitops.h>
38#include <linux/dma-mapping.h>
39
40#include <asm/io.h>
41#include <asm/dma.h>
42#include <asm/byteorder.h>
43
44#ifdef CONFIG_SPARC
45#include <linux/of.h>
46#include <linux/of_device.h>
47#include <asm/idprom.h>
48#include <asm/openprom.h>
49#include <asm/oplib.h>
50#include <asm/prom.h>
51#include <asm/auxio.h>
52#endif
53#include <linux/uaccess.h>
54
55#include <asm/irq.h>
56
57#ifdef CONFIG_PCI
58#include <linux/pci.h>
59#endif
60
61#include "sunhme.h"
62
63#define DRV_NAME "sunhme"
64#define DRV_VERSION "3.10"
65#define DRV_RELDATE "August 26, 2008"
66#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
67
68static char version[] =
69 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
70
71MODULE_VERSION(DRV_VERSION);
72MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun HappyMeal Ethernet (HME) 10/100baseT ethernet driver");
74MODULE_LICENSE("GPL");
75
76static int macaddr[6];
77
78
79module_param_array(macaddr, int, NULL, 0);
80MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
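
/* Example (hypothetical address): override the MAC at load time with six
 * comma-separated octets, e.g.
 *
 *	modprobe sunhme macaddr=0x08,0x00,0x20,0x01,0x02,0x03
 */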
81
82#ifdef CONFIG_SBUS
83static struct quattro *qfe_sbus_list;
84#endif
85
86#ifdef CONFIG_PCI
87static struct quattro *qfe_pci_list;
88#endif
89
90#undef HMEDEBUG
91#undef SXDEBUG
92#undef RXDEBUG
93#undef TXDEBUG
94#undef TXLOGGING
95
96#ifdef TXLOGGING
97struct hme_tx_logent {
98 unsigned int tstamp;
99 int tx_new, tx_old;
100 unsigned int action;
101#define TXLOG_ACTION_IRQ 0x01
102#define TXLOG_ACTION_TXMIT 0x02
103#define TXLOG_ACTION_TBUSY 0x04
104#define TXLOG_ACTION_NBUFS 0x08
105 unsigned int status;
106};
107#define TX_LOG_LEN 128
108static struct hme_tx_logent tx_log[TX_LOG_LEN];
109static int txlog_cur_entry;
110static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
111{
112 struct hme_tx_logent *tlp;
113 unsigned long flags;
114
115 local_irq_save(flags);
116 tlp = &tx_log[txlog_cur_entry];
117 tlp->tstamp = (unsigned int)jiffies;
118 tlp->tx_new = hp->tx_new;
119 tlp->tx_old = hp->tx_old;
120 tlp->action = a;
121 tlp->status = s;
122 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
123 local_irq_restore(flags);
124}
125static __inline__ void tx_dump_log(void)
126{
127 int i, this;
128
129 this = txlog_cur_entry;
130 for (i = 0; i < TX_LOG_LEN; i++) {
131 printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
132 tx_log[this].tstamp,
133 tx_log[this].tx_new, tx_log[this].tx_old,
134 tx_log[this].action, tx_log[this].status);
135 this = (this + 1) & (TX_LOG_LEN - 1);
136 }
137}
138static __inline__ void tx_dump_ring(struct happy_meal *hp)
139{
140 struct hmeal_init_block *hb = hp->happy_block;
141 struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
142 int i;
143
144 for (i = 0; i < TX_RING_SIZE; i+=4) {
145 printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
146 i, i + 4,
147 le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
148 le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
149 le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
150 le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
151 }
152}
153#else
154#define tx_add_log(hp, a, s) do { } while(0)
155#define tx_dump_log() do { } while(0)
156#define tx_dump_ring(hp) do { } while(0)
157#endif
158
159#ifdef HMEDEBUG
160#define HMD(x) printk x
161#else
162#define HMD(x)
163#endif
164
165
166
167#ifdef AUTO_SWITCH_DEBUG
168#define ASD(x) printk x
169#else
170#define ASD(x)
171#endif
172
173#define DEFAULT_IPG0 16
174#define DEFAULT_IPG1 8
175#define DEFAULT_IPG2 4
176#define DEFAULT_JAMSIZE 4
177
/* NOTE: When writing a descriptor, the address member must be stored
 *	 before the flags word; the dma_wmb() between the two stores keeps
 *	 the chip from seeing updated flags with a stale address.
 */

185#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
186static void sbus_hme_write32(void __iomem *reg, u32 val)
187{
188 sbus_writel(val, reg);
189}
190
191static u32 sbus_hme_read32(void __iomem *reg)
192{
193 return sbus_readl(reg);
194}
195
196static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
197{
198 rxd->rx_addr = (__force hme32)addr;
199 dma_wmb();
200 rxd->rx_flags = (__force hme32)flags;
201}
202
203static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
204{
205 txd->tx_addr = (__force hme32)addr;
206 dma_wmb();
207 txd->tx_flags = (__force hme32)flags;
208}
209
210static u32 sbus_hme_read_desc32(hme32 *p)
211{
212 return (__force u32)*p;
213}
214
215static void pci_hme_write32(void __iomem *reg, u32 val)
216{
217 writel(val, reg);
218}
219
220static u32 pci_hme_read32(void __iomem *reg)
221{
222 return readl(reg);
223}
224
225static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
226{
227 rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
228 dma_wmb();
229 rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
230}
231
232static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
233{
234 txd->tx_addr = (__force hme32)cpu_to_le32(addr);
235 dma_wmb();
236 txd->tx_flags = (__force hme32)cpu_to_le32(flags);
237}
238
239static u32 pci_hme_read_desc32(hme32 *p)
240{
241 return le32_to_cpup((__le32 *)p);
242}
243
244#define hme_write32(__hp, __reg, __val) \
245 ((__hp)->write32((__reg), (__val)))
246#define hme_read32(__hp, __reg) \
247 ((__hp)->read32(__reg))
248#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
249 ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
250#define hme_write_txd(__hp, __txd, __flags, __addr) \
251 ((__hp)->write_txd((__txd), (__flags), (__addr)))
252#define hme_read_desc32(__hp, __p) \
253 ((__hp)->read_desc32(__p))
254#else
255#ifdef CONFIG_SBUS
256
257#define hme_write32(__hp, __reg, __val) \
258 sbus_writel((__val), (__reg))
259#define hme_read32(__hp, __reg) \
260 sbus_readl(__reg)
261#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
262do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
263 dma_wmb(); \
264 (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
265} while(0)
266#define hme_write_txd(__hp, __txd, __flags, __addr) \
267do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
268 dma_wmb(); \
269 (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
270} while(0)
271#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
272#else
273
274#define hme_write32(__hp, __reg, __val) \
275 writel((__val), (__reg))
276#define hme_read32(__hp, __reg) \
277 readl(__reg)
278#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
279do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
280 dma_wmb(); \
281 (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
282} while(0)
283#define hme_write_txd(__hp, __txd, __flags, __addr) \
284do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
285 dma_wmb(); \
286 (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
287} while(0)
288static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
289{
290 return le32_to_cpup((__le32 *)p);
291}
292#endif
293#endif
294
295
296
297static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
298{
299 hme_write32(hp, tregs + TCVR_BBDATA, bit);
300 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
301 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
302}
303
304#if 0
305static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
306{
307 u32 ret;
308
309 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
310 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
311 ret = hme_read32(hp, tregs + TCVR_CFG);
312 if (internal)
313 ret &= TCV_CFG_MDIO0;
314 else
315 ret &= TCV_CFG_MDIO1;
316
317 return ret;
318}
319#endif
320
321static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
322{
323 u32 retval;
324
325 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
326 udelay(1);
327 retval = hme_read32(hp, tregs + TCVR_CFG);
328 if (internal)
329 retval &= TCV_CFG_MDIO0;
330 else
331 retval &= TCV_CFG_MDIO1;
332 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
333
334 return retval;
335}
336
337#define TCVR_FAILURE 0x80000000
338
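/* Read a transceiver register by bit-banging the MIF pins: drive the
 * 32-bit preamble, the start/read opcode, the PHY address and the
 * register number, then release the data pin and clock in the sixteen
 * data bits (the turnaround and trailing idle bits are discarded).
 */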
339static int happy_meal_bb_read(struct happy_meal *hp,
340 void __iomem *tregs, int reg)
341{
342 u32 tmp;
343 int retval = 0;
344 int i;
345
346 ASD(("happy_meal_bb_read: reg=%d ", reg));
347
348
349 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
350
351
352 for (i = 0; i < 32; i++)
353 BB_PUT_BIT(hp, tregs, 1);
354
355
356 BB_PUT_BIT(hp, tregs, 0);
357 BB_PUT_BIT(hp, tregs, 1);
358 BB_PUT_BIT(hp, tregs, 1);
359 BB_PUT_BIT(hp, tregs, 0);
360
361
362 tmp = hp->paddr & 0xff;
363 for (i = 4; i >= 0; i--)
364 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
365
366
367 tmp = (reg & 0xff);
368 for (i = 4; i >= 0; i--)
369 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
370
371
372 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
373
374
375 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
376 for (i = 15; i >= 0; i--)
377 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
378 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
379 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
380 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
381 ASD(("value=%x\n", retval));
382 return retval;
383}
384
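/* Write a transceiver register by bit-banging the MIF pins: preamble,
 * start/write opcode, PHY address, register number, turnaround bits,
 * then the sixteen data bits, MSB first.
 */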
385static void happy_meal_bb_write(struct happy_meal *hp,
386 void __iomem *tregs, int reg,
387 unsigned short value)
388{
389 u32 tmp;
390 int i;
391
392 ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
393
394
395 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
396
397
398 for (i = 0; i < 32; i++)
399 BB_PUT_BIT(hp, tregs, 1);
400
401
402 BB_PUT_BIT(hp, tregs, 0);
403 BB_PUT_BIT(hp, tregs, 1);
404 BB_PUT_BIT(hp, tregs, 0);
405 BB_PUT_BIT(hp, tregs, 1);
406
407
408 tmp = (hp->paddr & 0xff);
409 for (i = 4; i >= 0; i--)
410 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
411
412
413 tmp = (reg & 0xff);
414 for (i = 4; i >= 0; i--)
415 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
416
417
418 BB_PUT_BIT(hp, tregs, 1);
419 BB_PUT_BIT(hp, tregs, 0);
420
421 for (i = 15; i >= 0; i--)
422 BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
423
424
425 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
426}
427
428#define TCVR_READ_TRIES 16
429
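/* MII register read.  When the MIF supports frame mode (HFLAG_FENABLE)
 * a read frame is written to TCVR_FRAME and we busy-wait for the valid
 * bit; otherwise we fall back to the bit-bang routines above.  Returns
 * TCVR_FAILURE if no transceiver is present or the MIF never responds.
 */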
430static int happy_meal_tcvr_read(struct happy_meal *hp,
431 void __iomem *tregs, int reg)
432{
433 int tries = TCVR_READ_TRIES;
434 int retval;
435
436 ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
437 if (hp->tcvr_type == none) {
438 ASD(("no transceiver, value=TCVR_FAILURE\n"));
439 return TCVR_FAILURE;
440 }
441
442 if (!(hp->happy_flags & HFLAG_FENABLE)) {
443 ASD(("doing bit bang\n"));
444 return happy_meal_bb_read(hp, tregs, reg);
445 }
446
447 hme_write32(hp, tregs + TCVR_FRAME,
448 (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
449 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
450 udelay(20);
451 if (!tries) {
452 printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
453 return TCVR_FAILURE;
454 }
455 retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
456 ASD(("value=%04x\n", retval));
457 return retval;
458}
459
460#define TCVR_WRITE_TRIES 16
461
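/* MII register write: frame mode if available, bit-bang otherwise. */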
462static void happy_meal_tcvr_write(struct happy_meal *hp,
463 void __iomem *tregs, int reg,
464 unsigned short value)
465{
466 int tries = TCVR_WRITE_TRIES;
467
468 ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
469
470
471 if (!(hp->happy_flags & HFLAG_FENABLE)) {
472 happy_meal_bb_write(hp, tregs, reg, value);
473 return;
474 }
475
476
477 hme_write32(hp, tregs + TCVR_FRAME,
478 (FRAME_WRITE | (hp->paddr << 23) |
479 ((reg & 0xff) << 18) | (value & 0xffff)));
480 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
481 udelay(20);
482
483
484 if (!tries)
485 printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
486
487
488}
489

/* Link management.
 *
 * happy_meal_begin_auto_negotiation() starts auto-negotiation (or forces
 * a speed/duplex), and happy_meal_timer() then walks a small state
 * machine: arbwait while waiting for negotiation to complete, lupwait
 * while waiting for the link to come up, ltrywait while trying forced
 * modes, and asleep once the link is settled.  try_next_permutation()
 * steps the forced mode down (drop full duplex first, then drop 100Mbit)
 * each time a forced link fails to appear.
 */

522static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
523{
524 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
525
526
527
528
529 if (hp->sw_bmcr & BMCR_FULLDPLX) {
530 hp->sw_bmcr &= ~(BMCR_FULLDPLX);
531 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
532 return 0;
533 }
534
535
536 if (hp->sw_bmcr & BMCR_SPEED100) {
537 hp->sw_bmcr &= ~(BMCR_SPEED100);
538 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
539 return 0;
540 }
541
542
543 return -1;
544}
545
546static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
547{
548 printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
549 if (hp->tcvr_type == external)
550 printk("external ");
551 else
552 printk("internal ");
553 printk("transceiver at ");
554 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
555 if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
556 if (hp->sw_lpa & LPA_100FULL)
557 printk("100Mb/s, Full Duplex.\n");
558 else
559 printk("100Mb/s, Half Duplex.\n");
560 } else {
561 if (hp->sw_lpa & LPA_10FULL)
562 printk("10Mb/s, Full Duplex.\n");
563 else
564 printk("10Mb/s, Half Duplex.\n");
565 }
566}
567
568static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
569{
570 printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
571 if (hp->tcvr_type == external)
572 printk("external ");
573 else
574 printk("internal ");
575 printk("transceiver at ");
576 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
577 if (hp->sw_bmcr & BMCR_SPEED100)
578 printk("100Mb/s, ");
579 else
580 printk("10Mb/s, ");
581 if (hp->sw_bmcr & BMCR_FULLDPLX)
582 printk("Full Duplex.\n");
583 else
584 printk("Half Duplex.\n");
585}
586
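/* Program the BigMAC for the duplex setting that was just established:
 * from the link partner ability bits while auto-negotiating (arbwait),
 * or from our own BMCR when the link was forced.  The transmitter is
 * disabled while BIGMAC_TXCFG_FULLDPLX is changed.  Returns non-zero if
 * the partner advertised nothing usable.
 */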
587static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
588{
589 int full;
590
591
592
593
594 if (hp->timer_state == arbwait) {
595 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
596 if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
597 goto no_response;
598 if (hp->sw_lpa & LPA_100FULL)
599 full = 1;
600 else if (hp->sw_lpa & LPA_100HALF)
601 full = 0;
602 else if (hp->sw_lpa & LPA_10FULL)
603 full = 1;
604 else
605 full = 0;
606 } else {
607
608 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
609 if (hp->sw_bmcr & BMCR_FULLDPLX)
610 full = 1;
611 else
612 full = 0;
613 }
614
615
616
617
618
619
620
621
622
623 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
624 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
625 ~(BIGMAC_TXCFG_ENABLE));
626 while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
627 barrier();
628 if (full) {
629 hp->happy_flags |= HFLAG_FULL;
630 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
631 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
632 BIGMAC_TXCFG_FULLDPLX);
633 } else {
634 hp->happy_flags &= ~(HFLAG_FULL);
635 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
636 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
637 ~(BIGMAC_TXCFG_FULLDPLX));
638 }
639 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
640 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
641 BIGMAC_TXCFG_ENABLE);
642 return 0;
643no_response:
644 return 1;
645}
646
647static int happy_meal_init(struct happy_meal *hp);
648
649static int is_lucent_phy(struct happy_meal *hp)
650{
651 void __iomem *tregs = hp->tcvregs;
652 unsigned short mr2, mr3;
653 int ret = 0;
654
655 mr2 = happy_meal_tcvr_read(hp, tregs, 2);
656 mr3 = happy_meal_tcvr_read(hp, tregs, 3);
657 if ((mr2 & 0xffff) == 0x0180 &&
658 ((mr3 & 0xffff) >> 10) == 0x1d)
659 ret = 1;
660
661 return ret;
662}
663
664static void happy_meal_timer(struct timer_list *t)
665{
666 struct happy_meal *hp = from_timer(hp, t, happy_timer);
667 void __iomem *tregs = hp->tcvregs;
668 int restart_timer = 0;
669
670 spin_lock_irq(&hp->happy_lock);
671
672 hp->timer_ticks++;
673 switch(hp->timer_state) {
674 case arbwait:
675
676
677
678 if (hp->timer_ticks >= 10) {
679
680 do_force_mode:
681 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
682 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
683 hp->dev->name);
684 hp->sw_bmcr = BMCR_SPEED100;
685 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
686
687 if (!is_lucent_phy(hp)) {
688
689
690
691
692 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
693 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
694 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
695 }
696 hp->timer_state = ltrywait;
697 hp->timer_ticks = 0;
698 restart_timer = 1;
699 } else {
700
701 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
702 if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
703 int ret;
704
705
706 ret = set_happy_link_modes(hp, tregs);
707 if (ret) {
708
709
710
711
712
713
714 goto do_force_mode;
715 }
716
717
718 hp->timer_state = lupwait;
719 restart_timer = 1;
720 } else {
721 restart_timer = 1;
722 }
723 }
724 break;
725
726 case lupwait:
727
728
729
730
731
732 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
733 if (hp->sw_bmsr & BMSR_LSTATUS) {
734
735
736
737 display_link_mode(hp, tregs);
738 hp->timer_state = asleep;
739 restart_timer = 0;
740 } else {
741 if (hp->timer_ticks >= 10) {
742 printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
743 "not completely up.\n", hp->dev->name);
744 hp->timer_ticks = 0;
745 restart_timer = 1;
746 } else {
747 restart_timer = 1;
748 }
749 }
750 break;
751
752 case ltrywait:
753
754
755
756
757
758 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
759 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
760 if (hp->timer_ticks == 1) {
761 if (!is_lucent_phy(hp)) {
762
763
764
765 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
766 happy_meal_tcvr_write(hp, tregs,
767 DP83840_CSCONFIG, hp->sw_csconfig);
768 }
769 restart_timer = 1;
770 break;
771 }
772 if (hp->timer_ticks == 2) {
773 if (!is_lucent_phy(hp)) {
774 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
775 happy_meal_tcvr_write(hp, tregs,
776 DP83840_CSCONFIG, hp->sw_csconfig);
777 }
778 restart_timer = 1;
779 break;
780 }
781 if (hp->sw_bmsr & BMSR_LSTATUS) {
782
783 display_forced_link_mode(hp, tregs);
784 set_happy_link_modes(hp, tregs);
785 hp->timer_state = asleep;
786 restart_timer = 0;
787 } else {
788 if (hp->timer_ticks >= 4) {
789 int ret;
790
791 ret = try_next_permutation(hp, tregs);
792 if (ret == -1) {
793
794
795
796
797
798 printk(KERN_NOTICE "%s: Link down, cable problem?\n",
799 hp->dev->name);
800
801 ret = happy_meal_init(hp);
802 if (ret) {
803
804 printk(KERN_ERR "%s: Error, cannot re-init the "
805 "Happy Meal.\n", hp->dev->name);
806 }
807 goto out;
808 }
809 if (!is_lucent_phy(hp)) {
810 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
811 DP83840_CSCONFIG);
812 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
813 happy_meal_tcvr_write(hp, tregs,
814 DP83840_CSCONFIG, hp->sw_csconfig);
815 }
816 hp->timer_ticks = 0;
817 restart_timer = 1;
818 } else {
819 restart_timer = 1;
820 }
821 }
822 break;
823
824 case asleep:
825 default:
826
827 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
828 hp->dev->name);
829 restart_timer = 0;
830 hp->timer_ticks = 0;
831 hp->timer_state = asleep;
832 break;
833 }
834
835 if (restart_timer) {
836 hp->happy_timer.expires = jiffies + ((12 * HZ)/10);
837 add_timer(&hp->happy_timer);
838 }
839
840out:
841 spin_unlock_irq(&hp->happy_lock);
842}
843
844#define TX_RESET_TRIES 32
845#define RX_RESET_TRIES 32
846
847
848static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
849{
850 int tries = TX_RESET_TRIES;
851
852 HMD(("happy_meal_tx_reset: reset, "));
853
854
855 hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
856 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
857 udelay(20);
858
859
860 if (!tries)
861 printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
862
863
864 HMD(("done\n"));
865}
866
867
868static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
869{
870 int tries = RX_RESET_TRIES;
871
872 HMD(("happy_meal_rx_reset: reset, "));
873
874
875 hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
876 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
877 udelay(20);
878
879
880 if (!tries)
881 printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
882
883
884 HMD(("done\n"));
885}
886
887#define STOP_TRIES 16
888
889
890static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
891{
892 int tries = STOP_TRIES;
893
894 HMD(("happy_meal_stop: reset, "));
895
896
897 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
898 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
899 udelay(20);
900
901
902 if (!tries)
903 printk(KERN_ERR "happy meal: Fry guys.");
904
905
906 HMD(("done\n"));
907}
908
909
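/* Fold the BigMAC hardware error and collision counters into
 * net_device_stats and clear them so they do not wrap.
 */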
910static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
911{
912 struct net_device_stats *stats = &hp->dev->stats;
913
914 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
915 hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
916
917 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
918 hme_write32(hp, bregs + BMAC_UNALECTR, 0);
919
920 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
921 hme_write32(hp, bregs + BMAC_GLECTR, 0);
922
923 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
924
925 stats->collisions +=
926 (hme_read32(hp, bregs + BMAC_EXCTR) +
927 hme_read32(hp, bregs + BMAC_LTCTR));
928 hme_write32(hp, bregs + BMAC_EXCTR, 0);
929 hme_write32(hp, bregs + BMAC_LTCTR, 0);
930}
931
932
933static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
934{
935 ASD(("happy_meal_poll_stop: "));
936
937
938 if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
939 (HFLAG_POLLENABLE | HFLAG_POLL)) {
940 HMD(("not polling, return\n"));
941 return;
942 }
943
944
945 ASD(("were polling, mif ints off, "));
946 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
947
948
949 ASD(("polling off, "));
950 hme_write32(hp, tregs + TCVR_CFG,
951 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
952
953
954 hp->happy_flags &= ~(HFLAG_POLL);
955
956
957 udelay(200);
958 ASD(("done\n"));
959}
960
961
962
963
964#define TCVR_RESET_TRIES 16
965#define TCVR_UNISOLATE_TRIES 32
966
967
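/* Reset and (re)select the transceiver.  The currently unused PHY is put
 * into isolate/power-down, PSELECT is pointed at the one we want, then a
 * BMCR reset is issued and polled for completion; finally the PHY is
 * un-isolated and, on non-Lucent parts, CSCONFIG_DFBYPASS is enabled.
 */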
968static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
969{
970 u32 tconfig;
971 int result, tries = TCVR_RESET_TRIES;
972
973 tconfig = hme_read32(hp, tregs + TCVR_CFG);
974 ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
975 if (hp->tcvr_type == external) {
976 ASD(("external<"));
977 hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
978 hp->tcvr_type = internal;
979 hp->paddr = TCV_PADDR_ITX;
980 ASD(("ISOLATE,"));
981 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
982 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
983 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
984 if (result == TCVR_FAILURE) {
985 ASD(("phyread_fail>\n"));
986 return -1;
987 }
988 ASD(("phyread_ok,PSELECT>"));
989 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
990 hp->tcvr_type = external;
991 hp->paddr = TCV_PADDR_ETX;
992 } else {
993 if (tconfig & TCV_CFG_MDIO1) {
994 ASD(("internal<PSELECT,"));
995 hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
996 ASD(("ISOLATE,"));
997 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
998 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
999 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1000 if (result == TCVR_FAILURE) {
1001 ASD(("phyread_fail>\n"));
1002 return -1;
1003 }
1004 ASD(("phyread_ok,~PSELECT>"));
1005 hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
1006 hp->tcvr_type = internal;
1007 hp->paddr = TCV_PADDR_ITX;
1008 }
1009 }
1010
1011 ASD(("BMCR_RESET "));
1012 happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
1013
1014 while (--tries) {
1015 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1016 if (result == TCVR_FAILURE)
1017 return -1;
1018 hp->sw_bmcr = result;
1019 if (!(result & BMCR_RESET))
1020 break;
1021 udelay(20);
1022 }
1023 if (!tries) {
1024 ASD(("BMCR RESET FAILED!\n"));
1025 return -1;
1026 }
1027 ASD(("RESET_OK\n"));
1028
1029
1030 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1031 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1032 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1033 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1034
1035 ASD(("UNISOLATE"));
1036 hp->sw_bmcr &= ~(BMCR_ISOLATE);
1037 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1038
1039 tries = TCVR_UNISOLATE_TRIES;
1040 while (--tries) {
1041 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1042 if (result == TCVR_FAILURE)
1043 return -1;
1044 if (!(result & BMCR_ISOLATE))
1045 break;
1046 udelay(20);
1047 }
1048 if (!tries) {
1049 ASD((" FAILED!\n"));
1050 return -1;
1051 }
1052 ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
1053 if (!is_lucent_phy(hp)) {
1054 result = happy_meal_tcvr_read(hp, tregs,
1055 DP83840_CSCONFIG);
1056 happy_meal_tcvr_write(hp, tregs,
1057 DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
1058 }
1059 return 0;
1060}
1061
1062
1063
1064
1065
1066static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
1067{
1068 unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
1069
1070 ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
1071 if (hp->happy_flags & HFLAG_POLL) {
1072
1073 ASD(("<polling> "));
1074 if (hp->tcvr_type == internal) {
1075 if (tconfig & TCV_CFG_MDIO1) {
1076 ASD(("<internal> <poll stop> "));
1077 happy_meal_poll_stop(hp, tregs);
1078 hp->paddr = TCV_PADDR_ETX;
1079 hp->tcvr_type = external;
1080 ASD(("<external>\n"));
1081 tconfig &= ~(TCV_CFG_PENABLE);
1082 tconfig |= TCV_CFG_PSELECT;
1083 hme_write32(hp, tregs + TCVR_CFG, tconfig);
1084 }
1085 } else {
1086 if (hp->tcvr_type == external) {
1087 ASD(("<external> "));
1088 if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
1089 ASD(("<poll stop> "));
1090 happy_meal_poll_stop(hp, tregs);
1091 hp->paddr = TCV_PADDR_ITX;
1092 hp->tcvr_type = internal;
1093 ASD(("<internal>\n"));
1094 hme_write32(hp, tregs + TCVR_CFG,
1095 hme_read32(hp, tregs + TCVR_CFG) &
1096 ~(TCV_CFG_PSELECT));
1097 }
1098 ASD(("\n"));
1099 } else {
1100 ASD(("<none>\n"));
1101 }
1102 }
1103 } else {
1104 u32 reread = hme_read32(hp, tregs + TCVR_CFG);
1105
1106
1107 ASD(("<not polling> "));
1108 if (reread & TCV_CFG_MDIO1) {
1109 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1110 hp->paddr = TCV_PADDR_ETX;
1111 hp->tcvr_type = external;
1112 ASD(("<external>\n"));
1113 } else {
1114 if (reread & TCV_CFG_MDIO0) {
1115 hme_write32(hp, tregs + TCVR_CFG,
1116 tconfig & ~(TCV_CFG_PSELECT));
1117 hp->paddr = TCV_PADDR_ITX;
1118 hp->tcvr_type = internal;
1119 ASD(("<internal>\n"));
1120 } else {
1121 printk(KERN_ERR "happy meal: Transceiver and a coke please.");
1122 hp->tcvr_type = none;
1123 ASD(("<none>\n"));
1124 }
1125 }
1126 }
1127}
1128

/* Receive ring buffer management.
 *
 * Every RX slot gets a buffer of RX_BUF_ALLOC_SIZE bytes, DMA-mapped in
 * full, with the chip told to skip RX_OFFSET bytes so the packet data
 * lands at a convenient alignment for the stack.  On receive, frames no
 * larger than RX_COPY_THRESHOLD are copied into a freshly allocated skb
 * and the original buffer goes straight back into the ring; bigger
 * frames are passed up whole and replaced by a new buffer.  If that
 * replacement allocation (or its DMA mapping) fails, the packet is
 * dropped and the old buffer is recycled, so the ring never empties.
 */
1172
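/* Unmap and free every skb still attached to the RX and TX rings.  A
 * multi-fragment TX packet occupies one descriptor per fragment, so the
 * inner loop walks and unmaps all of them before freeing the skb.
 */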
1173static void happy_meal_clean_rings(struct happy_meal *hp)
1174{
1175 int i;
1176
1177 for (i = 0; i < RX_RING_SIZE; i++) {
1178 if (hp->rx_skbs[i] != NULL) {
1179 struct sk_buff *skb = hp->rx_skbs[i];
1180 struct happy_meal_rxd *rxd;
1181 u32 dma_addr;
1182
1183 rxd = &hp->happy_block->happy_meal_rxd[i];
1184 dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
1185 dma_unmap_single(hp->dma_dev, dma_addr,
1186 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1187 dev_kfree_skb_any(skb);
1188 hp->rx_skbs[i] = NULL;
1189 }
1190 }
1191
1192 for (i = 0; i < TX_RING_SIZE; i++) {
1193 if (hp->tx_skbs[i] != NULL) {
1194 struct sk_buff *skb = hp->tx_skbs[i];
1195 struct happy_meal_txd *txd;
1196 u32 dma_addr;
1197 int frag;
1198
1199 hp->tx_skbs[i] = NULL;
1200
1201 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1202 txd = &hp->happy_block->happy_meal_txd[i];
1203 dma_addr = hme_read_desc32(hp, &txd->tx_addr);
1204 if (!frag)
1205 dma_unmap_single(hp->dma_dev, dma_addr,
1206 (hme_read_desc32(hp, &txd->tx_flags)
1207 & TXFLAG_SIZE),
1208 DMA_TO_DEVICE);
1209 else
1210 dma_unmap_page(hp->dma_dev, dma_addr,
1211 (hme_read_desc32(hp, &txd->tx_flags)
1212 & TXFLAG_SIZE),
1213 DMA_TO_DEVICE);
1214
1215 if (frag != skb_shinfo(skb)->nr_frags)
1216 i++;
1217 }
1218
1219 dev_kfree_skb_any(skb);
1220 }
1221 }
1222}
1223
1224
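/* Bring the descriptor rings to a known state: reset the ring indices,
 * release any old buffers, allocate and DMA-map a fresh skb for every RX
 * slot (slots whose allocation or mapping fails are left empty), and
 * clear all TX descriptors.
 */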
1225static void happy_meal_init_rings(struct happy_meal *hp)
1226{
1227 struct hmeal_init_block *hb = hp->happy_block;
1228 int i;
1229
1230 HMD(("happy_meal_init_rings: counters to zero, "));
1231 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1232
1233
1234 HMD(("clean, "));
1235 happy_meal_clean_rings(hp);
1236
1237
1238 HMD(("init rxring, "));
1239 for (i = 0; i < RX_RING_SIZE; i++) {
1240 struct sk_buff *skb;
1241 u32 mapping;
1242
1243 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1244 if (!skb) {
1245 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1246 continue;
1247 }
1248 hp->rx_skbs[i] = skb;
1249
1250
1251 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1252 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1253 DMA_FROM_DEVICE);
1254 if (dma_mapping_error(hp->dma_dev, mapping)) {
1255 dev_kfree_skb_any(skb);
1256 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1257 continue;
1258 }
1259 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1260 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1261 mapping);
1262 skb_reserve(skb, RX_OFFSET);
1263 }
1264
1265 HMD(("init txring, "));
1266 for (i = 0; i < TX_RING_SIZE; i++)
1267 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1268
1269 HMD(("done\n"));
1270}
1271
1272
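/* Start link bring-up.  With autoneg enabled (or no ethtool request) the
 * advertisement register is derived from the PHY's BMSR capabilities and
 * auto-negotiation is restarted; otherwise, or if the restart never
 * begins, a fixed speed/duplex from the ethtool request is written to
 * BMCR.  Either way the link timer is armed to track progress.
 */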
1273static void
1274happy_meal_begin_auto_negotiation(struct happy_meal *hp,
1275 void __iomem *tregs,
1276 const struct ethtool_link_ksettings *ep)
1277{
1278 int timeout;
1279
1280
1281 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1282 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1283 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1284 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1285
1286
1287
1288 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1289 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1290
1291 if (hp->sw_bmsr & BMSR_10HALF)
1292 hp->sw_advertise |= (ADVERTISE_10HALF);
1293 else
1294 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1295
1296 if (hp->sw_bmsr & BMSR_10FULL)
1297 hp->sw_advertise |= (ADVERTISE_10FULL);
1298 else
1299 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1300 if (hp->sw_bmsr & BMSR_100HALF)
1301 hp->sw_advertise |= (ADVERTISE_100HALF);
1302 else
1303 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1304 if (hp->sw_bmsr & BMSR_100FULL)
1305 hp->sw_advertise |= (ADVERTISE_100FULL);
1306 else
1307 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1308 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1309
1310
1311
1312
1313
1314
1315
1316#ifdef AUTO_SWITCH_DEBUG
1317 ASD(("%s: Advertising [ ", hp->dev->name));
1318 if (hp->sw_advertise & ADVERTISE_10HALF)
1319 ASD(("10H "));
1320 if (hp->sw_advertise & ADVERTISE_10FULL)
1321 ASD(("10F "));
1322 if (hp->sw_advertise & ADVERTISE_100HALF)
1323 ASD(("100H "));
1324 if (hp->sw_advertise & ADVERTISE_100FULL)
1325 ASD(("100F "));
1326#endif
1327
1328
1329 hp->sw_bmcr |= BMCR_ANENABLE;
1330 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1331
1332
1333 hp->sw_bmcr |= BMCR_ANRESTART;
1334 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1335
1336
1337
1338 timeout = 64;
1339 while (--timeout) {
1340 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1341 if (!(hp->sw_bmcr & BMCR_ANRESTART))
1342 break;
1343 udelay(10);
1344 }
1345 if (!timeout) {
1346 printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
1347 "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
1348 printk(KERN_NOTICE "%s: Performing force link detection.\n",
1349 hp->dev->name);
1350 goto force_link;
1351 } else {
1352 hp->timer_state = arbwait;
1353 }
1354 } else {
1355force_link:
1356
1357
1358
1359
1360
1361
1362
1363
1364 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1365 hp->sw_bmcr = BMCR_SPEED100;
1366 } else {
1367 if (ep->base.speed == SPEED_100)
1368 hp->sw_bmcr = BMCR_SPEED100;
1369 else
1370 hp->sw_bmcr = 0;
1371 if (ep->base.duplex == DUPLEX_FULL)
1372 hp->sw_bmcr |= BMCR_FULLDPLX;
1373 }
1374 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1375
1376 if (!is_lucent_phy(hp)) {
1377
1378
1379
1380
1381 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
1382 DP83840_CSCONFIG);
1383 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
1384 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
1385 hp->sw_csconfig);
1386 }
1387 hp->timer_state = ltrywait;
1388 }
1389
1390 hp->timer_ticks = 0;
1391 hp->happy_timer.expires = jiffies + (12 * HZ)/10;
1392 add_timer(&hp->happy_timer);
1393}
1394
1395
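/* Full chip (re)initialization, called with happy_lock held: stop the
 * chip, rebuild the rings, pick and reset the transceiver, program the
 * MAC address, multicast hash and DMA burst sizes, point the ETX/ERX
 * engines at the descriptor rings, enable interrupts and the BigMAC
 * TX/RX paths, and finally kick off link negotiation.
 */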
1396static int happy_meal_init(struct happy_meal *hp)
1397{
1398 const unsigned char *e = &hp->dev->dev_addr[0];
1399 void __iomem *gregs = hp->gregs;
1400 void __iomem *etxregs = hp->etxregs;
1401 void __iomem *erxregs = hp->erxregs;
1402 void __iomem *bregs = hp->bigmacregs;
1403 void __iomem *tregs = hp->tcvregs;
1404 u32 regtmp, rxcfg;
1405
1406
1407 del_timer(&hp->happy_timer);
1408
1409 HMD(("happy_meal_init: happy_flags[%08x] ",
1410 hp->happy_flags));
1411 if (!(hp->happy_flags & HFLAG_INIT)) {
1412 HMD(("set HFLAG_INIT, "));
1413 hp->happy_flags |= HFLAG_INIT;
1414 happy_meal_get_counters(hp, bregs);
1415 }
1416
1417
1418 HMD(("to happy_meal_poll_stop\n"));
1419 happy_meal_poll_stop(hp, tregs);
1420
1421
1422 HMD(("happy_meal_init: to happy_meal_stop\n"));
1423 happy_meal_stop(hp, gregs);
1424
1425
1426 HMD(("happy_meal_init: to happy_meal_init_rings\n"));
1427 happy_meal_init_rings(hp);
1428
1429
1430 HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
1431 hme_read32(hp, tregs + TCVR_IMASK)));
1432 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1433
1434
1435 if (hp->happy_flags & HFLAG_FENABLE) {
1436 HMD(("use frame old[%08x], ",
1437 hme_read32(hp, tregs + TCVR_CFG)));
1438 hme_write32(hp, tregs + TCVR_CFG,
1439 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1440 } else {
1441 HMD(("use bitbang old[%08x], ",
1442 hme_read32(hp, tregs + TCVR_CFG)));
1443 hme_write32(hp, tregs + TCVR_CFG,
1444 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1445 }
1446
1447
1448 HMD(("to happy_meal_transceiver_check\n"));
1449 happy_meal_transceiver_check(hp, tregs);
1450
1451
1452 HMD(("happy_meal_init: "));
1453 switch(hp->tcvr_type) {
1454 case none:
1455
1456 HMD(("AAIEEE no transceiver type, EAGAIN"));
1457 return -EAGAIN;
1458
1459 case internal:
1460
1461 HMD(("internal, using MII, "));
1462 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1463 break;
1464
1465 case external:
1466
1467 HMD(("external, disable MII, "));
1468 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1469 break;
1470 }
1471
1472 if (happy_meal_tcvr_reset(hp, tregs))
1473 return -EAGAIN;
1474
1475
1476 HMD(("tx/rx reset, "));
1477 happy_meal_tx_reset(hp, bregs);
1478 happy_meal_rx_reset(hp, bregs);
1479
1480
1481 HMD(("jsize/ipg1/ipg2, "));
1482 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1483 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1484 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1485
1486
1487 HMD(("rseed/macaddr, "));
1488
1489
1490 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1491
1492 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1493 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1494 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
1495
1496 HMD(("htable, "));
1497 if ((hp->dev->flags & IFF_ALLMULTI) ||
1498 (netdev_mc_count(hp->dev) > 64)) {
1499 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1500 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1501 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1502 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1503 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1504 u16 hash_table[4];
1505 struct netdev_hw_addr *ha;
1506 u32 crc;
1507
1508 memset(hash_table, 0, sizeof(hash_table));
1509 netdev_for_each_mc_addr(ha, hp->dev) {
1510 crc = ether_crc_le(6, ha->addr);
1511 crc >>= 26;
1512 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1513 }
1514 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1515 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1516 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1517 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1518 } else {
1519 hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1520 hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1521 hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1522 hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1523 }
1524
1525
1526 HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
1527 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1528 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
1529 hme_write32(hp, erxregs + ERX_RING,
1530 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1531 hme_write32(hp, etxregs + ETX_RING,
1532 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1533

	/* On some chip revisions the ERX ring pointer write does not always
	 * stick.  Read it back and, if the value was lost, rewrite it with a
	 * low bit set, which the hardware ignores since the ring base must
	 * be aligned anyway.
	 */
1539 if (hme_read32(hp, erxregs + ERX_RING) !=
1540 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1541 hme_write32(hp, erxregs + ERX_RING,
1542 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1543 | 0x4);
1544
1545
1546 HMD(("happy_meal_init: old[%08x] bursts<",
1547 hme_read32(hp, gregs + GREG_CFG)));
1548
1549#ifndef CONFIG_SPARC
1550
1551 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1552#else
1553 if ((hp->happy_bursts & DMA_BURST64) &&
1554 ((hp->happy_flags & HFLAG_PCI) != 0
1555#ifdef CONFIG_SBUS
1556 || sbus_can_burst64()
1557#endif
1558 || 0)) {
1559 u32 gcfg = GREG_CFG_BURST64;
1560
1561
1562
1563
1564
1565#ifdef CONFIG_SBUS
1566 if ((hp->happy_flags & HFLAG_PCI) == 0) {
1567 struct platform_device *op = hp->happy_dev;
1568 if (sbus_can_dma_64bit()) {
1569 sbus_set_sbus64(&op->dev,
1570 hp->happy_bursts);
1571 gcfg |= GREG_CFG_64BIT;
1572 }
1573 }
1574#endif
1575
1576 HMD(("64>"));
1577 hme_write32(hp, gregs + GREG_CFG, gcfg);
1578 } else if (hp->happy_bursts & DMA_BURST32) {
1579 HMD(("32>"));
1580 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1581 } else if (hp->happy_bursts & DMA_BURST16) {
1582 HMD(("16>"));
1583 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1584 } else {
1585 HMD(("XXX>"));
1586 hme_write32(hp, gregs + GREG_CFG, 0);
1587 }
1588#endif
1589
1590
1591 HMD((", enable global interrupts, "));
1592 hme_write32(hp, gregs + GREG_IMASK,
1593 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1594 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1595
1596
1597 HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
1598 hme_read32(hp, etxregs + ETX_RSIZE)));
1599 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1600
1601
1602 HMD(("tx dma enable old[%08x], ",
1603 hme_read32(hp, etxregs + ETX_CFG)));
1604 hme_write32(hp, etxregs + ETX_CFG,
1605 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);

	/* The ERX config register is equally temperamental: write it, check
	 * that the value stuck, write it once more, and warn if it still
	 * reads back wrong.
	 */
1612 HMD(("erx regs bug old[%08x]\n",
1613 hme_read32(hp, erxregs + ERX_CFG)));
1614 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1615 regtmp = hme_read32(hp, erxregs + ERX_CFG);
1616 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1617 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1618 printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
1619 printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
1620 ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1621
1622 }
1623
1624
1625 HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
1626 hme_read32(hp, bregs + BMAC_RXCFG)));
1627 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1628 if (hp->dev->flags & IFF_PROMISC)
1629 rxcfg |= BIGMAC_RXCFG_PMISC;
1630 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1631
1632
1633 udelay(10);
1634
1635
1636 HMD(("BIGMAC init, "));
1637 regtmp = 0;
1638 if (hp->happy_flags & HFLAG_FULL)
1639 regtmp |= BIGMAC_TXCFG_FULLDPLX;
1640
1641
1642
1643
1644 hme_write32(hp, bregs + BMAC_TXCFG, regtmp );
1645
1646
1647 hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1648
1649
1650 regtmp = BIGMAC_XCFG_ODENABLE;
1651
1652
1653 if (hp->happy_flags & HFLAG_LANCE)
1654 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1655
1656
1657 if (hp->tcvr_type == external)
1658 regtmp |= BIGMAC_XCFG_MIIDISAB;
1659
1660 HMD(("XIF config old[%08x], ",
1661 hme_read32(hp, bregs + BMAC_XIFCFG)));
1662 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1663
1664
1665 HMD(("tx old[%08x] and rx [%08x] ON!\n",
1666 hme_read32(hp, bregs + BMAC_TXCFG),
1667 hme_read32(hp, bregs + BMAC_RXCFG)));
1668
1669
1670 hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
1671 hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
1672
1673 hme_write32(hp, bregs + BMAC_TXCFG,
1674 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1675 hme_write32(hp, bregs + BMAC_RXCFG,
1676 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1677
1678
1679 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1680
1681
1682 return 0;
1683}
1684
1685
1686static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1687{
1688 void __iomem *tregs = hp->tcvregs;
1689 void __iomem *bregs = hp->bigmacregs;
1690 void __iomem *gregs = hp->gregs;
1691
1692 happy_meal_stop(hp, gregs);
1693 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1694 if (hp->happy_flags & HFLAG_FENABLE)
1695 hme_write32(hp, tregs + TCVR_CFG,
1696 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1697 else
1698 hme_write32(hp, tregs + TCVR_CFG,
1699 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1700 happy_meal_transceiver_check(hp, tregs);
1701 switch(hp->tcvr_type) {
1702 case none:
1703 return;
1704 case internal:
1705 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1706 break;
1707 case external:
1708 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1709 break;
1710 }
1711 if (happy_meal_tcvr_reset(hp, tregs))
1712 return;
1713
1714
1715 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1716 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1717
1718
1719 if (hp->sw_bmsr & BMSR_10HALF)
1720 hp->sw_advertise |= (ADVERTISE_10HALF);
1721 else
1722 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1723
1724 if (hp->sw_bmsr & BMSR_10FULL)
1725 hp->sw_advertise |= (ADVERTISE_10FULL);
1726 else
1727 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1728 if (hp->sw_bmsr & BMSR_100HALF)
1729 hp->sw_advertise |= (ADVERTISE_100HALF);
1730 else
1731 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1732 if (hp->sw_bmsr & BMSR_100FULL)
1733 hp->sw_advertise |= (ADVERTISE_100FULL);
1734 else
1735 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1736
1737
1738 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1739}
1740
1741
1742
1743
1744
1745
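/* Decode the error bits of the GREG_STAT interrupt status, report them,
 * and re-run happy_meal_init() for the fatal ones.  Returns 1 when the
 * chip was reset, in which case the caller must not touch the rings.
 */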
1746static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1747{
1748 int reset = 0;
1749
1750
1751 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1752 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1753 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1754 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1755 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1756 GREG_STAT_SLVPERR))
1757 printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
1758 hp->dev->name, status);
1759
1760 if (status & GREG_STAT_RFIFOVF) {
1761
1762
1763 printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
1764 }
1765
1766 if (status & GREG_STAT_STSTERR) {
1767
1768 printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
1769 reset = 1;
1770 }
1771
1772 if (status & GREG_STAT_TFIFO_UND) {
1773
1774 printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
1775 hp->dev->name);
1776 reset = 1;
1777 }
1778
1779 if (status & GREG_STAT_MAXPKTERR) {
1780
1781
1782
1783 printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
1784 reset = 1;
1785 }
1786
1787 if (status & GREG_STAT_NORXD) {
1788
1789
1790
1791
1792
1793 printk(KERN_INFO "%s: Happy Meal out of receive "
1794 "descriptors, packet dropped.\n",
1795 hp->dev->name);
1796 }
1797
1798 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1799
1800 printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
1801 if (status & GREG_STAT_RXERR)
1802 printk("GenericError ");
1803 if (status & GREG_STAT_RXPERR)
1804 printk("ParityError ");
1805 if (status & GREG_STAT_RXTERR)
1806 printk("RxTagBotch ");
1807 printk("]\n");
1808 reset = 1;
1809 }
1810
1811 if (status & GREG_STAT_EOPERR) {
1812
1813
1814
1815 printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
1816 hp->dev->name);
1817 reset = 1;
1818 }
1819
1820 if (status & GREG_STAT_MIFIRQ) {
1821
1822 printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
1823 }
1824
1825 if (status &
1826 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1827
1828 printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
1829 if (status & GREG_STAT_TXEACK)
1830 printk("GenericError ");
1831 if (status & GREG_STAT_TXLERR)
1832 printk("LateError ");
1833 if (status & GREG_STAT_TXPERR)
1834 printk("ParityError ");
1835 if (status & GREG_STAT_TXTERR)
1836 printk("TagBotch ");
1837 printk("]\n");
1838 reset = 1;
1839 }
1840
1841 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
1842
1843
1844
1845 printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
1846 hp->dev->name,
1847 (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1848 reset = 1;
1849 }
1850
1851 if (reset) {
1852 printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
1853 happy_meal_init(hp);
1854 return 1;
1855 }
1856 return 0;
1857}
1858
1859
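/* MIF (link status change) interrupt: re-read the link partner ability
 * bits, update BMCR to the best matching speed/duplex, and stop MIF
 * polling.
 */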
1860static void happy_meal_mif_interrupt(struct happy_meal *hp)
1861{
1862 void __iomem *tregs = hp->tcvregs;
1863
1864 printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
1865 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1866 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
1867
1868
1869 if (hp->sw_lpa & LPA_100FULL) {
1870 printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
1871 hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
	} else if (hp->sw_lpa & LPA_100HALF) {
		printk(KERN_INFO "%s: Switching to 100Mbps at half duplex.", hp->dev->name);
		hp->sw_bmcr |= BMCR_SPEED100;
	} else if (hp->sw_lpa & LPA_10FULL) {
		printk(KERN_INFO "%s: Switching to 10Mbps at full duplex.", hp->dev->name);
		hp->sw_bmcr |= BMCR_FULLDPLX;
1878 } else {
1879 printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
1880 }
1881 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1882
1883
1884 happy_meal_poll_stop(hp, tregs);
1885}
1886
1887#ifdef TXDEBUG
1888#define TXD(x) printk x
1889#else
1890#define TXD(x)
1891#endif
1892
1893
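/* Reclaim completed transmit descriptors between tx_old and tx_new:
 * skip anything still owned by the chip (for fragmented packets the
 * last fragment's OWN bit is checked too), unmap every fragment, free
 * the skb and wake the queue once enough slots are available.  Runs
 * under happy_lock from the interrupt handler.
 */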
1894static void happy_meal_tx(struct happy_meal *hp)
1895{
1896 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1897 struct happy_meal_txd *this;
1898 struct net_device *dev = hp->dev;
1899 int elem;
1900
1901 elem = hp->tx_old;
1902 TXD(("TX<"));
1903 while (elem != hp->tx_new) {
1904 struct sk_buff *skb;
1905 u32 flags, dma_addr, dma_len;
1906 int frag;
1907
1908 TXD(("[%d]", elem));
1909 this = &txbase[elem];
1910 flags = hme_read_desc32(hp, &this->tx_flags);
1911 if (flags & TXFLAG_OWN)
1912 break;
1913 skb = hp->tx_skbs[elem];
1914 if (skb_shinfo(skb)->nr_frags) {
1915 int last;
1916
1917 last = elem + skb_shinfo(skb)->nr_frags;
1918 last &= (TX_RING_SIZE - 1);
1919 flags = hme_read_desc32(hp, &txbase[last].tx_flags);
1920 if (flags & TXFLAG_OWN)
1921 break;
1922 }
1923 hp->tx_skbs[elem] = NULL;
1924 dev->stats.tx_bytes += skb->len;
1925
1926 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1927 dma_addr = hme_read_desc32(hp, &this->tx_addr);
1928 dma_len = hme_read_desc32(hp, &this->tx_flags);
1929
1930 dma_len &= TXFLAG_SIZE;
1931 if (!frag)
1932 dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1933 else
1934 dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1935
1936 elem = NEXT_TX(elem);
1937 this = &txbase[elem];
1938 }
1939
1940 dev_consume_skb_irq(skb);
1941 dev->stats.tx_packets++;
1942 }
1943 hp->tx_old = elem;
1944 TXD((">"));
1945
1946 if (netif_queue_stopped(dev) &&
1947 TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
1948 netif_wake_queue(dev);
1949}
1950
1951#ifdef RXDEBUG
1952#define RXD(x) printk x
1953#else
1954#define RXD(x)
1955#endif
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1967{
1968 struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
1969 struct happy_meal_rxd *this;
1970 int elem = hp->rx_new, drops = 0;
1971 u32 flags;
1972
1973 RXD(("RX<"));
1974 this = &rxbase[elem];
1975 while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
1976 struct sk_buff *skb;
1977 int len = flags >> 16;
1978 u16 csum = flags & RXFLAG_CSUM;
1979 u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
1980
1981 RXD(("[%d ", elem));
1982
1983
1984 if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
1985 RXD(("ERR(%08x)]", flags));
1986 dev->stats.rx_errors++;
1987 if (len < ETH_ZLEN)
1988 dev->stats.rx_length_errors++;
1989 if (len & (RXFLAG_OVERFLOW >> 16)) {
1990 dev->stats.rx_over_errors++;
1991 dev->stats.rx_fifo_errors++;
1992 }
1993
1994
1995 drop_it:
1996 dev->stats.rx_dropped++;
1997 hme_write_rxd(hp, this,
1998 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1999 dma_addr);
2000 goto next;
2001 }
2002 skb = hp->rx_skbs[elem];
2003 if (len > RX_COPY_THRESHOLD) {
2004 struct sk_buff *new_skb;
2005 u32 mapping;
2006
2007
2008 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
2009 if (new_skb == NULL) {
2010 drops++;
2011 goto drop_it;
2012 }
2013 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2014 mapping = dma_map_single(hp->dma_dev, new_skb->data,
2015 RX_BUF_ALLOC_SIZE,
2016 DMA_FROM_DEVICE);
2017 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2018 dev_kfree_skb_any(new_skb);
2019 drops++;
2020 goto drop_it;
2021 }
2022
2023 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2024 hp->rx_skbs[elem] = new_skb;
2025 hme_write_rxd(hp, this,
2026 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2027 mapping);
2028 skb_reserve(new_skb, RX_OFFSET);
2029
2030
2031 skb_trim(skb, len);
2032 } else {
2033 struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
2034
2035 if (copy_skb == NULL) {
2036 drops++;
2037 goto drop_it;
2038 }
2039
2040 skb_reserve(copy_skb, 2);
2041 skb_put(copy_skb, len);
2042 dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2043 skb_copy_from_linear_data(skb, copy_skb->data, len);
2044 dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2045
2046 hme_write_rxd(hp, this,
2047 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2048 dma_addr);
2049
2050 skb = copy_skb;
2051 }
2052
2053
2054 skb->csum = csum_unfold(~(__force __sum16)htons(csum));
2055 skb->ip_summed = CHECKSUM_COMPLETE;
2056
2057 RXD(("len=%d csum=%4x]", len, csum));
2058 skb->protocol = eth_type_trans(skb, dev);
2059 netif_rx(skb);
2060
2061 dev->stats.rx_packets++;
2062 dev->stats.rx_bytes += len;
2063 next:
2064 elem = NEXT_RX(elem);
2065 this = &rxbase[elem];
2066 }
2067 hp->rx_new = elem;
2068 if (drops)
2069 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
2070 RXD((">"));
2071}
2072
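/* Top-level interrupt handler: sample GREG_STAT once and, under
 * happy_lock, dispatch to the error, MIF, TX-completion and RX paths.
 */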
2073static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
2074{
2075 struct net_device *dev = dev_id;
2076 struct happy_meal *hp = netdev_priv(dev);
2077 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2078
2079 HMD(("happy_meal_interrupt: status=%08x ", happy_status));
2080
2081 spin_lock(&hp->happy_lock);
2082
2083 if (happy_status & GREG_STAT_ERRORS) {
2084 HMD(("ERRORS "));
2085 if (happy_meal_is_not_so_happy(hp, happy_status))
2086 goto out;
2087 }
2088
2089 if (happy_status & GREG_STAT_MIFIRQ) {
2090 HMD(("MIFIRQ "));
2091 happy_meal_mif_interrupt(hp);
2092 }
2093
2094 if (happy_status & GREG_STAT_TXALL) {
2095 HMD(("TXALL "));
2096 happy_meal_tx(hp);
2097 }
2098
2099 if (happy_status & GREG_STAT_RXTOHOST) {
2100 HMD(("RXTOHOST "));
2101 happy_meal_rx(hp, dev);
2102 }
2103
2104 HMD(("done\n"));
2105out:
2106 spin_unlock(&hp->happy_lock);
2107
2108 return IRQ_HANDLED;
2109}
2110
2111#ifdef CONFIG_SBUS
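/* A Quattro (QFE) card shares one SBUS interrupt between its four HME
 * channels, so poll each channel's status and service whichever ones
 * have work pending.
 */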
2112static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
2113{
2114 struct quattro *qp = (struct quattro *) cookie;
2115 int i;
2116
2117 for (i = 0; i < 4; i++) {
2118 struct net_device *dev = qp->happy_meals[i];
2119 struct happy_meal *hp = netdev_priv(dev);
2120 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2121
2122 HMD(("quattro_interrupt: status=%08x ", happy_status));
2123
2124 if (!(happy_status & (GREG_STAT_ERRORS |
2125 GREG_STAT_MIFIRQ |
2126 GREG_STAT_TXALL |
2127 GREG_STAT_RXTOHOST)))
2128 continue;
2129
2130 spin_lock(&hp->happy_lock);
2131
2132 if (happy_status & GREG_STAT_ERRORS) {
2133 HMD(("ERRORS "));
2134 if (happy_meal_is_not_so_happy(hp, happy_status))
2135 goto next;
2136 }
2137
2138 if (happy_status & GREG_STAT_MIFIRQ) {
2139 HMD(("MIFIRQ "));
2140 happy_meal_mif_interrupt(hp);
2141 }
2142
2143 if (happy_status & GREG_STAT_TXALL) {
2144 HMD(("TXALL "));
2145 happy_meal_tx(hp);
2146 }
2147
2148 if (happy_status & GREG_STAT_RXTOHOST) {
2149 HMD(("RXTOHOST "));
2150 happy_meal_rx(hp, dev);
2151 }
2152
2153 next:
2154 spin_unlock(&hp->happy_lock);
2155 }
2156 HMD(("done\n"));
2157
2158 return IRQ_HANDLED;
2159}
2160#endif
2161
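/* ndo_open: request the interrupt, unless this is an SBUS Quattro port
 * (those share quattro_sbus_interrupt() above), then run the full chip
 * init under happy_lock.
 */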
2162static int happy_meal_open(struct net_device *dev)
2163{
2164 struct happy_meal *hp = netdev_priv(dev);
2165 int res;
2166
2167 HMD(("happy_meal_open: "));
2168
2169
2170
2171
2172 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2173 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2174 dev->name, dev);
2175 if (res) {
2176 HMD(("EAGAIN\n"));
2177 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
2178 hp->irq);
2179
2180 return -EAGAIN;
2181 }
2182 }
2183
2184 HMD(("to happy_meal_init\n"));
2185
2186 spin_lock_irq(&hp->happy_lock);
2187 res = happy_meal_init(hp);
2188 spin_unlock_irq(&hp->happy_lock);
2189
2190 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2191 free_irq(hp->irq, dev);
2192 return res;
2193}
2194
2195static int happy_meal_close(struct net_device *dev)
2196{
2197 struct happy_meal *hp = netdev_priv(dev);
2198
2199 spin_lock_irq(&hp->happy_lock);
2200 happy_meal_stop(hp, hp->gregs);
2201 happy_meal_clean_rings(hp);
2202
2203
2204 del_timer(&hp->happy_timer);
2205
2206 spin_unlock_irq(&hp->happy_lock);
2207
2208
2209
2210
2211
2212 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2213 free_irq(hp->irq, dev);
2214
2215 return 0;
2216}
2217
2218#ifdef SXDEBUG
2219#define SXD(x) printk x
2220#else
2221#define SXD(x)
2222#endif
2223
2224static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
2225{
2226 struct happy_meal *hp = netdev_priv(dev);
2227
2228 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2229 tx_dump_log();
2230 printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
2231 hme_read32(hp, hp->gregs + GREG_STAT),
2232 hme_read32(hp, hp->etxregs + ETX_CFG),
2233 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
2234
2235 spin_lock_irq(&hp->happy_lock);
2236 happy_meal_init(hp);
2237 spin_unlock_irq(&hp->happy_lock);
2238
2239 netif_wake_queue(dev);
2240}
2241
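/* Error unwind for happy_meal_start_xmit(): when mapping one of the page
 * fragments fails, undo the head mapping and every fragment mapping that
 * was already written into the ring between first_entry and entry.
 */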
2242static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2243 u32 first_len, u32 first_entry, u32 entry)
2244{
2245 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2246
2247 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2248
2249 first_entry = NEXT_TX(first_entry);
2250 while (first_entry != entry) {
2251 struct happy_meal_txd *this = &txbase[first_entry];
2252 u32 addr, len;
2253
2254 addr = hme_read_desc32(hp, &this->tx_addr);
2255 len = hme_read_desc32(hp, &this->tx_flags);
2256 len &= TXFLAG_SIZE;
		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
		first_entry = NEXT_TX(first_entry);
	}
2259}
2260
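/* ndo_start_xmit: build OWN'd descriptors for the skb head and each page
 * fragment (SOP on the first, EOP on the last, checksum fields filled in
 * for CHECKSUM_PARTIAL).  For fragmented packets the first descriptor is
 * written last so the chip never sees a partially built chain, then the
 * TX DMA engine is poked via ETX_PENDING.
 */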
2261static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2262 struct net_device *dev)
2263{
2264 struct happy_meal *hp = netdev_priv(dev);
2265 int entry;
2266 u32 tx_flags;
2267
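 /* Build the descriptor flag word up front.  For CHECKSUM_PARTIAL skbs
  * the hardware is told where the checksum computation starts and where
  * the result must be inserted via the CSBUFBEGIN/CSLOCATION fields.
  */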
2268 tx_flags = TXFLAG_OWN;
2269 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2270 const u32 csum_start_off = skb_checksum_start_offset(skb);
2271 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
2272
2273 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
2274 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
2275 ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
2276 }
2277
2278 spin_lock_irq(&hp->happy_lock);
2279
2280 if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
2281 netif_stop_queue(dev);
2282 spin_unlock_irq(&hp->happy_lock);
2283 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
2284 dev->name);
2285 return NETDEV_TX_BUSY;
2286 }
2287
2288 entry = hp->tx_new;
2289 SXD(("SX<l[%d]e[%d]>", skb->len, entry));
2290 hp->tx_skbs[entry] = skb;
2291
2292 if (skb_shinfo(skb)->nr_frags == 0) {
2293 u32 mapping, len;
2294
2295 len = skb->len;
2296 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2297 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2298 goto out_dma_error;
2299 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2300 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2301 (tx_flags | (len & TXFLAG_SIZE)),
2302 mapping);
2303 entry = NEXT_TX(entry);
2304 } else {
2305 u32 first_len, first_mapping;
2306 int frag, first_entry = entry;
2307
2308
2309
2310
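 /* Map the head of the skb now, but note that its SOP descriptor is
  * written last (below), only after every fragment descriptor is in
  * place, so the chip never sees ownership of a half-built chain.
  */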
2311 first_len = skb_headlen(skb);
2312 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2313 DMA_TO_DEVICE);
2314 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2315 goto out_dma_error;
2316 entry = NEXT_TX(entry);
2317
2318 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
2319 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
2320 u32 len, mapping, this_txflags;
2321
2322 len = skb_frag_size(this_frag);
2323 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2324 0, len, DMA_TO_DEVICE);
2325 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2326 unmap_partial_tx_skb(hp, first_mapping, first_len,
2327 first_entry, entry);
2328 goto out_dma_error;
2329 }
2330 this_txflags = tx_flags;
2331 if (frag == skb_shinfo(skb)->nr_frags - 1)
2332 this_txflags |= TXFLAG_EOP;
2333 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2334 (this_txflags | (len & TXFLAG_SIZE)),
2335 mapping);
2336 entry = NEXT_TX(entry);
2337 }
2338 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
2339 (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
2340 first_mapping);
2341 }
2342
2343 hp->tx_new = entry;
2344
2345 if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
2346 netif_stop_queue(dev);
2347
2348
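 /* Kick the TX DMA engine so it notices the freshly queued descriptors. */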
2349 hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
2350
2351 spin_unlock_irq(&hp->happy_lock);
2352
2353 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2354 return NETDEV_TX_OK;
2355
2356out_dma_error:
2357 hp->tx_skbs[hp->tx_new] = NULL;
2358 spin_unlock_irq(&hp->happy_lock);
2359
2360 dev_kfree_skb_any(skb);
2361 dev->stats.tx_dropped++;
2362 return NETDEV_TX_OK;
2363}
2364
2365static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2366{
2367 struct happy_meal *hp = netdev_priv(dev);
2368
2369 spin_lock_irq(&hp->happy_lock);
2370 happy_meal_get_counters(hp, hp->bigmacregs);
2371 spin_unlock_irq(&hp->happy_lock);
2372
2373 return &dev->stats;
2374}
2375
2376static void happy_meal_set_multicast(struct net_device *dev)
2377{
2378 struct happy_meal *hp = netdev_priv(dev);
2379 void __iomem *bregs = hp->bigmacregs;
2380 struct netdev_hw_addr *ha;
2381 u32 crc;
2382
2383 spin_lock_irq(&hp->happy_lock);
2384
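 /* Three cases: accept all multicast by filling the 64-bit hash filter,
  * promiscuous mode via the BigMAC RX config bit, or program the hash
  * filter from the multicast list (top 6 bits of the little-endian CRC
  * select one of 64 bits spread over four 16-bit registers).
  */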
2385 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2386 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2387 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2388 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2389 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2390 } else if (dev->flags & IFF_PROMISC) {
2391 hme_write32(hp, bregs + BMAC_RXCFG,
2392 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2393 } else {
2394 u16 hash_table[4];
2395
2396 memset(hash_table, 0, sizeof(hash_table));
2397 netdev_for_each_mc_addr(ha, dev) {
2398 crc = ether_crc_le(6, ha->addr);
2399 crc >>= 26;
2400 hash_table[crc >> 4] |= 1 << (crc & 0xf);
2401 }
2402 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2403 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2404 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2405 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2406 }
2407
2408 spin_unlock_irq(&hp->happy_lock);
2409}
2410
2411
2412static int hme_get_link_ksettings(struct net_device *dev,
2413 struct ethtool_link_ksettings *cmd)
2414{
2415 struct happy_meal *hp = netdev_priv(dev);
2416 u32 speed;
2417 u32 supported;
2418
2419 supported =
2420 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2421 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2422 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2423
2424
2425 cmd->base.port = PORT_TP;
2426 cmd->base.phy_address = 0;
2427
2428
2429 spin_lock_irq(&hp->happy_lock);
2430 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2431 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2432 spin_unlock_irq(&hp->happy_lock);
2433
2434 if (hp->sw_bmcr & BMCR_ANENABLE) {
2435 cmd->base.autoneg = AUTONEG_ENABLE;
2436 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2437 SPEED_100 : SPEED_10);
2438 if (speed == SPEED_100)
2439 cmd->base.duplex =
2440 (hp->sw_lpa & (LPA_100FULL)) ?
2441 DUPLEX_FULL : DUPLEX_HALF;
2442 else
2443 cmd->base.duplex =
2444 (hp->sw_lpa & (LPA_10FULL)) ?
2445 DUPLEX_FULL : DUPLEX_HALF;
2446 } else {
2447 cmd->base.autoneg = AUTONEG_DISABLE;
2448 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2449 cmd->base.duplex =
2450 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2451 DUPLEX_FULL : DUPLEX_HALF;
2452 }
2453 cmd->base.speed = speed;
2454 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2455 supported);
2456
2457 return 0;
2458}
2459
2460static int hme_set_link_ksettings(struct net_device *dev,
2461 const struct ethtool_link_ksettings *cmd)
2462{
2463 struct happy_meal *hp = netdev_priv(dev);
2464
2465
2466 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2467 cmd->base.autoneg != AUTONEG_DISABLE)
2468 return -EINVAL;
2469 if (cmd->base.autoneg == AUTONEG_DISABLE &&
2470 ((cmd->base.speed != SPEED_100 &&
2471 cmd->base.speed != SPEED_10) ||
2472 (cmd->base.duplex != DUPLEX_HALF &&
2473 cmd->base.duplex != DUPLEX_FULL)))
2474 return -EINVAL;
2475
2476
2477 spin_lock_irq(&hp->happy_lock);
2478 del_timer(&hp->happy_timer);
2479 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2480 spin_unlock_irq(&hp->happy_lock);
2481
2482 return 0;
2483}
2484
2485static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2486{
2487 struct happy_meal *hp = netdev_priv(dev);
2488
2489 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2490 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2491 if (hp->happy_flags & HFLAG_PCI) {
2492 struct pci_dev *pdev = hp->happy_dev;
2493 strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
2494 }
2495#ifdef CONFIG_SBUS
2496 else {
2497 const struct linux_prom_registers *regs;
2498 struct platform_device *op = hp->happy_dev;
2499 regs = of_get_property(op->dev.of_node, "regs", NULL);
2500 if (regs)
2501 snprintf(info->bus_info, sizeof(info->bus_info),
2502 "SBUS:%d",
2503 regs->which_io);
2504 }
2505#endif
2506}
2507
2508static u32 hme_get_link(struct net_device *dev)
2509{
2510 struct happy_meal *hp = netdev_priv(dev);
2511
2512 spin_lock_irq(&hp->happy_lock);
2513 hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
2514 spin_unlock_irq(&hp->happy_lock);
2515
2516 return hp->sw_bmsr & BMSR_LSTATUS;
2517}
2518
2519static const struct ethtool_ops hme_ethtool_ops = {
2520 .get_drvinfo = hme_get_drvinfo,
2521 .get_link = hme_get_link,
2522 .get_link_ksettings = hme_get_link_ksettings,
2523 .set_link_ksettings = hme_set_link_ksettings,
2524};
2525
2526static int hme_version_printed;
2527
2528#ifdef CONFIG_SBUS
2529
2530
2531
2532
2533
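/* An SBUS QFE card is one parent device with four HME children.  Keep a
 * single struct quattro per card, hung off the parent's drvdata, and
 * allocate it the first time any of the four ports probes.
 */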
2534static struct quattro *quattro_sbus_find(struct platform_device *child)
2535{
2536 struct device *parent = child->dev.parent;
2537 struct platform_device *op;
2538 struct quattro *qp;
2539
2540 op = to_platform_device(parent);
2541 qp = platform_get_drvdata(op);
2542 if (qp)
2543 return qp;
2544
2545 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2546 if (qp != NULL) {
2547 int i;
2548
2549 for (i = 0; i < 4; i++)
2550 qp->happy_meals[i] = NULL;
2551
2552 qp->quattro_dev = child;
2553 qp->next = qfe_sbus_list;
2554 qfe_sbus_list = qp;
2555
2556 platform_set_drvdata(op, qp);
2557 }
2558 return qp;
2559}
2560
2561
2562
2563
2564
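/* After every port of a QFE card has probed, request the card's single
 * shared SBUS interrupt.  Cards with any port missing are skipped.
 */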
2565static int __init quattro_sbus_register_irqs(void)
2566{
2567 struct quattro *qp;
2568
2569 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2570 struct platform_device *op = qp->quattro_dev;
2571 int err, qfe_slot, skip = 0;
2572
2573 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2574 if (!qp->happy_meals[qfe_slot])
2575 skip = 1;
2576 }
2577 if (skip)
2578 continue;
2579
2580 err = request_irq(op->archdata.irqs[0],
2581 quattro_sbus_interrupt,
2582 IRQF_SHARED, "Quattro",
2583 qp);
2584 if (err != 0) {
2585 printk(KERN_ERR "Quattro HME: IRQ registration "
2586 "error %d.\n", err);
2587 return err;
2588 }
2589 }
2590
2591 return 0;
2592}
2593
2594static void quattro_sbus_free_irqs(void)
2595{
2596 struct quattro *qp;
2597
2598 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2599 struct platform_device *op = qp->quattro_dev;
2600 int qfe_slot, skip = 0;
2601
2602 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2603 if (!qp->happy_meals[qfe_slot])
2604 skip = 1;
2605 }
2606 if (skip)
2607 continue;
2608
2609 free_irq(op->archdata.irqs[0], qp);
2610 }
2611}
2612#endif
2613
2614#ifdef CONFIG_PCI
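/* On PCI, the four HMEs of a QFE card sit behind a common bridge (a DEC
 * 21153 on the cards recognized by is_quattro_p()), so group ports by
 * that shared parent bridge device.
 */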
2615static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2616{
2617 struct pci_dev *bdev = pdev->bus->self;
2618 struct quattro *qp;
2619
2620 if (!bdev)
 return NULL;
2621 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2622 struct pci_dev *qpdev = qp->quattro_dev;
2623
2624 if (qpdev == bdev)
2625 return qp;
2626 }
2627 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2628 if (qp != NULL) {
2629 int i;
2630
2631 for (i = 0; i < 4; i++)
2632 qp->happy_meals[i] = NULL;
2633
2634 qp->quattro_dev = bdev;
2635 qp->next = qfe_pci_list;
2636 qfe_pci_list = qp;
2637
2638
2639 qp->nranges = 0;
2640 }
2641 return qp;
2642}
2643#endif
2644
2645static const struct net_device_ops hme_netdev_ops = {
2646 .ndo_open = happy_meal_open,
2647 .ndo_stop = happy_meal_close,
2648 .ndo_start_xmit = happy_meal_start_xmit,
2649 .ndo_tx_timeout = happy_meal_tx_timeout,
2650 .ndo_get_stats = happy_meal_get_stats,
2651 .ndo_set_rx_mode = happy_meal_set_multicast,
2652 .ndo_set_mac_address = eth_mac_addr,
2653 .ndo_validate_addr = eth_validate_addr,
2654};
2655
2656#ifdef CONFIG_SBUS
2657static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2658{
2659 struct device_node *dp = op->dev.of_node, *sbus_dp;
2660 struct quattro *qp = NULL;
2661 struct happy_meal *hp;
2662 struct net_device *dev;
2663 int i, qfe_slot = -1;
2664 u8 addr[ETH_ALEN];
2665 int err = -ENODEV;
2666
2667 sbus_dp = op->dev.parent->of_node;
2668
2669
2670 if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi"))
2671 return err;
2672
2673 if (is_qfe) {
2674 qp = quattro_sbus_find(op);
2675 if (qp == NULL)
2676 goto err_out;
2677 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2678 if (qp->happy_meals[qfe_slot] == NULL)
2679 break;
2680 if (qfe_slot == 4)
2681 goto err_out;
2682 }
2683
2684 err = -ENOMEM;
2685 dev = alloc_etherdev(sizeof(struct happy_meal));
2686 if (!dev)
2687 goto err_out;
2688 SET_NETDEV_DEV(dev, &op->dev);
2689
2690 if (hme_version_printed++ == 0)
2691 printk(KERN_INFO "%s", version);
2692
2693
2694
2695
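 /* MAC address selection: a 'macaddr' module parameter wins (with the
  * last octet bumped for each successive device), then the OpenPROM
  * "local-mac-address" property for QFE ports, and finally the system
  * IDPROM address.
  */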
2696 for (i = 0; i < 6; i++) {
2697 if (macaddr[i] != 0)
2698 break;
2699 }
2700 if (i < 6) {
2701 for (i = 0; i < 6; i++)
2702 addr[i] = macaddr[i];
2703 eth_hw_addr_set(dev, addr);
2704 macaddr[5]++;
2705 } else {
2706 const unsigned char *addr;
2707 int len;
2708
2709 addr = of_get_property(dp, "local-mac-address", &len);
2710
2711 if (qfe_slot != -1 && addr && len == ETH_ALEN)
2712 eth_hw_addr_set(dev, addr);
2713 else
2714 eth_hw_addr_set(dev, idprom->id_ethaddr);
2715 }
2716
2717 hp = netdev_priv(dev);
2718
2719 hp->happy_dev = op;
2720 hp->dma_dev = &op->dev;
2721
2722 spin_lock_init(&hp->happy_lock);
2723
2724 err = -ENODEV;
2725 if (qp != NULL) {
2726 hp->qfe_parent = qp;
2727 hp->qfe_ent = qfe_slot;
2728 qp->happy_meals[qfe_slot] = dev;
2729 }
2730
2731 hp->gregs = of_ioremap(&op->resource[0], 0,
2732 GREG_REG_SIZE, "HME Global Regs");
2733 if (!hp->gregs) {
2734 printk(KERN_ERR "happymeal: Cannot map global registers.\n");
2735 goto err_out_free_netdev;
2736 }
2737
2738 hp->etxregs = of_ioremap(&op->resource[1], 0,
2739 ETX_REG_SIZE, "HME TX Regs");
2740 if (!hp->etxregs) {
2741 printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
2742 goto err_out_iounmap;
2743 }
2744
2745 hp->erxregs = of_ioremap(&op->resource[2], 0,
2746 ERX_REG_SIZE, "HME RX Regs");
2747 if (!hp->erxregs) {
2748 printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
2749 goto err_out_iounmap;
2750 }
2751
2752 hp->bigmacregs = of_ioremap(&op->resource[3], 0,
2753 BMAC_REG_SIZE, "HME BIGMAC Regs");
2754 if (!hp->bigmacregs) {
2755 printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
2756 goto err_out_iounmap;
2757 }
2758
2759 hp->tcvregs = of_ioremap(&op->resource[4], 0,
2760 TCVR_REG_SIZE, "HME Transceiver Regs");
2761 if (!hp->tcvregs) {
2762 printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
2763 goto err_out_iounmap;
2764 }
2765
2766 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
2767 if (hp->hm_revision == 0xff)
2768 hp->hm_revision = 0xa0;
2769
2770
2771 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
2772 hp->happy_flags = HFLAG_20_21;
2773 else if (hp->hm_revision != 0xa0)
2774 hp->happy_flags = HFLAG_NOT_A0;
2775
2776 if (qp != NULL)
2777 hp->happy_flags |= HFLAG_QUATTRO;
2778
2779
2780 hp->happy_bursts = of_getintprop_default(sbus_dp,
2781 "burst-sizes", 0x00);
2782
2783 hp->happy_block = dma_alloc_coherent(hp->dma_dev,
2784 PAGE_SIZE,
2785 &hp->hblock_dvma,
2786 GFP_KERNEL);
2787 err = -ENOMEM;
2788 if (!hp->happy_block)
2789 goto err_out_iounmap;
2790
2791
2792 hp->linkcheck = 0;
2793
2794
2795 hp->timer_state = asleep;
2796 hp->timer_ticks = 0;
2797
2798 timer_setup(&hp->happy_timer, happy_meal_timer, 0);
2799
2800 hp->dev = dev;
2801 dev->netdev_ops = &hme_netdev_ops;
2802 dev->watchdog_timeo = 5*HZ;
2803 dev->ethtool_ops = &hme_ethtool_ops;
2804
2805
2806 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2807 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2808
2809 hp->irq = op->archdata.irqs[0];
2810
2811#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
2812
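 /* With both SBUS and PCI support built in, register and descriptor
  * accessors are per-device function pointers; install the SBUS ones.
  */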
2813 hp->read_desc32 = sbus_hme_read_desc32;
2814 hp->write_txd = sbus_hme_write_txd;
2815 hp->write_rxd = sbus_hme_write_rxd;
2816 hp->read32 = sbus_hme_read32;
2817 hp->write32 = sbus_hme_write32;
2818#endif
2819
2820
2821
2822
2823 spin_lock_irq(&hp->happy_lock);
2824 happy_meal_set_initial_advertisement(hp);
2825 spin_unlock_irq(&hp->happy_lock);
2826
2827 err = register_netdev(hp->dev);
2828 if (err) {
2829 printk(KERN_ERR "happymeal: Cannot register net device, "
2830 "aborting.\n");
2831 goto err_out_free_coherent;
2832 }
2833
2834 platform_set_drvdata(op, hp);
2835
2836 if (qfe_slot != -1)
2837 printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
2838 dev->name, qfe_slot);
2839 else
2840 printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
2841 dev->name);
2842
2843 printk("%pM\n", dev->dev_addr);
2844
2845 return 0;
2846
2847err_out_free_coherent:
2848 dma_free_coherent(hp->dma_dev,
2849 PAGE_SIZE,
2850 hp->happy_block,
2851 hp->hblock_dvma);
2852
2853err_out_iounmap:
2854 if (hp->gregs)
2855 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
2856 if (hp->etxregs)
2857 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
2858 if (hp->erxregs)
2859 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
2860 if (hp->bigmacregs)
2861 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
2862 if (hp->tcvregs)
2863 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
2864
2865 if (qp)
2866 qp->happy_meals[qfe_slot] = NULL;
2867
2868err_out_free_netdev:
2869 free_netdev(dev);
2870
2871err_out:
2872 return err;
2873}
2874#endif
2875
2876#ifdef CONFIG_PCI
2877#ifndef CONFIG_SPARC
2878static int is_quattro_p(struct pci_dev *pdev)
2879{
2880 struct pci_dev *busdev = pdev->bus->self;
2881 struct pci_dev *this_pdev;
2882 int n_hmes;
2883
2884 if (busdev == NULL ||
2885 busdev->vendor != PCI_VENDOR_ID_DEC ||
2886 busdev->device != PCI_DEVICE_ID_DEC_21153)
2887 return 0;
2888
2889 n_hmes = 0;
2890 list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
2891 if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
2892 this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
2893 n_hmes++;
2894 }
2895
2896 if (n_hmes != 4)
2897 return 0;
2898
2899 return 1;
2900}
2901
2902
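/* Scan the expansion ROM's Vital Product Data for an "NA" (network
 * address) keyword carrying a 6-byte MAC.  'index' selects which match
 * to take, one per port on a Quattro card.
 */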
2903static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
2904{
2905 int this_offset;
2906
2907 for (this_offset = 0x20; this_offset < len; this_offset++) {
2908 void __iomem *p = rom_base + this_offset;
2909
2910 if (readb(p + 0) != 0x90 ||
2911 readb(p + 1) != 0x00 ||
2912 readb(p + 2) != 0x09 ||
2913 readb(p + 3) != 0x4e ||
2914 readb(p + 4) != 0x41 ||
2915 readb(p + 5) != 0x06)
2916 continue;
2917
2918 this_offset += 6;
2919 p += 6;
2920
2921 if (index == 0) {
2922 int i;
2923
2924 for (i = 0; i < 6; i++)
2925 dev_addr[i] = readb(p + i);
2926 return 1;
2927 }
2928 index--;
2929 }
2930 return 0;
2931}
2932
2933static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
2934{
2935 size_t size;
2936 void __iomem *p = pci_map_rom(pdev, &size);
2937
2938 if (p) {
2939 int index = 0;
2940 int found;
2941
2942 if (is_quattro_p(pdev))
2943 index = PCI_SLOT(pdev->devfn);
2944
2945 found = readb(p) == 0x55 &&
2946 readb(p + 1) == 0xaa &&
2947 find_eth_addr_in_vpd(p, size, index, dev_addr);
2948 pci_unmap_rom(pdev, p);
2949 if (found)
2950 return;
2951 }
2952
2953
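 /* No usable address in the ROM: fall back to a random address under
  * the 08:00:20 (Sun) OUI.
  */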
2954 dev_addr[0] = 0x08;
2955 dev_addr[1] = 0x00;
2956 dev_addr[2] = 0x20;
2957 get_random_bytes(&dev_addr[3], 3);
2958}
2959#endif
2960
2961static int happy_meal_pci_probe(struct pci_dev *pdev,
2962 const struct pci_device_id *ent)
2963{
2964 struct quattro *qp = NULL;
2965#ifdef CONFIG_SPARC
2966 struct device_node *dp;
2967#endif
2968 struct happy_meal *hp;
2969 struct net_device *dev;
2970 void __iomem *hpreg_base;
2971 unsigned long hpreg_res;
2972 int i, qfe_slot = -1;
2973 char prom_name[64];
2974 u8 addr[ETH_ALEN];
2975 int err;
2976
2977
2978#ifdef CONFIG_SPARC
2979 dp = pci_device_to_OF_node(pdev);
2980 snprintf(prom_name, sizeof(prom_name), "%pOFn", dp);
2981#else
2982 if (is_quattro_p(pdev))
2983 strcpy(prom_name, "SUNW,qfe");
2984 else
2985 strcpy(prom_name, "SUNW,hme");
2986#endif
2987
2988 err = -ENODEV;
2989
2990 if (pci_enable_device(pdev))
2991 goto err_out;
2992 pci_set_master(pdev);
2993
2994 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
2995 qp = quattro_pci_find(pdev);
2996 if (qp == NULL)
2997 goto err_out;
2998 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2999 if (qp->happy_meals[qfe_slot] == NULL)
3000 break;
3001 if (qfe_slot == 4)
3002 goto err_out;
3003 }
3004
3005 dev = alloc_etherdev(sizeof(struct happy_meal));
3006 err = -ENOMEM;
3007 if (!dev)
3008 goto err_out;
3009 SET_NETDEV_DEV(dev, &pdev->dev);
3010
3011 if (hme_version_printed++ == 0)
3012 printk(KERN_INFO "%s", version);
3013
3014 hp = netdev_priv(dev);
3015
3016 hp->happy_dev = pdev;
3017 hp->dma_dev = &pdev->dev;
3018
3019 spin_lock_init(&hp->happy_lock);
3020
3021 if (qp != NULL) {
3022 hp->qfe_parent = qp;
3023 hp->qfe_ent = qfe_slot;
3024 qp->happy_meals[qfe_slot] = dev;
3025 }
3026
3027 hpreg_res = pci_resource_start(pdev, 0);
3028 err = -ENODEV;
3029 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3030 printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
3031 goto err_out_clear_quattro;
3032 }
3033 if (pci_request_regions(pdev, DRV_NAME)) {
3034 printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
3035 "aborting.\n");
3036 goto err_out_clear_quattro;
3037 }
3038
3039 hpreg_base = ioremap(hpreg_res, 0x8000);
 if (!hpreg_base) {
3040 printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
3041 goto err_out_free_res;
3042 }
3043
3044 for (i = 0; i < 6; i++) {
3045 if (macaddr[i] != 0)
3046 break;
3047 }
3048 if (i < 6) {
3049 for (i = 0; i < 6; i++)
3050 addr[i] = macaddr[i];
3051 eth_hw_addr_set(dev, addr);
3052 macaddr[5]++;
3053 } else {
3054#ifdef CONFIG_SPARC
3055 const unsigned char *addr;
3056 int len;
3057
3058 if (qfe_slot != -1 &&
3059 (addr = of_get_property(dp, "local-mac-address", &len))
3060 != NULL &&
3061 len == ETH_ALEN) {
3062 eth_hw_addr_set(dev, addr);
3063 } else {
3064 eth_hw_addr_set(dev, idprom->id_ethaddr);
3065 }
3066#else
3067 u8 addr[ETH_ALEN];
3068
3069 get_hme_mac_nonsparc(pdev, addr);
3070 eth_hw_addr_set(dev, addr);
3071#endif
3072 }
3073
3074
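 /* All of the chip's register blocks live at fixed offsets within the
  * single 32KB PCI BAR mapped above.
  */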
3075 hp->gregs = (hpreg_base + 0x0000UL);
3076 hp->etxregs = (hpreg_base + 0x2000UL);
3077 hp->erxregs = (hpreg_base + 0x4000UL);
3078 hp->bigmacregs = (hpreg_base + 0x6000UL);
3079 hp->tcvregs = (hpreg_base + 0x7000UL);
3080
3081#ifdef CONFIG_SPARC
3082 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
3083 if (hp->hm_revision == 0xff)
3084 hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
3085#else
3086
3087 hp->hm_revision = 0x20;
3088#endif
3089
3090
3091 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
3092 hp->happy_flags = HFLAG_20_21;
3093 else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
3094 hp->happy_flags = HFLAG_NOT_A0;
3095
3096 if (qp != NULL)
3097 hp->happy_flags |= HFLAG_QUATTRO;
3098
3099
3100 hp->happy_flags |= HFLAG_PCI;
3101
3102#ifdef CONFIG_SPARC
3103
3104 hp->happy_bursts = DMA_BURSTBITS;
3105#endif
3106
3107 hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3108 &hp->hblock_dvma, GFP_KERNEL);
3109 err = -ENOMEM;
3110 if (!hp->happy_block)
3111 goto err_out_iounmap;
3112
3113 hp->linkcheck = 0;
3114 hp->timer_state = asleep;
3115 hp->timer_ticks = 0;
3116
3117 timer_setup(&hp->happy_timer, happy_meal_timer, 0);
3118
3119 hp->irq = pdev->irq;
3120 hp->dev = dev;
3121 dev->netdev_ops = &hme_netdev_ops;
3122 dev->watchdog_timeo = 5*HZ;
3123 dev->ethtool_ops = &hme_ethtool_ops;
3124
3125
3126 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
3127 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
3128
3129#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
3130
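 /* Install the PCI flavours of the register and descriptor accessors
  * (the SBUS probe path installs the SBUS ones).
  */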
3131 hp->read_desc32 = pci_hme_read_desc32;
3132 hp->write_txd = pci_hme_write_txd;
3133 hp->write_rxd = pci_hme_write_rxd;
3134 hp->read32 = pci_hme_read32;
3135 hp->write32 = pci_hme_write32;
3136#endif
3137
3138
3139
3140
3141 spin_lock_irq(&hp->happy_lock);
3142 happy_meal_set_initial_advertisement(hp);
3143 spin_unlock_irq(&hp->happy_lock);
3144
3145 err = register_netdev(hp->dev);
3146 if (err) {
3147 printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
3148 "aborting.\n");
3149 goto err_out_iounmap;
3150 }
3151
3152 pci_set_drvdata(pdev, hp);
3153
3154 if (!qfe_slot) {
3155 struct pci_dev *qpdev = qp->quattro_dev;
3156
3157 prom_name[0] = 0;
3158 if (!strncmp(dev->name, "eth", 3)) {
3159 int i = simple_strtoul(dev->name + 3, NULL, 10);
3160 sprintf(prom_name, "-%d", i + 3);
3161 }
3162 printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
3163 if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
3164 qpdev->device == PCI_DEVICE_ID_DEC_21153)
3165 printk("DEC 21153 PCI Bridge\n");
3166 else
3167 printk("unknown bridge %04x.%04x\n",
3168 qpdev->vendor, qpdev->device);
3169 }
3170
3171 if (qfe_slot != -1)
3172 printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
3173 dev->name, qfe_slot);
3174 else
3175 printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
3176 dev->name);
3177
3178 printk("%pM\n", dev->dev_addr);
3179
3180 return 0;
3181
3182err_out_iounmap:
3183 iounmap(hp->gregs);
3184
3185err_out_free_res:
3186 pci_release_regions(pdev);
3187
3188err_out_clear_quattro:
3189 if (qp != NULL)
3190 qp->happy_meals[qfe_slot] = NULL;
3191
3192 free_netdev(dev);
3193
3194err_out:
3195 return err;
3196}
3197
3198static void happy_meal_pci_remove(struct pci_dev *pdev)
3199{
3200 struct happy_meal *hp = pci_get_drvdata(pdev);
3201 struct net_device *net_dev = hp->dev;
3202
3203 unregister_netdev(net_dev);
3204
3205 dma_free_coherent(hp->dma_dev, PAGE_SIZE,
3206 hp->happy_block, hp->hblock_dvma);
3207 iounmap(hp->gregs);
3208 pci_release_regions(pdev);
3209
3210 free_netdev(net_dev);
3211}
3212
3213static const struct pci_device_id happymeal_pci_ids[] = {
3214 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3215 { }
3216};
3217
3218MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
3219
3220static struct pci_driver hme_pci_driver = {
3221 .name = "hme",
3222 .id_table = happymeal_pci_ids,
3223 .probe = happy_meal_pci_probe,
3224 .remove = happy_meal_pci_remove,
3225};
3226
3227static int __init happy_meal_pci_init(void)
3228{
3229 return pci_register_driver(&hme_pci_driver);
3230}
3231
3232static void happy_meal_pci_exit(void)
3233{
3234 pci_unregister_driver(&hme_pci_driver);
3235
3236 while (qfe_pci_list) {
3237 struct quattro *qfe = qfe_pci_list;
3238 struct quattro *next = qfe->next;
3239
3240 kfree(qfe);
3241
3242 qfe_pci_list = next;
3243 }
3244}
3245
3246#endif
3247
3248#ifdef CONFIG_SBUS
3249static const struct of_device_id hme_sbus_match[];
3250static int hme_sbus_probe(struct platform_device *op)
3251{
3252 const struct of_device_id *match;
3253 struct device_node *dp = op->dev.of_node;
3254 const char *model = of_get_property(dp, "model", NULL);
3255 int is_qfe;
3256
3257 match = of_match_device(hme_sbus_match, &op->dev);
3258 if (!match)
3259 return -EINVAL;
3260 is_qfe = (match->data != NULL);
3261
3262 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
3263 is_qfe = 1;
3264
3265 return happy_meal_sbus_probe_one(op, is_qfe);
3266}
3267
3268static int hme_sbus_remove(struct platform_device *op)
3269{
3270 struct happy_meal *hp = platform_get_drvdata(op);
3271 struct net_device *net_dev = hp->dev;
3272
3273 unregister_netdev(net_dev);
3274
3275
3276
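 /* unregister_netdev() has already taken the interface down if it was
  * up, so only the register mappings and the descriptor block remain
  * to be released here.
  */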
3277 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
3278 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
3279 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
3280 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
3281 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
3282 dma_free_coherent(hp->dma_dev,
3283 PAGE_SIZE,
3284 hp->happy_block,
3285 hp->hblock_dvma);
3286
3287 free_netdev(net_dev);
3288
3289 return 0;
3290}
3291
3292static const struct of_device_id hme_sbus_match[] = {
3293 {
3294 .name = "SUNW,hme",
3295 },
3296 {
3297 .name = "SUNW,qfe",
3298 .data = (void *) 1,
3299 },
3300 {
3301 .name = "qfe",
3302 .data = (void *) 1,
3303 },
3304 {},
3305};
3306
3307MODULE_DEVICE_TABLE(of, hme_sbus_match);
3308
3309static struct platform_driver hme_sbus_driver = {
3310 .driver = {
3311 .name = "hme",
3312 .of_match_table = hme_sbus_match,
3313 },
3314 .probe = hme_sbus_probe,
3315 .remove = hme_sbus_remove,
3316};
3317
3318static int __init happy_meal_sbus_init(void)
3319{
3320 int err;
3321
3322 err = platform_driver_register(&hme_sbus_driver);
3323 if (!err)
3324 err = quattro_sbus_register_irqs();
3325
3326 return err;
3327}
3328
3329static void happy_meal_sbus_exit(void)
3330{
3331 platform_driver_unregister(&hme_sbus_driver);
3332 quattro_sbus_free_irqs();
3333
3334 while (qfe_sbus_list) {
3335 struct quattro *qfe = qfe_sbus_list;
3336 struct quattro *next = qfe->next;
3337
3338 kfree(qfe);
3339
3340 qfe_sbus_list = next;
3341 }
3342}
3343#endif
3344
3345static int __init happy_meal_probe(void)
3346{
3347 int err = 0;
3348
3349#ifdef CONFIG_SBUS
3350 err = happy_meal_sbus_init();
3351#endif
3352#ifdef CONFIG_PCI
3353 if (!err) {
3354 err = happy_meal_pci_init();
3355#ifdef CONFIG_SBUS
3356 if (err)
3357 happy_meal_sbus_exit();
3358#endif
3359 }
3360#endif
3361
3362 return err;
3363}
3364
3365
3366static void __exit happy_meal_exit(void)
3367{
3368#ifdef CONFIG_SBUS
3369 happy_meal_sbus_exit();
3370#endif
3371#ifdef CONFIG_PCI
3372 happy_meal_pci_exit();
3373#endif
3374}
3375
3376module_init(happy_meal_probe);
3377module_exit(happy_meal_exit);
3378