/* sunhme.c: Sun Happy Meal Ethernet (HME) 10/100baseT ethernet driver.
 *           Supports the SBus and PCI variants of the chip, including
 *           the four-port Quattro (QFE) cards.
 *
 * Author: David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#ifdef CONFIG_SPARC
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/auxio.h>
#endif
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/irq.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include "sunhme.h"
62
#define DRV_NAME	"sunhme"
#define DRV_VERSION	"3.10"
#define DRV_RELDATE	"August 26, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun Happy Meal Ethernet (HME) 10/100baseT ethernet driver");
MODULE_LICENSE("GPL");
75
76static int macaddr[6];
77
78
79module_param_array(macaddr, int, NULL, 0);
80MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
81
82#ifdef CONFIG_SBUS
83static struct quattro *qfe_sbus_list;
84#endif
85
86#ifdef CONFIG_PCI
87static struct quattro *qfe_pci_list;
88#endif
89
90#undef HMEDEBUG
91#undef SXDEBUG
92#undef RXDEBUG
93#undef TXDEBUG
94#undef TXLOGGING
95
96#ifdef TXLOGGING
97struct hme_tx_logent {
98 unsigned int tstamp;
99 int tx_new, tx_old;
100 unsigned int action;
101#define TXLOG_ACTION_IRQ 0x01
102#define TXLOG_ACTION_TXMIT 0x02
103#define TXLOG_ACTION_TBUSY 0x04
104#define TXLOG_ACTION_NBUFS 0x08
105 unsigned int status;
106};
107#define TX_LOG_LEN 128
108static struct hme_tx_logent tx_log[TX_LOG_LEN];
109static int txlog_cur_entry;
110static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
111{
112 struct hme_tx_logent *tlp;
113 unsigned long flags;
114
115 local_irq_save(flags);
116 tlp = &tx_log[txlog_cur_entry];
117 tlp->tstamp = (unsigned int)jiffies;
118 tlp->tx_new = hp->tx_new;
119 tlp->tx_old = hp->tx_old;
120 tlp->action = a;
121 tlp->status = s;
122 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
123 local_irq_restore(flags);
124}
125static __inline__ void tx_dump_log(void)
126{
127 int i, this;
128
129 this = txlog_cur_entry;
130 for (i = 0; i < TX_LOG_LEN; i++) {
131 printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
132 tx_log[this].tstamp,
133 tx_log[this].tx_new, tx_log[this].tx_old,
134 tx_log[this].action, tx_log[this].status);
135 this = (this + 1) & (TX_LOG_LEN - 1);
136 }
137}
138static __inline__ void tx_dump_ring(struct happy_meal *hp)
139{
140 struct hmeal_init_block *hb = hp->happy_block;
141 struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
142 int i;
143
144 for (i = 0; i < TX_RING_SIZE; i+=4) {
145 printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
146 i, i + 4,
147 le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
148 le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
149 le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
150 le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
151 }
152}
153#else
154#define tx_add_log(hp, a, s) do { } while(0)
155#define tx_dump_log() do { } while(0)
156#define tx_dump_ring(hp) do { } while(0)
157#endif
158
159#ifdef HMEDEBUG
160#define HMD(x) printk x
161#else
162#define HMD(x)
163#endif
164
165
166
167#ifdef AUTO_SWITCH_DEBUG
168#define ASD(x) printk x
169#else
170#define ASD(x)
171#endif
172
173#define DEFAULT_IPG0 16
174#define DEFAULT_IPG1 8
175#define DEFAULT_IPG2 4
176#define DEFAULT_JAMSIZE 4
177
178
179
180
181
182
183
184
185#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
186static void sbus_hme_write32(void __iomem *reg, u32 val)
187{
188 sbus_writel(val, reg);
189}
190
191static u32 sbus_hme_read32(void __iomem *reg)
192{
193 return sbus_readl(reg);
194}
195
196static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
197{
198 rxd->rx_addr = (__force hme32)addr;
199 dma_wmb();
200 rxd->rx_flags = (__force hme32)flags;
201}
202
203static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
204{
205 txd->tx_addr = (__force hme32)addr;
206 dma_wmb();
207 txd->tx_flags = (__force hme32)flags;
208}
209
210static u32 sbus_hme_read_desc32(hme32 *p)
211{
212 return (__force u32)*p;
213}
214
215static void pci_hme_write32(void __iomem *reg, u32 val)
216{
217 writel(val, reg);
218}
219
220static u32 pci_hme_read32(void __iomem *reg)
221{
222 return readl(reg);
223}
224
225static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
226{
227 rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
228 dma_wmb();
229 rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
230}
231
232static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
233{
234 txd->tx_addr = (__force hme32)cpu_to_le32(addr);
235 dma_wmb();
236 txd->tx_flags = (__force hme32)cpu_to_le32(flags);
237}
238
239static u32 pci_hme_read_desc32(hme32 *p)
240{
241 return le32_to_cpup((__le32 *)p);
242}
243
244#define hme_write32(__hp, __reg, __val) \
245 ((__hp)->write32((__reg), (__val)))
246#define hme_read32(__hp, __reg) \
247 ((__hp)->read32(__reg))
248#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
249 ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
250#define hme_write_txd(__hp, __txd, __flags, __addr) \
251 ((__hp)->write_txd((__txd), (__flags), (__addr)))
252#define hme_read_desc32(__hp, __p) \
253 ((__hp)->read_desc32(__p))
254#define hme_dma_map(__hp, __ptr, __size, __dir) \
255 ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir)))
256#define hme_dma_unmap(__hp, __addr, __size, __dir) \
257 ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir)))
258#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
259 ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)))
260#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
261 ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)))
262#else
263#ifdef CONFIG_SBUS
264
265#define hme_write32(__hp, __reg, __val) \
266 sbus_writel((__val), (__reg))
267#define hme_read32(__hp, __reg) \
268 sbus_readl(__reg)
269#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
270do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
271 dma_wmb(); \
272 (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
273} while(0)
274#define hme_write_txd(__hp, __txd, __flags, __addr) \
275do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
276 dma_wmb(); \
277 (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
278} while(0)
279#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
280#define hme_dma_map(__hp, __ptr, __size, __dir) \
281 dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
282#define hme_dma_unmap(__hp, __addr, __size, __dir) \
283 dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
	dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
	dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
288#else
289
290#define hme_write32(__hp, __reg, __val) \
291 writel((__val), (__reg))
292#define hme_read32(__hp, __reg) \
293 readl(__reg)
294#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
295do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
296 dma_wmb(); \
297 (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
298} while(0)
299#define hme_write_txd(__hp, __txd, __flags, __addr) \
300do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
301 dma_wmb(); \
302 (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
303} while(0)
304static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
305{
306 return le32_to_cpup((__le32 *)p);
307}
308#define hme_dma_map(__hp, __ptr, __size, __dir) \
309 pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
310#define hme_dma_unmap(__hp, __addr, __size, __dir) \
311 pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
312#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
313 pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
314#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
315 pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
316#endif
317#endif
318
319
320
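/* MII management (MDIO) access.  When the MIF cannot use frame mode
 * (HFLAG_FENABLE clear), the transceiver registers are reached by
 * bit-banging the serial MDIO protocol through the BBCLOCK, BBDATA and
 * BBOENAB registers: a 32-bit preamble of ones, a start/opcode sequence,
 * the 5-bit PHY address, the 5-bit register address, a turnaround, and
 * finally 16 data bits.  The helpers below shift the individual bits.
 */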
321static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
322{
323 hme_write32(hp, tregs + TCVR_BBDATA, bit);
324 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
325 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
326}
327
328#if 0
329static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
330{
331 u32 ret;
332
333 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
334 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
335 ret = hme_read32(hp, tregs + TCVR_CFG);
336 if (internal)
337 ret &= TCV_CFG_MDIO0;
338 else
339 ret &= TCV_CFG_MDIO1;
340
341 return ret;
342}
343#endif
344
345static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
346{
347 u32 retval;
348
349 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
350 udelay(1);
351 retval = hme_read32(hp, tregs + TCVR_CFG);
352 if (internal)
353 retval &= TCV_CFG_MDIO0;
354 else
355 retval &= TCV_CFG_MDIO1;
356 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
357
358 return retval;
359}
360
361#define TCVR_FAILURE 0x80000000
362
363static int happy_meal_bb_read(struct happy_meal *hp,
364 void __iomem *tregs, int reg)
365{
366 u32 tmp;
367 int retval = 0;
368 int i;
369
370 ASD(("happy_meal_bb_read: reg=%d ", reg));
371
372
373 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
374
375
376 for (i = 0; i < 32; i++)
377 BB_PUT_BIT(hp, tregs, 1);
378
379
380 BB_PUT_BIT(hp, tregs, 0);
381 BB_PUT_BIT(hp, tregs, 1);
382 BB_PUT_BIT(hp, tregs, 1);
383 BB_PUT_BIT(hp, tregs, 0);
384
385
386 tmp = hp->paddr & 0xff;
387 for (i = 4; i >= 0; i--)
388 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
389
390
391 tmp = (reg & 0xff);
392 for (i = 4; i >= 0; i--)
393 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
394
395
396 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
397
398
399 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
400 for (i = 15; i >= 0; i--)
401 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
402 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
403 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
404 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
405 ASD(("value=%x\n", retval));
406 return retval;
407}
408
409static void happy_meal_bb_write(struct happy_meal *hp,
410 void __iomem *tregs, int reg,
411 unsigned short value)
412{
413 u32 tmp;
414 int i;
415
416 ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
417
418
419 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
420
421
422 for (i = 0; i < 32; i++)
423 BB_PUT_BIT(hp, tregs, 1);
424
425
426 BB_PUT_BIT(hp, tregs, 0);
427 BB_PUT_BIT(hp, tregs, 1);
428 BB_PUT_BIT(hp, tregs, 0);
429 BB_PUT_BIT(hp, tregs, 1);
430
431
432 tmp = (hp->paddr & 0xff);
433 for (i = 4; i >= 0; i--)
434 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
435
436
437 tmp = (reg & 0xff);
438 for (i = 4; i >= 0; i--)
439 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
440
441
442 BB_PUT_BIT(hp, tregs, 1);
443 BB_PUT_BIT(hp, tregs, 0);
444
445 for (i = 15; i >= 0; i--)
446 BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
447
448
449 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
450}
451
452#define TCVR_READ_TRIES 16
453
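/* Frame-mode MIF access: the read or write request (opcode, PHY address,
 * register number and, for writes, the data) is packed into the
 * TCVR_FRAME register and the MIF shifts it out itself; completion is
 * signalled by bit 16 of the frame register, which is polled below.
 */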
454static int happy_meal_tcvr_read(struct happy_meal *hp,
455 void __iomem *tregs, int reg)
456{
457 int tries = TCVR_READ_TRIES;
458 int retval;
459
460 ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
461 if (hp->tcvr_type == none) {
462 ASD(("no transceiver, value=TCVR_FAILURE\n"));
463 return TCVR_FAILURE;
464 }
465
466 if (!(hp->happy_flags & HFLAG_FENABLE)) {
467 ASD(("doing bit bang\n"));
468 return happy_meal_bb_read(hp, tregs, reg);
469 }
470
471 hme_write32(hp, tregs + TCVR_FRAME,
472 (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
473 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
474 udelay(20);
475 if (!tries) {
476 printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
477 return TCVR_FAILURE;
478 }
479 retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
480 ASD(("value=%04x\n", retval));
481 return retval;
482}
483
484#define TCVR_WRITE_TRIES 16
485
486static void happy_meal_tcvr_write(struct happy_meal *hp,
487 void __iomem *tregs, int reg,
488 unsigned short value)
489{
490 int tries = TCVR_WRITE_TRIES;
491
492 ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
493
494
495 if (!(hp->happy_flags & HFLAG_FENABLE)) {
496 happy_meal_bb_write(hp, tregs, reg, value);
497 return;
498 }
499
500
501 hme_write32(hp, tregs + TCVR_FRAME,
502 (FRAME_WRITE | (hp->paddr << 23) |
503 ((reg & 0xff) << 18) | (value & 0xffff)));
504 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
505 udelay(20);
506
507
508 if (!tries)
509 printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
510
511
512}
513
545
546static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
547{
548 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
549
550
551
552
553 if (hp->sw_bmcr & BMCR_FULLDPLX) {
554 hp->sw_bmcr &= ~(BMCR_FULLDPLX);
555 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
556 return 0;
557 }
558
559
560 if (hp->sw_bmcr & BMCR_SPEED100) {
561 hp->sw_bmcr &= ~(BMCR_SPEED100);
562 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
563 return 0;
564 }
565
566
567 return -1;
568}
569
570static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
571{
572 printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
573 if (hp->tcvr_type == external)
574 printk("external ");
575 else
576 printk("internal ");
577 printk("transceiver at ");
578 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
579 if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
580 if (hp->sw_lpa & LPA_100FULL)
581 printk("100Mb/s, Full Duplex.\n");
582 else
583 printk("100Mb/s, Half Duplex.\n");
584 } else {
585 if (hp->sw_lpa & LPA_10FULL)
586 printk("10Mb/s, Full Duplex.\n");
587 else
588 printk("10Mb/s, Half Duplex.\n");
589 }
590}
591
592static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
593{
594 printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
595 if (hp->tcvr_type == external)
596 printk("external ");
597 else
598 printk("internal ");
599 printk("transceiver at ");
600 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
601 if (hp->sw_bmcr & BMCR_SPEED100)
602 printk("100Mb/s, ");
603 else
604 printk("10Mb/s, ");
605 if (hp->sw_bmcr & BMCR_FULLDPLX)
606 printk("Full Duplex.\n");
607 else
608 printk("Half Duplex.\n");
609}
610
611static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
612{
613 int full;
614
615
616
617
618 if (hp->timer_state == arbwait) {
619 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
620 if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
621 goto no_response;
622 if (hp->sw_lpa & LPA_100FULL)
623 full = 1;
624 else if (hp->sw_lpa & LPA_100HALF)
625 full = 0;
626 else if (hp->sw_lpa & LPA_10FULL)
627 full = 1;
628 else
629 full = 0;
630 } else {
631
632 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
633 if (hp->sw_bmcr & BMCR_FULLDPLX)
634 full = 1;
635 else
636 full = 0;
637 }
638
639
640
641
642
643
644
645
646
647 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
648 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
649 ~(BIGMAC_TXCFG_ENABLE));
650 while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
651 barrier();
652 if (full) {
653 hp->happy_flags |= HFLAG_FULL;
654 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
655 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
656 BIGMAC_TXCFG_FULLDPLX);
657 } else {
658 hp->happy_flags &= ~(HFLAG_FULL);
659 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
660 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
661 ~(BIGMAC_TXCFG_FULLDPLX));
662 }
663 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
664 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
665 BIGMAC_TXCFG_ENABLE);
666 return 0;
667no_response:
668 return 1;
669}
670
671static int happy_meal_init(struct happy_meal *hp);
672
673static int is_lucent_phy(struct happy_meal *hp)
674{
675 void __iomem *tregs = hp->tcvregs;
676 unsigned short mr2, mr3;
677 int ret = 0;
678
679 mr2 = happy_meal_tcvr_read(hp, tregs, 2);
680 mr3 = happy_meal_tcvr_read(hp, tregs, 3);
681 if ((mr2 & 0xffff) == 0x0180 &&
682 ((mr3 & 0xffff) >> 10) == 0x1d)
683 ret = 1;
684
685 return ret;
686}
687
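/* Link management timer.  The state machine below walks through
 * auto-negotiation and forced-link attempts:
 *
 *   arbwait  - waiting for the PHY to finish auto-negotiation
 *   lupwait  - negotiation completed, waiting for link status to settle
 *   ltrywait - forcing link modes, one permutation at a time
 *   asleep   - link is up (or we have given up); the timer is idle
 */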
688static void happy_meal_timer(unsigned long data)
689{
690 struct happy_meal *hp = (struct happy_meal *) data;
691 void __iomem *tregs = hp->tcvregs;
692 int restart_timer = 0;
693
694 spin_lock_irq(&hp->happy_lock);
695
696 hp->timer_ticks++;
697 switch(hp->timer_state) {
698 case arbwait:
699
700
701
702 if (hp->timer_ticks >= 10) {
703
704 do_force_mode:
705 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
706 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
707 hp->dev->name);
708 hp->sw_bmcr = BMCR_SPEED100;
709 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
710
711 if (!is_lucent_phy(hp)) {
712
713
714
715
716 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
717 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
718 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
719 }
720 hp->timer_state = ltrywait;
721 hp->timer_ticks = 0;
722 restart_timer = 1;
723 } else {
724
725 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
726 if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
727 int ret;
728
729
730 ret = set_happy_link_modes(hp, tregs);
731 if (ret) {
732
733
734
735
736
737
738 goto do_force_mode;
739 }
740
741
742 hp->timer_state = lupwait;
743 restart_timer = 1;
744 } else {
745 restart_timer = 1;
746 }
747 }
748 break;
749
750 case lupwait:
751
752
753
754
755
756 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
757 if (hp->sw_bmsr & BMSR_LSTATUS) {
758
759
760
761 display_link_mode(hp, tregs);
762 hp->timer_state = asleep;
763 restart_timer = 0;
764 } else {
765 if (hp->timer_ticks >= 10) {
766 printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
767 "not completely up.\n", hp->dev->name);
768 hp->timer_ticks = 0;
769 restart_timer = 1;
770 } else {
771 restart_timer = 1;
772 }
773 }
774 break;
775
776 case ltrywait:
777
778
779
780
781
782 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
783 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
784 if (hp->timer_ticks == 1) {
785 if (!is_lucent_phy(hp)) {
786
787
788
789 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
790 happy_meal_tcvr_write(hp, tregs,
791 DP83840_CSCONFIG, hp->sw_csconfig);
792 }
793 restart_timer = 1;
794 break;
795 }
796 if (hp->timer_ticks == 2) {
797 if (!is_lucent_phy(hp)) {
798 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
799 happy_meal_tcvr_write(hp, tregs,
800 DP83840_CSCONFIG, hp->sw_csconfig);
801 }
802 restart_timer = 1;
803 break;
804 }
805 if (hp->sw_bmsr & BMSR_LSTATUS) {
806
807 display_forced_link_mode(hp, tregs);
808 set_happy_link_modes(hp, tregs);
809 hp->timer_state = asleep;
810 restart_timer = 0;
811 } else {
812 if (hp->timer_ticks >= 4) {
813 int ret;
814
815 ret = try_next_permutation(hp, tregs);
816 if (ret == -1) {
817
818
819
820
821
822 printk(KERN_NOTICE "%s: Link down, cable problem?\n",
823 hp->dev->name);
824
825 ret = happy_meal_init(hp);
826 if (ret) {
827
828 printk(KERN_ERR "%s: Error, cannot re-init the "
829 "Happy Meal.\n", hp->dev->name);
830 }
831 goto out;
832 }
833 if (!is_lucent_phy(hp)) {
834 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
835 DP83840_CSCONFIG);
836 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
837 happy_meal_tcvr_write(hp, tregs,
838 DP83840_CSCONFIG, hp->sw_csconfig);
839 }
840 hp->timer_ticks = 0;
841 restart_timer = 1;
842 } else {
843 restart_timer = 1;
844 }
845 }
846 break;
847
848 case asleep:
849 default:
850
851 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
852 hp->dev->name);
853 restart_timer = 0;
854 hp->timer_ticks = 0;
855 hp->timer_state = asleep;
856 break;
857 }
858
859 if (restart_timer) {
860 hp->happy_timer.expires = jiffies + ((12 * HZ)/10);
861 add_timer(&hp->happy_timer);
862 }
863
864out:
865 spin_unlock_irq(&hp->happy_lock);
866}
867
868#define TX_RESET_TRIES 32
869#define RX_RESET_TRIES 32
870
871
872static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
873{
874 int tries = TX_RESET_TRIES;
875
876 HMD(("happy_meal_tx_reset: reset, "));
877
878
879 hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
880 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
881 udelay(20);
882
883
884 if (!tries)
		printk(KERN_ERR "happy meal: Transmitter BigMac ATTACK!\n");
886
887
888 HMD(("done\n"));
889}
890
891
892static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
893{
894 int tries = RX_RESET_TRIES;
895
896 HMD(("happy_meal_rx_reset: reset, "));
897
898
899 hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
900 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
901 udelay(20);
902
903
904 if (!tries)
		printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!\n");
906
907
908 HMD(("done\n"));
909}
910
911#define STOP_TRIES 16
912
913
914static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
915{
916 int tries = STOP_TRIES;
917
918 HMD(("happy_meal_stop: reset, "));
919
920
921 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
922 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
923 udelay(20);
924
925
926 if (!tries)
		printk(KERN_ERR "happy meal: Fry guys.\n");
928
929
930 HMD(("done\n"));
931}
932
933
934static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
935{
936 struct net_device_stats *stats = &hp->dev->stats;
937
938 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
939 hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
940
941 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
942 hme_write32(hp, bregs + BMAC_UNALECTR, 0);
943
944 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
945 hme_write32(hp, bregs + BMAC_GLECTR, 0);
946
947 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
948
949 stats->collisions +=
950 (hme_read32(hp, bregs + BMAC_EXCTR) +
951 hme_read32(hp, bregs + BMAC_LTCTR));
952 hme_write32(hp, bregs + BMAC_EXCTR, 0);
953 hme_write32(hp, bregs + BMAC_LTCTR, 0);
954}
955
956
957static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
958{
959 ASD(("happy_meal_poll_stop: "));
960
961
962 if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
963 (HFLAG_POLLENABLE | HFLAG_POLL)) {
964 HMD(("not polling, return\n"));
965 return;
966 }
967
968
969 ASD(("were polling, mif ints off, "));
970 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
971
972
973 ASD(("polling off, "));
974 hme_write32(hp, tregs + TCVR_CFG,
975 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
976
977
978 hp->happy_flags &= ~(HFLAG_POLL);
979
980
981 udelay(200);
982 ASD(("done\n"));
983}
984
985
986
987
988#define TCVR_RESET_TRIES 16
989#define TCVR_UNISOLATE_TRIES 32
990
991
992static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
993{
994 u32 tconfig;
995 int result, tries = TCVR_RESET_TRIES;
996
997 tconfig = hme_read32(hp, tregs + TCVR_CFG);
	ASD(("happy_meal_tcvr_reset: tcfg<%08x> ", tconfig));
999 if (hp->tcvr_type == external) {
1000 ASD(("external<"));
1001 hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
1002 hp->tcvr_type = internal;
1003 hp->paddr = TCV_PADDR_ITX;
1004 ASD(("ISOLATE,"));
1005 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1006 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1007 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1008 if (result == TCVR_FAILURE) {
1009 ASD(("phyread_fail>\n"));
1010 return -1;
1011 }
1012 ASD(("phyread_ok,PSELECT>"));
1013 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1014 hp->tcvr_type = external;
1015 hp->paddr = TCV_PADDR_ETX;
1016 } else {
1017 if (tconfig & TCV_CFG_MDIO1) {
1018 ASD(("internal<PSELECT,"));
1019 hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
1020 ASD(("ISOLATE,"));
1021 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1022 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1023 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1024 if (result == TCVR_FAILURE) {
1025 ASD(("phyread_fail>\n"));
1026 return -1;
1027 }
1028 ASD(("phyread_ok,~PSELECT>"));
1029 hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
1030 hp->tcvr_type = internal;
1031 hp->paddr = TCV_PADDR_ITX;
1032 }
1033 }
1034
1035 ASD(("BMCR_RESET "));
1036 happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
1037
1038 while (--tries) {
1039 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1040 if (result == TCVR_FAILURE)
1041 return -1;
1042 hp->sw_bmcr = result;
1043 if (!(result & BMCR_RESET))
1044 break;
1045 udelay(20);
1046 }
1047 if (!tries) {
1048 ASD(("BMCR RESET FAILED!\n"));
1049 return -1;
1050 }
1051 ASD(("RESET_OK\n"));
1052
1053
1054 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1055 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1056 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1057 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1058
1059 ASD(("UNISOLATE"));
1060 hp->sw_bmcr &= ~(BMCR_ISOLATE);
1061 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1062
1063 tries = TCVR_UNISOLATE_TRIES;
1064 while (--tries) {
1065 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1066 if (result == TCVR_FAILURE)
1067 return -1;
1068 if (!(result & BMCR_ISOLATE))
1069 break;
1070 udelay(20);
1071 }
1072 if (!tries) {
1073 ASD((" FAILED!\n"));
1074 return -1;
1075 }
1076 ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
1077 if (!is_lucent_phy(hp)) {
1078 result = happy_meal_tcvr_read(hp, tregs,
1079 DP83840_CSCONFIG);
1080 happy_meal_tcvr_write(hp, tregs,
1081 DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
1082 }
1083 return 0;
1084}
1085
1086
1087
1088
1089
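/* Figure out whether the internal or the external transceiver is in use
 * by sampling the MDIO0/MDIO1 sense bits in the MIF configuration
 * register, then point paddr and tcvr_type at whichever PHY responded.
 */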
1090static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
1091{
1092 unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
1093
1094 ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
1095 if (hp->happy_flags & HFLAG_POLL) {
1096
1097 ASD(("<polling> "));
1098 if (hp->tcvr_type == internal) {
1099 if (tconfig & TCV_CFG_MDIO1) {
1100 ASD(("<internal> <poll stop> "));
1101 happy_meal_poll_stop(hp, tregs);
1102 hp->paddr = TCV_PADDR_ETX;
1103 hp->tcvr_type = external;
1104 ASD(("<external>\n"));
1105 tconfig &= ~(TCV_CFG_PENABLE);
1106 tconfig |= TCV_CFG_PSELECT;
1107 hme_write32(hp, tregs + TCVR_CFG, tconfig);
1108 }
1109 } else {
1110 if (hp->tcvr_type == external) {
1111 ASD(("<external> "));
1112 if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
1113 ASD(("<poll stop> "));
1114 happy_meal_poll_stop(hp, tregs);
1115 hp->paddr = TCV_PADDR_ITX;
1116 hp->tcvr_type = internal;
1117 ASD(("<internal>\n"));
1118 hme_write32(hp, tregs + TCVR_CFG,
1119 hme_read32(hp, tregs + TCVR_CFG) &
1120 ~(TCV_CFG_PSELECT));
1121 }
1122 ASD(("\n"));
1123 } else {
1124 ASD(("<none>\n"));
1125 }
1126 }
1127 } else {
1128 u32 reread = hme_read32(hp, tregs + TCVR_CFG);
1129
1130
1131 ASD(("<not polling> "));
1132 if (reread & TCV_CFG_MDIO1) {
1133 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1134 hp->paddr = TCV_PADDR_ETX;
1135 hp->tcvr_type = external;
1136 ASD(("<external>\n"));
1137 } else {
1138 if (reread & TCV_CFG_MDIO0) {
1139 hme_write32(hp, tregs + TCVR_CFG,
1140 tconfig & ~(TCV_CFG_PSELECT));
1141 hp->paddr = TCV_PADDR_ITX;
1142 hp->tcvr_type = internal;
1143 ASD(("<internal>\n"));
1144 } else {
				printk(KERN_ERR "happy meal: Transceiver and a coke please.\n");
1146 hp->tcvr_type = none;
1147 ASD(("<none>\n"));
1148 }
1149 }
1150 }
1151}
1152
1196
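/* The receive paths below allocate their buffers through
 * happy_meal_alloc_skb(), which does not appear earlier in this file.
 * A minimal sketch is given here, assuming the HME wants its RX buffers
 * 64-byte aligned (the constant 64 below is that assumption): the
 * allocation is padded and skb->data is then pushed up to the next
 * 64-byte boundary.
 */
static struct sk_buff *happy_meal_alloc_skb(unsigned int length, gfp_t gfp_flags)
{
	struct sk_buff *skb;
	unsigned long aligned;

	/* Over-allocate so the data area can be aligned without
	 * running out of room for the caller's requested length.
	 */
	length += 64;
	skb = alloc_skb(length, gfp_flags);
	if (!skb)
		return NULL;

	/* Reserve up to 63 bytes so skb->data lands on a 64-byte boundary. */
	aligned = ((unsigned long)skb->data + 63UL) & ~63UL;
	skb_reserve(skb, aligned - (unsigned long)skb->data);

	return skb;
}
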
1197static void happy_meal_clean_rings(struct happy_meal *hp)
1198{
1199 int i;
1200
1201 for (i = 0; i < RX_RING_SIZE; i++) {
1202 if (hp->rx_skbs[i] != NULL) {
1203 struct sk_buff *skb = hp->rx_skbs[i];
1204 struct happy_meal_rxd *rxd;
1205 u32 dma_addr;
1206
1207 rxd = &hp->happy_block->happy_meal_rxd[i];
1208 dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
1209 dma_unmap_single(hp->dma_dev, dma_addr,
1210 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1211 dev_kfree_skb_any(skb);
1212 hp->rx_skbs[i] = NULL;
1213 }
1214 }
1215
1216 for (i = 0; i < TX_RING_SIZE; i++) {
1217 if (hp->tx_skbs[i] != NULL) {
1218 struct sk_buff *skb = hp->tx_skbs[i];
1219 struct happy_meal_txd *txd;
1220 u32 dma_addr;
1221 int frag;
1222
1223 hp->tx_skbs[i] = NULL;
1224
1225 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1226 txd = &hp->happy_block->happy_meal_txd[i];
1227 dma_addr = hme_read_desc32(hp, &txd->tx_addr);
1228 if (!frag)
1229 dma_unmap_single(hp->dma_dev, dma_addr,
1230 (hme_read_desc32(hp, &txd->tx_flags)
1231 & TXFLAG_SIZE),
1232 DMA_TO_DEVICE);
1233 else
1234 dma_unmap_page(hp->dma_dev, dma_addr,
1235 (hme_read_desc32(hp, &txd->tx_flags)
1236 & TXFLAG_SIZE),
1237 DMA_TO_DEVICE);
1238
1239 if (frag != skb_shinfo(skb)->nr_frags)
1240 i++;
1241 }
1242
1243 dev_kfree_skb_any(skb);
1244 }
1245 }
1246}
1247
1248
1249static void happy_meal_init_rings(struct happy_meal *hp)
1250{
1251 struct hmeal_init_block *hb = hp->happy_block;
1252 int i;
1253
1254 HMD(("happy_meal_init_rings: counters to zero, "));
1255 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1256
1257
1258 HMD(("clean, "));
1259 happy_meal_clean_rings(hp);
1260
1261
1262 HMD(("init rxring, "));
1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb;
1265 u32 mapping;
1266
1267 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1268 if (!skb) {
1269 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1270 continue;
1271 }
1272 hp->rx_skbs[i] = skb;
1273
1274
1275 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1276 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1277 DMA_FROM_DEVICE);
1278 if (dma_mapping_error(hp->dma_dev, mapping)) {
1279 dev_kfree_skb_any(skb);
1280 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1281 continue;
1282 }
1283 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1284 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1285 mapping);
1286 skb_reserve(skb, RX_OFFSET);
1287 }
1288
1289 HMD(("init txring, "));
1290 for (i = 0; i < TX_RING_SIZE; i++)
1291 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1292
1293 HMD(("done\n"));
1294}
1295
1296
1297static void
1298happy_meal_begin_auto_negotiation(struct happy_meal *hp,
1299 void __iomem *tregs,
1300 const struct ethtool_link_ksettings *ep)
1301{
1302 int timeout;
1303
1304
1305 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1306 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1307 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1308 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1309
1310
1311
1312 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1313 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1314
1315 if (hp->sw_bmsr & BMSR_10HALF)
1316 hp->sw_advertise |= (ADVERTISE_10HALF);
1317 else
1318 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1319
1320 if (hp->sw_bmsr & BMSR_10FULL)
1321 hp->sw_advertise |= (ADVERTISE_10FULL);
1322 else
1323 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1324 if (hp->sw_bmsr & BMSR_100HALF)
1325 hp->sw_advertise |= (ADVERTISE_100HALF);
1326 else
1327 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1328 if (hp->sw_bmsr & BMSR_100FULL)
1329 hp->sw_advertise |= (ADVERTISE_100FULL);
1330 else
1331 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1332 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1333
1334
1335
1336
1337
1338
1339
1340#ifdef AUTO_SWITCH_DEBUG
1341 ASD(("%s: Advertising [ ", hp->dev->name));
1342 if (hp->sw_advertise & ADVERTISE_10HALF)
1343 ASD(("10H "));
1344 if (hp->sw_advertise & ADVERTISE_10FULL)
1345 ASD(("10F "));
1346 if (hp->sw_advertise & ADVERTISE_100HALF)
1347 ASD(("100H "));
1348 if (hp->sw_advertise & ADVERTISE_100FULL)
1349 ASD(("100F "));
1350#endif
1351
1352
1353 hp->sw_bmcr |= BMCR_ANENABLE;
1354 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1355
1356
1357 hp->sw_bmcr |= BMCR_ANRESTART;
1358 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1359
1360
1361
1362 timeout = 64;
1363 while (--timeout) {
1364 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1365 if (!(hp->sw_bmcr & BMCR_ANRESTART))
1366 break;
1367 udelay(10);
1368 }
1369 if (!timeout) {
1370 printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
1371 "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
1372 printk(KERN_NOTICE "%s: Performing force link detection.\n",
1373 hp->dev->name);
1374 goto force_link;
1375 } else {
1376 hp->timer_state = arbwait;
1377 }
1378 } else {
1379force_link:
1380
1381
1382
1383
1384
1385
1386
1387
1388 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1389 hp->sw_bmcr = BMCR_SPEED100;
1390 } else {
1391 if (ep->base.speed == SPEED_100)
1392 hp->sw_bmcr = BMCR_SPEED100;
1393 else
1394 hp->sw_bmcr = 0;
1395 if (ep->base.duplex == DUPLEX_FULL)
1396 hp->sw_bmcr |= BMCR_FULLDPLX;
1397 }
1398 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1399
1400 if (!is_lucent_phy(hp)) {
1401
1402
1403
1404
1405 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
1406 DP83840_CSCONFIG);
1407 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
1408 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
1409 hp->sw_csconfig);
1410 }
1411 hp->timer_state = ltrywait;
1412 }
1413
1414 hp->timer_ticks = 0;
1415 hp->happy_timer.expires = jiffies + (12 * HZ)/10;
1416 hp->happy_timer.data = (unsigned long) hp;
1417 hp->happy_timer.function = happy_meal_timer;
1418 add_timer(&hp->happy_timer);
1419}
1420
1421
1422static int happy_meal_init(struct happy_meal *hp)
1423{
1424 void __iomem *gregs = hp->gregs;
1425 void __iomem *etxregs = hp->etxregs;
1426 void __iomem *erxregs = hp->erxregs;
1427 void __iomem *bregs = hp->bigmacregs;
1428 void __iomem *tregs = hp->tcvregs;
1429 u32 regtmp, rxcfg;
1430 unsigned char *e = &hp->dev->dev_addr[0];
1431
1432
1433 del_timer(&hp->happy_timer);
1434
1435 HMD(("happy_meal_init: happy_flags[%08x] ",
1436 hp->happy_flags));
1437 if (!(hp->happy_flags & HFLAG_INIT)) {
1438 HMD(("set HFLAG_INIT, "));
1439 hp->happy_flags |= HFLAG_INIT;
1440 happy_meal_get_counters(hp, bregs);
1441 }
1442
1443
1444 HMD(("to happy_meal_poll_stop\n"));
1445 happy_meal_poll_stop(hp, tregs);
1446
1447
1448 HMD(("happy_meal_init: to happy_meal_stop\n"));
1449 happy_meal_stop(hp, gregs);
1450
1451
1452 HMD(("happy_meal_init: to happy_meal_init_rings\n"));
1453 happy_meal_init_rings(hp);
1454
1455
1456 HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
1457 hme_read32(hp, tregs + TCVR_IMASK)));
1458 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1459
1460
1461 if (hp->happy_flags & HFLAG_FENABLE) {
1462 HMD(("use frame old[%08x], ",
1463 hme_read32(hp, tregs + TCVR_CFG)));
1464 hme_write32(hp, tregs + TCVR_CFG,
1465 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1466 } else {
1467 HMD(("use bitbang old[%08x], ",
1468 hme_read32(hp, tregs + TCVR_CFG)));
1469 hme_write32(hp, tregs + TCVR_CFG,
1470 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1471 }
1472
1473
1474 HMD(("to happy_meal_transceiver_check\n"));
1475 happy_meal_transceiver_check(hp, tregs);
1476
1477
1478 HMD(("happy_meal_init: "));
1479 switch(hp->tcvr_type) {
1480 case none:
1481
1482 HMD(("AAIEEE no transceiver type, EAGAIN"));
1483 return -EAGAIN;
1484
1485 case internal:
1486
1487 HMD(("internal, using MII, "));
1488 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1489 break;
1490
1491 case external:
1492
1493 HMD(("external, disable MII, "));
1494 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1495 break;
1496 }
1497
1498 if (happy_meal_tcvr_reset(hp, tregs))
1499 return -EAGAIN;
1500
1501
1502 HMD(("tx/rx reset, "));
1503 happy_meal_tx_reset(hp, bregs);
1504 happy_meal_rx_reset(hp, bregs);
1505
1506
1507 HMD(("jsize/ipg1/ipg2, "));
1508 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1509 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1510 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1511
1512
1513 HMD(("rseed/macaddr, "));
1514
1515
1516 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1517
1518 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1519 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1520 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
1521
1522 HMD(("htable, "));
1523 if ((hp->dev->flags & IFF_ALLMULTI) ||
1524 (netdev_mc_count(hp->dev) > 64)) {
1525 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1526 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1527 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1528 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1529 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1530 u16 hash_table[4];
1531 struct netdev_hw_addr *ha;
1532 u32 crc;
1533
1534 memset(hash_table, 0, sizeof(hash_table));
1535 netdev_for_each_mc_addr(ha, hp->dev) {
1536 crc = ether_crc_le(6, ha->addr);
1537 crc >>= 26;
1538 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1539 }
1540 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1541 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1542 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1543 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1544 } else {
1545 hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1546 hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1547 hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1548 hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1549 }
1550
1551
1552 HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
1553 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1554 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
1555 hme_write32(hp, erxregs + ERX_RING,
1556 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1557 hme_write32(hp, etxregs + ETX_RING,
1558 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1559
1560
1561
1562
1563
1564
1565 if (hme_read32(hp, erxregs + ERX_RING) !=
1566 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1567 hme_write32(hp, erxregs + ERX_RING,
1568 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1569 | 0x4);
1570
1571
1572 HMD(("happy_meal_init: old[%08x] bursts<",
1573 hme_read32(hp, gregs + GREG_CFG)));
1574
1575#ifndef CONFIG_SPARC
1576
1577 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1578#else
1579 if ((hp->happy_bursts & DMA_BURST64) &&
1580 ((hp->happy_flags & HFLAG_PCI) != 0
1581#ifdef CONFIG_SBUS
1582 || sbus_can_burst64()
1583#endif
1584 || 0)) {
1585 u32 gcfg = GREG_CFG_BURST64;
1586
1587
1588
1589
1590
1591#ifdef CONFIG_SBUS
1592 if ((hp->happy_flags & HFLAG_PCI) == 0) {
1593 struct platform_device *op = hp->happy_dev;
1594 if (sbus_can_dma_64bit()) {
1595 sbus_set_sbus64(&op->dev,
1596 hp->happy_bursts);
1597 gcfg |= GREG_CFG_64BIT;
1598 }
1599 }
1600#endif
1601
1602 HMD(("64>"));
1603 hme_write32(hp, gregs + GREG_CFG, gcfg);
1604 } else if (hp->happy_bursts & DMA_BURST32) {
1605 HMD(("32>"));
1606 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1607 } else if (hp->happy_bursts & DMA_BURST16) {
1608 HMD(("16>"));
1609 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1610 } else {
1611 HMD(("XXX>"));
1612 hme_write32(hp, gregs + GREG_CFG, 0);
1613 }
1614#endif
1615
1616
1617 HMD((", enable global interrupts, "));
1618 hme_write32(hp, gregs + GREG_IMASK,
1619 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1620 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1621
1622
1623 HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
1624 hme_read32(hp, etxregs + ETX_RSIZE)));
1625 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1626
1627
1628 HMD(("tx dma enable old[%08x], ",
1629 hme_read32(hp, etxregs + ETX_CFG)));
1630 hme_write32(hp, etxregs + ETX_CFG,
1631 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
1632
1633
1634
1635
1636
1637
1638 HMD(("erx regs bug old[%08x]\n",
1639 hme_read32(hp, erxregs + ERX_CFG)));
1640 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1641 regtmp = hme_read32(hp, erxregs + ERX_CFG);
1642 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1643 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1644 printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
1645 printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
1646 ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1647
1648 }
1649
1650
1651 HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
1652 hme_read32(hp, bregs + BMAC_RXCFG)));
1653 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1654 if (hp->dev->flags & IFF_PROMISC)
1655 rxcfg |= BIGMAC_RXCFG_PMISC;
1656 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1657
1658
1659 udelay(10);
1660
1661
1662 HMD(("BIGMAC init, "));
1663 regtmp = 0;
1664 if (hp->happy_flags & HFLAG_FULL)
1665 regtmp |= BIGMAC_TXCFG_FULLDPLX;
1666
1667
1668
1669
1670 hme_write32(hp, bregs + BMAC_TXCFG, regtmp );
1671
1672
1673 hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1674
1675
1676 regtmp = BIGMAC_XCFG_ODENABLE;
1677
1678
1679 if (hp->happy_flags & HFLAG_LANCE)
1680 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1681
1682
1683 if (hp->tcvr_type == external)
1684 regtmp |= BIGMAC_XCFG_MIIDISAB;
1685
1686 HMD(("XIF config old[%08x], ",
1687 hme_read32(hp, bregs + BMAC_XIFCFG)));
1688 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1689
1690
1691 HMD(("tx old[%08x] and rx [%08x] ON!\n",
1692 hme_read32(hp, bregs + BMAC_TXCFG),
1693 hme_read32(hp, bregs + BMAC_RXCFG)));
1694
1695
1696 hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
1697 hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
1698
1699 hme_write32(hp, bregs + BMAC_TXCFG,
1700 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1701 hme_write32(hp, bregs + BMAC_RXCFG,
1702 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1703
1704
1705 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1706
1707
1708 return 0;
1709}
1710
1711
1712static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1713{
1714 void __iomem *tregs = hp->tcvregs;
1715 void __iomem *bregs = hp->bigmacregs;
1716 void __iomem *gregs = hp->gregs;
1717
1718 happy_meal_stop(hp, gregs);
1719 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1720 if (hp->happy_flags & HFLAG_FENABLE)
1721 hme_write32(hp, tregs + TCVR_CFG,
1722 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1723 else
1724 hme_write32(hp, tregs + TCVR_CFG,
1725 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1726 happy_meal_transceiver_check(hp, tregs);
1727 switch(hp->tcvr_type) {
1728 case none:
1729 return;
1730 case internal:
1731 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1732 break;
1733 case external:
1734 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1735 break;
1736 }
1737 if (happy_meal_tcvr_reset(hp, tregs))
1738 return;
1739
1740
1741 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1742 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1743
1744
1745 if (hp->sw_bmsr & BMSR_10HALF)
1746 hp->sw_advertise |= (ADVERTISE_10HALF);
1747 else
1748 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1749
1750 if (hp->sw_bmsr & BMSR_10FULL)
1751 hp->sw_advertise |= (ADVERTISE_10FULL);
1752 else
1753 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1754 if (hp->sw_bmsr & BMSR_100HALF)
1755 hp->sw_advertise |= (ADVERTISE_100HALF);
1756 else
1757 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1758 if (hp->sw_bmsr & BMSR_100FULL)
1759 hp->sw_advertise |= (ADVERTISE_100FULL);
1760 else
1761 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1762
1763
1764 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1765}
1766
1767
1768
1769
1770
1771
1772static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1773{
1774 int reset = 0;
1775
1776
1777 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1778 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1779 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1780 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1781 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1782 GREG_STAT_SLVPERR))
1783 printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
1784 hp->dev->name, status);
1785
1786 if (status & GREG_STAT_RFIFOVF) {
1787
1788
1789 printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
1790 }
1791
1792 if (status & GREG_STAT_STSTERR) {
1793
1794 printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
1795 reset = 1;
1796 }
1797
1798 if (status & GREG_STAT_TFIFO_UND) {
1799
1800 printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
1801 hp->dev->name);
1802 reset = 1;
1803 }
1804
1805 if (status & GREG_STAT_MAXPKTERR) {
1806
1807
1808
1809 printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
1810 reset = 1;
1811 }
1812
1813 if (status & GREG_STAT_NORXD) {
1814
1815
1816
1817
1818
1819 printk(KERN_INFO "%s: Happy Meal out of receive "
1820 "descriptors, packet dropped.\n",
1821 hp->dev->name);
1822 }
1823
1824 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1825
1826 printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
1827 if (status & GREG_STAT_RXERR)
1828 printk("GenericError ");
1829 if (status & GREG_STAT_RXPERR)
1830 printk("ParityError ");
1831 if (status & GREG_STAT_RXTERR)
1832 printk("RxTagBotch ");
1833 printk("]\n");
1834 reset = 1;
1835 }
1836
1837 if (status & GREG_STAT_EOPERR) {
1838
1839
1840
1841 printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
1842 hp->dev->name);
1843 reset = 1;
1844 }
1845
1846 if (status & GREG_STAT_MIFIRQ) {
1847
1848 printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
1849 }
1850
1851 if (status &
1852 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1853
1854 printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
1855 if (status & GREG_STAT_TXEACK)
1856 printk("GenericError ");
1857 if (status & GREG_STAT_TXLERR)
1858 printk("LateError ");
1859 if (status & GREG_STAT_TXPERR)
1860 printk("ParityError ");
1861 if (status & GREG_STAT_TXTERR)
1862 printk("TagBotch ");
1863 printk("]\n");
1864 reset = 1;
1865 }
1866
1867 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
1868
1869
1870
1871 printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
1872 hp->dev->name,
1873 (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1874 reset = 1;
1875 }
1876
1877 if (reset) {
1878 printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
1879 happy_meal_init(hp);
1880 return 1;
1881 }
1882 return 0;
1883}
1884
1885
1886static void happy_meal_mif_interrupt(struct happy_meal *hp)
1887{
1888 void __iomem *tregs = hp->tcvregs;
1889
1890 printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
1891 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1892 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
1893
1894
	if (hp->sw_lpa & LPA_100FULL) {
		printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.\n", hp->dev->name);
		hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
	} else if (hp->sw_lpa & LPA_100HALF) {
		printk(KERN_INFO "%s: Switching to 100Mbps at half duplex.\n", hp->dev->name);
		hp->sw_bmcr |= BMCR_SPEED100;
	} else if (hp->sw_lpa & LPA_10FULL) {
		printk(KERN_INFO "%s: Switching to 10Mbps at full duplex.\n", hp->dev->name);
		hp->sw_bmcr |= BMCR_FULLDPLX;
	} else {
		printk(KERN_INFO "%s: Using 10Mbps at half duplex.\n", hp->dev->name);
	}
1907 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1908
1909
1910 happy_meal_poll_stop(hp, tregs);
1911}
1912
1913#ifdef TXDEBUG
1914#define TXD(x) printk x
1915#else
1916#define TXD(x)
1917#endif
1918
1919
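/* Reclaim transmit descriptors the chip has finished with, unmapping
 * their DMA buffers and freeing the skbs.  Called from the interrupt
 * handlers with hp->happy_lock held.
 */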
1920static void happy_meal_tx(struct happy_meal *hp)
1921{
1922 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1923 struct happy_meal_txd *this;
1924 struct net_device *dev = hp->dev;
1925 int elem;
1926
1927 elem = hp->tx_old;
1928 TXD(("TX<"));
1929 while (elem != hp->tx_new) {
1930 struct sk_buff *skb;
1931 u32 flags, dma_addr, dma_len;
1932 int frag;
1933
1934 TXD(("[%d]", elem));
1935 this = &txbase[elem];
1936 flags = hme_read_desc32(hp, &this->tx_flags);
1937 if (flags & TXFLAG_OWN)
1938 break;
1939 skb = hp->tx_skbs[elem];
1940 if (skb_shinfo(skb)->nr_frags) {
1941 int last;
1942
1943 last = elem + skb_shinfo(skb)->nr_frags;
1944 last &= (TX_RING_SIZE - 1);
1945 flags = hme_read_desc32(hp, &txbase[last].tx_flags);
1946 if (flags & TXFLAG_OWN)
1947 break;
1948 }
1949 hp->tx_skbs[elem] = NULL;
1950 dev->stats.tx_bytes += skb->len;
1951
1952 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1953 dma_addr = hme_read_desc32(hp, &this->tx_addr);
1954 dma_len = hme_read_desc32(hp, &this->tx_flags);
1955
1956 dma_len &= TXFLAG_SIZE;
1957 if (!frag)
1958 dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1959 else
1960 dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1961
1962 elem = NEXT_TX(elem);
1963 this = &txbase[elem];
1964 }
1965
1966 dev_kfree_skb_irq(skb);
1967 dev->stats.tx_packets++;
1968 }
1969 hp->tx_old = elem;
1970 TXD((">"));
1971
1972 if (netif_queue_stopped(dev) &&
1973 TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
1974 netif_wake_queue(dev);
1975}
1976
1977#ifdef RXDEBUG
1978#define RXD(x) printk x
1979#else
1980#define RXD(x)
1981#endif
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
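/* Receive path.  Completed descriptors are walked until one still owned
 * by the chip is found.  Frames at or below RX_COPY_THRESHOLD are copied
 * into a small freshly allocated skb so the original DMA buffer can be
 * handed straight back to the ring; larger frames get a new replacement
 * buffer mapped in and the original full-sized buffer is passed up the
 * stack.
 */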
1992static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1993{
1994 struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
1995 struct happy_meal_rxd *this;
1996 int elem = hp->rx_new, drops = 0;
1997 u32 flags;
1998
1999 RXD(("RX<"));
2000 this = &rxbase[elem];
2001 while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
2002 struct sk_buff *skb;
2003 int len = flags >> 16;
2004 u16 csum = flags & RXFLAG_CSUM;
2005 u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
2006
2007 RXD(("[%d ", elem));
2008
2009
2010 if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
2011 RXD(("ERR(%08x)]", flags));
2012 dev->stats.rx_errors++;
2013 if (len < ETH_ZLEN)
2014 dev->stats.rx_length_errors++;
2015 if (len & (RXFLAG_OVERFLOW >> 16)) {
2016 dev->stats.rx_over_errors++;
2017 dev->stats.rx_fifo_errors++;
2018 }
2019
2020
2021 drop_it:
2022 dev->stats.rx_dropped++;
2023 hme_write_rxd(hp, this,
2024 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2025 dma_addr);
2026 goto next;
2027 }
2028 skb = hp->rx_skbs[elem];
2029 if (len > RX_COPY_THRESHOLD) {
2030 struct sk_buff *new_skb;
2031 u32 mapping;
2032
2033
2034 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
2035 if (new_skb == NULL) {
2036 drops++;
2037 goto drop_it;
2038 }
2039 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2040 mapping = dma_map_single(hp->dma_dev, new_skb->data,
2041 RX_BUF_ALLOC_SIZE,
2042 DMA_FROM_DEVICE);
2043 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2044 dev_kfree_skb_any(new_skb);
2045 drops++;
2046 goto drop_it;
2047 }
2048
2049 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2050 hp->rx_skbs[elem] = new_skb;
2051 hme_write_rxd(hp, this,
2052 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2053 mapping);
2054 skb_reserve(new_skb, RX_OFFSET);
2055
2056
2057 skb_trim(skb, len);
2058 } else {
2059 struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
2060
2061 if (copy_skb == NULL) {
2062 drops++;
2063 goto drop_it;
2064 }
2065
2066 skb_reserve(copy_skb, 2);
2067 skb_put(copy_skb, len);
2068 dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2069 skb_copy_from_linear_data(skb, copy_skb->data, len);
2070 dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2071
2072 hme_write_rxd(hp, this,
2073 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2074 dma_addr);
2075
2076 skb = copy_skb;
2077 }
2078
2079
2080 skb->csum = csum_unfold(~(__force __sum16)htons(csum));
2081 skb->ip_summed = CHECKSUM_COMPLETE;
2082
2083 RXD(("len=%d csum=%4x]", len, csum));
2084 skb->protocol = eth_type_trans(skb, dev);
2085 netif_rx(skb);
2086
2087 dev->stats.rx_packets++;
2088 dev->stats.rx_bytes += len;
2089 next:
2090 elem = NEXT_RX(elem);
2091 this = &rxbase[elem];
2092 }
2093 hp->rx_new = elem;
2094 if (drops)
2095 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
2096 RXD((">"));
2097}
2098
2099static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
2100{
2101 struct net_device *dev = dev_id;
2102 struct happy_meal *hp = netdev_priv(dev);
2103 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2104
2105 HMD(("happy_meal_interrupt: status=%08x ", happy_status));
2106
2107 spin_lock(&hp->happy_lock);
2108
2109 if (happy_status & GREG_STAT_ERRORS) {
2110 HMD(("ERRORS "));
2111 if (happy_meal_is_not_so_happy(hp, happy_status))
2112 goto out;
2113 }
2114
2115 if (happy_status & GREG_STAT_MIFIRQ) {
2116 HMD(("MIFIRQ "));
2117 happy_meal_mif_interrupt(hp);
2118 }
2119
2120 if (happy_status & GREG_STAT_TXALL) {
2121 HMD(("TXALL "));
2122 happy_meal_tx(hp);
2123 }
2124
2125 if (happy_status & GREG_STAT_RXTOHOST) {
2126 HMD(("RXTOHOST "));
2127 happy_meal_rx(hp, dev);
2128 }
2129
2130 HMD(("done\n"));
2131out:
2132 spin_unlock(&hp->happy_lock);
2133
2134 return IRQ_HANDLED;
2135}
2136
2137#ifdef CONFIG_SBUS
2138static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
2139{
2140 struct quattro *qp = (struct quattro *) cookie;
2141 int i;
2142
2143 for (i = 0; i < 4; i++) {
2144 struct net_device *dev = qp->happy_meals[i];
2145 struct happy_meal *hp = netdev_priv(dev);
2146 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2147
2148 HMD(("quattro_interrupt: status=%08x ", happy_status));
2149
2150 if (!(happy_status & (GREG_STAT_ERRORS |
2151 GREG_STAT_MIFIRQ |
2152 GREG_STAT_TXALL |
2153 GREG_STAT_RXTOHOST)))
2154 continue;
2155
2156 spin_lock(&hp->happy_lock);
2157
2158 if (happy_status & GREG_STAT_ERRORS) {
2159 HMD(("ERRORS "));
2160 if (happy_meal_is_not_so_happy(hp, happy_status))
2161 goto next;
2162 }
2163
2164 if (happy_status & GREG_STAT_MIFIRQ) {
2165 HMD(("MIFIRQ "));
2166 happy_meal_mif_interrupt(hp);
2167 }
2168
2169 if (happy_status & GREG_STAT_TXALL) {
2170 HMD(("TXALL "));
2171 happy_meal_tx(hp);
2172 }
2173
2174 if (happy_status & GREG_STAT_RXTOHOST) {
2175 HMD(("RXTOHOST "));
2176 happy_meal_rx(hp, dev);
2177 }
2178
2179 next:
2180 spin_unlock(&hp->happy_lock);
2181 }
2182 HMD(("done\n"));
2183
2184 return IRQ_HANDLED;
2185}
2186#endif
2187
2188static int happy_meal_open(struct net_device *dev)
2189{
2190 struct happy_meal *hp = netdev_priv(dev);
2191 int res;
2192
2193 HMD(("happy_meal_open: "));
2194
2195
2196
2197
2198 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2199 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2200 dev->name, dev);
2201 if (res) {
2202 HMD(("EAGAIN\n"));
2203 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
2204 hp->irq);
2205
2206 return -EAGAIN;
2207 }
2208 }
2209
2210 HMD(("to happy_meal_init\n"));
2211
2212 spin_lock_irq(&hp->happy_lock);
2213 res = happy_meal_init(hp);
2214 spin_unlock_irq(&hp->happy_lock);
2215
2216 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2217 free_irq(hp->irq, dev);
2218 return res;
2219}
2220
2221static int happy_meal_close(struct net_device *dev)
2222{
2223 struct happy_meal *hp = netdev_priv(dev);
2224
2225 spin_lock_irq(&hp->happy_lock);
2226 happy_meal_stop(hp, hp->gregs);
2227 happy_meal_clean_rings(hp);
2228
2229
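	/* Stop the periodic link-check timer while the lock is held. */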
2230 del_timer(&hp->happy_timer);
2231
2232 spin_unlock_irq(&hp->happy_lock);
2233
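	/* Mirror happy_meal_open(): the shared Quattro SBUS IRQ stays
	 * registered for the card's other ports, everything else frees its
	 * private IRQ now.
	 */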
2238 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2239 free_irq(hp->irq, dev);
2240
2241 return 0;
2242}
2243
2244#ifdef SXDEBUG
2245#define SXD(x) printk x
2246#else
2247#define SXD(x)
2248#endif
2249
2250static void happy_meal_tx_timeout(struct net_device *dev)
2251{
2252 struct happy_meal *hp = netdev_priv(dev);
2253
2254 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2255 tx_dump_log();
2256 printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
2257 hme_read32(hp, hp->gregs + GREG_STAT),
2258 hme_read32(hp, hp->etxregs + ETX_CFG),
2259 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
2260
2261 spin_lock_irq(&hp->happy_lock);
2262 happy_meal_init(hp);
2263 spin_unlock_irq(&hp->happy_lock);
2264
2265 netif_wake_queue(dev);
2266}
2267
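/* A later fragment failed to map for DMA: undo the mappings made so far,
 * i.e. the linear head at first_mapping plus every fragment descriptor
 * between first_entry and entry.
 */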
2268static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2269 u32 first_len, u32 first_entry, u32 entry)
2270{
2271 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2272
2273 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2274
2275 first_entry = NEXT_TX(first_entry);
2276 while (first_entry != entry) {
2277 struct happy_meal_txd *this = &txbase[first_entry];
2278 u32 addr, len;
2279
2280 addr = hme_read_desc32(hp, &this->tx_addr);
2281 len = hme_read_desc32(hp, &this->tx_flags);
2282 len &= TXFLAG_SIZE;
		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);

		first_entry = NEXT_TX(first_entry);
	}
2285}
2286
2287static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2288 struct net_device *dev)
2289{
2290 struct happy_meal *hp = netdev_priv(dev);
2291 int entry;
2292 u32 tx_flags;
2293
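	/* Build the base descriptor flags.  For CHECKSUM_PARTIAL packets the
	 * chip is told where the region to checksum begins and where to
	 * stuff the result.
	 */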
2294 tx_flags = TXFLAG_OWN;
2295 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2296 const u32 csum_start_off = skb_checksum_start_offset(skb);
2297 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
2298
2299 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
2300 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
2301 ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
2302 }
2303
2304 spin_lock_irq(&hp->happy_lock);
2305
2306 if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
2307 netif_stop_queue(dev);
2308 spin_unlock_irq(&hp->happy_lock);
2309 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
2310 dev->name);
2311 return NETDEV_TX_BUSY;
2312 }
2313
2314 entry = hp->tx_new;
	SXD(("SX<l[%d]e[%d]>", skb->len, entry));
2316 hp->tx_skbs[entry] = skb;
2317
2318 if (skb_shinfo(skb)->nr_frags == 0) {
2319 u32 mapping, len;
2320
2321 len = skb->len;
2322 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2323 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2324 goto out_dma_error;
2325 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2326 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2327 (tx_flags | (len & TXFLAG_SIZE)),
2328 mapping);
2329 entry = NEXT_TX(entry);
2330 } else {
2331 u32 first_len, first_mapping;
2332 int frag, first_entry = entry;
2333
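		/* The first chunk of the packet must be handed to the device
		 * last, otherwise the chip could start transmitting before
		 * the remaining descriptors are filled in.
		 */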
2337 first_len = skb_headlen(skb);
2338 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2339 DMA_TO_DEVICE);
2340 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2341 goto out_dma_error;
2342 entry = NEXT_TX(entry);
2343
2344 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
2345 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
2346 u32 len, mapping, this_txflags;
2347
2348 len = skb_frag_size(this_frag);
2349 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2350 0, len, DMA_TO_DEVICE);
2351 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2352 unmap_partial_tx_skb(hp, first_mapping, first_len,
2353 first_entry, entry);
2354 goto out_dma_error;
2355 }
2356 this_txflags = tx_flags;
2357 if (frag == skb_shinfo(skb)->nr_frags - 1)
2358 this_txflags |= TXFLAG_EOP;
2359 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2360 (this_txflags | (len & TXFLAG_SIZE)),
2361 mapping);
2362 entry = NEXT_TX(entry);
2363 }
2364 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
2365 (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
2366 first_mapping);
2367 }
2368
2369 hp->tx_new = entry;
2370
2371 if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
2372 netif_stop_queue(dev);
2373
2374
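	/* Kick the transmitter. */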
2375 hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
2376
2377 spin_unlock_irq(&hp->happy_lock);
2378
2379 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2380 return NETDEV_TX_OK;
2381
2382out_dma_error:
2383 hp->tx_skbs[hp->tx_new] = NULL;
2384 spin_unlock_irq(&hp->happy_lock);
2385
2386 dev_kfree_skb_any(skb);
2387 dev->stats.tx_dropped++;
2388 return NETDEV_TX_OK;
2389}
2390
2391static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2392{
2393 struct happy_meal *hp = netdev_priv(dev);
2394
2395 spin_lock_irq(&hp->happy_lock);
2396 happy_meal_get_counters(hp, hp->bigmacregs);
2397 spin_unlock_irq(&hp->happy_lock);
2398
2399 return &dev->stats;
2400}
2401
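/* Program the BigMAC receive filter: accept every multicast frame for
 * ALLMULTI or more than 64 groups, set the promiscuous bit for
 * IFF_PROMISC, otherwise load a 64-bit CRC hash table built from the
 * multicast list.
 */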
2402static void happy_meal_set_multicast(struct net_device *dev)
2403{
2404 struct happy_meal *hp = netdev_priv(dev);
2405 void __iomem *bregs = hp->bigmacregs;
2406 struct netdev_hw_addr *ha;
2407 u32 crc;
2408
2409 spin_lock_irq(&hp->happy_lock);
2410
2411 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2412 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2413 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2414 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2415 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2416 } else if (dev->flags & IFF_PROMISC) {
2417 hme_write32(hp, bregs + BMAC_RXCFG,
2418 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2419 } else {
2420 u16 hash_table[4];
2421
2422 memset(hash_table, 0, sizeof(hash_table));
2423 netdev_for_each_mc_addr(ha, dev) {
2424 crc = ether_crc_le(6, ha->addr);
2425 crc >>= 26;
2426 hash_table[crc >> 4] |= 1 << (crc & 0xf);
2427 }
2428 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2429 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2430 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2431 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2432 }
2433
2434 spin_unlock_irq(&hp->happy_lock);
2435}
2436
2437
2438static int hme_get_link_ksettings(struct net_device *dev,
2439 struct ethtool_link_ksettings *cmd)
2440{
2441 struct happy_meal *hp = netdev_priv(dev);
2442 u32 speed;
2443 u32 supported;
2444
2445 supported =
2446 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2447 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2448 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2449
2450
2451 cmd->base.port = PORT_TP;
2452 cmd->base.phy_address = 0;
2453
2454
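	/* Refresh the software copies of BMCR and LPA from the transceiver. */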
2455 spin_lock_irq(&hp->happy_lock);
2456 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2457 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2458 spin_unlock_irq(&hp->happy_lock);
2459
2460 if (hp->sw_bmcr & BMCR_ANENABLE) {
2461 cmd->base.autoneg = AUTONEG_ENABLE;
2462 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2463 SPEED_100 : SPEED_10);
2464 if (speed == SPEED_100)
2465 cmd->base.duplex =
2466 (hp->sw_lpa & (LPA_100FULL)) ?
2467 DUPLEX_FULL : DUPLEX_HALF;
2468 else
2469 cmd->base.duplex =
2470 (hp->sw_lpa & (LPA_10FULL)) ?
2471 DUPLEX_FULL : DUPLEX_HALF;
2472 } else {
2473 cmd->base.autoneg = AUTONEG_DISABLE;
2474 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2475 cmd->base.duplex =
2476 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2477 DUPLEX_FULL : DUPLEX_HALF;
2478 }
2479 cmd->base.speed = speed;
2480 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2481 supported);
2482
2483 return 0;
2484}
2485
2486static int hme_set_link_ksettings(struct net_device *dev,
2487 const struct ethtool_link_ksettings *cmd)
2488{
2489 struct happy_meal *hp = netdev_priv(dev);
2490
2491
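	/* Verify the settings we care about. */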
2492 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2493 cmd->base.autoneg != AUTONEG_DISABLE)
2494 return -EINVAL;
2495 if (cmd->base.autoneg == AUTONEG_DISABLE &&
2496 ((cmd->base.speed != SPEED_100 &&
2497 cmd->base.speed != SPEED_10) ||
2498 (cmd->base.duplex != DUPLEX_HALF &&
2499 cmd->base.duplex != DUPLEX_FULL)))
2500 return -EINVAL;
2501
2502
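	/* Kill the link timer and let happy_meal_begin_auto_negotiation()
	 * apply the new settings.
	 */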
2503 spin_lock_irq(&hp->happy_lock);
2504 del_timer(&hp->happy_timer);
2505 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2506 spin_unlock_irq(&hp->happy_lock);
2507
2508 return 0;
2509}
2510
2511static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2512{
2513 struct happy_meal *hp = netdev_priv(dev);
2514
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2517 if (hp->happy_flags & HFLAG_PCI) {
2518 struct pci_dev *pdev = hp->happy_dev;
2519 strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
2520 }
2521#ifdef CONFIG_SBUS
2522 else {
2523 const struct linux_prom_registers *regs;
2524 struct platform_device *op = hp->happy_dev;
2525 regs = of_get_property(op->dev.of_node, "regs", NULL);
2526 if (regs)
2527 snprintf(info->bus_info, sizeof(info->bus_info),
2528 "SBUS:%d",
2529 regs->which_io);
2530 }
2531#endif
2532}
2533
2534static u32 hme_get_link(struct net_device *dev)
2535{
2536 struct happy_meal *hp = netdev_priv(dev);
2537
2538 spin_lock_irq(&hp->happy_lock);
	hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
2540 spin_unlock_irq(&hp->happy_lock);
2541
2542 return hp->sw_bmsr & BMSR_LSTATUS;
2543}
2544
2545static const struct ethtool_ops hme_ethtool_ops = {
2546 .get_drvinfo = hme_get_drvinfo,
2547 .get_link = hme_get_link,
2548 .get_link_ksettings = hme_get_link_ksettings,
2549 .set_link_ksettings = hme_set_link_ksettings,
2550};
2551
2552static int hme_version_printed;
2553
2554#ifdef CONFIG_SBUS
2555
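/* Given one port of a Quattro SBUS card, find the struct quattro
 * describing the parent device, allocating it and adding it to
 * qfe_sbus_list the first time any port of that card is seen.
 */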
2560static struct quattro *quattro_sbus_find(struct platform_device *child)
2561{
2562 struct device *parent = child->dev.parent;
2563 struct platform_device *op;
2564 struct quattro *qp;
2565
2566 op = to_platform_device(parent);
2567 qp = platform_get_drvdata(op);
2568 if (qp)
2569 return qp;
2570
2571 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2572 if (qp != NULL) {
2573 int i;
2574
2575 for (i = 0; i < 4; i++)
2576 qp->happy_meals[i] = NULL;
2577
2578 qp->quattro_dev = child;
2579 qp->next = qfe_sbus_list;
2580 qfe_sbus_list = qp;
2581
2582 platform_set_drvdata(op, qp);
2583 }
2584 return qp;
2585}
2586
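/* After every port has been probed, request the single shared IRQ once
 * per fully populated Quattro card.
 */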
2591static int __init quattro_sbus_register_irqs(void)
2592{
2593 struct quattro *qp;
2594
2595 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2596 struct platform_device *op = qp->quattro_dev;
2597 int err, qfe_slot, skip = 0;
2598
2599 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2600 if (!qp->happy_meals[qfe_slot])
2601 skip = 1;
2602 }
2603 if (skip)
2604 continue;
2605
2606 err = request_irq(op->archdata.irqs[0],
2607 quattro_sbus_interrupt,
2608 IRQF_SHARED, "Quattro",
2609 qp);
2610 if (err != 0) {
2611 printk(KERN_ERR "Quattro HME: IRQ registration "
2612 "error %d.\n", err);
2613 return err;
2614 }
2615 }
2616
2617 return 0;
2618}
2619
2620static void quattro_sbus_free_irqs(void)
2621{
2622 struct quattro *qp;
2623
2624 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2625 struct platform_device *op = qp->quattro_dev;
2626 int qfe_slot, skip = 0;
2627
2628 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2629 if (!qp->happy_meals[qfe_slot])
2630 skip = 1;
2631 }
2632 if (skip)
2633 continue;
2634
2635 free_irq(op->archdata.irqs[0], qp);
2636 }
2637}
2638#endif
2639
2640#ifdef CONFIG_PCI
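/* Find (or create) the struct quattro for a PCI QFE port.  All four HMEs
 * on a card sit behind the same PCI bridge, so the bridge device is what
 * identifies the card.
 */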
2641static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2642{
2643 struct pci_dev *bdev = pdev->bus->self;
2644 struct quattro *qp;
2645
	if (!bdev)
		return NULL;
2647 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2648 struct pci_dev *qpdev = qp->quattro_dev;
2649
2650 if (qpdev == bdev)
2651 return qp;
2652 }
2653 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2654 if (qp != NULL) {
2655 int i;
2656
2657 for (i = 0; i < 4; i++)
2658 qp->happy_meals[i] = NULL;
2659
2660 qp->quattro_dev = bdev;
2661 qp->next = qfe_pci_list;
2662 qfe_pci_list = qp;
2663
2664
2665 qp->nranges = 0;
2666 }
2667 return qp;
2668}
2669#endif
2670
2671static const struct net_device_ops hme_netdev_ops = {
2672 .ndo_open = happy_meal_open,
2673 .ndo_stop = happy_meal_close,
2674 .ndo_start_xmit = happy_meal_start_xmit,
2675 .ndo_tx_timeout = happy_meal_tx_timeout,
2676 .ndo_get_stats = happy_meal_get_stats,
2677 .ndo_set_rx_mode = happy_meal_set_multicast,
2678 .ndo_set_mac_address = eth_mac_addr,
2679 .ndo_validate_addr = eth_validate_addr,
2680};
2681
2682#ifdef CONFIG_SBUS
2683static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2684{
2685 struct device_node *dp = op->dev.of_node, *sbus_dp;
2686 struct quattro *qp = NULL;
2687 struct happy_meal *hp;
2688 struct net_device *dev;
2689 int i, qfe_slot = -1;
2690 int err = -ENODEV;
2691
2692 sbus_dp = op->dev.parent->of_node;
2693
2694
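	/* The OF tree can hand us PCI devices too; only accept nodes whose
	 * parent really is an SBUS (or SBI) bus.
	 */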
2695 if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
2696 return err;
2697
2698 if (is_qfe) {
2699 qp = quattro_sbus_find(op);
2700 if (qp == NULL)
2701 goto err_out;
2702 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2703 if (qp->happy_meals[qfe_slot] == NULL)
2704 break;
2705 if (qfe_slot == 4)
2706 goto err_out;
2707 }
2708
2709 err = -ENOMEM;
2710 dev = alloc_etherdev(sizeof(struct happy_meal));
2711 if (!dev)
2712 goto err_out;
2713 SET_NETDEV_DEV(dev, &op->dev);
2714
2715 if (hme_version_printed++ == 0)
2716 printk(KERN_INFO "%s", version);
2717
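	/* If a MAC address was given on the module command line, use it
	 * (bumping the last octet for each additional port); otherwise fall
	 * back to the "local-mac-address" property or the machine's IDPROM.
	 */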
2721 for (i = 0; i < 6; i++) {
2722 if (macaddr[i] != 0)
2723 break;
2724 }
2725 if (i < 6) {
2726 for (i = 0; i < 6; i++)
2727 dev->dev_addr[i] = macaddr[i];
2728 macaddr[5]++;
2729 } else {
2730 const unsigned char *addr;
2731 int len;
2732
2733 addr = of_get_property(dp, "local-mac-address", &len);
2734
2735 if (qfe_slot != -1 && addr && len == ETH_ALEN)
2736 memcpy(dev->dev_addr, addr, ETH_ALEN);
2737 else
2738 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
2739 }
2740
2741 hp = netdev_priv(dev);
2742
2743 hp->happy_dev = op;
2744 hp->dma_dev = &op->dev;
2745
2746 spin_lock_init(&hp->happy_lock);
2747
2748 err = -ENODEV;
2749 if (qp != NULL) {
2750 hp->qfe_parent = qp;
2751 hp->qfe_ent = qfe_slot;
2752 qp->happy_meals[qfe_slot] = dev;
2753 }
2754
2755 hp->gregs = of_ioremap(&op->resource[0], 0,
2756 GREG_REG_SIZE, "HME Global Regs");
2757 if (!hp->gregs) {
2758 printk(KERN_ERR "happymeal: Cannot map global registers.\n");
2759 goto err_out_free_netdev;
2760 }
2761
2762 hp->etxregs = of_ioremap(&op->resource[1], 0,
2763 ETX_REG_SIZE, "HME TX Regs");
2764 if (!hp->etxregs) {
2765 printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
2766 goto err_out_iounmap;
2767 }
2768
2769 hp->erxregs = of_ioremap(&op->resource[2], 0,
2770 ERX_REG_SIZE, "HME RX Regs");
2771 if (!hp->erxregs) {
2772 printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
2773 goto err_out_iounmap;
2774 }
2775
2776 hp->bigmacregs = of_ioremap(&op->resource[3], 0,
2777 BMAC_REG_SIZE, "HME BIGMAC Regs");
2778 if (!hp->bigmacregs) {
2779 printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
2780 goto err_out_iounmap;
2781 }
2782
2783 hp->tcvregs = of_ioremap(&op->resource[4], 0,
				 TCVR_REG_SIZE, "HME Transceiver Regs");
2785 if (!hp->tcvregs) {
2786 printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
2787 goto err_out_iounmap;
2788 }
2789
2790 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
2791 if (hp->hm_revision == 0xff)
2792 hp->hm_revision = 0xa0;
2793
2794
2795 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
2796 hp->happy_flags = HFLAG_20_21;
2797 else if (hp->hm_revision != 0xa0)
2798 hp->happy_flags = HFLAG_NOT_A0;
2799
2800 if (qp != NULL)
2801 hp->happy_flags |= HFLAG_QUATTRO;
2802
2803
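	/* Pick up the supported DVMA burst sizes from the parent SBUS node. */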
2804 hp->happy_bursts = of_getintprop_default(sbus_dp,
2805 "burst-sizes", 0x00);
2806
2807 hp->happy_block = dma_alloc_coherent(hp->dma_dev,
2808 PAGE_SIZE,
2809 &hp->hblock_dvma,
2810 GFP_ATOMIC);
2811 err = -ENOMEM;
2812 if (!hp->happy_block)
2813 goto err_out_iounmap;
2814
2815
2816 hp->linkcheck = 0;
2817
2818
2819 hp->timer_state = asleep;
2820 hp->timer_ticks = 0;
2821
2822 init_timer(&hp->happy_timer);
2823
2824 hp->dev = dev;
2825 dev->netdev_ops = &hme_netdev_ops;
2826 dev->watchdog_timeo = 5*HZ;
2827 dev->ethtool_ops = &hme_ethtool_ops;
2828
2829
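	/* The chip can do scatter-gather and hardware checksums. */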
2830 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2831 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2832
2833 hp->irq = op->archdata.irqs[0];
2834
2835#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
2836
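	/* Both bus types are configured, so hook up the SBUS accessors. */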
2837 hp->read_desc32 = sbus_hme_read_desc32;
2838 hp->write_txd = sbus_hme_write_txd;
2839 hp->write_rxd = sbus_hme_write_rxd;
2840 hp->read32 = sbus_hme_read32;
2841 hp->write32 = sbus_hme_write32;
2842#endif
2843
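	/* Set up the transceiver's initial advertisement before the netdev
	 * is registered.
	 */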
2847 spin_lock_irq(&hp->happy_lock);
2848 happy_meal_set_initial_advertisement(hp);
2849 spin_unlock_irq(&hp->happy_lock);
2850
2851 err = register_netdev(hp->dev);
2852 if (err) {
2853 printk(KERN_ERR "happymeal: Cannot register net device, "
2854 "aborting.\n");
2855 goto err_out_free_coherent;
2856 }
2857
2858 platform_set_drvdata(op, hp);
2859
2860 if (qfe_slot != -1)
2861 printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
2862 dev->name, qfe_slot);
2863 else
2864 printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
2865 dev->name);
2866
2867 printk("%pM\n", dev->dev_addr);
2868
2869 return 0;
2870
2871err_out_free_coherent:
2872 dma_free_coherent(hp->dma_dev,
2873 PAGE_SIZE,
2874 hp->happy_block,
2875 hp->hblock_dvma);
2876
2877err_out_iounmap:
2878 if (hp->gregs)
2879 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
2880 if (hp->etxregs)
2881 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
2882 if (hp->erxregs)
2883 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
2884 if (hp->bigmacregs)
2885 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
2886 if (hp->tcvregs)
2887 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
2888
2889 if (qp)
2890 qp->happy_meals[qfe_slot] = NULL;
2891
2892err_out_free_netdev:
2893 free_netdev(dev);
2894
2895err_out:
2896 return err;
2897}
2898#endif
2899
2900#ifdef CONFIG_PCI
2901#ifndef CONFIG_SPARC
2902static int is_quattro_p(struct pci_dev *pdev)
2903{
2904 struct pci_dev *busdev = pdev->bus->self;
2905 struct pci_dev *this_pdev;
2906 int n_hmes;
2907
2908 if (busdev == NULL ||
2909 busdev->vendor != PCI_VENDOR_ID_DEC ||
2910 busdev->device != PCI_DEVICE_ID_DEC_21153)
2911 return 0;
2912
2913 n_hmes = 0;
2914 list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
2915 if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
2916 this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
2917 n_hmes++;
2918 }
2919
2920 if (n_hmes != 4)
2921 return 0;
2922
2923 return 1;
2924}
2925
2926
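/* Scan the expansion ROM for the VPD "NA" (network address) keyword,
 * whose 6-byte value is a MAC address.  'index' selects which match to
 * return, one per QFE port.
 */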
2927static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
2928{
2929 int this_offset;
2930
2931 for (this_offset = 0x20; this_offset < len; this_offset++) {
2932 void __iomem *p = rom_base + this_offset;
2933
2934 if (readb(p + 0) != 0x90 ||
2935 readb(p + 1) != 0x00 ||
2936 readb(p + 2) != 0x09 ||
2937 readb(p + 3) != 0x4e ||
2938 readb(p + 4) != 0x41 ||
2939 readb(p + 5) != 0x06)
2940 continue;
2941
2942 this_offset += 6;
2943 p += 6;
2944
2945 if (index == 0) {
2946 int i;
2947
2948 for (i = 0; i < 6; i++)
2949 dev_addr[i] = readb(p + i);
2950 return 1;
2951 }
2952 index--;
2953 }
2954 return 0;
2955}
2956
2957static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
2958{
2959 size_t size;
2960 void __iomem *p = pci_map_rom(pdev, &size);
2961
2962 if (p) {
2963 int index = 0;
2964 int found;
2965
2966 if (is_quattro_p(pdev))
2967 index = PCI_SLOT(pdev->devfn);
2968
		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, size, index, dev_addr);
2972 pci_unmap_rom(pdev, p);
2973 if (found)
2974 return;
2975 }
2976
2977
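	/* No usable VPD; fall back to Sun's OUI (08:00:20) with a random
	 * host part.
	 */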
2978 dev_addr[0] = 0x08;
2979 dev_addr[1] = 0x00;
2980 dev_addr[2] = 0x20;
2981 get_random_bytes(&dev_addr[3], 3);
2982}
2983#endif
2984
2985static int happy_meal_pci_probe(struct pci_dev *pdev,
2986 const struct pci_device_id *ent)
2987{
2988 struct quattro *qp = NULL;
2989#ifdef CONFIG_SPARC
2990 struct device_node *dp;
2991#endif
2992 struct happy_meal *hp;
2993 struct net_device *dev;
2994 void __iomem *hpreg_base;
2995 unsigned long hpreg_res;
2996 int i, qfe_slot = -1;
2997 char prom_name[64];
2998 int err;
2999
3000
3001#ifdef CONFIG_SPARC
3002 dp = pci_device_to_OF_node(pdev);
3003 strcpy(prom_name, dp->name);
3004#else
3005 if (is_quattro_p(pdev))
3006 strcpy(prom_name, "SUNW,qfe");
3007 else
3008 strcpy(prom_name, "SUNW,hme");
3009#endif
3010
3011 err = -ENODEV;
3012
3013 if (pci_enable_device(pdev))
3014 goto err_out;
3015 pci_set_master(pdev);
3016
3017 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
3018 qp = quattro_pci_find(pdev);
3019 if (qp == NULL)
3020 goto err_out;
3021 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
3022 if (qp->happy_meals[qfe_slot] == NULL)
3023 break;
3024 if (qfe_slot == 4)
3025 goto err_out;
3026 }
3027
3028 dev = alloc_etherdev(sizeof(struct happy_meal));
3029 err = -ENOMEM;
3030 if (!dev)
3031 goto err_out;
3032 SET_NETDEV_DEV(dev, &pdev->dev);
3033
3034 if (hme_version_printed++ == 0)
3035 printk(KERN_INFO "%s", version);
3036
3037 hp = netdev_priv(dev);
3038
3039 hp->happy_dev = pdev;
3040 hp->dma_dev = &pdev->dev;
3041
3042 spin_lock_init(&hp->happy_lock);
3043
3044 if (qp != NULL) {
3045 hp->qfe_parent = qp;
3046 hp->qfe_ent = qfe_slot;
3047 qp->happy_meals[qfe_slot] = dev;
3048 }
3049
3050 hpreg_res = pci_resource_start(pdev, 0);
3051 err = -ENODEV;
3052 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3053 printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
3054 goto err_out_clear_quattro;
3055 }
3056 if (pci_request_regions(pdev, DRV_NAME)) {
3057 printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
3058 "aborting.\n");
3059 goto err_out_clear_quattro;
3060 }
3061
	hpreg_base = ioremap(hpreg_res, 0x8000);
	if (!hpreg_base) {
3063 printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
3064 goto err_out_free_res;
3065 }
3066
3067 for (i = 0; i < 6; i++) {
3068 if (macaddr[i] != 0)
3069 break;
3070 }
3071 if (i < 6) {
3072 for (i = 0; i < 6; i++)
3073 dev->dev_addr[i] = macaddr[i];
3074 macaddr[5]++;
3075 } else {
3076#ifdef CONFIG_SPARC
3077 const unsigned char *addr;
3078 int len;
3079
3080 if (qfe_slot != -1 &&
3081 (addr = of_get_property(dp, "local-mac-address", &len))
3082 != NULL &&
3083 len == 6) {
3084 memcpy(dev->dev_addr, addr, ETH_ALEN);
3085 } else {
3086 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
3087 }
3088#else
3089 get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
3090#endif
3091 }
3092
3093
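	/* The PCI HME exposes all of its register blocks through a single
	 * 32K BAR at fixed offsets.
	 */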
3094 hp->gregs = (hpreg_base + 0x0000UL);
3095 hp->etxregs = (hpreg_base + 0x2000UL);
3096 hp->erxregs = (hpreg_base + 0x4000UL);
3097 hp->bigmacregs = (hpreg_base + 0x6000UL);
3098 hp->tcvregs = (hpreg_base + 0x7000UL);
3099
3100#ifdef CONFIG_SPARC
3101 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
3102 if (hp->hm_revision == 0xff)
3103 hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
3104#else
3105
3106 hp->hm_revision = 0x20;
3107#endif
3108
3109
3110 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
3111 hp->happy_flags = HFLAG_20_21;
3112 else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
3113 hp->happy_flags = HFLAG_NOT_A0;
3114
3115 if (qp != NULL)
3116 hp->happy_flags |= HFLAG_QUATTRO;
3117
3118
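	/* And remember that this is the PCI flavour of the chip. */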
3119 hp->happy_flags |= HFLAG_PCI;
3120
3121#ifdef CONFIG_SPARC
3122
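	/* Assume PCI Happy Meals can handle all burst sizes. */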
3123 hp->happy_bursts = DMA_BURSTBITS;
3124#endif
3125
3126 hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3127 &hp->hblock_dvma, GFP_KERNEL);
	err = -ENOMEM;
3129 if (!hp->happy_block)
3130 goto err_out_iounmap;
3131
3132 hp->linkcheck = 0;
3133 hp->timer_state = asleep;
3134 hp->timer_ticks = 0;
3135
3136 init_timer(&hp->happy_timer);
3137
3138 hp->irq = pdev->irq;
3139 hp->dev = dev;
3140 dev->netdev_ops = &hme_netdev_ops;
3141 dev->watchdog_timeo = 5*HZ;
3142 dev->ethtool_ops = &hme_ethtool_ops;
3143
3144
3145 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
3146 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
3147
3148#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
3149
3150 hp->read_desc32 = pci_hme_read_desc32;
3151 hp->write_txd = pci_hme_write_txd;
3152 hp->write_rxd = pci_hme_write_rxd;
3153 hp->read32 = pci_hme_read32;
3154 hp->write32 = pci_hme_write32;
3155#endif
3156
3157
3158
3159
3160 spin_lock_irq(&hp->happy_lock);
3161 happy_meal_set_initial_advertisement(hp);
3162 spin_unlock_irq(&hp->happy_lock);
3163
3164 err = register_netdev(hp->dev);
3165 if (err) {
3166 printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
3167 "aborting.\n");
3168 goto err_out_iounmap;
3169 }
3170
3171 pci_set_drvdata(pdev, hp);
3172
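	/* Only the first port (slot 0) of a Quattro announces which PCI
	 * bridge the card sits behind.
	 */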
3173 if (!qfe_slot) {
3174 struct pci_dev *qpdev = qp->quattro_dev;
3175
3176 prom_name[0] = 0;
3177 if (!strncmp(dev->name, "eth", 3)) {
3178 int i = simple_strtoul(dev->name + 3, NULL, 10);
3179 sprintf(prom_name, "-%d", i + 3);
3180 }
3181 printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
3182 if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
3183 qpdev->device == PCI_DEVICE_ID_DEC_21153)
3184 printk("DEC 21153 PCI Bridge\n");
3185 else
3186 printk("unknown bridge %04x.%04x\n",
3187 qpdev->vendor, qpdev->device);
3188 }
3189
3190 if (qfe_slot != -1)
3191 printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
3192 dev->name, qfe_slot);
3193 else
		printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100baseT Ethernet ",
3195 dev->name);
3196
3197 printk("%pM\n", dev->dev_addr);
3198
3199 return 0;
3200
3201err_out_iounmap:
3202 iounmap(hp->gregs);
3203
3204err_out_free_res:
3205 pci_release_regions(pdev);
3206
3207err_out_clear_quattro:
3208 if (qp != NULL)
3209 qp->happy_meals[qfe_slot] = NULL;
3210
3211 free_netdev(dev);
3212
3213err_out:
3214 return err;
3215}
3216
3217static void happy_meal_pci_remove(struct pci_dev *pdev)
3218{
3219 struct happy_meal *hp = pci_get_drvdata(pdev);
3220 struct net_device *net_dev = hp->dev;
3221
3222 unregister_netdev(net_dev);
3223
3224 dma_free_coherent(hp->dma_dev, PAGE_SIZE,
3225 hp->happy_block, hp->hblock_dvma);
3226 iounmap(hp->gregs);
3227 pci_release_regions(hp->happy_dev);
3228
3229 free_netdev(net_dev);
3230}
3231
3232static const struct pci_device_id happymeal_pci_ids[] = {
3233 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3234 { }
3235};
3236
3237MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
3238
3239static struct pci_driver hme_pci_driver = {
3240 .name = "hme",
3241 .id_table = happymeal_pci_ids,
3242 .probe = happy_meal_pci_probe,
3243 .remove = happy_meal_pci_remove,
3244};
3245
3246static int __init happy_meal_pci_init(void)
3247{
3248 return pci_register_driver(&hme_pci_driver);
3249}
3250
3251static void happy_meal_pci_exit(void)
3252{
3253 pci_unregister_driver(&hme_pci_driver);
3254
3255 while (qfe_pci_list) {
3256 struct quattro *qfe = qfe_pci_list;
3257 struct quattro *next = qfe->next;
3258
3259 kfree(qfe);
3260
3261 qfe_pci_list = next;
3262 }
3263}
3264
3265#endif
3266
3267#ifdef CONFIG_SBUS
3268static const struct of_device_id hme_sbus_match[];
3269static int hme_sbus_probe(struct platform_device *op)
3270{
3271 const struct of_device_id *match;
3272 struct device_node *dp = op->dev.of_node;
3273 const char *model = of_get_property(dp, "model", NULL);
3274 int is_qfe;
3275
3276 match = of_match_device(hme_sbus_match, &op->dev);
3277 if (!match)
3278 return -EINVAL;
3279 is_qfe = (match->data != NULL);
3280
3281 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
3282 is_qfe = 1;
3283
3284 return happy_meal_sbus_probe_one(op, is_qfe);
3285}
3286
3287static int hme_sbus_remove(struct platform_device *op)
3288{
3289 struct happy_meal *hp = platform_get_drvdata(op);
3290 struct net_device *net_dev = hp->dev;
3291
3292 unregister_netdev(net_dev);
3293
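	/* Unmap every register block and free the descriptor block.  The
	 * shared Quattro IRQ, if any, is released later by
	 * happy_meal_sbus_exit().
	 */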
3296 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
3297 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
3298 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
3299 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
3300 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
3301 dma_free_coherent(hp->dma_dev,
3302 PAGE_SIZE,
3303 hp->happy_block,
3304 hp->hblock_dvma);
3305
3306 free_netdev(net_dev);
3307
3308 return 0;
3309}
3310
3311static const struct of_device_id hme_sbus_match[] = {
3312 {
3313 .name = "SUNW,hme",
3314 },
3315 {
3316 .name = "SUNW,qfe",
3317 .data = (void *) 1,
3318 },
3319 {
3320 .name = "qfe",
3321 .data = (void *) 1,
3322 },
3323 {},
3324};
3325
3326MODULE_DEVICE_TABLE(of, hme_sbus_match);
3327
3328static struct platform_driver hme_sbus_driver = {
3329 .driver = {
3330 .name = "hme",
3331 .of_match_table = hme_sbus_match,
3332 },
3333 .probe = hme_sbus_probe,
3334 .remove = hme_sbus_remove,
3335};
3336
3337static int __init happy_meal_sbus_init(void)
3338{
3339 int err;
3340
3341 err = platform_driver_register(&hme_sbus_driver);
3342 if (!err)
3343 err = quattro_sbus_register_irqs();
3344
3345 return err;
3346}
3347
3348static void happy_meal_sbus_exit(void)
3349{
3350 platform_driver_unregister(&hme_sbus_driver);
3351 quattro_sbus_free_irqs();
3352
3353 while (qfe_sbus_list) {
3354 struct quattro *qfe = qfe_sbus_list;
3355 struct quattro *next = qfe->next;
3356
3357 kfree(qfe);
3358
3359 qfe_sbus_list = next;
3360 }
3361}
3362#endif
3363
3364static int __init happy_meal_probe(void)
3365{
3366 int err = 0;
3367
3368#ifdef CONFIG_SBUS
3369 err = happy_meal_sbus_init();
3370#endif
3371#ifdef CONFIG_PCI
3372 if (!err) {
3373 err = happy_meal_pci_init();
3374#ifdef CONFIG_SBUS
3375 if (err)
3376 happy_meal_sbus_exit();
3377#endif
3378 }
3379#endif
3380
3381 return err;
3382}
3383
3384
3385static void __exit happy_meal_exit(void)
3386{
3387#ifdef CONFIG_SBUS
3388 happy_meal_sbus_exit();
3389#endif
3390#ifdef CONFIG_PCI
3391 happy_meal_pci_exit();
3392#endif
3393}
3394
3395module_init(happy_meal_probe);
3396module_exit(happy_meal_exit);
3397