/* sunhme.c: Sun Happy Meal Ethernet (HME) 10/100baseT half/full duplex
 *	     auto-switching driver for SBus and PCI "BigMac" interfaces.
 *
 * Copyright (C) David S. Miller (davem@davemloft.net)
 */
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/fcntl.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/in.h>
24#include <linux/slab.h>
25#include <linux/string.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ethtool.h>
29#include <linux/mii.h>
30#include <linux/crc32.h>
31#include <linux/random.h>
32#include <linux/errno.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/mm.h>
37#include <linux/bitops.h>
38#include <linux/dma-mapping.h>
39
40#include <asm/io.h>
41#include <asm/dma.h>
42#include <asm/byteorder.h>
43
44#ifdef CONFIG_SPARC
45#include <linux/of.h>
46#include <linux/of_device.h>
47#include <asm/idprom.h>
48#include <asm/openprom.h>
49#include <asm/oplib.h>
50#include <asm/prom.h>
51#include <asm/auxio.h>
52#endif
53#include <linux/uaccess.h>
54
55#include <asm/irq.h>
56
57#ifdef CONFIG_PCI
58#include <linux/pci.h>
59#endif
60
61#include "sunhme.h"
62
63#define DRV_NAME "sunhme"
64#define DRV_VERSION "3.10"
65#define DRV_RELDATE "August 26, 2008"
66#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
67
68static char version[] =
69 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
70
71MODULE_VERSION(DRV_VERSION);
72MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun Happy Meal Ethernet (HME) 10/100baseT ethernet driver");
74MODULE_LICENSE("GPL");
75
76static int macaddr[6];
77
78
79module_param_array(macaddr, int, NULL, 0);
80MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
81
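/* Quattro (QFE) cards put four HME channels behind a single SBus or PCI
 * slot; the lists below track the quattro instances found at probe time
 * so that all four ports can be tied to one card.
 */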
82#ifdef CONFIG_SBUS
83static struct quattro *qfe_sbus_list;
84#endif
85
86#ifdef CONFIG_PCI
87static struct quattro *qfe_pci_list;
88#endif
89
90#undef HMEDEBUG
91#undef SXDEBUG
92#undef RXDEBUG
93#undef TXDEBUG
94#undef TXLOGGING
95
96#ifdef TXLOGGING
97struct hme_tx_logent {
98 unsigned int tstamp;
99 int tx_new, tx_old;
100 unsigned int action;
101#define TXLOG_ACTION_IRQ 0x01
102#define TXLOG_ACTION_TXMIT 0x02
103#define TXLOG_ACTION_TBUSY 0x04
104#define TXLOG_ACTION_NBUFS 0x08
105 unsigned int status;
106};
107#define TX_LOG_LEN 128
108static struct hme_tx_logent tx_log[TX_LOG_LEN];
109static int txlog_cur_entry;
110static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
111{
112 struct hme_tx_logent *tlp;
113 unsigned long flags;
114
115 local_irq_save(flags);
116 tlp = &tx_log[txlog_cur_entry];
117 tlp->tstamp = (unsigned int)jiffies;
118 tlp->tx_new = hp->tx_new;
119 tlp->tx_old = hp->tx_old;
120 tlp->action = a;
121 tlp->status = s;
122 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
123 local_irq_restore(flags);
124}
125static __inline__ void tx_dump_log(void)
126{
127 int i, this;
128
129 this = txlog_cur_entry;
130 for (i = 0; i < TX_LOG_LEN; i++) {
131 printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
132 tx_log[this].tstamp,
133 tx_log[this].tx_new, tx_log[this].tx_old,
134 tx_log[this].action, tx_log[this].status);
135 this = (this + 1) & (TX_LOG_LEN - 1);
136 }
137}
138static __inline__ void tx_dump_ring(struct happy_meal *hp)
139{
140 struct hmeal_init_block *hb = hp->happy_block;
141 struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
142 int i;
143
144 for (i = 0; i < TX_RING_SIZE; i+=4) {
145 printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
146 i, i + 4,
147 le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
148 le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
149 le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
150 le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
151 }
152}
153#else
154#define tx_add_log(hp, a, s) do { } while(0)
155#define tx_dump_log() do { } while(0)
156#define tx_dump_ring(hp) do { } while(0)
157#endif
158
159#ifdef HMEDEBUG
160#define HMD(x) printk x
161#else
162#define HMD(x)
163#endif
164
165
166
167#ifdef AUTO_SWITCH_DEBUG
168#define ASD(x) printk x
169#else
170#define ASD(x)
171#endif
172
173#define DEFAULT_IPG0 16
174#define DEFAULT_IPG1 8
175#define DEFAULT_IPG2 4
176#define DEFAULT_JAMSIZE 4
177

/* NOTE: In the descriptor writes one must write the address member
 *	 _first_.  The card must not see the updated descriptor flags
 *	 (the OWN bit in particular) until the address is correct;
 *	 the dma_wmb() between the two stores enforces that ordering.
 */

185#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
186static void sbus_hme_write32(void __iomem *reg, u32 val)
187{
188 sbus_writel(val, reg);
189}
190
191static u32 sbus_hme_read32(void __iomem *reg)
192{
193 return sbus_readl(reg);
194}
195
196static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
197{
198 rxd->rx_addr = (__force hme32)addr;
199 dma_wmb();
200 rxd->rx_flags = (__force hme32)flags;
201}
202
203static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
204{
205 txd->tx_addr = (__force hme32)addr;
206 dma_wmb();
207 txd->tx_flags = (__force hme32)flags;
208}
209
210static u32 sbus_hme_read_desc32(hme32 *p)
211{
212 return (__force u32)*p;
213}
214
215static void pci_hme_write32(void __iomem *reg, u32 val)
216{
217 writel(val, reg);
218}
219
220static u32 pci_hme_read32(void __iomem *reg)
221{
222 return readl(reg);
223}
224
225static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
226{
227 rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
228 dma_wmb();
229 rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
230}
231
232static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
233{
234 txd->tx_addr = (__force hme32)cpu_to_le32(addr);
235 dma_wmb();
236 txd->tx_flags = (__force hme32)cpu_to_le32(flags);
237}
238
239static u32 pci_hme_read_desc32(hme32 *p)
240{
241 return le32_to_cpup((__le32 *)p);
242}
243
244#define hme_write32(__hp, __reg, __val) \
245 ((__hp)->write32((__reg), (__val)))
246#define hme_read32(__hp, __reg) \
247 ((__hp)->read32(__reg))
248#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
249 ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
250#define hme_write_txd(__hp, __txd, __flags, __addr) \
251 ((__hp)->write_txd((__txd), (__flags), (__addr)))
252#define hme_read_desc32(__hp, __p) \
253 ((__hp)->read_desc32(__p))
254#define hme_dma_map(__hp, __ptr, __size, __dir) \
255 ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir)))
256#define hme_dma_unmap(__hp, __addr, __size, __dir) \
257 ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir)))
258#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
259 ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)))
260#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
261 ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)))
262#else
263#ifdef CONFIG_SBUS
264
265#define hme_write32(__hp, __reg, __val) \
266 sbus_writel((__val), (__reg))
267#define hme_read32(__hp, __reg) \
268 sbus_readl(__reg)
269#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
270do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
271 dma_wmb(); \
272 (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
273} while(0)
274#define hme_write_txd(__hp, __txd, __flags, __addr) \
275do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
276 dma_wmb(); \
277 (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
278} while(0)
279#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
280#define hme_dma_map(__hp, __ptr, __size, __dir) \
281 dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
282#define hme_dma_unmap(__hp, __addr, __size, __dir) \
283 dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
	dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
	dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
288#else
289
290#define hme_write32(__hp, __reg, __val) \
291 writel((__val), (__reg))
292#define hme_read32(__hp, __reg) \
293 readl(__reg)
294#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
295do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
296 dma_wmb(); \
297 (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
298} while(0)
299#define hme_write_txd(__hp, __txd, __flags, __addr) \
300do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
301 dma_wmb(); \
302 (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
303} while(0)
304static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
305{
306 return le32_to_cpup((__le32 *)p);
307}
#define hme_dma_map(__hp, __ptr, __size, __dir) \
	dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
	dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
	dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
	dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
316#endif
317#endif
318
319
320
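/* Bit-bang MDIO helper: drive one data bit out on the MIF data pin and
 * pulse the MIF clock.
 */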
321static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
322{
323 hme_write32(hp, tregs + TCVR_BBDATA, bit);
324 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
325 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
326}
327
328#if 0
329static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
330{
331 u32 ret;
332
333 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
334 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
335 ret = hme_read32(hp, tregs + TCVR_CFG);
336 if (internal)
337 ret &= TCV_CFG_MDIO0;
338 else
339 ret &= TCV_CFG_MDIO1;
340
341 return ret;
342}
343#endif
344
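/* Clock one bit in from the PHY.  Which MDIO line of TCVR_CFG we sample
 * depends on whether the internal or the external transceiver is in use.
 */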
345static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
346{
347 u32 retval;
348
349 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
350 udelay(1);
351 retval = hme_read32(hp, tregs + TCVR_CFG);
352 if (internal)
353 retval &= TCV_CFG_MDIO0;
354 else
355 retval &= TCV_CFG_MDIO1;
356 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
357
358 return retval;
359}
360
361#define TCVR_FAILURE 0x80000000
362
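/* Read a PHY register by bit-banging the MDIO protocol: 32 preamble bits,
 * start and read-opcode bits, the 5-bit PHY and register addresses, then
 * 16 data bits clocked back in (turnaround and idle bits are discarded).
 */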
363static int happy_meal_bb_read(struct happy_meal *hp,
364 void __iomem *tregs, int reg)
365{
366 u32 tmp;
367 int retval = 0;
368 int i;
369
370 ASD(("happy_meal_bb_read: reg=%d ", reg));
371
372
373 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
374
375
376 for (i = 0; i < 32; i++)
377 BB_PUT_BIT(hp, tregs, 1);
378
379
380 BB_PUT_BIT(hp, tregs, 0);
381 BB_PUT_BIT(hp, tregs, 1);
382 BB_PUT_BIT(hp, tregs, 1);
383 BB_PUT_BIT(hp, tregs, 0);
384
385
386 tmp = hp->paddr & 0xff;
387 for (i = 4; i >= 0; i--)
388 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
389
390
391 tmp = (reg & 0xff);
392 for (i = 4; i >= 0; i--)
393 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
394
395
396 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
397
398
399 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
400 for (i = 15; i >= 0; i--)
401 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
402 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
403 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
404 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
405 ASD(("value=%x\n", retval));
406 return retval;
407}
408
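/* Write a PHY register by bit-banging: same preamble and addressing as a
 * read, but with the write opcode, an explicit turnaround, and the 16
 * data bits driven out by us.
 */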
409static void happy_meal_bb_write(struct happy_meal *hp,
410 void __iomem *tregs, int reg,
411 unsigned short value)
412{
413 u32 tmp;
414 int i;
415
416 ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
417
418
419 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
420
421
422 for (i = 0; i < 32; i++)
423 BB_PUT_BIT(hp, tregs, 1);
424
425
426 BB_PUT_BIT(hp, tregs, 0);
427 BB_PUT_BIT(hp, tregs, 1);
428 BB_PUT_BIT(hp, tregs, 0);
429 BB_PUT_BIT(hp, tregs, 1);
430
431
432 tmp = (hp->paddr & 0xff);
433 for (i = 4; i >= 0; i--)
434 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
435
436
437 tmp = (reg & 0xff);
438 for (i = 4; i >= 0; i--)
439 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
440
441
442 BB_PUT_BIT(hp, tregs, 1);
443 BB_PUT_BIT(hp, tregs, 0);
444
445 for (i = 15; i >= 0; i--)
446 BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
447
448
449 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
450}
451
452#define TCVR_READ_TRIES 16
453
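/* Read a transceiver register.  With frame mode enabled we build a MIF
 * frame (read opcode, PHY address in bits 27:23, register in bits 22:18)
 * and poll bit 16 for completion; otherwise fall back to the bit-bang
 * routines above.
 */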
454static int happy_meal_tcvr_read(struct happy_meal *hp,
455 void __iomem *tregs, int reg)
456{
457 int tries = TCVR_READ_TRIES;
458 int retval;
459
460 ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
461 if (hp->tcvr_type == none) {
462 ASD(("no transceiver, value=TCVR_FAILURE\n"));
463 return TCVR_FAILURE;
464 }
465
466 if (!(hp->happy_flags & HFLAG_FENABLE)) {
467 ASD(("doing bit bang\n"));
468 return happy_meal_bb_read(hp, tregs, reg);
469 }
470
471 hme_write32(hp, tregs + TCVR_FRAME,
472 (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
473 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
474 udelay(20);
475 if (!tries) {
476 printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
477 return TCVR_FAILURE;
478 }
479 retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
480 ASD(("value=%04x\n", retval));
481 return retval;
482}
483
484#define TCVR_WRITE_TRIES 16
485
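/* Write a transceiver register through the MIF frame interface, or by
 * bit-banging when frame mode is not enabled.
 */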
486static void happy_meal_tcvr_write(struct happy_meal *hp,
487 void __iomem *tregs, int reg,
488 unsigned short value)
489{
490 int tries = TCVR_WRITE_TRIES;
491
492 ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
493
494
495 if (!(hp->happy_flags & HFLAG_FENABLE)) {
496 happy_meal_bb_write(hp, tregs, reg, value);
497 return;
498 }
499
500
501 hme_write32(hp, tregs + TCVR_FRAME,
502 (FRAME_WRITE | (hp->paddr << 23) |
503 ((reg & 0xff) << 18) | (value & 0xffff)));
504 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
505 udelay(20);
506
507
508 if (!tries)
509 printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
510
511
512}
513

/* Auto-negotiation and link management.
 *
 * happy_meal_timer() below implements a small state machine, kept in
 * hp->timer_state and re-armed every 1.2 seconds until the link settles:
 *
 *   arbwait  - auto-negotiation has been started; wait up to ten ticks
 *              for BMSR_ANEGCOMPLETE.  On completion program the MAC via
 *              set_happy_link_modes() and move to lupwait; on timeout or
 *              if the link partner reported nothing usable, force a mode
 *              and move to ltrywait.
 *   lupwait  - negotiation finished; wait for BMSR_LSTATUS to appear.
 *   ltrywait - a forced mode has been written to BMCR.  If the link does
 *              not come up, try_next_permutation() drops first the
 *              full-duplex bit and then the 100Mbit bit; once nothing is
 *              left the chip is re-initialized and we report a probable
 *              cable problem.
 *   asleep   - the link is resolved and the timer is idle.
 */

546static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
547{
548 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
549
550
551
552
553 if (hp->sw_bmcr & BMCR_FULLDPLX) {
554 hp->sw_bmcr &= ~(BMCR_FULLDPLX);
555 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
556 return 0;
557 }
558
559
560 if (hp->sw_bmcr & BMCR_SPEED100) {
561 hp->sw_bmcr &= ~(BMCR_SPEED100);
562 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
563 return 0;
564 }
565
566
567 return -1;
568}
569
570static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
571{
572 printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
573 if (hp->tcvr_type == external)
574 printk("external ");
575 else
576 printk("internal ");
577 printk("transceiver at ");
578 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
579 if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
580 if (hp->sw_lpa & LPA_100FULL)
581 printk("100Mb/s, Full Duplex.\n");
582 else
583 printk("100Mb/s, Half Duplex.\n");
584 } else {
585 if (hp->sw_lpa & LPA_10FULL)
586 printk("10Mb/s, Full Duplex.\n");
587 else
588 printk("10Mb/s, Half Duplex.\n");
589 }
590}
591
592static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
593{
594 printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
595 if (hp->tcvr_type == external)
596 printk("external ");
597 else
598 printk("internal ");
599 printk("transceiver at ");
600 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
601 if (hp->sw_bmcr & BMCR_SPEED100)
602 printk("100Mb/s, ");
603 else
604 printk("10Mb/s, ");
605 if (hp->sw_bmcr & BMCR_FULLDPLX)
606 printk("Full Duplex.\n");
607 else
608 printk("Half Duplex.\n");
609}
610
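/* Program the BigMAC for the speed/duplex that was auto-negotiated (in
 * the arbwait state) or forced (any other state).  The transmitter must
 * be disabled while the full-duplex bit in BMAC_TXCFG is changed.
 * Returns non-zero if the link partner advertised nothing usable.
 */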
611static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
612{
613 int full;
614
615
616
617
618 if (hp->timer_state == arbwait) {
619 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
620 if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
621 goto no_response;
622 if (hp->sw_lpa & LPA_100FULL)
623 full = 1;
624 else if (hp->sw_lpa & LPA_100HALF)
625 full = 0;
626 else if (hp->sw_lpa & LPA_10FULL)
627 full = 1;
628 else
629 full = 0;
630 } else {
631
632 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
633 if (hp->sw_bmcr & BMCR_FULLDPLX)
634 full = 1;
635 else
636 full = 0;
637 }
638
639
640
641
642
643
644
645
646
647 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
648 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
649 ~(BIGMAC_TXCFG_ENABLE));
650 while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
651 barrier();
652 if (full) {
653 hp->happy_flags |= HFLAG_FULL;
654 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
655 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
656 BIGMAC_TXCFG_FULLDPLX);
657 } else {
658 hp->happy_flags &= ~(HFLAG_FULL);
659 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
660 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
661 ~(BIGMAC_TXCFG_FULLDPLX));
662 }
663 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
664 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
665 BIGMAC_TXCFG_ENABLE);
666 return 0;
667no_response:
668 return 1;
669}
670
671static int happy_meal_init(struct happy_meal *hp);
672
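/* Lucent PHYs do not have the DP83840 CSCONFIG register, so the
 * DP83840-specific workarounds in the link state machine must be skipped
 * for them.
 */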
673static int is_lucent_phy(struct happy_meal *hp)
674{
675 void __iomem *tregs = hp->tcvregs;
676 unsigned short mr2, mr3;
677 int ret = 0;
678
679 mr2 = happy_meal_tcvr_read(hp, tregs, 2);
680 mr3 = happy_meal_tcvr_read(hp, tregs, 3);
681 if ((mr2 & 0xffff) == 0x0180 &&
682 ((mr3 & 0xffff) >> 10) == 0x1d)
683 ret = 1;
684
685 return ret;
686}
687
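/* The link management timer, re-armed every 1.2 seconds while the link
 * is being brought up; see the state description above
 * try_next_permutation().
 */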
688static void happy_meal_timer(struct timer_list *t)
689{
690 struct happy_meal *hp = from_timer(hp, t, happy_timer);
691 void __iomem *tregs = hp->tcvregs;
692 int restart_timer = 0;
693
694 spin_lock_irq(&hp->happy_lock);
695
696 hp->timer_ticks++;
697 switch(hp->timer_state) {
698 case arbwait:
699
700
701
702 if (hp->timer_ticks >= 10) {
703
704 do_force_mode:
705 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
706 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
707 hp->dev->name);
708 hp->sw_bmcr = BMCR_SPEED100;
709 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
710
711 if (!is_lucent_phy(hp)) {
712
713
714
715
716 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
717 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
718 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
719 }
720 hp->timer_state = ltrywait;
721 hp->timer_ticks = 0;
722 restart_timer = 1;
723 } else {
724
725 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
726 if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
727 int ret;
728
729
730 ret = set_happy_link_modes(hp, tregs);
731 if (ret) {
732
733
734
735
736
737
738 goto do_force_mode;
739 }
740
741
742 hp->timer_state = lupwait;
743 restart_timer = 1;
744 } else {
745 restart_timer = 1;
746 }
747 }
748 break;
749
750 case lupwait:
751
752
753
754
755
756 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
757 if (hp->sw_bmsr & BMSR_LSTATUS) {
758
759
760
761 display_link_mode(hp, tregs);
762 hp->timer_state = asleep;
763 restart_timer = 0;
764 } else {
765 if (hp->timer_ticks >= 10) {
766 printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
767 "not completely up.\n", hp->dev->name);
768 hp->timer_ticks = 0;
769 restart_timer = 1;
770 } else {
771 restart_timer = 1;
772 }
773 }
774 break;
775
776 case ltrywait:
777
778
779
780
781
782 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
783 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
784 if (hp->timer_ticks == 1) {
785 if (!is_lucent_phy(hp)) {
786
787
788
789 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
790 happy_meal_tcvr_write(hp, tregs,
791 DP83840_CSCONFIG, hp->sw_csconfig);
792 }
793 restart_timer = 1;
794 break;
795 }
796 if (hp->timer_ticks == 2) {
797 if (!is_lucent_phy(hp)) {
798 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
799 happy_meal_tcvr_write(hp, tregs,
800 DP83840_CSCONFIG, hp->sw_csconfig);
801 }
802 restart_timer = 1;
803 break;
804 }
805 if (hp->sw_bmsr & BMSR_LSTATUS) {
806
807 display_forced_link_mode(hp, tregs);
808 set_happy_link_modes(hp, tregs);
809 hp->timer_state = asleep;
810 restart_timer = 0;
811 } else {
812 if (hp->timer_ticks >= 4) {
813 int ret;
814
815 ret = try_next_permutation(hp, tregs);
816 if (ret == -1) {
817
818
819
820
821
822 printk(KERN_NOTICE "%s: Link down, cable problem?\n",
823 hp->dev->name);
824
825 ret = happy_meal_init(hp);
826 if (ret) {
827
828 printk(KERN_ERR "%s: Error, cannot re-init the "
829 "Happy Meal.\n", hp->dev->name);
830 }
831 goto out;
832 }
833 if (!is_lucent_phy(hp)) {
834 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
835 DP83840_CSCONFIG);
836 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
837 happy_meal_tcvr_write(hp, tregs,
838 DP83840_CSCONFIG, hp->sw_csconfig);
839 }
840 hp->timer_ticks = 0;
841 restart_timer = 1;
842 } else {
843 restart_timer = 1;
844 }
845 }
846 break;
847
848 case asleep:
849 default:
850
851 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
852 hp->dev->name);
853 restart_timer = 0;
854 hp->timer_ticks = 0;
855 hp->timer_state = asleep;
856 break;
857 }
858
859 if (restart_timer) {
860 hp->happy_timer.expires = jiffies + ((12 * HZ)/10);
861 add_timer(&hp->happy_timer);
862 }
863
864out:
865 spin_unlock_irq(&hp->happy_lock);
866}
867
868#define TX_RESET_TRIES 32
869#define RX_RESET_TRIES 32
870
871
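/* Reset the BigMAC transmitter/receiver blocks.  The reset bits are
 * self-clearing, so poll them (a bounded number of times) until the
 * hardware reports completion.
 */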
872static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
873{
874 int tries = TX_RESET_TRIES;
875
876 HMD(("happy_meal_tx_reset: reset, "));
877
878
879 hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
880 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
881 udelay(20);
882
883
884 if (!tries)
885 printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
886
887
888 HMD(("done\n"));
889}
890
891
892static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
893{
894 int tries = RX_RESET_TRIES;
895
896 HMD(("happy_meal_rx_reset: reset, "));
897
898
899 hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
900 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
901 udelay(20);
902
903
904 if (!tries)
905 printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
906
907
908 HMD(("done\n"));
909}
910
911#define STOP_TRIES 16
912
913
914static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
915{
916 int tries = STOP_TRIES;
917
918 HMD(("happy_meal_stop: reset, "));
919
920
921 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
922 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
923 udelay(20);
924
925
926 if (!tries)
927 printk(KERN_ERR "happy meal: Fry guys.");
928
929
930 HMD(("done\n"));
931}
932
933
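/* Fold the BigMAC error counters into the netdev statistics and zero the
 * hardware counters.
 */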
934static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
935{
936 struct net_device_stats *stats = &hp->dev->stats;
937
938 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
939 hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
940
941 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
942 hme_write32(hp, bregs + BMAC_UNALECTR, 0);
943
944 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
945 hme_write32(hp, bregs + BMAC_GLECTR, 0);
946
947 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
948
949 stats->collisions +=
950 (hme_read32(hp, bregs + BMAC_EXCTR) +
951 hme_read32(hp, bregs + BMAC_LTCTR));
952 hme_write32(hp, bregs + BMAC_EXCTR, 0);
953 hme_write32(hp, bregs + BMAC_LTCTR, 0);
954}
955
956
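/* Stop the MIF from auto-polling the PHY: mask MIF interrupts, clear the
 * poll-enable bit, and give the hardware a moment to settle.
 */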
957static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
958{
959 ASD(("happy_meal_poll_stop: "));
960
961
962 if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
963 (HFLAG_POLLENABLE | HFLAG_POLL)) {
964 HMD(("not polling, return\n"));
965 return;
966 }
967
968
969 ASD(("were polling, mif ints off, "));
970 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
971
972
973 ASD(("polling off, "));
974 hme_write32(hp, tregs + TCVR_CFG,
975 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
976
977
978 hp->happy_flags &= ~(HFLAG_POLL);
979
980
981 udelay(200);
982 ASD(("done\n"));
983}
984
985
986
987
988#define TCVR_RESET_TRIES 16
989#define TCVR_UNISOLATE_TRIES 32
990
991
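/* Select the transceiver (flipping TCV_CFG_PSELECT as needed), issue a
 * BMCR reset and wait for it to self-clear, un-isolate the PHY, and cache
 * its ID, status and advertisement registers.  Returns -1 on failure.
 */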
992static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
993{
994 u32 tconfig;
995 int result, tries = TCVR_RESET_TRIES;
996
997 tconfig = hme_read32(hp, tregs + TCVR_CFG);
	ASD(("happy_meal_tcvr_reset: tcfg<%08x> ", tconfig));
999 if (hp->tcvr_type == external) {
1000 ASD(("external<"));
1001 hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
1002 hp->tcvr_type = internal;
1003 hp->paddr = TCV_PADDR_ITX;
1004 ASD(("ISOLATE,"));
1005 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1006 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1007 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1008 if (result == TCVR_FAILURE) {
1009 ASD(("phyread_fail>\n"));
1010 return -1;
1011 }
1012 ASD(("phyread_ok,PSELECT>"));
1013 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1014 hp->tcvr_type = external;
1015 hp->paddr = TCV_PADDR_ETX;
1016 } else {
1017 if (tconfig & TCV_CFG_MDIO1) {
1018 ASD(("internal<PSELECT,"));
1019 hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
1020 ASD(("ISOLATE,"));
1021 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1022 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1023 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1024 if (result == TCVR_FAILURE) {
1025 ASD(("phyread_fail>\n"));
1026 return -1;
1027 }
1028 ASD(("phyread_ok,~PSELECT>"));
1029 hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
1030 hp->tcvr_type = internal;
1031 hp->paddr = TCV_PADDR_ITX;
1032 }
1033 }
1034
1035 ASD(("BMCR_RESET "));
1036 happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
1037
1038 while (--tries) {
1039 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1040 if (result == TCVR_FAILURE)
1041 return -1;
1042 hp->sw_bmcr = result;
1043 if (!(result & BMCR_RESET))
1044 break;
1045 udelay(20);
1046 }
1047 if (!tries) {
1048 ASD(("BMCR RESET FAILED!\n"));
1049 return -1;
1050 }
1051 ASD(("RESET_OK\n"));
1052
1053
1054 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1055 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1056 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1057 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1058
1059 ASD(("UNISOLATE"));
1060 hp->sw_bmcr &= ~(BMCR_ISOLATE);
1061 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1062
1063 tries = TCVR_UNISOLATE_TRIES;
1064 while (--tries) {
1065 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1066 if (result == TCVR_FAILURE)
1067 return -1;
1068 if (!(result & BMCR_ISOLATE))
1069 break;
1070 udelay(20);
1071 }
1072 if (!tries) {
1073 ASD((" FAILED!\n"));
1074 return -1;
1075 }
1076 ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
1077 if (!is_lucent_phy(hp)) {
1078 result = happy_meal_tcvr_read(hp, tregs,
1079 DP83840_CSCONFIG);
1080 happy_meal_tcvr_write(hp, tregs,
1081 DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
1082 }
1083 return 0;
1084}
1085
1086
1087
1088
1089
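/* Work out whether an internal or an external transceiver is present by
 * looking at the MDIO lines in TCVR_CFG, and set the PHY address and
 * port-select bit to match.
 */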
1090static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
1091{
1092 unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
1093
1094 ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
1095 if (hp->happy_flags & HFLAG_POLL) {
1096
1097 ASD(("<polling> "));
1098 if (hp->tcvr_type == internal) {
1099 if (tconfig & TCV_CFG_MDIO1) {
1100 ASD(("<internal> <poll stop> "));
1101 happy_meal_poll_stop(hp, tregs);
1102 hp->paddr = TCV_PADDR_ETX;
1103 hp->tcvr_type = external;
1104 ASD(("<external>\n"));
1105 tconfig &= ~(TCV_CFG_PENABLE);
1106 tconfig |= TCV_CFG_PSELECT;
1107 hme_write32(hp, tregs + TCVR_CFG, tconfig);
1108 }
1109 } else {
1110 if (hp->tcvr_type == external) {
1111 ASD(("<external> "));
1112 if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
1113 ASD(("<poll stop> "));
1114 happy_meal_poll_stop(hp, tregs);
1115 hp->paddr = TCV_PADDR_ITX;
1116 hp->tcvr_type = internal;
1117 ASD(("<internal>\n"));
1118 hme_write32(hp, tregs + TCVR_CFG,
1119 hme_read32(hp, tregs + TCVR_CFG) &
1120 ~(TCV_CFG_PSELECT));
1121 }
1122 ASD(("\n"));
1123 } else {
1124 ASD(("<none>\n"));
1125 }
1126 }
1127 } else {
1128 u32 reread = hme_read32(hp, tregs + TCVR_CFG);
1129
1130
1131 ASD(("<not polling> "));
1132 if (reread & TCV_CFG_MDIO1) {
1133 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1134 hp->paddr = TCV_PADDR_ETX;
1135 hp->tcvr_type = external;
1136 ASD(("<external>\n"));
1137 } else {
1138 if (reread & TCV_CFG_MDIO0) {
1139 hme_write32(hp, tregs + TCVR_CFG,
1140 tconfig & ~(TCV_CFG_PSELECT));
1141 hp->paddr = TCV_PADDR_ITX;
1142 hp->tcvr_type = internal;
1143 ASD(("<internal>\n"));
1144 } else {
1145 printk(KERN_ERR "happy meal: Transceiver and a coke please.");
1146 hp->tcvr_type = none;
1147 ASD(("<none>\n"));
1148 }
1149 }
1150 }
1151}
1152

/* The chip wants its receive buffers aligned on a 64 byte boundary.
 * The helper below is a minimal sketch of the allocator used by the RX
 * ring setup and refill paths: it over-allocates by 64 bytes and then
 * reserves whatever head room is needed to reach that alignment.
 */
#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))

static inline struct sk_buff *happy_meal_alloc_skb(unsigned int length,
						   gfp_t gfp_flags)
{
	struct sk_buff *skb;

	skb = alloc_skb(length + 64, gfp_flags);
	if (skb) {
		int offset = (int) ALIGNED_RX_SKB_ADDR(skb->data);

		if (offset)
			skb_reserve(skb, offset);
	}
	return skb;
}

/* hp->happy_lock must be held */
1197static void happy_meal_clean_rings(struct happy_meal *hp)
1198{
1199 int i;
1200
1201 for (i = 0; i < RX_RING_SIZE; i++) {
1202 if (hp->rx_skbs[i] != NULL) {
1203 struct sk_buff *skb = hp->rx_skbs[i];
1204 struct happy_meal_rxd *rxd;
1205 u32 dma_addr;
1206
1207 rxd = &hp->happy_block->happy_meal_rxd[i];
1208 dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
1209 dma_unmap_single(hp->dma_dev, dma_addr,
1210 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1211 dev_kfree_skb_any(skb);
1212 hp->rx_skbs[i] = NULL;
1213 }
1214 }
1215
1216 for (i = 0; i < TX_RING_SIZE; i++) {
1217 if (hp->tx_skbs[i] != NULL) {
1218 struct sk_buff *skb = hp->tx_skbs[i];
1219 struct happy_meal_txd *txd;
1220 u32 dma_addr;
1221 int frag;
1222
1223 hp->tx_skbs[i] = NULL;
1224
1225 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1226 txd = &hp->happy_block->happy_meal_txd[i];
1227 dma_addr = hme_read_desc32(hp, &txd->tx_addr);
1228 if (!frag)
1229 dma_unmap_single(hp->dma_dev, dma_addr,
1230 (hme_read_desc32(hp, &txd->tx_flags)
1231 & TXFLAG_SIZE),
1232 DMA_TO_DEVICE);
1233 else
1234 dma_unmap_page(hp->dma_dev, dma_addr,
1235 (hme_read_desc32(hp, &txd->tx_flags)
1236 & TXFLAG_SIZE),
1237 DMA_TO_DEVICE);
1238
1239 if (frag != skb_shinfo(skb)->nr_frags)
1240 i++;
1241 }
1242
1243 dev_kfree_skb_any(skb);
1244 }
1245 }
1246}
1247
1248
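/* hp->happy_lock must be held: reset the ring indices, release anything
 * the rings still reference, and refill the RX ring with freshly mapped
 * receive buffers.
 */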
1249static void happy_meal_init_rings(struct happy_meal *hp)
1250{
1251 struct hmeal_init_block *hb = hp->happy_block;
1252 int i;
1253
1254 HMD(("happy_meal_init_rings: counters to zero, "));
1255 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1256
1257
1258 HMD(("clean, "));
1259 happy_meal_clean_rings(hp);
1260
1261
1262 HMD(("init rxring, "));
1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb;
1265 u32 mapping;
1266
1267 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1268 if (!skb) {
1269 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1270 continue;
1271 }
1272 hp->rx_skbs[i] = skb;
1273
1274
1275 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1276 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1277 DMA_FROM_DEVICE);
1278 if (dma_mapping_error(hp->dma_dev, mapping)) {
1279 dev_kfree_skb_any(skb);
1280 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1281 continue;
1282 }
1283 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1284 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1285 mapping);
1286 skb_reserve(skb, RX_OFFSET);
1287 }
1288
1289 HMD(("init txring, "));
1290 for (i = 0; i < TX_RING_SIZE; i++)
1291 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1292
1293 HMD(("done\n"));
1294}
1295
1296
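/* Start IEEE 802.3u auto-negotiation, advertising whatever the PHY's BMSR
 * says it can do, or force a fixed speed/duplex if ethtool requested one.
 * Either way the link timer is armed to follow up on the outcome.
 */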
1297static void
1298happy_meal_begin_auto_negotiation(struct happy_meal *hp,
1299 void __iomem *tregs,
1300 const struct ethtool_link_ksettings *ep)
1301{
1302 int timeout;
1303
1304
1305 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1306 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1307 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1308 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1309
1310
1311
1312 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1313 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1314
1315 if (hp->sw_bmsr & BMSR_10HALF)
1316 hp->sw_advertise |= (ADVERTISE_10HALF);
1317 else
1318 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1319
1320 if (hp->sw_bmsr & BMSR_10FULL)
1321 hp->sw_advertise |= (ADVERTISE_10FULL);
1322 else
1323 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1324 if (hp->sw_bmsr & BMSR_100HALF)
1325 hp->sw_advertise |= (ADVERTISE_100HALF);
1326 else
1327 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1328 if (hp->sw_bmsr & BMSR_100FULL)
1329 hp->sw_advertise |= (ADVERTISE_100FULL);
1330 else
1331 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1332 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1333
1334
1335
1336
1337
1338
1339
1340#ifdef AUTO_SWITCH_DEBUG
1341 ASD(("%s: Advertising [ ", hp->dev->name));
1342 if (hp->sw_advertise & ADVERTISE_10HALF)
1343 ASD(("10H "));
1344 if (hp->sw_advertise & ADVERTISE_10FULL)
1345 ASD(("10F "));
1346 if (hp->sw_advertise & ADVERTISE_100HALF)
1347 ASD(("100H "));
1348 if (hp->sw_advertise & ADVERTISE_100FULL)
1349 ASD(("100F "));
1350#endif
1351
1352
1353 hp->sw_bmcr |= BMCR_ANENABLE;
1354 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1355
1356
1357 hp->sw_bmcr |= BMCR_ANRESTART;
1358 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1359
1360
1361
1362 timeout = 64;
1363 while (--timeout) {
1364 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1365 if (!(hp->sw_bmcr & BMCR_ANRESTART))
1366 break;
1367 udelay(10);
1368 }
1369 if (!timeout) {
1370 printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
1371 "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
1372 printk(KERN_NOTICE "%s: Performing force link detection.\n",
1373 hp->dev->name);
1374 goto force_link;
1375 } else {
1376 hp->timer_state = arbwait;
1377 }
1378 } else {
1379force_link:
1380
1381
1382
1383
1384
1385
1386
1387
1388 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1389 hp->sw_bmcr = BMCR_SPEED100;
1390 } else {
1391 if (ep->base.speed == SPEED_100)
1392 hp->sw_bmcr = BMCR_SPEED100;
1393 else
1394 hp->sw_bmcr = 0;
1395 if (ep->base.duplex == DUPLEX_FULL)
1396 hp->sw_bmcr |= BMCR_FULLDPLX;
1397 }
1398 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1399
1400 if (!is_lucent_phy(hp)) {
1401
1402
1403
1404
1405 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
1406 DP83840_CSCONFIG);
1407 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
1408 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
1409 hp->sw_csconfig);
1410 }
1411 hp->timer_state = ltrywait;
1412 }
1413
1414 hp->timer_ticks = 0;
1415 hp->happy_timer.expires = jiffies + (12 * HZ)/10;
1416 add_timer(&hp->happy_timer);
1417}
1418
1419
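/* hp->happy_lock must be held: full (re)initialization of the chip.
 * Stop everything, rebuild the rings, reset the transceiver, reload the
 * MAC address and multicast filter, program the DMA burst sizes, then
 * re-enable TX/RX and kick off auto-negotiation.
 */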
1420static int happy_meal_init(struct happy_meal *hp)
1421{
1422 void __iomem *gregs = hp->gregs;
1423 void __iomem *etxregs = hp->etxregs;
1424 void __iomem *erxregs = hp->erxregs;
1425 void __iomem *bregs = hp->bigmacregs;
1426 void __iomem *tregs = hp->tcvregs;
1427 u32 regtmp, rxcfg;
1428 unsigned char *e = &hp->dev->dev_addr[0];
1429
1430
1431 del_timer(&hp->happy_timer);
1432
1433 HMD(("happy_meal_init: happy_flags[%08x] ",
1434 hp->happy_flags));
1435 if (!(hp->happy_flags & HFLAG_INIT)) {
1436 HMD(("set HFLAG_INIT, "));
1437 hp->happy_flags |= HFLAG_INIT;
1438 happy_meal_get_counters(hp, bregs);
1439 }
1440
1441
1442 HMD(("to happy_meal_poll_stop\n"));
1443 happy_meal_poll_stop(hp, tregs);
1444
1445
1446 HMD(("happy_meal_init: to happy_meal_stop\n"));
1447 happy_meal_stop(hp, gregs);
1448
1449
1450 HMD(("happy_meal_init: to happy_meal_init_rings\n"));
1451 happy_meal_init_rings(hp);
1452
1453
1454 HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
1455 hme_read32(hp, tregs + TCVR_IMASK)));
1456 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1457
1458
1459 if (hp->happy_flags & HFLAG_FENABLE) {
1460 HMD(("use frame old[%08x], ",
1461 hme_read32(hp, tregs + TCVR_CFG)));
1462 hme_write32(hp, tregs + TCVR_CFG,
1463 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1464 } else {
1465 HMD(("use bitbang old[%08x], ",
1466 hme_read32(hp, tregs + TCVR_CFG)));
1467 hme_write32(hp, tregs + TCVR_CFG,
1468 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1469 }
1470
1471
1472 HMD(("to happy_meal_transceiver_check\n"));
1473 happy_meal_transceiver_check(hp, tregs);
1474
1475
1476 HMD(("happy_meal_init: "));
1477 switch(hp->tcvr_type) {
1478 case none:
1479
1480 HMD(("AAIEEE no transceiver type, EAGAIN"));
1481 return -EAGAIN;
1482
1483 case internal:
1484
1485 HMD(("internal, using MII, "));
1486 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1487 break;
1488
1489 case external:
1490
1491 HMD(("external, disable MII, "));
1492 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1493 break;
1494 }
1495
1496 if (happy_meal_tcvr_reset(hp, tregs))
1497 return -EAGAIN;
1498
1499
1500 HMD(("tx/rx reset, "));
1501 happy_meal_tx_reset(hp, bregs);
1502 happy_meal_rx_reset(hp, bregs);
1503
1504
1505 HMD(("jsize/ipg1/ipg2, "));
1506 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1507 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1508 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1509
1510
1511 HMD(("rseed/macaddr, "));
1512
1513
1514 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1515
1516 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1517 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1518 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
1519
1520 HMD(("htable, "));
1521 if ((hp->dev->flags & IFF_ALLMULTI) ||
1522 (netdev_mc_count(hp->dev) > 64)) {
1523 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1524 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1525 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1526 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1527 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1528 u16 hash_table[4];
1529 struct netdev_hw_addr *ha;
1530 u32 crc;
1531
1532 memset(hash_table, 0, sizeof(hash_table));
1533 netdev_for_each_mc_addr(ha, hp->dev) {
1534 crc = ether_crc_le(6, ha->addr);
1535 crc >>= 26;
1536 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1537 }
1538 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1539 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1540 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1541 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1542 } else {
1543 hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1544 hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1545 hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1546 hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1547 }
1548
1549
1550 HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
1551 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1552 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
1553 hme_write32(hp, erxregs + ERX_RING,
1554 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1555 hme_write32(hp, etxregs + ETX_RING,
1556 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1557
1558
1559
1560
1561
1562
1563 if (hme_read32(hp, erxregs + ERX_RING) !=
1564 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1565 hme_write32(hp, erxregs + ERX_RING,
1566 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1567 | 0x4);
1568
1569
1570 HMD(("happy_meal_init: old[%08x] bursts<",
1571 hme_read32(hp, gregs + GREG_CFG)));
1572
1573#ifndef CONFIG_SPARC
1574
1575 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1576#else
1577 if ((hp->happy_bursts & DMA_BURST64) &&
1578 ((hp->happy_flags & HFLAG_PCI) != 0
1579#ifdef CONFIG_SBUS
1580 || sbus_can_burst64()
1581#endif
1582 || 0)) {
1583 u32 gcfg = GREG_CFG_BURST64;
1584
1585
1586
1587
1588
1589#ifdef CONFIG_SBUS
1590 if ((hp->happy_flags & HFLAG_PCI) == 0) {
1591 struct platform_device *op = hp->happy_dev;
1592 if (sbus_can_dma_64bit()) {
1593 sbus_set_sbus64(&op->dev,
1594 hp->happy_bursts);
1595 gcfg |= GREG_CFG_64BIT;
1596 }
1597 }
1598#endif
1599
1600 HMD(("64>"));
1601 hme_write32(hp, gregs + GREG_CFG, gcfg);
1602 } else if (hp->happy_bursts & DMA_BURST32) {
1603 HMD(("32>"));
1604 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1605 } else if (hp->happy_bursts & DMA_BURST16) {
1606 HMD(("16>"));
1607 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1608 } else {
1609 HMD(("XXX>"));
1610 hme_write32(hp, gregs + GREG_CFG, 0);
1611 }
1612#endif
1613
1614
1615 HMD((", enable global interrupts, "));
1616 hme_write32(hp, gregs + GREG_IMASK,
1617 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1618 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1619
1620
1621 HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
1622 hme_read32(hp, etxregs + ETX_RSIZE)));
1623 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1624
1625
1626 HMD(("tx dma enable old[%08x], ",
1627 hme_read32(hp, etxregs + ETX_CFG)));
1628 hme_write32(hp, etxregs + ETX_CFG,
1629 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
1630
1631
1632
1633
1634
1635
1636 HMD(("erx regs bug old[%08x]\n",
1637 hme_read32(hp, erxregs + ERX_CFG)));
1638 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1639 regtmp = hme_read32(hp, erxregs + ERX_CFG);
1640 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1641 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1642 printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
1643 printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
1644 ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1645
1646 }
1647
1648
1649 HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
1650 hme_read32(hp, bregs + BMAC_RXCFG)));
1651 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1652 if (hp->dev->flags & IFF_PROMISC)
1653 rxcfg |= BIGMAC_RXCFG_PMISC;
1654 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1655
1656
1657 udelay(10);
1658
1659
1660 HMD(("BIGMAC init, "));
1661 regtmp = 0;
1662 if (hp->happy_flags & HFLAG_FULL)
1663 regtmp |= BIGMAC_TXCFG_FULLDPLX;
1664
1665
1666
1667
1668 hme_write32(hp, bregs + BMAC_TXCFG, regtmp );
1669
1670
1671 hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1672
1673
1674 regtmp = BIGMAC_XCFG_ODENABLE;
1675
1676
1677 if (hp->happy_flags & HFLAG_LANCE)
1678 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1679
1680
1681 if (hp->tcvr_type == external)
1682 regtmp |= BIGMAC_XCFG_MIIDISAB;
1683
1684 HMD(("XIF config old[%08x], ",
1685 hme_read32(hp, bregs + BMAC_XIFCFG)));
1686 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1687
1688
1689 HMD(("tx old[%08x] and rx [%08x] ON!\n",
1690 hme_read32(hp, bregs + BMAC_TXCFG),
1691 hme_read32(hp, bregs + BMAC_RXCFG)));
1692
1693
1694 hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
1695 hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
1696
1697 hme_write32(hp, bregs + BMAC_TXCFG,
1698 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1699 hme_write32(hp, bregs + BMAC_RXCFG,
1700 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1701
1702
1703 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1704
1705
1706 return 0;
1707}
1708
1709
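/* Quiesce the chip just enough to talk to the PHY and program a sane
 * initial advertisement register, without bringing the interface up.
 */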
1710static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1711{
1712 void __iomem *tregs = hp->tcvregs;
1713 void __iomem *bregs = hp->bigmacregs;
1714 void __iomem *gregs = hp->gregs;
1715
1716 happy_meal_stop(hp, gregs);
1717 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1718 if (hp->happy_flags & HFLAG_FENABLE)
1719 hme_write32(hp, tregs + TCVR_CFG,
1720 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1721 else
1722 hme_write32(hp, tregs + TCVR_CFG,
1723 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1724 happy_meal_transceiver_check(hp, tregs);
1725 switch(hp->tcvr_type) {
1726 case none:
1727 return;
1728 case internal:
1729 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1730 break;
1731 case external:
1732 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1733 break;
1734 }
1735 if (happy_meal_tcvr_reset(hp, tregs))
1736 return;
1737
1738
1739 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1740 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1741
1742
1743 if (hp->sw_bmsr & BMSR_10HALF)
1744 hp->sw_advertise |= (ADVERTISE_10HALF);
1745 else
1746 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1747
1748 if (hp->sw_bmsr & BMSR_10FULL)
1749 hp->sw_advertise |= (ADVERTISE_10FULL);
1750 else
1751 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1752 if (hp->sw_bmsr & BMSR_100HALF)
1753 hp->sw_advertise |= (ADVERTISE_100HALF);
1754 else
1755 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1756 if (hp->sw_bmsr & BMSR_100FULL)
1757 hp->sw_advertise |= (ADVERTISE_100FULL);
1758 else
1759 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1760
1761
1762 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1763}

/* Once happy_meal_interrupt() has read GREG_STAT the error bits are
 * cleared by the hardware, so everything we need to know is in the
 * status value passed in here.  Returns non-zero if the chip had to be
 * fully re-initialized.
 *
 * hp->happy_lock must be held.
 */
1770static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1771{
1772 int reset = 0;
1773
1774
1775 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1776 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1777 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1778 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1779 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1780 GREG_STAT_SLVPERR))
1781 printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
1782 hp->dev->name, status);
1783
1784 if (status & GREG_STAT_RFIFOVF) {
1785
1786
1787 printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
1788 }
1789
1790 if (status & GREG_STAT_STSTERR) {
1791
1792 printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
1793 reset = 1;
1794 }
1795
1796 if (status & GREG_STAT_TFIFO_UND) {
1797
1798 printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
1799 hp->dev->name);
1800 reset = 1;
1801 }
1802
1803 if (status & GREG_STAT_MAXPKTERR) {
1804
1805
1806
1807 printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
1808 reset = 1;
1809 }
1810
1811 if (status & GREG_STAT_NORXD) {
1812
1813
1814
1815
1816
1817 printk(KERN_INFO "%s: Happy Meal out of receive "
1818 "descriptors, packet dropped.\n",
1819 hp->dev->name);
1820 }
1821
1822 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1823
1824 printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
1825 if (status & GREG_STAT_RXERR)
1826 printk("GenericError ");
1827 if (status & GREG_STAT_RXPERR)
1828 printk("ParityError ");
1829 if (status & GREG_STAT_RXTERR)
1830 printk("RxTagBotch ");
1831 printk("]\n");
1832 reset = 1;
1833 }
1834
1835 if (status & GREG_STAT_EOPERR) {
1836
1837
1838
1839 printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
1840 hp->dev->name);
1841 reset = 1;
1842 }
1843
1844 if (status & GREG_STAT_MIFIRQ) {
1845
1846 printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
1847 }
1848
1849 if (status &
1850 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1851
1852 printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
1853 if (status & GREG_STAT_TXEACK)
1854 printk("GenericError ");
1855 if (status & GREG_STAT_TXLERR)
1856 printk("LateError ");
1857 if (status & GREG_STAT_TXPERR)
1858 printk("ParityError ");
1859 if (status & GREG_STAT_TXTERR)
1860 printk("TagBotch ");
1861 printk("]\n");
1862 reset = 1;
1863 }
1864
1865 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
1866
1867
1868
1869 printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
1870 hp->dev->name,
1871 (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1872 reset = 1;
1873 }
1874
1875 if (reset) {
1876 printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
1877 happy_meal_init(hp);
1878 return 1;
1879 }
1880 return 0;
1881}
1882
1883
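/* The MIF signalled a PHY status change: pick the best mode the link
 * partner advertises, update BMCR to match, and stop MIF polling.
 */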
1884static void happy_meal_mif_interrupt(struct happy_meal *hp)
1885{
1886 void __iomem *tregs = hp->tcvregs;
1887
1888 printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
1889 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1890 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
1891
1892
1893 if (hp->sw_lpa & LPA_100FULL) {
1894 printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
1895 hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
1896 } else if (hp->sw_lpa & LPA_100HALF) {
1897 printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
1898 hp->sw_bmcr |= BMCR_SPEED100;
1899 } else if (hp->sw_lpa & LPA_10FULL) {
1900 printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
1901 hp->sw_bmcr |= BMCR_FULLDPLX;
1902 } else {
1903 printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
1904 }
1905 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1906
1907
1908 happy_meal_poll_stop(hp, tregs);
1909}
1910
1911#ifdef TXDEBUG
1912#define TXD(x) printk x
1913#else
1914#define TXD(x)
1915#endif
1916
1917
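/* TX completion: walk the ring from tx_old towards tx_new, reclaiming
 * every descriptor the chip no longer owns, unmapping its buffers and
 * freeing the skb.  Runs under hp->happy_lock from the interrupt paths.
 */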
1918static void happy_meal_tx(struct happy_meal *hp)
1919{
1920 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1921 struct happy_meal_txd *this;
1922 struct net_device *dev = hp->dev;
1923 int elem;
1924
1925 elem = hp->tx_old;
1926 TXD(("TX<"));
1927 while (elem != hp->tx_new) {
1928 struct sk_buff *skb;
1929 u32 flags, dma_addr, dma_len;
1930 int frag;
1931
1932 TXD(("[%d]", elem));
1933 this = &txbase[elem];
1934 flags = hme_read_desc32(hp, &this->tx_flags);
1935 if (flags & TXFLAG_OWN)
1936 break;
1937 skb = hp->tx_skbs[elem];
1938 if (skb_shinfo(skb)->nr_frags) {
1939 int last;
1940
1941 last = elem + skb_shinfo(skb)->nr_frags;
1942 last &= (TX_RING_SIZE - 1);
1943 flags = hme_read_desc32(hp, &txbase[last].tx_flags);
1944 if (flags & TXFLAG_OWN)
1945 break;
1946 }
1947 hp->tx_skbs[elem] = NULL;
1948 dev->stats.tx_bytes += skb->len;
1949
1950 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1951 dma_addr = hme_read_desc32(hp, &this->tx_addr);
1952 dma_len = hme_read_desc32(hp, &this->tx_flags);
1953
1954 dma_len &= TXFLAG_SIZE;
1955 if (!frag)
1956 dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1957 else
1958 dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1959
1960 elem = NEXT_TX(elem);
1961 this = &txbase[elem];
1962 }
1963
1964 dev_consume_skb_irq(skb);
1965 dev->stats.tx_packets++;
1966 }
1967 hp->tx_old = elem;
1968 TXD((">"));
1969
1970 if (netif_queue_stopped(dev) &&
1971 TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
1972 netif_wake_queue(dev);
1973}
1974
1975#ifdef RXDEBUG
1976#define RXD(x) printk x
1977#else
1978#define RXD(x)
1979#endif

/* Receive path.  Frames larger than RX_COPY_THRESHOLD get their ring
 * buffer replaced with a freshly allocated skb; smaller frames are copied
 * into a new skb so the original buffer can be handed straight back to
 * the chip.  If an allocation fails the frame is dropped and the old
 * buffer is recycled.
 *
 * hp->happy_lock must be held.
 */
1990static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1991{
1992 struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
1993 struct happy_meal_rxd *this;
1994 int elem = hp->rx_new, drops = 0;
1995 u32 flags;
1996
1997 RXD(("RX<"));
1998 this = &rxbase[elem];
1999 while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
2000 struct sk_buff *skb;
2001 int len = flags >> 16;
2002 u16 csum = flags & RXFLAG_CSUM;
2003 u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
2004
2005 RXD(("[%d ", elem));
2006
2007
2008 if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
2009 RXD(("ERR(%08x)]", flags));
2010 dev->stats.rx_errors++;
2011 if (len < ETH_ZLEN)
2012 dev->stats.rx_length_errors++;
2013 if (len & (RXFLAG_OVERFLOW >> 16)) {
2014 dev->stats.rx_over_errors++;
2015 dev->stats.rx_fifo_errors++;
2016 }
2017
2018
2019 drop_it:
2020 dev->stats.rx_dropped++;
2021 hme_write_rxd(hp, this,
2022 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2023 dma_addr);
2024 goto next;
2025 }
2026 skb = hp->rx_skbs[elem];
2027 if (len > RX_COPY_THRESHOLD) {
2028 struct sk_buff *new_skb;
2029 u32 mapping;
2030
2031
2032 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
2033 if (new_skb == NULL) {
2034 drops++;
2035 goto drop_it;
2036 }
2037 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2038 mapping = dma_map_single(hp->dma_dev, new_skb->data,
2039 RX_BUF_ALLOC_SIZE,
2040 DMA_FROM_DEVICE);
2041 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2042 dev_kfree_skb_any(new_skb);
2043 drops++;
2044 goto drop_it;
2045 }
2046
2047 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2048 hp->rx_skbs[elem] = new_skb;
2049 hme_write_rxd(hp, this,
2050 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2051 mapping);
2052 skb_reserve(new_skb, RX_OFFSET);
2053
2054
2055 skb_trim(skb, len);
2056 } else {
2057 struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
2058
2059 if (copy_skb == NULL) {
2060 drops++;
2061 goto drop_it;
2062 }
2063
2064 skb_reserve(copy_skb, 2);
2065 skb_put(copy_skb, len);
2066 dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2067 skb_copy_from_linear_data(skb, copy_skb->data, len);
2068 dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
2069
2070 hme_write_rxd(hp, this,
2071 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2072 dma_addr);
2073
2074 skb = copy_skb;
2075 }
2076
2077
2078 skb->csum = csum_unfold(~(__force __sum16)htons(csum));
2079 skb->ip_summed = CHECKSUM_COMPLETE;
2080
2081 RXD(("len=%d csum=%4x]", len, csum));
2082 skb->protocol = eth_type_trans(skb, dev);
2083 netif_rx(skb);
2084
2085 dev->stats.rx_packets++;
2086 dev->stats.rx_bytes += len;
2087 next:
2088 elem = NEXT_RX(elem);
2089 this = &rxbase[elem];
2090 }
2091 hp->rx_new = elem;
2092 if (drops)
2093 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
2094 RXD((">"));
2095}
2096
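/* Per-device interrupt handler: read GREG_STAT once and dispatch to the
 * error, MIF, TX-completion and RX service routines as indicated.
 */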
2097static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
2098{
2099 struct net_device *dev = dev_id;
2100 struct happy_meal *hp = netdev_priv(dev);
2101 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2102
2103 HMD(("happy_meal_interrupt: status=%08x ", happy_status));
2104
2105 spin_lock(&hp->happy_lock);
2106
2107 if (happy_status & GREG_STAT_ERRORS) {
2108 HMD(("ERRORS "));
2109 if (happy_meal_is_not_so_happy(hp, happy_status))
2110 goto out;
2111 }
2112
2113 if (happy_status & GREG_STAT_MIFIRQ) {
2114 HMD(("MIFIRQ "));
2115 happy_meal_mif_interrupt(hp);
2116 }
2117
2118 if (happy_status & GREG_STAT_TXALL) {
2119 HMD(("TXALL "));
2120 happy_meal_tx(hp);
2121 }
2122
2123 if (happy_status & GREG_STAT_RXTOHOST) {
2124 HMD(("RXTOHOST "));
2125 happy_meal_rx(hp, dev);
2126 }
2127
2128 HMD(("done\n"));
2129out:
2130 spin_unlock(&hp->happy_lock);
2131
2132 return IRQ_HANDLED;
2133}
2134
2135#ifdef CONFIG_SBUS
2136static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
2137{
2138 struct quattro *qp = (struct quattro *) cookie;
2139 int i;
2140
2141 for (i = 0; i < 4; i++) {
2142 struct net_device *dev = qp->happy_meals[i];
2143 struct happy_meal *hp = netdev_priv(dev);
2144 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2145
2146 HMD(("quattro_interrupt: status=%08x ", happy_status));
2147
2148 if (!(happy_status & (GREG_STAT_ERRORS |
2149 GREG_STAT_MIFIRQ |
2150 GREG_STAT_TXALL |
2151 GREG_STAT_RXTOHOST)))
2152 continue;
2153
2154 spin_lock(&hp->happy_lock);
2155
2156 if (happy_status & GREG_STAT_ERRORS) {
2157 HMD(("ERRORS "));
2158 if (happy_meal_is_not_so_happy(hp, happy_status))
2159 goto next;
2160 }
2161
2162 if (happy_status & GREG_STAT_MIFIRQ) {
2163 HMD(("MIFIRQ "));
2164 happy_meal_mif_interrupt(hp);
2165 }
2166
2167 if (happy_status & GREG_STAT_TXALL) {
2168 HMD(("TXALL "));
2169 happy_meal_tx(hp);
2170 }
2171
2172 if (happy_status & GREG_STAT_RXTOHOST) {
2173 HMD(("RXTOHOST "));
2174 happy_meal_rx(hp, dev);
2175 }
2176
2177 next:
2178 spin_unlock(&hp->happy_lock);
2179 }
2180 HMD(("done\n"));
2181
2182 return IRQ_HANDLED;
2183}
2184#endif
2185
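/* Open: request the interrupt (SBus Quattro ports share a single IRQ set
 * up elsewhere, so they skip this) and run the full chip initialization
 * under the lock.
 */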
2186static int happy_meal_open(struct net_device *dev)
2187{
2188 struct happy_meal *hp = netdev_priv(dev);
2189 int res;
2190
2191 HMD(("happy_meal_open: "));
2192
2193
2194
2195
2196 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2197 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2198 dev->name, dev);
2199 if (res) {
2200 HMD(("EAGAIN\n"));
2201 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
2202 hp->irq);
2203
2204 return -EAGAIN;
2205 }
2206 }
2207
2208 HMD(("to happy_meal_init\n"));
2209
2210 spin_lock_irq(&hp->happy_lock);
2211 res = happy_meal_init(hp);
2212 spin_unlock_irq(&hp->happy_lock);
2213
2214 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2215 free_irq(hp->irq, dev);
2216 return res;
2217}
2218
2219static int happy_meal_close(struct net_device *dev)
2220{
2221 struct happy_meal *hp = netdev_priv(dev);
2222
2223 spin_lock_irq(&hp->happy_lock);
2224 happy_meal_stop(hp, hp->gregs);
2225 happy_meal_clean_rings(hp);
2226
2227
2228 del_timer(&hp->happy_timer);
2229
2230 spin_unlock_irq(&hp->happy_lock);
2231
	/* As in happy_meal_open(), the shared Quattro SBUS interrupt is
	 * managed per-card, not per-port, so it is not released here.
	 */
2236 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2237 free_irq(hp->irq, dev);
2238
2239 return 0;
2240}
2241
2242#ifdef SXDEBUG
2243#define SXD(x) printk x
2244#else
2245#define SXD(x)
2246#endif
2247
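/* Called by the networking core when the TX watchdog fires: dump some
 * diagnostic state, reinitialize the chip and wake the queue back up.
 */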
2248static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
2249{
2250 struct happy_meal *hp = netdev_priv(dev);
2251
2252 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2253 tx_dump_log();
2254 printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
2255 hme_read32(hp, hp->gregs + GREG_STAT),
2256 hme_read32(hp, hp->etxregs + ETX_CFG),
2257 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
2258
2259 spin_lock_irq(&hp->happy_lock);
2260 happy_meal_init(hp);
2261 spin_unlock_irq(&hp->happy_lock);
2262
2263 netif_wake_queue(dev);
2264}
2265
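/* Error-path helper for happy_meal_start_xmit(): if mapping one fragment
 * of a multi-fragment skb fails, undo the DMA mappings already made,
 * from the linear head (first_mapping/first_len) up to, but not
 * including, the descriptor slot that failed.
 */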
2266static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2267 u32 first_len, u32 first_entry, u32 entry)
2268{
2269 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2270
2271 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2272
2273 first_entry = NEXT_TX(first_entry);
	while (first_entry != entry) {
		struct happy_meal_txd *this = &txbase[first_entry];
		u32 addr, len;

		addr = hme_read_desc32(hp, &this->tx_addr);
		len = hme_read_desc32(hp, &this->tx_flags);
		len &= TXFLAG_SIZE;
		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
		first_entry = NEXT_TX(first_entry);
	}
2283}
2284
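/* Queue one skb for transmission.
 *
 * For CHECKSUM_PARTIAL packets the chip fills in the checksum itself:
 * the offset where the checksummed data starts and the offset where the
 * result must be stored are packed into the descriptor flags through the
 * TXFLAG_CSBUFBEGIN (<<14) and TXFLAG_CSLOCATION (<<20) fields.  As a
 * rough example (assuming a plain IPv4/TCP frame with no options), the
 * start offset would be 14 + 20 = 34 bytes and the stuff offset
 * 34 + 16 = 50.
 *
 * A linear skb takes a single SOP|EOP descriptor; a fragmented skb takes
 * one descriptor for the head plus one per page fragment.  Setting
 * TXFLAG_OWN hands a descriptor to the hardware, and the ETX_PENDING
 * write at the end kicks the transmitter.
 */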
2285static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2286 struct net_device *dev)
2287{
2288 struct happy_meal *hp = netdev_priv(dev);
2289 int entry;
2290 u32 tx_flags;
2291
2292 tx_flags = TXFLAG_OWN;
2293 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2294 const u32 csum_start_off = skb_checksum_start_offset(skb);
2295 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
2296
2297 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
2298 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
2299 ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
2300 }
2301
2302 spin_lock_irq(&hp->happy_lock);
2303
2304 if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
2305 netif_stop_queue(dev);
2306 spin_unlock_irq(&hp->happy_lock);
2307 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
2308 dev->name);
2309 return NETDEV_TX_BUSY;
2310 }
2311
2312 entry = hp->tx_new;
	SXD(("SX<l[%d]e[%d]>", skb->len, entry));
2314 hp->tx_skbs[entry] = skb;
2315
2316 if (skb_shinfo(skb)->nr_frags == 0) {
2317 u32 mapping, len;
2318
2319 len = skb->len;
2320 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2321 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2322 goto out_dma_error;
2323 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2324 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2325 (tx_flags | (len & TXFLAG_SIZE)),
2326 mapping);
2327 entry = NEXT_TX(entry);
2328 } else {
2329 u32 first_len, first_mapping;
2330 int frag, first_entry = entry;
2331
		/* Fragmented packet: map the linear head first, but do not
		 * write its descriptor yet.  The SOP descriptor is filled in
		 * last (below) so the chip never sees the OWN bit on a
		 * partially built chain.
		 */
2335 first_len = skb_headlen(skb);
2336 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2337 DMA_TO_DEVICE);
2338 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2339 goto out_dma_error;
2340 entry = NEXT_TX(entry);
2341
2342 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
2343 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
2344 u32 len, mapping, this_txflags;
2345
2346 len = skb_frag_size(this_frag);
2347 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2348 0, len, DMA_TO_DEVICE);
2349 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2350 unmap_partial_tx_skb(hp, first_mapping, first_len,
2351 first_entry, entry);
2352 goto out_dma_error;
2353 }
2354 this_txflags = tx_flags;
2355 if (frag == skb_shinfo(skb)->nr_frags - 1)
2356 this_txflags |= TXFLAG_EOP;
2357 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2358 (this_txflags | (len & TXFLAG_SIZE)),
2359 mapping);
2360 entry = NEXT_TX(entry);
2361 }
2362 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
2363 (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
2364 first_mapping);
2365 }
2366
2367 hp->tx_new = entry;
2368
2369 if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
2370 netif_stop_queue(dev);
	/* Poke the transmitter so it starts fetching the new descriptors. */
2373 hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
2374
2375 spin_unlock_irq(&hp->happy_lock);
2376
2377 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2378 return NETDEV_TX_OK;
2379
2380out_dma_error:
2381 hp->tx_skbs[hp->tx_new] = NULL;
2382 spin_unlock_irq(&hp->happy_lock);
2383
2384 dev_kfree_skb_any(skb);
2385 dev->stats.tx_dropped++;
2386 return NETDEV_TX_OK;
2387}
2388
2389static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2390{
2391 struct happy_meal *hp = netdev_priv(dev);
2392
2393 spin_lock_irq(&hp->happy_lock);
2394 happy_meal_get_counters(hp, hp->bigmacregs);
2395 spin_unlock_irq(&hp->happy_lock);
2396
2397 return &dev->stats;
2398}
2399
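/* Program the receive filter: promiscuous mode sets the PMISC bit,
 * IFF_ALLMULTI (or more than 64 multicast entries) opens the hash
 * filter completely, and otherwise a 64-bit hash table is built from
 * the top six bits of the little-endian CRC of each multicast address
 * and spread across the four 16-bit BMAC_HTABLE registers.
 */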
2400static void happy_meal_set_multicast(struct net_device *dev)
2401{
2402 struct happy_meal *hp = netdev_priv(dev);
2403 void __iomem *bregs = hp->bigmacregs;
2404 struct netdev_hw_addr *ha;
2405 u32 crc;
2406
2407 spin_lock_irq(&hp->happy_lock);
2408
2409 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2410 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2411 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2412 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2413 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2414 } else if (dev->flags & IFF_PROMISC) {
2415 hme_write32(hp, bregs + BMAC_RXCFG,
2416 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2417 } else {
2418 u16 hash_table[4];
2419
2420 memset(hash_table, 0, sizeof(hash_table));
2421 netdev_for_each_mc_addr(ha, dev) {
2422 crc = ether_crc_le(6, ha->addr);
2423 crc >>= 26;
2424 hash_table[crc >> 4] |= 1 << (crc & 0xf);
2425 }
2426 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2427 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2428 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2429 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2430 }
2431
2432 spin_unlock_irq(&hp->happy_lock);
2433}
2434
2435
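/* ethtool get_link_ksettings: read the control and link-partner
 * registers straight from the transceiver and report either the
 * autonegotiated result (from LPA) or the forced speed/duplex
 * (from BMCR).
 */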
2436static int hme_get_link_ksettings(struct net_device *dev,
2437 struct ethtool_link_ksettings *cmd)
2438{
2439 struct happy_meal *hp = netdev_priv(dev);
2440 u32 speed;
2441 u32 supported;
2442
2443 supported =
2444 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2445 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2446 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
	/* The on-board transceiver is reported as a fixed TP port. */
2449 cmd->base.port = PORT_TP;
2450 cmd->base.phy_address = 0;
2451
2452
2453 spin_lock_irq(&hp->happy_lock);
2454 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2455 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2456 spin_unlock_irq(&hp->happy_lock);
2457
2458 if (hp->sw_bmcr & BMCR_ANENABLE) {
2459 cmd->base.autoneg = AUTONEG_ENABLE;
2460 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2461 SPEED_100 : SPEED_10);
2462 if (speed == SPEED_100)
2463 cmd->base.duplex =
2464 (hp->sw_lpa & (LPA_100FULL)) ?
2465 DUPLEX_FULL : DUPLEX_HALF;
2466 else
2467 cmd->base.duplex =
2468 (hp->sw_lpa & (LPA_10FULL)) ?
2469 DUPLEX_FULL : DUPLEX_HALF;
2470 } else {
2471 cmd->base.autoneg = AUTONEG_DISABLE;
2472 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2473 cmd->base.duplex =
2474 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2475 DUPLEX_FULL : DUPLEX_HALF;
2476 }
2477 cmd->base.speed = speed;
2478 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2479 supported);
2480
2481 return 0;
2482}
2483
2484static int hme_set_link_ksettings(struct net_device *dev,
2485 const struct ethtool_link_ksettings *cmd)
2486{
2487 struct happy_meal *hp = netdev_priv(dev);
	/* Only autonegotiation on/off, 10/100 Mb/s and half/full duplex
	 * are supported; reject anything else.
	 */
2490 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2491 cmd->base.autoneg != AUTONEG_DISABLE)
2492 return -EINVAL;
2493 if (cmd->base.autoneg == AUTONEG_DISABLE &&
2494 ((cmd->base.speed != SPEED_100 &&
2495 cmd->base.speed != SPEED_10) ||
2496 (cmd->base.duplex != DUPLEX_HALF &&
2497 cmd->base.duplex != DUPLEX_FULL)))
2498 return -EINVAL;
2499
2500
2501 spin_lock_irq(&hp->happy_lock);
2502 del_timer(&hp->happy_timer);
2503 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2504 spin_unlock_irq(&hp->happy_lock);
2505
2506 return 0;
2507}
2508
2509static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2510{
2511 struct happy_meal *hp = netdev_priv(dev);
2512
2513 strlcpy(info->driver, "sunhme", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2515 if (hp->happy_flags & HFLAG_PCI) {
2516 struct pci_dev *pdev = hp->happy_dev;
2517 strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
2518 }
2519#ifdef CONFIG_SBUS
2520 else {
2521 const struct linux_prom_registers *regs;
2522 struct platform_device *op = hp->happy_dev;
2523 regs = of_get_property(op->dev.of_node, "regs", NULL);
2524 if (regs)
2525 snprintf(info->bus_info, sizeof(info->bus_info),
2526 "SBUS:%d",
2527 regs->which_io);
2528 }
2529#endif
2530}
2531
2532static u32 hme_get_link(struct net_device *dev)
2533{
2534 struct happy_meal *hp = netdev_priv(dev);
2535
2536 spin_lock_irq(&hp->happy_lock);
	hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
	spin_unlock_irq(&hp->happy_lock);

	return hp->sw_bmsr & BMSR_LSTATUS;
2541}
2542
2543static const struct ethtool_ops hme_ethtool_ops = {
2544 .get_drvinfo = hme_get_drvinfo,
2545 .get_link = hme_get_link,
2546 .get_link_ksettings = hme_get_link_ksettings,
2547 .set_link_ksettings = hme_set_link_ksettings,
2548};
2549
2550static int hme_version_printed;
2551
2552#ifdef CONFIG_SBUS
/* Find (or create) the Quattro bookkeeping structure for an SBUS QFE
 * port.  All four ports of a card hang off the same SBUS parent, so the
 * structure is keyed on (and stored in the drvdata of) that parent.
 */
2558static struct quattro *quattro_sbus_find(struct platform_device *child)
2559{
2560 struct device *parent = child->dev.parent;
2561 struct platform_device *op;
2562 struct quattro *qp;
2563
2564 op = to_platform_device(parent);
2565 qp = platform_get_drvdata(op);
2566 if (qp)
2567 return qp;
2568
2569 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2570 if (qp != NULL) {
2571 int i;
2572
2573 for (i = 0; i < 4; i++)
2574 qp->happy_meals[i] = NULL;
2575
2576 qp->quattro_dev = child;
2577 qp->next = qfe_sbus_list;
2578 qfe_sbus_list = qp;
2579
2580 platform_set_drvdata(op, qp);
2581 }
2582 return qp;
2583}
2584
/* The shared Quattro interrupt is requested once per card, and only
 * after all four ports have successfully probed; partially populated
 * cards are skipped.
 */
2589static int __init quattro_sbus_register_irqs(void)
2590{
2591 struct quattro *qp;
2592
2593 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2594 struct platform_device *op = qp->quattro_dev;
2595 int err, qfe_slot, skip = 0;
2596
2597 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2598 if (!qp->happy_meals[qfe_slot])
2599 skip = 1;
2600 }
2601 if (skip)
2602 continue;
2603
2604 err = request_irq(op->archdata.irqs[0],
2605 quattro_sbus_interrupt,
2606 IRQF_SHARED, "Quattro",
2607 qp);
2608 if (err != 0) {
2609 printk(KERN_ERR "Quattro HME: IRQ registration "
2610 "error %d.\n", err);
2611 return err;
2612 }
2613 }
2614
2615 return 0;
2616}
2617
2618static void quattro_sbus_free_irqs(void)
2619{
2620 struct quattro *qp;
2621
2622 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2623 struct platform_device *op = qp->quattro_dev;
2624 int qfe_slot, skip = 0;
2625
2626 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2627 if (!qp->happy_meals[qfe_slot])
2628 skip = 1;
2629 }
2630 if (skip)
2631 continue;
2632
2633 free_irq(op->archdata.irqs[0], qp);
2634 }
2635}
2636#endif
2637
2638#ifdef CONFIG_PCI
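/* Find (or create) the Quattro bookkeeping structure for a PCI QFE
 * port.  The four HME functions of a QFE card sit behind a common
 * bridge, so the structure is keyed on pdev->bus->self.
 */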
2639static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2640{
2641 struct pci_dev *bdev = pdev->bus->self;
2642 struct quattro *qp;
2643
2644 if (!bdev) return NULL;
2645 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2646 struct pci_dev *qpdev = qp->quattro_dev;
2647
2648 if (qpdev == bdev)
2649 return qp;
2650 }
2651 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2652 if (qp != NULL) {
2653 int i;
2654
2655 for (i = 0; i < 4; i++)
2656 qp->happy_meals[i] = NULL;
2657
2658 qp->quattro_dev = bdev;
2659 qp->next = qfe_pci_list;
2660 qfe_pci_list = qp;
2661
2662
2663 qp->nranges = 0;
2664 }
2665 return qp;
2666}
2667#endif
2668
2669static const struct net_device_ops hme_netdev_ops = {
2670 .ndo_open = happy_meal_open,
2671 .ndo_stop = happy_meal_close,
2672 .ndo_start_xmit = happy_meal_start_xmit,
2673 .ndo_tx_timeout = happy_meal_tx_timeout,
2674 .ndo_get_stats = happy_meal_get_stats,
2675 .ndo_set_rx_mode = happy_meal_set_multicast,
2676 .ndo_set_mac_address = eth_mac_addr,
2677 .ndo_validate_addr = eth_validate_addr,
2678};
2679
2680#ifdef CONFIG_SBUS
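/* Probe one SBUS HME (or one port of a Quattro): map the five register
 * ranges described by the OF resources, determine the MAC address,
 * allocate the descriptor block, and register the net device.
 */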
2681static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2682{
2683 struct device_node *dp = op->dev.of_node, *sbus_dp;
2684 struct quattro *qp = NULL;
2685 struct happy_meal *hp;
2686 struct net_device *dev;
2687 int i, qfe_slot = -1;
2688 int err = -ENODEV;
2689
2690 sbus_dp = op->dev.parent->of_node;
	/* The parent must be an SBus/SBI bus node; PCI HMEs are handled
	 * by the PCI probe below.
	 */
2693 if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi"))
2694 return err;
2695
2696 if (is_qfe) {
2697 qp = quattro_sbus_find(op);
2698 if (qp == NULL)
2699 goto err_out;
2700 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2701 if (qp->happy_meals[qfe_slot] == NULL)
2702 break;
2703 if (qfe_slot == 4)
2704 goto err_out;
2705 }
2706
2707 err = -ENOMEM;
2708 dev = alloc_etherdev(sizeof(struct happy_meal));
2709 if (!dev)
2710 goto err_out;
2711 SET_NETDEV_DEV(dev, &op->dev);
2712
2713 if (hme_version_printed++ == 0)
2714 printk(KERN_INFO "%s", version);
2715
	/* MAC address selection: a non-zero "macaddr" module parameter
	 * (e.g. macaddr=8,0,0x20,0xab,0xcd,0xef) wins and its last octet
	 * is bumped for each extra port; otherwise use the OBP
	 * "local-mac-address" property for QFE ports, falling back to the
	 * machine's IDPROM address.
	 */
2719 for (i = 0; i < 6; i++) {
2720 if (macaddr[i] != 0)
2721 break;
2722 }
2723 if (i < 6) {
2724 for (i = 0; i < 6; i++)
2725 dev->dev_addr[i] = macaddr[i];
2726 macaddr[5]++;
2727 } else {
2728 const unsigned char *addr;
2729 int len;
2730
2731 addr = of_get_property(dp, "local-mac-address", &len);
2732
2733 if (qfe_slot != -1 && addr && len == ETH_ALEN)
2734 memcpy(dev->dev_addr, addr, ETH_ALEN);
2735 else
2736 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
2737 }
2738
2739 hp = netdev_priv(dev);
2740
2741 hp->happy_dev = op;
2742 hp->dma_dev = &op->dev;
2743
2744 spin_lock_init(&hp->happy_lock);
2745
2746 err = -ENODEV;
2747 if (qp != NULL) {
2748 hp->qfe_parent = qp;
2749 hp->qfe_ent = qfe_slot;
2750 qp->happy_meals[qfe_slot] = dev;
2751 }
2752
2753 hp->gregs = of_ioremap(&op->resource[0], 0,
2754 GREG_REG_SIZE, "HME Global Regs");
2755 if (!hp->gregs) {
2756 printk(KERN_ERR "happymeal: Cannot map global registers.\n");
2757 goto err_out_free_netdev;
2758 }
2759
2760 hp->etxregs = of_ioremap(&op->resource[1], 0,
2761 ETX_REG_SIZE, "HME TX Regs");
2762 if (!hp->etxregs) {
2763 printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
2764 goto err_out_iounmap;
2765 }
2766
2767 hp->erxregs = of_ioremap(&op->resource[2], 0,
2768 ERX_REG_SIZE, "HME RX Regs");
2769 if (!hp->erxregs) {
2770 printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
2771 goto err_out_iounmap;
2772 }
2773
2774 hp->bigmacregs = of_ioremap(&op->resource[3], 0,
2775 BMAC_REG_SIZE, "HME BIGMAC Regs");
2776 if (!hp->bigmacregs) {
2777 printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
2778 goto err_out_iounmap;
2779 }
2780
2781 hp->tcvregs = of_ioremap(&op->resource[4], 0,
				 TCVR_REG_SIZE, "HME Transceiver Regs");
2783 if (!hp->tcvregs) {
2784 printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
2785 goto err_out_iounmap;
2786 }
2787
2788 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
2789 if (hp->hm_revision == 0xff)
2790 hp->hm_revision = 0xa0;
	/* Translate the chip revision into quirk flags. */
2793 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
2794 hp->happy_flags = HFLAG_20_21;
2795 else if (hp->hm_revision != 0xa0)
2796 hp->happy_flags = HFLAG_NOT_A0;
2797
2798 if (qp != NULL)
2799 hp->happy_flags |= HFLAG_QUATTRO;
	/* Get the supported DVMA burst sizes from the parent SBUS. */
2802 hp->happy_bursts = of_getintprop_default(sbus_dp,
2803 "burst-sizes", 0x00);
2804
	hp->happy_block = dma_alloc_coherent(hp->dma_dev,
					     PAGE_SIZE,
					     &hp->hblock_dvma,
					     GFP_KERNEL);
2809 err = -ENOMEM;
2810 if (!hp->happy_block)
2811 goto err_out_iounmap;
	/* No link check has happened yet, and the link timer starts out
	 * asleep with no ticks elapsed.
	 */
	hp->linkcheck = 0;
	hp->timer_state = asleep;
	hp->timer_ticks = 0;
2819
2820 timer_setup(&hp->happy_timer, happy_meal_timer, 0);
2821
2822 hp->dev = dev;
2823 dev->netdev_ops = &hme_netdev_ops;
2824 dev->watchdog_timeo = 5*HZ;
2825 dev->ethtool_ops = &hme_ethtool_ops;
	/* The Happy Meal can do checksum offload and gather DMA on
	 * transmit.
	 */
2828 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2829 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2830
2831 hp->irq = op->archdata.irqs[0];
2832
2833#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* When both bus types are built in, register and descriptor
	 * accesses go through indirect ops; hook up the SBUS versions.
	 */
2835 hp->read_desc32 = sbus_hme_read_desc32;
2836 hp->write_txd = sbus_hme_write_txd;
2837 hp->write_rxd = sbus_hme_write_rxd;
2838 hp->read32 = sbus_hme_read32;
2839 hp->write32 = sbus_hme_write32;
2840#endif
2841
	/* Set the initial transceiver advertisement before the device is
	 * registered and can be opened.
	 */
2845 spin_lock_irq(&hp->happy_lock);
2846 happy_meal_set_initial_advertisement(hp);
2847 spin_unlock_irq(&hp->happy_lock);
2848
2849 err = register_netdev(hp->dev);
2850 if (err) {
2851 printk(KERN_ERR "happymeal: Cannot register net device, "
2852 "aborting.\n");
2853 goto err_out_free_coherent;
2854 }
2855
2856 platform_set_drvdata(op, hp);
2857
2858 if (qfe_slot != -1)
2859 printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
2860 dev->name, qfe_slot);
2861 else
2862 printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
2863 dev->name);
2864
2865 printk("%pM\n", dev->dev_addr);
2866
2867 return 0;
2868
2869err_out_free_coherent:
2870 dma_free_coherent(hp->dma_dev,
2871 PAGE_SIZE,
2872 hp->happy_block,
2873 hp->hblock_dvma);
2874
2875err_out_iounmap:
2876 if (hp->gregs)
2877 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
2878 if (hp->etxregs)
2879 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
2880 if (hp->erxregs)
2881 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
2882 if (hp->bigmacregs)
2883 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
2884 if (hp->tcvregs)
2885 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
2886
2887 if (qp)
2888 qp->happy_meals[qfe_slot] = NULL;
2889
2890err_out_free_netdev:
2891 free_netdev(dev);
2892
2893err_out:
2894 return err;
2895}
2896#endif
2897
2898#ifdef CONFIG_PCI
2899#ifndef CONFIG_SPARC
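/* Without OpenPROM we cannot ask the firmware whether this is a QFE,
 * so detect one heuristically: four Happy Meal functions behind a
 * DEC 21153 PCI-PCI bridge.
 */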
2900static int is_quattro_p(struct pci_dev *pdev)
2901{
2902 struct pci_dev *busdev = pdev->bus->self;
2903 struct pci_dev *this_pdev;
2904 int n_hmes;
2905
2906 if (busdev == NULL ||
2907 busdev->vendor != PCI_VENDOR_ID_DEC ||
2908 busdev->device != PCI_DEVICE_ID_DEC_21153)
2909 return 0;
2910
2911 n_hmes = 0;
2912 list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
2913 if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
2914 this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
2915 n_hmes++;
2916 }
2917
2918 if (n_hmes != 4)
2919 return 0;
2920
2921 return 1;
2922}
/* Walk the expansion ROM's VPD looking for an entry whose keyword is
 * "NA" (network address) with a 6-byte payload, and copy out the MAC
 * for the index-th interface found.
 */
2925static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
2926{
2927 int this_offset;
2928
2929 for (this_offset = 0x20; this_offset < len; this_offset++) {
2930 void __iomem *p = rom_base + this_offset;
2931
2932 if (readb(p + 0) != 0x90 ||
2933 readb(p + 1) != 0x00 ||
2934 readb(p + 2) != 0x09 ||
2935 readb(p + 3) != 0x4e ||
2936 readb(p + 4) != 0x41 ||
2937 readb(p + 5) != 0x06)
2938 continue;
2939
2940 this_offset += 6;
2941 p += 6;
2942
2943 if (index == 0) {
2944 int i;
2945
2946 for (i = 0; i < 6; i++)
2947 dev_addr[i] = readb(p + i);
2948 return 1;
2949 }
2950 index--;
2951 }
2952 return 0;
2953}
2954
2955static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
2956{
2957 size_t size;
2958 void __iomem *p = pci_map_rom(pdev, &size);
2959
2960 if (p) {
2961 int index = 0;
2962 int found;
2963
2964 if (is_quattro_p(pdev))
2965 index = PCI_SLOT(pdev->devfn);
2966
2967 found = readb(p) == 0x55 &&
2968 readb(p + 1) == 0xaa &&
2969 find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
2970 pci_unmap_rom(pdev, p);
2971 if (found)
2972 return;
2973 }
	/* No usable VPD entry: fall back to the Sun OUI (08:00:20) with
	 * a random 3-byte suffix.
	 */
2976 dev_addr[0] = 0x08;
2977 dev_addr[1] = 0x00;
2978 dev_addr[2] = 0x20;
2979 get_random_bytes(&dev_addr[3], 3);
2980}
2981#endif
2982
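/* Probe one PCI HME (or one port of a PCI QFE): enable and map the
 * device, work out the MAC address (module parameter, OBP property,
 * VPD, or a random Sun-OUI address), allocate the descriptor block and
 * register the net device.
 */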
2983static int happy_meal_pci_probe(struct pci_dev *pdev,
2984 const struct pci_device_id *ent)
2985{
2986 struct quattro *qp = NULL;
2987#ifdef CONFIG_SPARC
2988 struct device_node *dp;
2989#endif
2990 struct happy_meal *hp;
2991 struct net_device *dev;
2992 void __iomem *hpreg_base;
2993 unsigned long hpreg_res;
2994 int i, qfe_slot = -1;
2995 char prom_name[64];
2996 int err;
	/* Work out the PROM name so QFE ports can be recognized. */
2999#ifdef CONFIG_SPARC
3000 dp = pci_device_to_OF_node(pdev);
3001 snprintf(prom_name, sizeof(prom_name), "%pOFn", dp);
3002#else
3003 if (is_quattro_p(pdev))
3004 strcpy(prom_name, "SUNW,qfe");
3005 else
3006 strcpy(prom_name, "SUNW,hme");
3007#endif
3008
3009 err = -ENODEV;
3010
3011 if (pci_enable_device(pdev))
3012 goto err_out;
3013 pci_set_master(pdev);
3014
3015 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
3016 qp = quattro_pci_find(pdev);
3017 if (qp == NULL)
3018 goto err_out;
3019 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
3020 if (qp->happy_meals[qfe_slot] == NULL)
3021 break;
3022 if (qfe_slot == 4)
3023 goto err_out;
3024 }
3025
3026 dev = alloc_etherdev(sizeof(struct happy_meal));
3027 err = -ENOMEM;
3028 if (!dev)
3029 goto err_out;
3030 SET_NETDEV_DEV(dev, &pdev->dev);
3031
3032 if (hme_version_printed++ == 0)
3033 printk(KERN_INFO "%s", version);
3034
3035 hp = netdev_priv(dev);
3036
3037 hp->happy_dev = pdev;
3038 hp->dma_dev = &pdev->dev;
3039
3040 spin_lock_init(&hp->happy_lock);
3041
3042 if (qp != NULL) {
3043 hp->qfe_parent = qp;
3044 hp->qfe_ent = qfe_slot;
3045 qp->happy_meals[qfe_slot] = dev;
3046 }
3047
3048 hpreg_res = pci_resource_start(pdev, 0);
3049 err = -ENODEV;
3050 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3051 printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
3052 goto err_out_clear_quattro;
3053 }
3054 if (pci_request_regions(pdev, DRV_NAME)) {
3055 printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
3056 "aborting.\n");
3057 goto err_out_clear_quattro;
3058 }
3059
3060 if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) {
3061 printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
3062 goto err_out_free_res;
3063 }
3064
3065 for (i = 0; i < 6; i++) {
3066 if (macaddr[i] != 0)
3067 break;
3068 }
3069 if (i < 6) {
3070 for (i = 0; i < 6; i++)
3071 dev->dev_addr[i] = macaddr[i];
3072 macaddr[5]++;
3073 } else {
3074#ifdef CONFIG_SPARC
3075 const unsigned char *addr;
3076 int len;
3077
3078 if (qfe_slot != -1 &&
3079 (addr = of_get_property(dp, "local-mac-address", &len))
3080 != NULL &&
3081 len == 6) {
3082 memcpy(dev->dev_addr, addr, ETH_ALEN);
3083 } else {
3084 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
3085 }
3086#else
3087 get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
3088#endif
3089 }
	/* The register blocks live at fixed offsets within the single
	 * 32K PCI BAR.
	 */
3092 hp->gregs = (hpreg_base + 0x0000UL);
3093 hp->etxregs = (hpreg_base + 0x2000UL);
3094 hp->erxregs = (hpreg_base + 0x4000UL);
3095 hp->bigmacregs = (hpreg_base + 0x6000UL);
3096 hp->tcvregs = (hpreg_base + 0x7000UL);
3097
3098#ifdef CONFIG_SPARC
3099 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
3100 if (hp->hm_revision == 0xff)
3101 hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
3102#else
	/* Without OpenPROM there is no "hm-rev" property; assume 0x20. */
3104 hp->hm_revision = 0x20;
3105#endif
3106
3107
3108 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
3109 hp->happy_flags = HFLAG_20_21;
3110 else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
3111 hp->happy_flags = HFLAG_NOT_A0;
3112
3113 if (qp != NULL)
3114 hp->happy_flags |= HFLAG_QUATTRO;
3115
3116
3117 hp->happy_flags |= HFLAG_PCI;
3118
3119#ifdef CONFIG_SPARC
3120
3121 hp->happy_bursts = DMA_BURSTBITS;
3122#endif
3123
3124 hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3125 &hp->hblock_dvma, GFP_KERNEL);
	err = -ENOMEM;
3127 if (!hp->happy_block)
3128 goto err_out_iounmap;
3129
3130 hp->linkcheck = 0;
3131 hp->timer_state = asleep;
3132 hp->timer_ticks = 0;
3133
3134 timer_setup(&hp->happy_timer, happy_meal_timer, 0);
3135
3136 hp->irq = pdev->irq;
3137 hp->dev = dev;
3138 dev->netdev_ops = &hme_netdev_ops;
3139 dev->watchdog_timeo = 5*HZ;
3140 dev->ethtool_ops = &hme_ethtool_ops;
3141
3142
3143 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
3144 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
3145
3146#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
3147
3148 hp->read_desc32 = pci_hme_read_desc32;
3149 hp->write_txd = pci_hme_write_txd;
3150 hp->write_rxd = pci_hme_write_rxd;
3151 hp->read32 = pci_hme_read32;
3152 hp->write32 = pci_hme_write32;
3153#endif
3158 spin_lock_irq(&hp->happy_lock);
3159 happy_meal_set_initial_advertisement(hp);
3160 spin_unlock_irq(&hp->happy_lock);
3161
3162 err = register_netdev(hp->dev);
3163 if (err) {
3164 printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
3165 "aborting.\n");
3166 goto err_out_iounmap;
3167 }
3168
3169 pci_set_drvdata(pdev, hp);
3170
3171 if (!qfe_slot) {
3172 struct pci_dev *qpdev = qp->quattro_dev;
3173
3174 prom_name[0] = 0;
3175 if (!strncmp(dev->name, "eth", 3)) {
3176 int i = simple_strtoul(dev->name + 3, NULL, 10);
3177 sprintf(prom_name, "-%d", i + 3);
3178 }
3179 printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
3180 if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
3181 qpdev->device == PCI_DEVICE_ID_DEC_21153)
3182 printk("DEC 21153 PCI Bridge\n");
3183 else
3184 printk("unknown bridge %04x.%04x\n",
3185 qpdev->vendor, qpdev->device);
3186 }
3187
3188 if (qfe_slot != -1)
3189 printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
3190 dev->name, qfe_slot);
3191 else
3192 printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
3193 dev->name);
3194
3195 printk("%pM\n", dev->dev_addr);
3196
3197 return 0;
3198
3199err_out_iounmap:
3200 iounmap(hp->gregs);
3201
3202err_out_free_res:
3203 pci_release_regions(pdev);
3204
3205err_out_clear_quattro:
3206 if (qp != NULL)
3207 qp->happy_meals[qfe_slot] = NULL;
3208
3209 free_netdev(dev);
3210
3211err_out:
3212 return err;
3213}
3214
3215static void happy_meal_pci_remove(struct pci_dev *pdev)
3216{
3217 struct happy_meal *hp = pci_get_drvdata(pdev);
3218 struct net_device *net_dev = hp->dev;
3219
3220 unregister_netdev(net_dev);
3221
3222 dma_free_coherent(hp->dma_dev, PAGE_SIZE,
3223 hp->happy_block, hp->hblock_dvma);
3224 iounmap(hp->gregs);
3225 pci_release_regions(hp->happy_dev);
3226
3227 free_netdev(net_dev);
3228}
3229
3230static const struct pci_device_id happymeal_pci_ids[] = {
3231 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3232 { }
3233};
3234
3235MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
3236
3237static struct pci_driver hme_pci_driver = {
3238 .name = "hme",
3239 .id_table = happymeal_pci_ids,
3240 .probe = happy_meal_pci_probe,
3241 .remove = happy_meal_pci_remove,
3242};
3243
3244static int __init happy_meal_pci_init(void)
3245{
3246 return pci_register_driver(&hme_pci_driver);
3247}
3248
3249static void happy_meal_pci_exit(void)
3250{
3251 pci_unregister_driver(&hme_pci_driver);
3252
3253 while (qfe_pci_list) {
3254 struct quattro *qfe = qfe_pci_list;
3255 struct quattro *next = qfe->next;
3256
3257 kfree(qfe);
3258
3259 qfe_pci_list = next;
3260 }
3261}
3262
3263#endif
3264
3265#ifdef CONFIG_SBUS
3266static const struct of_device_id hme_sbus_match[];
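/* A port is treated as part of a QFE either when it matched one of the
 * "qfe" entries in hme_sbus_match or when its "model" property says
 * "SUNW,sbus-qfe".
 */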
3267static int hme_sbus_probe(struct platform_device *op)
3268{
3269 const struct of_device_id *match;
3270 struct device_node *dp = op->dev.of_node;
3271 const char *model = of_get_property(dp, "model", NULL);
3272 int is_qfe;
3273
3274 match = of_match_device(hme_sbus_match, &op->dev);
3275 if (!match)
3276 return -EINVAL;
3277 is_qfe = (match->data != NULL);
3278
3279 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
3280 is_qfe = 1;
3281
3282 return happy_meal_sbus_probe_one(op, is_qfe);
3283}
3284
3285static int hme_sbus_remove(struct platform_device *op)
3286{
3287 struct happy_meal *hp = platform_get_drvdata(op);
3288 struct net_device *net_dev = hp->dev;
3289
3290 unregister_netdev(net_dev);
	/* The shared Quattro interrupt, if any, is released in
	 * quattro_sbus_free_irqs() at module unload, not here.
	 */
3294 of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
3295 of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
3296 of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
3297 of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
3298 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
3299 dma_free_coherent(hp->dma_dev,
3300 PAGE_SIZE,
3301 hp->happy_block,
3302 hp->hblock_dvma);
3303
3304 free_netdev(net_dev);
3305
3306 return 0;
3307}
3308
3309static const struct of_device_id hme_sbus_match[] = {
3310 {
3311 .name = "SUNW,hme",
3312 },
3313 {
3314 .name = "SUNW,qfe",
3315 .data = (void *) 1,
3316 },
3317 {
3318 .name = "qfe",
3319 .data = (void *) 1,
3320 },
3321 {},
3322};
3323
3324MODULE_DEVICE_TABLE(of, hme_sbus_match);
3325
3326static struct platform_driver hme_sbus_driver = {
3327 .driver = {
3328 .name = "hme",
3329 .of_match_table = hme_sbus_match,
3330 },
3331 .probe = hme_sbus_probe,
3332 .remove = hme_sbus_remove,
3333};
3334
3335static int __init happy_meal_sbus_init(void)
3336{
3337 int err;
3338
3339 err = platform_driver_register(&hme_sbus_driver);
3340 if (!err)
3341 err = quattro_sbus_register_irqs();
3342
3343 return err;
3344}
3345
3346static void happy_meal_sbus_exit(void)
3347{
3348 platform_driver_unregister(&hme_sbus_driver);
3349 quattro_sbus_free_irqs();
3350
3351 while (qfe_sbus_list) {
3352 struct quattro *qfe = qfe_sbus_list;
3353 struct quattro *next = qfe->next;
3354
3355 kfree(qfe);
3356
3357 qfe_sbus_list = next;
3358 }
3359}
3360#endif
3361
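/* Module init/exit: the SBUS platform driver is registered first and
 * then the PCI driver; if PCI registration fails, the SBUS side is torn
 * down again so the module load fails cleanly.
 */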
3362static int __init happy_meal_probe(void)
3363{
3364 int err = 0;
3365
3366#ifdef CONFIG_SBUS
3367 err = happy_meal_sbus_init();
3368#endif
3369#ifdef CONFIG_PCI
3370 if (!err) {
3371 err = happy_meal_pci_init();
3372#ifdef CONFIG_SBUS
3373 if (err)
3374 happy_meal_sbus_exit();
3375#endif
3376 }
3377#endif
3378
3379 return err;
3380}
3381
3382
3383static void __exit happy_meal_exit(void)
3384{
3385#ifdef CONFIG_SBUS
3386 happy_meal_sbus_exit();
3387#endif
3388#ifdef CONFIG_PCI
3389 happy_meal_pci_exit();
3390#endif
3391}
3392
3393module_init(happy_meal_probe);
3394module_exit(happy_meal_exit);
3395