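/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
 *
 * Author: David S. Miller (davem@davemloft.net)
 */
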
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/auxio.h>
#include <asm/byteorder.h>
#include <asm/dma.h>
#include <asm/idprom.h>
#include <asm/io.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#include "sunbmac.h"

#define DRV_NAME	"sunbmac"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"August 26, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
MODULE_LICENSE("GPL");

#undef DEBUG_PROBE
#undef DEBUG_TX
#undef DEBUG_IRQ

#ifdef DEBUG_PROBE
#define DP(x)  printk x
#else
#define DP(x)
#endif

#ifdef DEBUG_TX
#define DTX(x)  printk x
#else
#define DTX(x)
#endif

#ifdef DEBUG_IRQ
#define DIRQ(x)  printk x
#else
#define DIRQ(x)
#endif

#define DEFAULT_JAMSIZE	4

#define QEC_RESET_TRIES	200

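/* Reset the whole QEC: assert the global reset bit and poll until the
 * hardware clears it, giving up after QEC_RESET_TRIES attempts.
 */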
static int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
	return -1;
}

static void qec_init(struct bigmac *bp)
{
	struct platform_device *qec_op = bp->qec_op;
	void __iomem *gregs = bp->gregs;
	u8 bsizes = bp->bigmac_bursts;
	u32 regval;

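	/* Only 16-byte and 32-byte SBUS bursts are used here; 64-byte
	 * bursts are never enabled.
	 */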
	if (bsizes & DMA_BURST32)
		regval = GLOB_CTRL_B32;
	else
		regval = GLOB_CTRL_B16;
	sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
	sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);

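	/* All of the QEC local memory is given to the BigMAC. */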
	sbus_writel(resource_size(&qec_op->resource[1]),
		    gregs + GLOB_MSIZE);

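	/* Split it evenly: half to the transmitter, half to the receiver. */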
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_TSIZE);
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_RSIZE);
}

#define TX_RESET_TRIES	32
#define RX_RESET_TRIES	32

static void bigmac_tx_reset(void __iomem *bregs)
{
	int tries = TX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_TXCFG);

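	/* The FIFO threshold bit is read-only and does not clear, so mask
	 * it out while waiting for the rest of TXCFG to go to zero.
	 */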
	while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
	       --tries != 0)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
		printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_TXCFG));
	}
}

static void bigmac_rx_reset(void __iomem *bregs)
{
	int tries = RX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_RXCFG);
	while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
		printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_RXCFG));
	}
}

static void bigmac_stop(struct bigmac *bp)
{
	bigmac_tx_reset(bp->bregs);
	bigmac_rx_reset(bp->bregs);
}

static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
{
	struct net_device_stats *stats = &bp->enet_stats;

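	/* Fold the hardware error counters into the interface statistics,
	 * clearing each counter after it has been read.
	 */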
	stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
	sbus_writel(0, bregs + BMAC_RCRCECTR);

	stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
	sbus_writel(0, bregs + BMAC_UNALECTR);

	stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
	sbus_writel(0, bregs + BMAC_GLECTR);

	stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);

	stats->collisions +=
		(sbus_readl(bregs + BMAC_EXCTR) +
		 sbus_readl(bregs + BMAC_LTCTR));
	sbus_writel(0, bregs + BMAC_EXCTR);
	sbus_writel(0, bregs + BMAC_LTCTR);
}

static void bigmac_clean_rings(struct bigmac *bp)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (bp->rx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->rx_skbs[i]);
			bp->rx_skbs[i] = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (bp->tx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->tx_skbs[i]);
			bp->tx_skbs[i] = NULL;
		}
	}
}

static void bigmac_init_rings(struct bigmac *bp, int from_irq)
{
	struct bmac_init_block *bb = bp->bmac_block;
	struct net_device *dev = bp->dev;
	int i;
	gfp_t gfp_flags = GFP_KERNEL;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;

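	/* Free any buffers still attached to the rings. */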
	bigmac_clean_rings(bp);

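	/* Now refill the receive ring with fresh buffers. */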
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
		if (!skb)
			continue;

		bp->rx_skbs[i] = skb;
		skb->dev = dev;

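		/* Size the buffer first, then reserve 34 bytes so the data
		 * pointer matches the 34-byte offset used for the DMA
		 * mapping and descriptor length below.
		 */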
		skb_put(skb, ETH_FRAME_LEN);
		skb_reserve(skb, 34);

		bb->be_rxd[i].rx_addr =
			dma_map_single(&bp->bigmac_op->dev,
				       skb->data,
				       RX_BUF_ALLOC_SIZE - 34,
				       DMA_FROM_DEVICE);
		bb->be_rxd[i].rx_flags =
			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
}

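/* MDIO management bit-bang helpers.  The transceiver is reached by
 * toggling the clock and data bits of the management PAL register by hand.
 */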
#define MGMT_CLKON	(MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
#define MGMT_CLKOFF	(MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)

static void idle_transceiver(void __iomem *tregs)
{
	int i = 20;

	while (i--) {
		sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	}
}

static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
{
	if (bp->tcvr_type == internal) {
		bit = (bit & 1) << 3;
		sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		bit = (bit & 1) << 2;
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
	}
}

static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
	} else {
		printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
	}
	return retval;
}

static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
	}
	return retval;
}

static void put_tcvr_byte(struct bigmac *bp,
			  void __iomem *tregs,
			  unsigned int byte)
{
	int shift = 4;

	do {
		write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
		shift -= 1;
	} while (shift >= 0);
}

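/* Bit-bang a MII write frame: start (01), write opcode (01), the 5-bit
 * PHY address, the 5-bit register number, a turnaround, and then the 16
 * data bits, most significant bit first.
 */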
static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
			      int reg, unsigned short val)
{
	int shift;

	reg &= 0xff;
	val &= 0xffff;
	switch (bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		printk(KERN_ERR "bigmac_tcvr_write: Whoops, no known transceiver type.\n");
		return;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	shift = 15;
	do {
		write_tcvr_bit(bp, tregs, (val >> shift) & 1);
		shift -= 1;
	} while (shift >= 0);
}

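/* Bit-bang a MII read frame: start (01), read opcode (10), the PHY
 * address and register number, then clock in the 16 data bits.  The
 * external transceiver is sampled with read_tcvr_bit2() (data read
 * before the clock pulse), the internal one with read_tcvr_bit().
 */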
static unsigned short bigmac_tcvr_read(struct bigmac *bp,
				       void __iomem *tregs,
				       int reg)
{
	unsigned short retval = 0;

	reg &= 0xff;
	switch (bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
		return 0xffff;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	if (bp->tcvr_type == external) {
		int shift = 15;

		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit2(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
	} else {
		int shift = 15;

		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
	}
	return retval;
}

static void bigmac_tcvr_init(struct bigmac *bp)
{
	void __iomem *tregs = bp->tregs;
	u32 mpal;

	idle_transceiver(tregs);
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
		    tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);

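	/* Only the MDIO bit belonging to the transceiver that is actually
	 * present (internal or external) will stick: set both and see
	 * which one survives.
	 */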
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);
	udelay(20);

	mpal = sbus_readl(tregs + TCVR_MPAL);
	if (mpal & MGMT_PAL_EXT_MDIO) {
		bp->tcvr_type = external;
		sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else if (mpal & MGMT_PAL_INT_MDIO) {
		bp->tcvr_type = internal;
		sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
			      TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else {
		printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
		       "external MDIO available!\n");
		printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
		       sbus_readl(tregs + TCVR_MPAL),
		       sbus_readl(tregs + TCVR_TPAL));
	}
}

static int bigmac_init_hw(struct bigmac *, int);

static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
{
	if (bp->sw_bmcr & BMCR_SPEED100) {
		int timeout;

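		/* Reset the PHY before changing speed. */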
		bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
		bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
		bp->sw_bmcr = (BMCR_RESET);
		bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

		timeout = 64;
		while (--timeout) {
			bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
			if ((bp->sw_bmcr & BMCR_RESET) == 0)
				break;
			udelay(20);
		}
		if (timeout == 0)
			printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);

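		/* 100baseT did not give us a link; drop down to 10baseT. */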
		bp->sw_bmcr &= ~(BMCR_SPEED100);
		bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
		return 0;
	}

	return -1;
}

static void bigmac_timer(unsigned long data)
{
	struct bigmac *bp = (struct bigmac *) data;
	void __iomem *tregs = bp->tregs;
	int restart_timer = 0;

	bp->timer_ticks++;
	if (bp->timer_state == ltrywait) {
		bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
		if (bp->sw_bmsr & BMSR_LSTATUS) {
			printk(KERN_INFO "%s: Link is now up at %s.\n",
			       bp->dev->name,
			       (bp->sw_bmcr & BMCR_SPEED100) ?
			       "100baseT" : "10baseT");
			bp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (bp->timer_ticks >= 4) {
				int ret;

				ret = try_next_permutation(bp, tregs);
				if (ret == -1) {
					printk(KERN_ERR "%s: Link down, cable problem?\n",
					       bp->dev->name);
					ret = bigmac_init_hw(bp, 0);
					if (ret) {
						printk(KERN_ERR "%s: Error, cannot re-init the "
						       "BigMAC.\n", bp->dev->name);
					}
					return;
				}
				bp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
	} else {
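		/* This should not happen: the link timer fired while it was
		 * supposed to be asleep.
		 */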
		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
		       bp->dev->name);
		restart_timer = 0;
		bp->timer_ticks = 0;
		bp->timer_state = asleep;
	}

	if (restart_timer != 0) {
		bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10);
		add_timer(&bp->bigmac_timer);
	}
}

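/* There is no real autonegotiation here: the chip is forced to 100baseT
 * first, and the link timer falls back to 10baseT if no link shows up.
 */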
static void bigmac_begin_auto_negotiation(struct bigmac *bp)
{
	void __iomem *tregs = bp->tregs;
	int timeout;

	bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
	bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);

	bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
	bp->sw_bmcr = (BMCR_RESET);
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

	timeout = 64;
	while (--timeout) {
		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
		if ((bp->sw_bmcr & BMCR_RESET) == 0)
			break;
		udelay(20);
	}
	if (timeout == 0)
		printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

	bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);

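	/* Force 100baseT to start with; bigmac_timer() handles the fallback. */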
	bp->sw_bmcr |= BMCR_SPEED100;
	bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);

	bp->timer_state = ltrywait;
	bp->timer_ticks = 0;
	bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
	bp->bigmac_timer.data = (unsigned long) bp;
	bp->bigmac_timer.function = bigmac_timer;
	add_timer(&bp->bigmac_timer);
}

static int bigmac_init_hw(struct bigmac *bp, int from_irq)
{
	void __iomem *gregs = bp->gregs;
	void __iomem *cregs = bp->creg;
	void __iomem *bregs = bp->bregs;
	unsigned char *e = &bp->dev->dev_addr[0];

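	/* Latch the current counters into the statistics, then bring the
	 * QEC and BigMAC through a full reset and reinit sequence.
	 */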
	bigmac_get_counters(bp, bregs);

	qec_global_reset(gregs);

	qec_init(bp);

	bigmac_init_rings(bp, from_irq);

	bigmac_tcvr_init(bp);

	bigmac_stop(bp);

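	/* Load the station MAC address, two bytes per register, then clear
	 * the multicast hash table until a filter is uploaded.
	 */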
	sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
	sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
	sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);

	sbus_writel(0, bregs + BMAC_HTABLE3);
	sbus_writel(0, bregs + BMAC_HTABLE2);
	sbus_writel(0, bregs + BMAC_HTABLE1);
	sbus_writel(0, bregs + BMAC_HTABLE0);

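	/* Enable the hash table filter in the receiver configuration, then
	 * program the transmitter configuration.
	 */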
	sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
		    bregs + BMAC_RXCFG);
	udelay(20);

	sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);

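	/* Seed the backoff random number generator with the low ten bits
	 * of the MAC address.
	 */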
	sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
		    bregs + BMAC_RSEED);

	sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
		    bregs + BMAC_XIFCFG);

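	/* Tell the QEC channel where the rx and tx descriptor rings live. */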
	sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
		    cregs + CREG_RXDS);
	sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
		    cregs + CREG_TXDS);

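	/* Set up the FIFO pointers into QEC local memory: receive buffers
	 * start at offset zero, transmit buffers right after them.
	 */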
	sbus_writel(0, cregs + CREG_RXRBUFPTR);
	sbus_writel(0, cregs + CREG_RXWBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXRBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXWBUFPTR);

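	/* Mask the BigMAC's own per-frame interrupts; frame completion is
	 * handled from the QEC channel status register instead.
	 */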
	sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
		    bregs + BMAC_IMASK);

	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(0, cregs + CREG_TIMASK);
	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(0, cregs + CREG_BMASK);

	sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);

	sbus_writel(0, cregs + CREG_CCNT);

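	/* Finally, enable the transmitter and the receiver, and kick off
	 * link speed detection.
	 */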
	sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
		    bregs + BMAC_TXCFG);
	sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
		    bregs + BMAC_RXCFG);

	bigmac_begin_auto_negotiation(bp);

	return 0;
}

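/* Error interrupts land here: print the offending status bits and then
 * reinitialize the whole chip.
 */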
static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
{
	printk(KERN_ERR "bigmac_is_medium_rare: ");
	if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
		if (qec_status & GLOB_STAT_ER)
			printk("QEC_ERROR, ");
		if (qec_status & GLOB_STAT_BM)
			printk("QEC_BMAC_ERROR, ");
	}
	if (bmac_status & CREG_STAT_ERRORS) {
		if (bmac_status & CREG_STAT_BERROR)
			printk("BMAC_ERROR, ");
		if (bmac_status & CREG_STAT_TXDERROR)
			printk("TXD_ERROR, ");
		if (bmac_status & CREG_STAT_TXLERR)
			printk("TX_LATE_ERROR, ");
		if (bmac_status & CREG_STAT_TXPERR)
			printk("TX_PARITY_ERROR, ");
		if (bmac_status & CREG_STAT_TXSERR)
			printk("TX_SBUS_ERROR, ");

		if (bmac_status & CREG_STAT_RXDROP)
			printk("RX_DROP_ERROR, ");

		if (bmac_status & CREG_STAT_RXSMALL)
			printk("RX_SMALL_ERROR, ");
		if (bmac_status & CREG_STAT_RXLERR)
			printk("RX_LATE_ERROR, ");
		if (bmac_status & CREG_STAT_RXPERR)
			printk("RX_PARITY_ERROR, ");
		if (bmac_status & CREG_STAT_RXSERR)
			printk("RX_SBUS_ERROR, ");
	}

	printk(" RESET\n");
	bigmac_init_hw(bp, 1);
}

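/* Transmit completion: reclaim descriptors the chip has finished with
 * and wake the queue if it was stopped for lack of space.
 */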
static void bigmac_tx(struct bigmac *bp)
{
	struct be_txd *txbase = &bp->bmac_block->be_txd[0];
	struct net_device *dev = bp->dev;
	int elem;

	spin_lock(&bp->lock);

	elem = bp->tx_old;
	DTX(("bigmac_tx: tx_old[%d] ", elem));
	while (elem != bp->tx_new) {
		struct sk_buff *skb;
		struct be_txd *this = &txbase[elem];

		DTX(("this(%p) [flags(%08x)addr(%08x)]",
		     this, this->tx_flags, this->tx_addr));

		if (this->tx_flags & TXD_OWN)
			break;
		skb = bp->tx_skbs[elem];
		bp->enet_stats.tx_packets++;
		bp->enet_stats.tx_bytes += skb->len;
		dma_unmap_single(&bp->bigmac_op->dev,
				 this->tx_addr, skb->len,
				 DMA_TO_DEVICE);

		DTX(("skb(%p) ", skb));
		bp->tx_skbs[elem] = NULL;
		dev_kfree_skb_irq(skb);

		elem = NEXT_TX(elem);
	}
	DTX((" DONE, tx_old=%d\n", elem));
	bp->tx_old = elem;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(bp) > 0)
		netif_wake_queue(bp->dev);

	spin_unlock(&bp->lock);
}

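/* Receive path: walk the ring, handing completed frames up the stack.
 * Large frames get a fresh buffer traded into the ring; short frames are
 * copied into a small skb and the original buffer is reused.
 */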
static void bigmac_rx(struct bigmac *bp)
{
	struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
	struct be_rxd *this;
	int elem = bp->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		int len = (flags & RXD_LENGTH);

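		/* Frames shorter than the Ethernet minimum are counted as
		 * errors and their buffers recycled in place.
		 */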
		if (len < ETH_ZLEN) {
			bp->enet_stats.rx_errors++;
			bp->enet_stats.rx_length_errors++;

	drop_it:
			bp->enet_stats.rx_dropped++;
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
			goto next;
		}
		skb = bp->rx_skbs[elem];
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

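			/* Trade a fresh buffer into the ring and pass the
			 * current one up as-is, avoiding a copy.
			 */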
			new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			dma_unmap_single(&bp->bigmac_op->dev,
					 this->rx_addr,
					 RX_BUF_ALLOC_SIZE - 34,
					 DMA_FROM_DEVICE);
			bp->rx_skbs[elem] = new_skb;
			new_skb->dev = bp->dev;
			skb_put(new_skb, ETH_FRAME_LEN);
			skb_reserve(new_skb, 34);
			this->rx_addr =
				dma_map_single(&bp->bigmac_op->dev,
					       new_skb->data,
					       RX_BUF_ALLOC_SIZE - 34,
					       DMA_FROM_DEVICE);
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			dma_sync_single_for_cpu(&bp->bigmac_op->dev,
						this->rx_addr, len,
						DMA_FROM_DEVICE);
			skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
			dma_sync_single_for_device(&bp->bigmac_op->dev,
						   this->rx_addr, len,
						   DMA_FROM_DEVICE);

			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			skb = copy_skb;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_rx(skb);
		bp->enet_stats.rx_packets++;
		bp->enet_stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	bp->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name);
}

static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
{
	struct bigmac *bp = (struct bigmac *) dev_id;
	u32 qec_status, bmac_status;

	DIRQ(("bigmac_interrupt: "));

	bmac_status = sbus_readl(bp->creg + CREG_STAT);
	qec_status = sbus_readl(bp->gregs + GLOB_STAT);

	DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
	if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
	    (bmac_status & CREG_STAT_ERRORS))
		bigmac_is_medium_rare(bp, qec_status, bmac_status);

	if (bmac_status & CREG_STAT_TXIRQ)
		bigmac_tx(bp);

	if (bmac_status & CREG_STAT_RXIRQ)
		bigmac_rx(bp);

	return IRQ_HANDLED;
}

static int bigmac_open(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int ret;

	ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
	if (ret) {
		printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
		return ret;
	}
	init_timer(&bp->bigmac_timer);
	ret = bigmac_init_hw(bp, 0);
	if (ret)
		free_irq(dev->irq, bp);
	return ret;
}

static int bigmac_close(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	del_timer(&bp->bigmac_timer);
	bp->timer_state = asleep;
	bp->timer_ticks = 0;

	bigmac_stop(bp);
	bigmac_clean_rings(bp);
	free_irq(dev->irq, bp);
	return 0;
}

static void bigmac_tx_timeout(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_init_hw(bp, 0);
	netif_wake_queue(dev);
}

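/* Queue one frame for transmission: map it for DMA, fill in the next
 * transmit descriptor and then kick the QEC channel.
 */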
static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int len, entry;
	u32 mapping;

	len = skb->len;
	mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
				 len, DMA_TO_DEVICE);

	spin_lock_irq(&bp->lock);
	entry = bp->tx_new;
	DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
	bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
	bp->tx_skbs[entry] = skb;
	bp->bmac_block->be_txd[entry].tx_addr = mapping;
	bp->bmac_block->be_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	bp->tx_new = NEXT_TX(entry);
	if (TX_BUFFS_AVAIL(bp) <= 0)
		netif_stop_queue(dev);
	spin_unlock_irq(&bp->lock);

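	/* Get the transmitter going. */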
	sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);

	return NETDEV_TX_OK;
}

static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_get_counters(bp, bp->bregs);
	return &bp->enet_stats;
}

static void bigmac_set_multicast(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	void __iomem *bregs = bp->bregs;
	struct netdev_hw_addr *ha;
	char *addrs;
	int i;
	u32 tmp, crc;

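	/* Disable the receiver while the filter is updated, and wait for
	 * the enable bit to clear before touching the hash table.
	 */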
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp &= ~(BIGMAC_RXCFG_ENABLE);
	sbus_writel(tmp, bregs + BMAC_RXCFG);
	while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
		udelay(20);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writel(0xffff, bregs + BMAC_HTABLE0);
		sbus_writel(0xffff, bregs + BMAC_HTABLE1);
		sbus_writel(0xffff, bregs + BMAC_HTABLE2);
		sbus_writel(0xffff, bregs + BMAC_HTABLE3);
	} else if (dev->flags & IFF_PROMISC) {
		tmp = sbus_readl(bregs + BMAC_RXCFG);
		tmp |= BIGMAC_RXCFG_PMISC;
		sbus_writel(tmp, bregs + BMAC_RXCFG);
	} else {
		u16 hash_table[4];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

		netdev_for_each_mc_addr(ha, dev) {
			addrs = ha->addr;

			if (!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
		sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
		sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
		sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
	}

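	/* Re-enable the receiver. */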
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp |= BIGMAC_RXCFG_ENABLE;
	sbus_writel(tmp, bregs + BMAC_RXCFG);
}

static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
}

static u32 bigmac_get_link(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR);
	spin_unlock_irq(&bp->lock);

	return (bp->sw_bmsr & BMSR_LSTATUS);
}

static const struct ethtool_ops bigmac_ethtool_ops = {
	.get_drvinfo		= bigmac_get_drvinfo,
	.get_link		= bigmac_get_link,
};

static const struct net_device_ops bigmac_ops = {
	.ndo_open		= bigmac_open,
	.ndo_stop		= bigmac_close,
	.ndo_start_xmit		= bigmac_start_xmit,
	.ndo_get_stats		= bigmac_get_stats,
	.ndo_set_multicast_list	= bigmac_set_multicast,
	.ndo_tx_timeout		= bigmac_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit bigmac_ether_init(struct platform_device *op,
				       struct platform_device *qec_op)
{
	static int version_printed;
	struct net_device *dev;
	u8 bsizes, bsizes_more;
	struct bigmac *bp;
	int i;

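	/* Get a new net_device struct for this interface, with the BigMAC
	 * private data allocated along with it, and set up driver state
	 * with back pointers to the QEC parent and the BigMAC SBUS device.
	 */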
	dev = alloc_etherdev(sizeof(struct bigmac));
	if (!dev)
		return -ENOMEM;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = idprom->id_ethaddr[i];

	bp = netdev_priv(dev);
	bp->qec_op = qec_op;
	bp->bigmac_op = op;

	SET_NETDEV_DEV(dev, &op->dev);

	spin_lock_init(&bp->lock);

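	/* Map the QEC global registers, make sure the QEC is in BigMAC
	 * mode, and reset it before use.
	 */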
	bp->gregs = of_ioremap(&qec_op->resource[0], 0,
			       GLOB_REG_SIZE, "BigMAC QEC Global Regs");
	if (!bp->gregs) {
		printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n");
		goto fail_and_cleanup;
	}

	if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) {
		printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n");
		goto fail_and_cleanup;
	}

	if (qec_global_reset(bp->gregs))
		goto fail_and_cleanup;

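	/* Work out which SBUS burst sizes can be used. */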
	bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
	bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);

	bsizes &= 0xff;
	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);
	bp->bigmac_bursts = bsizes;

	qec_init(bp);

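	/* Map the QEC per-channel, BigMAC primary, and transceiver register
	 * areas; the transceiver registers are how the PHY is reached.
	 */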
	bp->creg = of_ioremap(&op->resource[0], 0,
			      CREG_REG_SIZE, "BigMAC QEC Channel Regs");
	if (!bp->creg) {
		printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n");
		goto fail_and_cleanup;
	}

	bp->bregs = of_ioremap(&op->resource[1], 0,
			       BMAC_REG_SIZE, "BigMAC Primary Regs");
	if (!bp->bregs) {
		printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n");
		goto fail_and_cleanup;
	}

	bp->tregs = of_ioremap(&op->resource[2], 0,
			       TCVR_REG_SIZE, "BigMAC Transceiver Regs");
	if (!bp->tregs) {
		printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n");
		goto fail_and_cleanup;
	}

	bigmac_stop(bp);

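	/* Allocate a page of coherent DMA memory for the transmit and
	 * receive descriptor rings.
	 */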
	bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
					    PAGE_SIZE,
					    &bp->bblock_dvma, GFP_ATOMIC);
	if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
		printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
		goto fail_and_cleanup;
	}

	bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
					      "board-version", 1);

	init_timer(&bp->bigmac_timer);
	bp->timer_state = asleep;
	bp->timer_ticks = 0;

	bp->dev = dev;

	dev->ethtool_ops = &bigmac_ethtool_ops;
	dev->netdev_ops = &bigmac_ops;
	dev->watchdog_timeo = 5*HZ;

	dev->irq = bp->bigmac_op->archdata.irqs[0];
	dev->dma = 0;

	if (register_netdev(dev)) {
		printk(KERN_ERR "BIGMAC: Cannot register device.\n");
		goto fail_and_cleanup;
	}

	dev_set_drvdata(&bp->bigmac_op->dev, bp);

	printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n",
	       dev->name, dev->dev_addr);

	return 0;

fail_and_cleanup:
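	/* Something went wrong; undo everything that was set up so far. */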
	if (bp->gregs)
		of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
	if (bp->creg)
		of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
	if (bp->bregs)
		of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
	if (bp->tregs)
		of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);

	if (bp->bmac_block)
		dma_free_coherent(&bp->bigmac_op->dev,
				  PAGE_SIZE,
				  bp->bmac_block,
				  bp->bblock_dvma);

	free_netdev(dev);
	return -ENODEV;
}

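/* A "be" node always hangs off of a QEC, so the parent platform device
 * is the QEC itself and can be looked up directly.
 */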
static int __devinit bigmac_sbus_probe(struct platform_device *op,
				       const struct of_device_id *match)
{
	struct device *parent = op->dev.parent;
	struct platform_device *qec_op;

	qec_op = to_platform_device(parent);

	return bigmac_ether_init(op, qec_op);
}

static int __devexit bigmac_sbus_remove(struct platform_device *op)
{
	struct bigmac *bp = dev_get_drvdata(&op->dev);
	struct device *parent = op->dev.parent;
	struct net_device *net_dev = bp->dev;
	struct platform_device *qec_op;

	qec_op = to_platform_device(parent);

	unregister_netdev(net_dev);

	of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
	of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
	of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
	dma_free_coherent(&op->dev,
			  PAGE_SIZE,
			  bp->bmac_block,
			  bp->bblock_dvma);

	free_netdev(net_dev);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}

static const struct of_device_id bigmac_sbus_match[] = {
	{
		.name = "be",
	},
	{},
};

MODULE_DEVICE_TABLE(of, bigmac_sbus_match);

static struct of_platform_driver bigmac_sbus_driver = {
	.driver = {
		.name = "sunbmac",
		.owner = THIS_MODULE,
		.of_match_table = bigmac_sbus_match,
	},
	.probe		= bigmac_sbus_probe,
	.remove		= __devexit_p(bigmac_sbus_remove),
};

static int __init bigmac_init(void)
{
	return of_register_platform_driver(&bigmac_sbus_driver);
}

static void __exit bigmac_exit(void)
{
	of_unregister_platform_driver(&bigmac_sbus_driver);
}

module_init(bigmac_init);
module_exit(bigmac_exit);