/*
 * lib82596.c: bus-independent core of an Intel 82596 ethernet driver.
 *
 * This file is meant to be #included by a bus-specific front end that
 * provides mpu_port(), ca(), SYSBUS and the SWAP16()/SWAP32() byte-order
 * helpers, and that allocates the DMA-able struct i596_dma block.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gfp.h>

/* DEBUG flags */
#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x, y)	if (i596_debug & (x)) { y; }
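
/*
 * Usage sketch: DEB(DEB_INIT, printk(KERN_DEBUG "up\n")) runs the printk
 * only while the DEB_INIT bit is set in i596_debug, so each debug class
 * can be switched on independently at run time.
 */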

/*
 * The 82596 is kicked via its PORT register: the low bits select one of
 * the commands below and the rest of the word carries a dma address when
 * the command needs one (see mpu_port() in the bus front end).
 */
#define PORT_RESET	0x00	/* reset the 82596 */
#define PORT_SELFTEST	0x01	/* run the on-chip selftest */
#define PORT_ALTSCP	0x02	/* set alternate SCP address */
#define PORT_ALTDUMP	0x03	/* dump state to the given address */

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/*
 * Received frames no longer than rx_copybreak are copied into a freshly
 * allocated small skb so the big ring buffer can be reused at once;
 * longer frames are passed up whole and the ring slot gets a new buffer.
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define ISCP_BUSY	0x0001

#define I596_NULL	((u32)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define TX_TIMEOUT	(HZ/20)


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	u32            ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

/* transmit buffer descriptor */
struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	u32            next;
	u32            data;
	u32 cache_pad[5];	/* pad to 32 bytes, one cache line */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596,
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	u32            b_next;		/* Address from i596 viewpoint */
};

struct tx_cmd {
	struct i596_cmd cmd;
	u32            tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
	dma_addr_t dma_addr;
#ifdef __LP64__
	u32 cache_pad[6];		/* Total 64 bytes... */
#else
	u32 cache_pad[1];		/* Total 32 bytes... */
#endif
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;			/* size of mc_addrs in bytes */
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	u32            b_next;		/* Address from i596 viewpoint */
	u32            rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
#ifndef __LP64__
	u32 cache_pad[2];		/* Total 32 bytes... */
#endif
};

struct i596_rbd {
	/* hardware data */
	unsigned short count;
	unsigned short zero1;
	u32            b_next;
	u32            b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	/* driver data */
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	u32            b_addr;		/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
					/* Total 32 bytes... */
#ifdef __LP64__
	u32 cache_pad[4];
#endif
};

/* These ring sizes are chosen so struct i596_dma fits in one 4096-byte page
 * on 32-bit builds (see the BUILD_BUG_ON in i82596_probe()). */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	u32            cmd;
	u32            rfd;
	u32            crc_err;
	u32            align_err;
	u32            resource_err;
	u32            over_err;
	u32            rcvdt_err;
	u32            short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	u32 stat;
	u32 scb;
};

struct i596_scp {
	u32 sysbus;
	u32 pad;
	u32 iscp;
};
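
/*
 * Initialization follows the 82596's pointer chain: PORT_ALTSCP hands the
 * chip the bus address of the SCP, scp.iscp points at the ISCP, and
 * iscp.scb points at the SCB, through which all subsequent commands and
 * receive frames are managed (see init_i596_mem() below).
 */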

struct i596_dma {
	struct i596_scp scp			__attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
};

struct i596_private {
	struct i596_dma *dma;
	u32    stat;
	int last_restart;
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	u32    last_cmd;
	int next_tx_cmd;
	int options;
	spinlock_t lock;
	dma_addr_t dma_addr;
	void __iomem *mpu_port;
	void __iomem *ca;
};

static const char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
	0x80,		/* don't save bad frames */
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f /*	*multi IA */ };

static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif

static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
{
	return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
}
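
/*
 * virt_to_dma() works because every descriptor lives inside the single
 * struct i596_dma block mapped at lp->dma_addr: a bus address is just the
 * block's base dma address plus the offset of the CPU pointer within it.
 */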

#ifdef NONCOHERENT_DMA
static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
				size_t len)
{
	dma_sync_single_for_device(ndev->dev.parent,
				   virt_to_dma(netdev_priv(ndev), addr), len,
				   DMA_BIDIRECTIONAL);
}

static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
				size_t len)
{
	dma_sync_single_for_cpu(ndev->dev.parent,
				virt_to_dma(netdev_priv(ndev), addr), len,
				DMA_BIDIRECTIONAL);
}
#else
static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
				size_t len)
{
}
static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
				size_t len)
{
}
#endif
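
/*
 * On platforms without cache-coherent DMA the bus front end defines
 * NONCOHERENT_DMA, and every descriptor handoff below is bracketed by
 * dma_sync_dev() before the chip may read it and dma_sync_cpu() before
 * the CPU reads it back; on coherent platforms both helpers are no-ops.
 */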

static inline int wait_istat(struct net_device *dev, struct i596_dma *dma,
			     int delcnt, char *str)
{
	dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
	while (--delcnt && dma->iscp.stat) {
		udelay(10);
		dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
		       dev->name, str, SWAP16(dma->iscp.stat));
		return -1;
	} else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma,
			   int delcnt, char *str)
{
	dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
	while (--delcnt && dma->scb.command) {
		udelay(10);
		dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str,
		       SWAP16(dma->scb.status),
		       SWAP16(dma->scb.command));
		return -1;
	} else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
	       " .cmd = %08x, .rfd = %08x\n",
	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
	       SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
	       " over %x, rcvdt %x, short %x\n",
	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
	cmd = lp->cmd_head;
	while (cmd != NULL) {
		printk(KERN_DEBUG
		       "cmd at %p, .status = %04x, .command = %04x,"
		       " .b_next = %08x\n",
		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
		       SWAP32(cmd->b_next));
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
	do {
		printk(KERN_DEBUG
		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
		       " count %04x\n",
		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
		       SWAP16(rfd->count));
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
	do {
		printk(KERN_DEBUG
		       "   %p .count %04x, b_next %08x, b_data %08x,"
		       " size %04x\n",
		       rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
		       SWAP32(rbd->b_data), SWAP16(rbd->size));
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
	dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
}

static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
		if (skb == NULL)
			return -1;
		dma_addr = dma_map_single(dev->dev.parent, skb->data,
					  PKT_BUF_SZ, DMA_FROM_DEVICE);
		rbd->v_next = rbd+1;
		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = SWAP32(dma_addr);
		rbd->size = SWAP16(PKT_BUF_SZ);
	}
	lp->rbd_head = dma->rbds;
	rbd = dma->rbds + rx_ring_size - 1;
	rbd->v_next = dma->rbds;
	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
		rfd->cmd = SWAP16(CMD_FLEX);
	}
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd = dma->rfds;
	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
	rfd->v_prev = dma->rfds + rx_ring_size - 1;
	rfd = dma->rfds + rx_ring_size - 1;
	rfd->v_next = dma->rfds;
	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

	dma_sync_dev(dev, dma, sizeof(struct i596_dma));
	return 0;
}
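
/*
 * The receive side is thus two circular lists: rfds (frame descriptors)
 * and rbds (buffer descriptors, one PKT_BUF_SZ skb each).  Only the head
 * RFD carries a live rbd pointer; the chip follows the b_next links while
 * the driver walks v_next, and CMD_EOL on the tail RFD keeps the chip
 * from wrapping past descriptors the driver has not yet processed.
 */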

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dma_unmap_single(dev->dev.parent,
				 (dma_addr_t)SWAP32(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
	}
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		dma->rfds[i].rbd = I596_NULL;
		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
	}
	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	lp->rbd_head = dma->rbds;
	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

	dma_sync_dev(dev, dma, sizeof(struct i596_dma));
}


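/*
 * Chip bring-up: reset through the PORT register, hand over the SCP with
 * PORT_ALTSCP, ring channel attention (CA) and poll iscp.stat until the
 * chip clears ISCP_BUSY.  Only then is the SCB live, after which the
 * configure, set-address and TDR commands are queued like any others.
 */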
static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	mpu_port(dev, PORT_RESET, 0);
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat = SWAP32(ISCP_BUSY);
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	dma->scb.cmd = I596_NULL;

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
	dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));

	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: i82596 initialization successful\n",
			     dev->name));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		goto failed;
	}

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	dma->scb.command = 0;
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));

	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
	dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &dma->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
	dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &dma->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
	dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &dma->tdr_cmd.cmd);

	spin_lock_irqsave(&lp->lock, flags);

	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore(&lp->lock, flags);
		goto failed_free_irq;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	dma->scb.command = SWAP16(RX_START);
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));

	ca(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
		goto failed_free_irq;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: Receive unit started OK\n", dev->name));
	return 0;

failed_free_irq:
	free_irq(dev->irq, dev);
failed:
	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
	mpu_port(dev, PORT_RESET, 0);
	return -1;
}


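/*
 * Receive path: walk the RFD ring while the chip has marked frames
 * complete (STAT_C).  Good frames longer than rx_copybreak keep their
 * ring buffer and the slot is refilled with a fresh skb; shorter frames
 * are copied out so the large buffer can be reused immediately.
 */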
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG
				"i596_rx(), rfd_head %p, rbd_head %p\n",
				lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			dma_sync_cpu(dev, rbd, sizeof(struct i596_rbd));
		} else {
			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG
					"  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
					rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
			/* a good frame */
			int pkt_len = SWAP16(rbd->count) & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(dev->dev.parent,
						 (dma_addr_t)SWAP32(rbd->b_data),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb_ip_align(dev,
								   PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				dma_addr = dma_map_single(dev->dev.parent,
							  newskb->data,
							  PKT_BUF_SZ,
							  DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = SWAP32(dma_addr);
				dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
			} else {
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			}
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					dma_sync_single_for_cpu(dev->dev.parent,
								(dma_addr_t)SWAP32(rbd->b_data),
								PKT_BUF_SZ, DMA_FROM_DEVICE);
					skb_put_data(skb, rbd->v_data,
						     pkt_len);
					dma_sync_single_for_device(dev->dev.parent,
								   (dma_addr_t)SWAP32(rbd->b_data),
								   PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG
					       "%s: Error, rfd.stat = 0x%04x\n",
					       dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if (rfd->stat & SWAP16(0x0100))
				dev->stats.collisions++;
			if (rfd->stat & SWAP16(0x8000))
				dev->stats.rx_length_errors++;
			if (rfd->stat & SWAP16(0x0001))
				dev->stats.rx_over_errors++;
			if (rfd->stat & SWAP16(0x0002))
				dev->stats.rx_fifo_errors++;
			if (rfd->stat & SWAP16(0x0004))
				dev->stats.rx_frame_errors++;
			if (rfd->stat & SWAP16(0x0008))
				dev->stats.rx_crc_errors++;
			if (rfd->stat & SWAP16(0x0010))
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
		rfd->count = 0;

		/* Update record of next frame descriptor to process */

		lp->dma->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
		dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;		/* Next frame descriptor */
		dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch (SWAP16(ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
	lp->dma->scb.cmd = I596_NULL;
	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* Abort both the command and receive units */
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
	ca(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);

	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}


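/*
 * Queue a command on the CU list under lp->lock: if the list was empty the
 * SCB is pointed at the new command and CUC_START issued; otherwise the
 * old tail's b_next is patched to chain it.  A backlog of more than
 * max_cmd_backlog commands that is older than ticks_limit jiffies is
 * treated as a wedged command unit and forces a full reset.
 */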
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
			       lp->cmd_head));

	cmd->status = 0;
	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore(&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_ERR
		       "%s: command unit timed out, status resetting.\n",
		       dev->name);
#if 1
		i596_reset(dev, lp);
#endif
	}
}

static int i596_open(struct net_device *dev)
{
	DEB(DEB_OPEN, printk(KERN_DEBUG
			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (init_rx_bufs(dev)) {
		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
		return -EAGAIN;
	}
	if (init_i596_mem(dev)) {
		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;
	}
	netif_start_queue(dev);

	return 0;

out_remove_rx_bufs:
	remove_rx_bufs(dev);
	return -EAGAIN;
}

static void i596_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct i596_private *lp = netdev_priv(dev);

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS, printk(KERN_DEBUG
			       "%s: transmit timed out, status resetting.\n",
			       dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset(dev, lp);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
		dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
		ca(dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	netif_trans_update(dev);	/* prevent tx timeout */
	netif_wake_queue(dev);
}


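/*
 * Transmit: each ring slot pairs a tx_cmd with one i596_tbd; the skb is
 * DMA-mapped, described by a single TBD (EOF | length) and queued on the
 * CU list.  A slot whose cmd.command is still nonzero has not been reaped
 * by the interrupt handler yet, so the packet is dropped rather than
 * overwriting a command the chip may still own.
 */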
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX, printk(KERN_DEBUG
				"%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	netif_stop_queue(dev);

	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
	tbd = lp->dma->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: xmit ring full, dropping packet.\n",
				       dev->name));
		dev->stats.tx_dropped++;

		dev_kfree_skb_any(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = SWAP16(EOF | length);

		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
						  skb->len, DMA_TO_DEVICE);
		tbd->data = SWAP32(tx_cmd->dma_addr);

		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
		dma_sync_dev(dev, tx_cmd, sizeof(struct tx_cmd));
		dma_sync_dev(dev, tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i596_poll_controller,
#endif
};

static int i82596_probe(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	int ret;

	/* This lot is to ensure things have been cache line aligned. */
	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	memset(lp->dma, 0, sizeof(struct i596_dma));
	lp->dma->scb.command = 0;
	lp->dma->scb.cmd = I596_NULL;
	lp->dma->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));

	ret = register_netdev(dev);
	if (ret)
		return ret;

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
			      dev->name, dev->base_addr, dev->dev_addr,
			      dev->irq));
	DEB(DEB_INIT, printk(KERN_INFO
			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
			     dev->name, lp->dma, (int)sizeof(struct i596_dma),
			     &lp->dma->scb));

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	i596_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
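/*
 * Interrupt handler: status bits 0x8000/0x2000 mean the command unit
 * finished or went idle, so completed commands are reaped off the CU
 * list; bits 0x4000/0x1000 mean a frame arrived or the receive unit went
 * inactive, so the RFD ring is drained and RX restarted if needed.  The
 * handled bits are acked back through the SCB with a final CA pulse.
 */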
static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	struct i596_dma *dma;
	unsigned short status, ack_cmd = 0;

	lp = netdev_priv(dev);
	dma = lp->dma;

	spin_lock(&lp->lock);

	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	status = SWAP16(dma->scb.status);

	DEB(DEB_INTS, printk(KERN_DEBUG
			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			     dev->name, dev->irq, status));

	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: interrupt with no events\n",
				       dev->name));
		spin_unlock(&lp->lock);
		return IRQ_NONE;
	}

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt completed command.\n",
				   dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt command unit inactive %x.\n",
				   dev->name, status & 0x0700));

		while (lp->cmd_head != NULL) {
			dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS,
			    printk(KERN_DEBUG
				   "cmd_head->status = %04x, ->command = %04x\n",
				   SWAP16(lp->cmd_head->status),
				   SWAP16(lp->cmd_head->command)));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch (SWAP16(ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if (ptr->status & SWAP16(STAT_OK)) {
					DEB(DEB_TXADDR,
					    print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if (ptr->status & SWAP16(0x0020))
						dev->stats.collisions++;
					if (!(ptr->status & SWAP16(0x0040)))
						dev->stats.tx_heartbeat_errors++;
					if (ptr->status & SWAP16(0x0400))
						dev->stats.tx_carrier_errors++;
					if (ptr->status & SWAP16(0x0800))
						dev->stats.collisions++;
					if (ptr->status & SWAP16(0x1000))
						dev->stats.tx_aborted_errors++;
				}
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);
				dev_consume_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

				if (status & 0x8000) {
					DEB(DEB_ANY,
					    printk(KERN_DEBUG "%s: link ok.\n",
						   dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR
						       "%s: Transceiver problem.\n",
						       dev->name);
					if (status & 0x2000)
						printk(KERN_ERR
						       "%s: Termination problem.\n",
						       dev->name);
					if (status & 0x1000)
						printk(KERN_ERR
						       "%s: Short circuit.\n",
						       dev->name);

					DEB(DEB_TDR,
					    printk(KERN_DEBUG "%s: Time %d.\n",
						   dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
				/*
				 * Zap command so set_multicast_list() knows
				 * it is free
				 */
				ptr->command = 0;
				break;
			}
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
			dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* Strip the EL/SUSPEND/INTERRUPT bits from any commands
		 * still chained behind the head, so the restarted list runs
		 * through without stopping early.
		 */
		ptr = lp->cmd_head;
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= SWAP16(0x1fff);
			ptr = ptr->v_next;
			dma_sync_dev(dev, prev, sizeof(struct i596_cmd));
		}

		if (lp->cmd_head != NULL)
			ack_cmd |= CUC_START;
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
		dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt received a frame.\n",
				   dev->name));
		i596_rx(dev);
		/* Restart the receive unit only if it actually stopped */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,
				    printk(KERN_DEBUG
					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
					   dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	dma->scb.command = SWAP16(ack_cmd);
	dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	   acknowledgement aside from acking the 82596 might be needed
	   here...  but it's running acceptably without */

	ca(dev);

	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,
	    printk(KERN_DEBUG
		   "%s: Shutting down ethercard, status was %4.4x.\n",
		   dev->name, SWAP16(lp->dma->scb.status)));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "close1 timed out");
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));

	ca(dev);

	wait_cmd(dev, lp->dma, 100, "close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT, i596_display_data(dev));
	i596_cleanup_cmd(dev, lp);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}


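/*
 * Set or clear the multicast filter: rewrite the configure block when the
 * promisc/allmulti flags change, then (re)load the multicast address
 * list, truncated to MAX_MC_CNT entries.
 */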
static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int config = 0, cnt;

	DEB(DEB_MULTI,
	    printk(KERN_DEBUG
		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		   dev->name, netdev_mc_count(dev),
		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if ((dev->flags & IFF_PROMISC) &&
	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) &&
	    (dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) &&
	    (dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) &&
	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		if (dma->cf_cmd.cmd.command)
			printk(KERN_INFO
			       "%s: config change request already queued\n",
			       dev->name);
		else {
			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
			dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
			i596_add_cmd(dev, &dma->cf_cmd.cmd);
		}
	}

	cnt = netdev_mc_count(dev);
	if (cnt > MAX_MC_CNT) {
		cnt = MAX_MC_CNT;
		printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
		       dev->name, cnt);
	}

	if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		unsigned char *cp;
		struct mc_cmd *cmd;

		cmd = &dma->mc_cmd;
		cmd->cmd.command = SWAP16(CmdMulticastList);
		/* mc_cnt counts bytes; report only the addresses actually
		 * copied below (at most MAX_MC_CNT). */
		cmd->mc_cnt = SWAP16(cnt * 6);
		cp = cmd->mc_addrs;
		netdev_for_each_mc_addr(ha, dev) {
			if (!cnt--)
				break;
			memcpy(cp, ha->addr, ETH_ALEN);
			if (i596_debug > 1)
				DEB(DEB_MULTI,
				    printk(KERN_DEBUG
					   "%s: Adding address %pM\n",
					   dev->name, cp));
			cp += ETH_ALEN;
		}
		dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
		i596_add_cmd(dev, &cmd->cmd);
	}
}