/*
 * Granch SBNI12 leased line adapter driver.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/fcntl.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/Space.h>

#include <asm/io.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "sbni.h"

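/* Per-channel private data, kept in netdev_priv(dev). */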
struct net_local {
	struct timer_list	watchdog;

	spinlock_t	lock;
	struct sk_buff	*rx_buf_p;		/* receive buffer */
	struct sk_buff	*tx_buf_p;		/* transmit buffer */

	unsigned int	framelen;		/* current frame length */
	unsigned int	maxframe;		/* max payload per frame */
	unsigned int	state;			/* FL_* flags */
	unsigned int	inppos, outpos;		/* positions in rx/tx buffers */

	/* frames still to be transmitted for the current packet */
	unsigned int	tx_frameno;

	/* expected number of the next frame to be received */
	unsigned int	wait_frameno;

	/* transmit retries left before the line is declared down */
	unsigned int	trans_errors;

	/* watchdog ticks left before a timeout is declared */
	unsigned int	timer_ticks;

	/* receive level auto-selection state */
	int		delta_rxl;
	unsigned int	cur_rxl_index, timeout_rxl;
	unsigned long	cur_rxl_rcvd, prev_rxl_rcvd;

	struct sbni_csr1	csr1;		/* shadow of the CSR1 register */
	struct sbni_in_stats	in_stats;	/* driver-internal statistics */

	struct net_device	*second;	/* second channel of a dual adapter */

#ifdef CONFIG_SBNI_MULTILINE
	struct net_device	*master;
	struct net_device	*link;
#endif
};


static int  sbni_card_probe( unsigned long );
static int  sbni_pci_probe( struct net_device * );
static struct net_device *sbni_probe1(struct net_device *, unsigned long, int);
static int  sbni_open( struct net_device * );
static int  sbni_close( struct net_device * );
static netdev_tx_t sbni_start_xmit(struct sk_buff *,
				   struct net_device * );
static int  sbni_ioctl( struct net_device *, struct ifreq *, int );
static void  set_multicast_list( struct net_device * );

static irqreturn_t sbni_interrupt( int, void * );
static void  handle_channel( struct net_device * );
static int  recv_frame( struct net_device * );
static void  send_frame( struct net_device * );
static int  upload_data( struct net_device *,
			 unsigned, unsigned, unsigned, u32 );
static void  download_data( struct net_device *, u32 * );
static void  sbni_watchdog( unsigned long );
static void  interpret_ack( struct net_device *, unsigned );
static int  append_frame_to_pkt( struct net_device *, unsigned, u32 );
static void  indicate_pkt( struct net_device * );
static void  card_start( struct net_device * );
static void  prepare_to_send( struct sk_buff *, struct net_device * );
static void  drop_xmit_queue( struct net_device * );
static void  send_frame_header( struct net_device *, u32 * );
static int  skip_tail( unsigned int, unsigned int, u32 );
static int  check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * );
static void  change_level( struct net_device * );
static void  timeout_change_level( struct net_device * );
static u32  calc_crc32( u32, u8 *, u32 );
static struct sk_buff *  get_rx_buf( struct net_device * );
static int  sbni_init( struct net_device * );

#ifdef CONFIG_SBNI_MULTILINE
static int  enslave( struct net_device *, struct net_device * );
static int  emancipate( struct net_device * );
#endif

static const char  version[] =
	"Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";

static bool skip_pci_probe	__initdata = false;
static int  scandone	__initdata = 0;
static int  num		__initdata = 0;

static unsigned char  rxl_tab[];
static u32  crc32tab[];

static struct net_device  *sbni_cards[ SBNI_MAX_NUM_CARDS ];

static u32	io[ SBNI_MAX_NUM_CARDS ] __initdata =
	{ [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
static u32	irq[ SBNI_MAX_NUM_CARDS ] __initdata;
static u32	baud[ SBNI_MAX_NUM_CARDS ] __initdata;
static u32	rxl[ SBNI_MAX_NUM_CARDS ] __initdata =
	{ [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
static u32	mac[ SBNI_MAX_NUM_CARDS ] __initdata;

#ifndef MODULE
typedef u32  iarr[];
static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac };
#endif

static unsigned int  netcard_portlist[ ] __initdata = {
	0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254,
	0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4,
	0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
	0 };

#define NET_LOCAL_LOCK(dev)	(((struct net_local *)netdev_priv(dev))->lock)

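/*
 * Probe strategy (see sbni_init): a nonzero dev->base_addr is tried
 * directly as an ISA address, otherwise the PCI bus is searched, and
 * finally the ISA port list above is scanned.
 */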
static inline int __init
sbni_isa_probe( struct net_device *dev )
{
	if( dev->base_addr > 0x1ff &&
	    request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) &&
	    sbni_probe1( dev, dev->base_addr, dev->irq ) )

		return 0;
	else {
		pr_err("base address 0x%lx is busy, or adapter is malfunctioning!\n",
		       dev->base_addr);
		return -ENODEV;
	}
}

static const struct net_device_ops sbni_netdev_ops = {
	.ndo_open		= sbni_open,
	.ndo_stop		= sbni_close,
	.ndo_start_xmit		= sbni_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_do_ioctl		= sbni_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static void __init sbni_devsetup(struct net_device *dev)
{
	ether_setup( dev );
	dev->netdev_ops = &sbni_netdev_ops;
}

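/* Boot-time probe entry point (see net/Space.h): allocate, set up and register the "sbni<unit>" device. */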
int __init sbni_probe(int unit)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct net_local), "sbni",
			   NET_NAME_UNKNOWN, sbni_devsetup);
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &sbni_netdev_ops;

	sprintf(dev->name, "sbni%d", unit);
	netdev_boot_setup_check(dev);

	err = sbni_init(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	err = register_netdev(dev);
	if (err) {
		release_region( dev->base_addr, SBNI_IO_EXTENT );
		free_netdev(dev);
		return err;
	}
	pr_info_once("%s", version);
	return 0;
}

static int __init sbni_init(struct net_device *dev)
{
	int i;
	if( dev->base_addr )
		return sbni_isa_probe( dev );

	if( io[ num ] != -1 )
		dev->base_addr	= io[ num ],
		dev->irq	= irq[ num ];
	else if( scandone || io[ 0 ] != -1 )
		return -ENODEV;

	if( dev->base_addr )
		return sbni_isa_probe( dev );

	if( !skip_pci_probe && !sbni_pci_probe( dev ) )
		return 0;

	if( io[ num ] == -1 ) {
		scandone = 1;
		if( num > 0 )
			return -ENODEV;
	}

	for( i = 0;  netcard_portlist[ i ];  ++i ) {
		int ioaddr = netcard_portlist[ i ];
		if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name ) &&
		    sbni_probe1( dev, ioaddr, 0 ))
			return 0;
	}

	return -ENODEV;
}


static int __init
sbni_pci_probe( struct net_device *dev )
{
	struct pci_dev *pdev = NULL;

	while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev ))
	       != NULL ) {
		int pci_irq_line;
		unsigned long pci_ioaddr;

		if( pdev->vendor != SBNI_PCI_VENDOR &&
		    pdev->device != SBNI_PCI_DEVICE )
			continue;

		pci_ioaddr = pci_resource_start( pdev, 0 );
		pci_irq_line = pdev->irq;

		if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
			if (pdev->subsystem_device != 2)
				continue;

			if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT,
					    dev->name ) )
				continue;
		}

		if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
			pr_warn(
"WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!\n"
"You should use the PCI BIOS setup to assign a valid IRQ line.\n",
				pci_irq_line );

		if( (pci_ioaddr & 7) == 0 && pci_enable_device( pdev ) ) {
			release_region( pci_ioaddr, SBNI_IO_EXTENT );
			pci_dev_put( pdev );
			return -EIO;
		}
		if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) {
			SET_NETDEV_DEV(dev, &pdev->dev);

			pci_dev_put( pdev );
			return 0;
		}
	}
	return -ENODEV;
}

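/*
 * Detect the adapter at the given I/O address, auto-probe the IRQ when it
 * was not supplied, build the station MAC address and fill in the
 * per-channel defaults (frame size, baud rate, receive level).
 */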
static struct net_device * __init
sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
{
	struct net_local *nl;

	if( sbni_card_probe( ioaddr ) ) {
		release_region( ioaddr, SBNI_IO_EXTENT );
		return NULL;
	}

	outb( 0, ioaddr + CSR0 );

	if( irq < 2 ) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		outb( EN_INT | TR_REQ, ioaddr + CSR0 );
		outb( PR_RES, ioaddr + CSR1 );
		mdelay(50);
		irq = probe_irq_off(irq_mask);
		outb( 0, ioaddr + CSR0 );

		if( !irq ) {
			pr_err("%s: can't detect device irq!\n", dev->name);
			release_region( ioaddr, SBNI_IO_EXTENT );
			return NULL;
		}
	} else if( irq == 2 )
		irq = 9;

	dev->irq = irq;
	dev->base_addr = ioaddr;

	nl = netdev_priv(dev);
	if( !nl ) {
		pr_err("%s: unable to get memory!\n", dev->name);
		release_region( ioaddr, SBNI_IO_EXTENT );
		return NULL;
	}

	memset( nl, 0, sizeof(struct net_local) );
	spin_lock_init( &nl->lock );

	*(__be16 *)dev->dev_addr = htons( 0x00ff );
	*(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
		((mac[num] ?
		  mac[num] :
		  (u32)((long)netdev_priv(dev))) & 0x00ffffff));

	nl->maxframe  = DEFAULT_FRAME_LEN;
	nl->csr1.rate = baud[ num ];

	if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
		nl->cur_rxl_index = DEF_RXL,
		nl->delta_rxl = DEF_RXL_DELTA;
	else
		nl->delta_rxl = 0;
	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
	if( inb( ioaddr + CSR0 ) & 0x01 )
		nl->state |= FL_SLOW_MODE;

	pr_notice("%s: ioaddr %#lx, irq %d, MAC: 00:ff:01:%02x:%02x:%02x\n",
		  dev->name, dev->base_addr, dev->irq,
		  ((u8 *)dev->dev_addr)[3],
		  ((u8 *)dev->dev_addr)[4],
		  ((u8 *)dev->dev_addr)[5]);

	pr_notice("%s: speed %d",
		  dev->name,
		  ((nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
		  / (1 << nl->csr1.rate));

	if( nl->delta_rxl == 0 )
		pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index);
	else
		pr_cont(", receive level (auto)\n");

#ifdef CONFIG_SBNI_MULTILINE
	nl->master = dev;
	nl->link   = NULL;
#endif

	sbni_cards[ num++ ] = dev;
	return dev;
}

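/*
 * Transmit path.  With CONFIG_SBNI_MULTILINE a packet is handed to the
 * first idle channel in the master's bundle; otherwise it goes straight
 * to this device's channel.
 */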
#ifdef CONFIG_SBNI_MULTILINE

static netdev_tx_t
sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
{
	struct net_device *p;

	netif_stop_queue( dev );

	for( p = dev;  p; ) {
		struct net_local *nl = netdev_priv(p);
		spin_lock( &nl->lock );
		if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
			p = nl->link;
			spin_unlock( &nl->lock );
		} else {
			prepare_to_send( skb, p );
			spin_unlock( &nl->lock );
			netif_start_queue( dev );
			return NETDEV_TX_OK;
		}
	}

	return NETDEV_TX_BUSY;
}

#else

static netdev_tx_t
sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);

	netif_stop_queue( dev );
	spin_lock( &nl->lock );

	prepare_to_send( skb, dev );

	spin_unlock( &nl->lock );
	return NETDEV_TX_OK;
}

#endif

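/*
 * Interrupt handler.  Both channels of a dual adapter share one IRQ, so
 * the handler keeps servicing whichever channel (this one or ->second)
 * still has RC_RDY or TR_RDY pending before returning.
 */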
static irqreturn_t
sbni_interrupt( int irq, void *dev_id )
{
	struct net_device *dev = dev_id;
	struct net_local *nl = netdev_priv(dev);
	int repeat;

	spin_lock( &nl->lock );
	if( nl->second )
		spin_lock(&NET_LOCAL_LOCK(nl->second));

	do {
		repeat = 0;
		if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
			handle_channel( dev ),
			repeat = 1;
		if( nl->second &&
		    (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
			handle_channel( nl->second ),
			repeat = 1;
	} while( repeat );

	if( nl->second )
		spin_unlock(&NET_LOCAL_LOCK(nl->second));
	spin_unlock( &nl->lock );
	return IRQ_HANDLED;
}


static void
handle_channel( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	int req_ans;
	unsigned char csr0;

#ifdef CONFIG_SBNI_MULTILINE
	if( nl->state & FL_SLAVE )
		spin_lock(&NET_LOCAL_LOCK(nl->master));
#endif

	outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );

	nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
	for(;;) {
		csr0 = inb( ioaddr + CSR0 );
		if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
			break;

		req_ans = !(nl->state & FL_PREV_OK);

		if( csr0 & RC_RDY )
			req_ans = recv_frame( dev );

		csr0 = inb( ioaddr + CSR0 );
		if( !(csr0 & TR_RDY) || (csr0 & RC_RDY) )
			netdev_err(dev, "internal error!\n");

		if( req_ans || nl->tx_frameno != 0 )
			send_frame( dev );
		else
			outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
	}

	outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );

#ifdef CONFIG_SBNI_MULTILINE
	if( nl->state & FL_SLAVE )
		spin_unlock(&NET_LOCAL_LOCK(nl->master));
#endif
}

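/*
 * Receive one frame from the FIFO.  Returns nonzero when an answer frame
 * must be sent to the peer (a data frame arrived, or the frame was bad).
 */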
static int
recv_frame( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	u32 crc = CRC32_INITIAL;

	unsigned framelen = 0, frameno, ack;
	unsigned is_first, frame_ok = 0;

	if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
		frame_ok = framelen > 4
			? upload_data( dev, framelen, frameno, is_first, crc )
			: skip_tail( ioaddr, framelen, crc );
		if( frame_ok )
			interpret_ack( dev, ack );
	}

	outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
	if( frame_ok ) {
		nl->state |= FL_PREV_OK;
		if( framelen > 4 )
			nl->in_stats.all_rx_number++;
	} else
		nl->state &= ~FL_PREV_OK,
		change_level( dev ),
		nl->in_stats.all_rx_number++,
		nl->in_stats.bad_rx_number++;

	return !frame_ok || framelen > 4;
}

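/*
 * Transmit (or retransmit) one frame.  After TR_ERROR_COUNT failed
 * attempts the transmit queue is dropped and, for bundled channels, the
 * line is marked down.
 */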
static void
send_frame( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);

	u32 crc = CRC32_INITIAL;

	if( nl->state & FL_NEED_RESEND ) {

		if( nl->trans_errors ) {
			--nl->trans_errors;
			if( nl->framelen != 0 )
				nl->in_stats.resend_tx_number++;
		} else {
#ifdef CONFIG_SBNI_MULTILINE
			if( (nl->state & FL_SLAVE) || nl->link )
#endif
			nl->state |= FL_LINE_DOWN;
			drop_xmit_queue( dev );
			goto do_send;
		}
	} else
		nl->trans_errors = TR_ERROR_COUNT;

	send_frame_header( dev, &crc );
	nl->state |= FL_NEED_RESEND;

	if( nl->framelen ) {
		download_data( dev, &crc );
		nl->in_stats.all_tx_number++;
		nl->state |= FL_WAIT_ACK;
	}

	outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );

do_send:
	outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );

	if( nl->tx_frameno )
		outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
		      dev->base_addr + CSR0 );
}

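/*
 * Push the current frame's payload from the tx skb into the adapter's
 * data FIFO, zero-padding the tail of a short last frame, and fold the
 * transmitted bytes into the running CRC.
 */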
static void
download_data( struct net_device *dev, u32 *crc_p )
{
	struct net_local *nl = netdev_priv(dev);
	struct sk_buff *skb = nl->tx_buf_p;

	unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);

	outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
	*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );

	for( len = nl->framelen - len;  len--; )
		outb( 0, dev->base_addr + DAT ),
		*crc_p = CRC32( 0, *crc_p );
}


static int
upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
	     unsigned is_first, u32 crc )
{
	struct net_local *nl = netdev_priv(dev);

	int frame_ok;

	if( is_first )
		nl->wait_frameno = frameno,
		nl->inppos = 0;

	if( nl->wait_frameno == frameno ) {

		if( nl->inppos + framelen <= ETHER_MAX_LEN )
			frame_ok = append_frame_to_pkt( dev, framelen, crc );

		else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
			 != 0 )
			nl->wait_frameno = 0,
			nl->inppos = 0,
#ifdef CONFIG_SBNI_MULTILINE
			nl->master->stats.rx_errors++,
			nl->master->stats.rx_missed_errors++;
#else
			dev->stats.rx_errors++,
			dev->stats.rx_missed_errors++;
#endif

	} else
		frame_ok = skip_tail( dev->base_addr, framelen, crc );

	if( is_first && !frame_ok )
		nl->wait_frameno = 0,
#ifdef CONFIG_SBNI_MULTILINE
		nl->master->stats.rx_errors++,
		nl->master->stats.rx_crc_errors++;
#else
		dev->stats.rx_errors++,
		dev->stats.rx_crc_errors++;
#endif

	return frame_ok;
}


static inline void
send_complete( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);

#ifdef CONFIG_SBNI_MULTILINE
	nl->master->stats.tx_packets++;
	nl->master->stats.tx_bytes += nl->tx_buf_p->len;
#else
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += nl->tx_buf_p->len;
#endif
	dev_kfree_skb_irq( nl->tx_buf_p );

	nl->tx_buf_p = NULL;

	nl->outpos = 0;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
	nl->framelen = 0;
}


static void
interpret_ack( struct net_device *dev, unsigned ack )
{
	struct net_local *nl = netdev_priv(dev);

	if( ack == FRAME_SENT_OK ) {
		nl->state &= ~FL_NEED_RESEND;

		if( nl->state & FL_WAIT_ACK ) {
			nl->outpos += nl->framelen;

			if( --nl->tx_frameno )
				nl->framelen = min_t(unsigned int,
						     nl->maxframe,
						     nl->tx_buf_p->len - nl->outpos);
			else
				send_complete( dev ),
#ifdef CONFIG_SBNI_MULTILINE
				netif_wake_queue( nl->master );
#else
				netif_wake_queue( dev );
#endif
		}
	}

	nl->state &= ~FL_WAIT_ACK;
}

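/*
 * Read one frame's payload into the receive buffer and verify its CRC.
 * Returns nonzero on success; when the last expected frame arrives the
 * assembled packet is handed up the stack.
 */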
static int
append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
{
	struct net_local *nl = netdev_priv(dev);

	u8 *p;

	if( nl->inppos + framelen > ETHER_MAX_LEN )
		return 0;

	if( !nl->rx_buf_p && !(nl->rx_buf_p = get_rx_buf( dev )) )
		return 0;

	p = nl->rx_buf_p->data + nl->inppos;
	insb( dev->base_addr + DAT, p, framelen );
	if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
		return 0;

	nl->inppos += framelen - 4;
	if( --nl->wait_frameno == 0 )
		indicate_pkt( dev );

	return 1;
}

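/*
 * Queue an skb for transmission: split it into up to maxframe-sized
 * frames, remember the frame count, and raise the transmit request.
 */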
static void
prepare_to_send( struct sk_buff *skb, struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);

	unsigned int len;

	if( nl->tx_buf_p )
		netdev_err(dev, "memory leak!\n");

	nl->outpos = 0;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);

	len = skb->len;
	if( len < SBNI_MIN_LEN )
		len = SBNI_MIN_LEN;

	nl->tx_buf_p	= skb;
	nl->tx_frameno	= DIV_ROUND_UP(len, nl->maxframe);
	nl->framelen	= len < nl->maxframe ? len : nl->maxframe;

	outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
#ifdef CONFIG_SBNI_MULTILINE
	nl->master->trans_start = jiffies;
#else
	dev->trans_start = jiffies;
#endif
}


static void
drop_xmit_queue( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);

	if( nl->tx_buf_p )
		dev_kfree_skb_any( nl->tx_buf_p ),
		nl->tx_buf_p = NULL,
#ifdef CONFIG_SBNI_MULTILINE
		nl->master->stats.tx_errors++,
		nl->master->stats.tx_carrier_errors++;
#else
		dev->stats.tx_errors++,
		dev->stats.tx_carrier_errors++;
#endif

	nl->tx_frameno	= 0;
	nl->framelen	= 0;
	nl->outpos	= 0;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
#ifdef CONFIG_SBNI_MULTILINE
	netif_start_queue( nl->master );
	nl->master->trans_start = jiffies;
#else
	netif_start_queue( dev );
	dev->trans_start = jiffies;
#endif
}

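/*
 * Write the frame header to the FIFO: the SBNI_SIG signature, a 16-bit
 * length word carrying the FRAME_FIRST/FRAME_RETRY and ack flags, the
 * frame number and a reserved zero byte, updating the CRC as we go.
 */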
static void
send_frame_header( struct net_device *dev, u32 *crc_p )
{
	struct net_local *nl = netdev_priv(dev);

	u32 crc = *crc_p;
	u32 len_field = nl->framelen + 6;
	u8  value;

	if( nl->state & FL_NEED_RESEND )
		len_field |= FRAME_RETRY;

	if( nl->outpos == 0 )
		len_field |= FRAME_FIRST;

	len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
	outb( SBNI_SIG, dev->base_addr + DAT );

	value = (u8) len_field;
	outb( value, dev->base_addr + DAT );
	crc = CRC32( value, crc );
	value = (u8) (len_field >> 8);
	outb( value, dev->base_addr + DAT );
	crc = CRC32( value, crc );

	outb( nl->tx_frameno, dev->base_addr + DAT );
	crc = CRC32( nl->tx_frameno, crc );
	outb( 0, dev->base_addr + DAT );
	crc = CRC32( 0, crc );
	*crc_p = crc;
}

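/*
 * Drain the remaining bytes of a frame from the FIFO, folding them into
 * the CRC; returns nonzero if the CRC comes out right.
 */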
static int
skip_tail( unsigned int ioaddr, unsigned int tail_len, u32 crc )
{
	while( tail_len-- )
		crc = CRC32( inb( ioaddr + DAT ), crc );

	return crc == CRC32_REMAINDER;
}

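/*
 * Read and validate the frame header: check the signature, pull out the
 * length/flag word and the frame number, update the running CRC, and
 * adjust *framelen to the bytes still left in the FIFO.  Returns nonzero
 * if the header looks valid.
 */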
static int
check_fhdr( u32 ioaddr, u32 *framelen, u32 *frameno, u32 *ack,
	    u32 *is_first, u32 *crc_p )
{
	u32 crc = *crc_p;
	u8  value;

	if( inb( ioaddr + DAT ) != SBNI_SIG )
		return 0;

	value = inb( ioaddr + DAT );
	*framelen = (u32)value;
	crc = CRC32( value, crc );
	value = inb( ioaddr + DAT );
	*framelen |= ((u32)value) << 8;
	crc = CRC32( value, crc );

	*ack = *framelen & FRAME_ACK_MASK;
	*is_first = (*framelen & FRAME_FIRST) != 0;

	if( (*framelen &= FRAME_LEN_MASK) < 6 ||
	    *framelen > SBNI_MAX_FRAME - 3 )
		return 0;

	value = inb( ioaddr + DAT );
	*frameno = (u32)value;
	crc = CRC32( value, crc );

	crc = CRC32( inb( ioaddr + DAT ), crc );
	*framelen -= 2;

	*crc_p = crc;
	return 1;
}


static struct sk_buff *
get_rx_buf( struct net_device *dev )
{
	struct sk_buff *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
	if( !skb )
		return NULL;

	skb_reserve( skb, 2 );
	return skb;
}


static void
indicate_pkt( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);
	struct sk_buff *skb = nl->rx_buf_p;

	skb_put( skb, nl->inppos );

#ifdef CONFIG_SBNI_MULTILINE
	skb->protocol = eth_type_trans( skb, nl->master );
	netif_rx( skb );
	++nl->master->stats.rx_packets;
	nl->master->stats.rx_bytes += nl->inppos;
#else
	skb->protocol = eth_type_trans( skb, dev );
	netif_rx( skb );
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += nl->inppos;
#endif
	nl->rx_buf_p = NULL;
}

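/*
 * Watchdog, run every SBNI_TIMEOUT jiffies.  While CSR0 still shows
 * RC_CHK, timer_ticks is counted down (when RC_RDY or BU_EMP is seen);
 * once it reaches zero a timeout is recorded, the receive level may be
 * stepped (timeout_change_level) and the receiver is reset with PR_RES.
 * When RC_CHK has been cleared, the FL_LINE_DOWN flag is dropped.  The
 * RC_CHK bit and the timer itself are then re-armed.
 */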
static void
sbni_watchdog( unsigned long arg )
{
	struct net_device *dev = (struct net_device *) arg;
	struct net_local *nl = netdev_priv(dev);
	struct timer_list *w = &nl->watchdog;
	unsigned long flags;
	unsigned char csr0;

	spin_lock_irqsave( &nl->lock, flags );

	csr0 = inb( dev->base_addr + CSR0 );
	if( csr0 & RC_CHK ) {

		if( nl->timer_ticks ) {
			if( csr0 & (RC_RDY | BU_EMP) )
				nl->timer_ticks--;
		} else {
			nl->in_stats.timeout_number++;
			if( nl->delta_rxl )
				timeout_change_level( dev );

			outb( *(u_char *)&nl->csr1 | PR_RES,
			      dev->base_addr + CSR1 );
			csr0 = inb( dev->base_addr + CSR0 );
		}
	} else
		nl->state &= ~FL_LINE_DOWN;

	outb( csr0 | RC_CHK, dev->base_addr + CSR0 );

	init_timer( w );
	w->expires	= jiffies + SBNI_TIMEOUT;
	w->data		= arg;
	w->function	= sbni_watchdog;
	add_timer( w );

	spin_unlock_irqrestore( &nl->lock, flags );
}


static unsigned char rxl_tab[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
	0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
};

#define SIZE_OF_TIMEOUT_RXL_TAB 4
static unsigned char timeout_rxl_tab[] = {
	0x03, 0x05, 0x08, 0x0b
};

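/* Reset the adapter and bring the per-channel software state to its defaults. */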
static void
card_start( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);

	nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
	nl->state |= FL_PREV_OK;

	nl->inppos = nl->outpos = 0;
	nl->wait_frameno = 0;
	nl->tx_frameno	 = 0;
	nl->framelen	 = 0;

	outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
	outb( EN_INT, dev->base_addr + CSR0 );
}

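/*
 * Receive level auto-selection: after a bad frame, step cur_rxl_index up
 * or down (reversing direction when the hit rate got worse) and program
 * the new level into CSR1.
 */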
static void
change_level( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);

	if( nl->delta_rxl == 0 )
		return;

	if( nl->cur_rxl_index == 0 )
		nl->delta_rxl = 1;
	else if( nl->cur_rxl_index == 15 )
		nl->delta_rxl = -1;
	else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
		nl->delta_rxl = -nl->delta_rxl;

	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
	inb( dev->base_addr + CSR0 );
	outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );

	nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
	nl->cur_rxl_rcvd  = 0;
}


static void
timeout_change_level( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);

	nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
	if( ++nl->timeout_rxl >= 4 )
		nl->timeout_rxl = 0;

	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
	inb( dev->base_addr + CSR0 );
	outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );

	nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
	nl->cur_rxl_rcvd  = 0;
}

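/*
 * Open the channel: when this is the second channel of a dual ISA card
 * (same IRQ, base address +/- 4, other channel already up) piggyback on
 * the other channel's interrupt handler, otherwise request the IRQ.
 * Then reset the card and start the watchdog timer.
 */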
static int
sbni_open( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);
	struct timer_list *w = &nl->watchdog;

	if( dev->base_addr < 0x400 ) {
		struct net_device **p = sbni_cards;
		for( ;  *p && p < sbni_cards + SBNI_MAX_NUM_CARDS;  ++p )
			if( (*p)->irq == dev->irq &&
			    ((*p)->base_addr == dev->base_addr + 4 ||
			     (*p)->base_addr == dev->base_addr - 4) &&
			    (*p)->flags & IFF_UP ) {

				((struct net_local *) (netdev_priv(*p)))
					->second = dev;
				netdev_notice(dev, "using shared irq with %s\n",
					      (*p)->name);
				nl->state |= FL_SECONDARY;
				goto handler_attached;
			}
	}

	if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) {
		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
		return -EAGAIN;
	}

handler_attached:

	spin_lock( &nl->lock );
	memset( &dev->stats, 0, sizeof(struct net_device_stats) );
	memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );

	card_start( dev );

	netif_start_queue( dev );

	init_timer( w );
	w->expires	= jiffies + SBNI_TIMEOUT;
	w->data		= (unsigned long) dev;
	w->function	= sbni_watchdog;
	add_timer( w );

	spin_unlock( &nl->lock );
	return 0;
}


static int
sbni_close( struct net_device *dev )
{
	struct net_local *nl = netdev_priv(dev);

	if( nl->second && nl->second->flags & IFF_UP ) {
		netdev_notice(dev, "Secondary channel (%s) is active!\n",
			      nl->second->name);
		return -EBUSY;
	}

#ifdef CONFIG_SBNI_MULTILINE
	if( nl->state & FL_SLAVE )
		emancipate( dev );
	else
		while( nl->link )
			emancipate( nl->link );
#endif

	spin_lock( &nl->lock );

	nl->second = NULL;
	drop_xmit_queue( dev );
	netif_stop_queue( dev );

	del_timer( &nl->watchdog );

	outb( 0, dev->base_addr + CSR0 );

	if( !(nl->state & FL_SECONDARY) )
		free_irq( dev->irq, dev );
	nl->state &= FL_SECONDARY;

	spin_unlock( &nl->lock );
	return 0;
}

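/*
 * Bitmask of CSR0 upper-nibble values that a present adapter may show
 * during probing (after sbni_card_probe masks EN_INT and folds BU_EMP
 * into it); any other value means "no card here".
 */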
#define VALID_DECODER	(2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)


static int
sbni_card_probe( unsigned long ioaddr )
{
	unsigned char csr0;

	csr0 = inb( ioaddr + CSR0 );
	if( csr0 != 0xff && csr0 != 0x00 ) {
		csr0 &= ~EN_INT;
		if( csr0 & BU_EMP )
			csr0 |= EN_INT;

		if( VALID_DECODER & (1 << (csr0 >> 4)) )
			return 0;
	}

	return -ENODEV;
}

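/*
 * Private ioctls: read or reset the driver-internal statistics, get or
 * set the hardware state (baud rate, receive level) and, with
 * CONFIG_SBNI_MULTILINE, attach or detach slave channels.
 */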
static int
sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
{
	struct net_local *nl = netdev_priv(dev);
	struct sbni_flags flags;
	int error = 0;

#ifdef CONFIG_SBNI_MULTILINE
	struct net_device *slave_dev;
	char slave_name[ 8 ];
#endif

	switch( cmd ) {
	case SIOCDEVGETINSTATS :
		if (copy_to_user( ifr->ifr_data, &nl->in_stats,
				  sizeof(struct sbni_in_stats) ))
			error = -EFAULT;
		break;

	case SIOCDEVRESINSTATS :
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
		break;

	case SIOCDEVGHWSTATE :
		flags.mac_addr	= *(u32 *)(dev->dev_addr + 3);
		flags.rate	= nl->csr1.rate;
		flags.slow_mode	= (nl->state & FL_SLOW_MODE) != 0;
		flags.rxl	= nl->cur_rxl_index;
		flags.fixed_rxl	= nl->delta_rxl == 0;

		if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
			error = -EFAULT;
		break;

	case SIOCDEVSHWSTATE :
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock( &nl->lock );
		flags = *(struct sbni_flags*) &ifr->ifr_ifru;
		if( flags.fixed_rxl )
			nl->delta_rxl = 0,
			nl->cur_rxl_index = flags.rxl;
		else
			nl->delta_rxl = DEF_RXL_DELTA,
			nl->cur_rxl_index = DEF_RXL;

		nl->csr1.rxl  = rxl_tab[ nl->cur_rxl_index ];
		nl->csr1.rate = flags.rate;
		outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
		spin_unlock( &nl->lock );
		break;

#ifdef CONFIG_SBNI_MULTILINE

	case SIOCDEVENSLAVE :
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
			return -EFAULT;
		slave_dev = dev_get_by_name(&init_net, slave_name );
		if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
			netdev_err(dev, "trying to enslave non-active device %s\n",
				   slave_name);
			return -EPERM;
		}

		return enslave( dev, slave_dev );

	case SIOCDEVEMANSIPATE :
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		return emancipate( dev );

#endif

	default :
		return -EOPNOTSUPP;
	}

	return error;
}


#ifdef CONFIG_SBNI_MULTILINE

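/*
 * Multiline bundling: a slave channel is inserted into the master's
 * ->link list, its own queue is stopped, and from then on its traffic is
 * accounted to the master device.
 */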
static int
enslave( struct net_device *dev, struct net_device *slave_dev )
{
	struct net_local *nl  = netdev_priv(dev);
	struct net_local *snl = netdev_priv(slave_dev);

	if( nl->state & FL_SLAVE )
		return -EBUSY;

	if( snl->state & FL_SLAVE )
		return -EBUSY;

	spin_lock( &nl->lock );
	spin_lock( &snl->lock );

	snl->link = nl->link;
	nl->link  = slave_dev;
	snl->master = dev;
	snl->state |= FL_SLAVE;

	memset( &slave_dev->stats, 0, sizeof(struct net_device_stats) );
	netif_stop_queue( slave_dev );
	netif_wake_queue( dev );

	spin_unlock( &snl->lock );
	spin_unlock( &nl->lock );
	netdev_notice(dev, "slave device (%s) attached\n", slave_dev->name);
	return 0;
}


static int
emancipate( struct net_device *dev )
{
	struct net_local *snl = netdev_priv(dev);
	struct net_device *p  = snl->master;
	struct net_local *nl  = netdev_priv(p);

	if( !(snl->state & FL_SLAVE) )
		return -EINVAL;

	spin_lock( &nl->lock );
	spin_lock( &snl->lock );
	drop_xmit_queue( dev );

	for(;;) {
		struct net_local *t = netdev_priv(p);
		if( t->link == dev ) {
			t->link = snl->link;
			break;
		}
		p = t->link;
	}

	snl->link = NULL;
	snl->master = dev;
	snl->state &= ~FL_SLAVE;

	netif_start_queue( dev );

	spin_unlock( &snl->lock );
	spin_unlock( &nl->lock );

	dev_put( dev );
	return 0;
}

#endif

static void
set_multicast_list( struct net_device *dev )
{
	return;
}


#ifdef MODULE
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(baud, int, NULL, 0);
module_param_array(rxl, int, NULL, 0);
module_param_array(mac, int, NULL, 0);
module_param(skip_pci_probe, bool, 0);

MODULE_LICENSE("GPL");


int __init init_module( void )
{
	struct net_device *dev;
	int err;

	while( num < SBNI_MAX_NUM_CARDS ) {
		dev = alloc_netdev(sizeof(struct net_local), "sbni%d",
				   NET_NAME_UNKNOWN, sbni_devsetup);
		if( !dev )
			break;

		sprintf( dev->name, "sbni%d", num );

		err = sbni_init(dev);
		if (err) {
			free_netdev(dev);
			break;
		}

		if( register_netdev( dev ) ) {
			release_region( dev->base_addr, SBNI_IO_EXTENT );
			free_netdev( dev );
			break;
		}
	}

	return *sbni_cards ? 0 : -ENODEV;
}

void
cleanup_module(void)
{
	int i;

	for (i = 0;  i < SBNI_MAX_NUM_CARDS;  ++i) {
		struct net_device *dev = sbni_cards[i];
		if (dev != NULL) {
			unregister_netdev(dev);
			release_region(dev->base_addr, SBNI_IO_EXTENT);
			free_netdev(dev);
		}
	}
}

#else

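/*
 * Parse the "sbni=" boot parameter.  As implemented below the expected
 * form is a parenthesized, semicolon-separated list of per-card entries,
 * each entry being up to five comma-separated numbers in the order
 * io,irq,baud,rxl,mac - for example (values are illustrative only):
 *
 *	sbni=(0x210,5;0x214,5)
 */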
static int __init
sbni_setup( char *p )
{
	int n, parm;

	if( *p++ != '(' )
		goto bad_param;

	for( n = 0, parm = 0;  *p && n < 8; ) {
		(*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
		if( !*p || *p == ')' )
			return 1;
		if( *p == ';' )
			++p, ++n, parm = 0;
		else if( *p++ != ',' )
			break;
		else if( ++parm >= 5 )
			break;
	}
bad_param:
	pr_err("Error in sbni kernel parameter!\n");
	return 0;
}

__setup( "sbni=", sbni_setup );

#endif

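/* Byte-at-a-time CRC helper; the single-byte CRC32() step comes from sbni.h. */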
static u32
calc_crc32( u32 crc, u8 *p, u32 len )
{
	while( len-- )
		crc = CRC32( *p++, crc );

	return crc;
}

static u32 crc32tab[] __attribute__ ((aligned(8))) = {
	0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
	0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
	0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605,
	0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C,
	0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53,
	0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A,
	0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661,
	0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278,
	0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF,
	0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6,
	0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD,
	0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4,
	0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B,
	0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82,
	0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9,
	0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0,
	0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7,
	0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE,
	0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795,
	0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C,
	0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3,
	0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA,
	0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1,
	0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8,
	0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F,
	0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76,
	0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D,
	0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344,
	0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B,
	0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12,
	0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739,
	0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320,
	0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17,
	0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E,
	0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525,
	0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C,
	0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73,
	0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A,
	0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541,
	0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158,
	0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF,
	0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6,
	0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED,
	0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4,
	0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB,
	0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2,
	0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589,
	0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190,
	0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87,
	0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E,
	0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5,
	0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC,
	0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3,
	0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA,
	0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1,
	0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8,
	0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F,
	0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856,
	0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D,
	0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064,
	0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B,
	0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832,
	0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419,
	0x660951BA, 0x110E612C, 0x88073096, 0xFF000000
};