/*
 * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
 *           and other boards based on the Tigon I and Tigon II chips
 *           (3Com 3C985, NetGear GA620/GA620T, Farallon PN9000-SX and
 *           PN9100-T, SGI AceNIC).
 *
 * Maintained by Jes Sorensen <jes@trained-monkey.org>.
 *
 * Released under the GNU General Public License; see the
 * MODULE_LICENSE() tag below.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sockios.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif

#include <net/sock.h>
#include <net/ip.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>


#define DRV_NAME "acenic"

#undef INDEX_DEBUG

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap)	0
#define ACE_TX_RING_ENTRIES(ap)	MAX_TX_RING_ENTRIES
#else
#define ACE_IS_TIGON_I(ap)	(ap->version == 1)
#define ACE_TX_RING_ENTRIES(ap)	ap->tx_ring_entries
#endif

#ifndef PCI_VENDOR_ID_ALTEON
#define PCI_VENDOR_ID_ALTEON		0x12ae
#endif
#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE  0x0001
#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
#endif
#ifndef PCI_DEVICE_ID_3COM_3C985
#define PCI_DEVICE_ID_3COM_3C985	0x0001
#endif
#ifndef PCI_VENDOR_ID_NETGEAR
#define PCI_VENDOR_ID_NETGEAR		0x1385
#define PCI_DEVICE_ID_NETGEAR_GA620	0x620a
#endif
#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
#define PCI_DEVICE_ID_NETGEAR_GA620T	0x630a
#endif

#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
#define PCI_DEVICE_ID_FARALLON_PN9000SX	0x1a
#endif
#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
#define PCI_DEVICE_ID_FARALLON_PN9100T	0xfa
#endif
#ifndef PCI_VENDOR_ID_SGI
#define PCI_VENDOR_ID_SGI		0x10a9
#endif
#ifndef PCI_DEVICE_ID_SGI_ACENIC
#define PCI_DEVICE_ID_SGI_ACENIC	0x0009
#endif

static const struct pci_device_id acenic_pci_tbl[] = {
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	/*
	 * Farallon boards register under the DEC vendor ID (PN9000-SX)
	 * and the Alteon vendor ID (PN9100-T).
	 */
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ }
};
MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);

#define ace_sync_irq(irq)	synchronize_irq(irq)

#ifndef offset_in_page
#define offset_in_page(ptr)	((unsigned long)(ptr) & ~PAGE_MASK)
#endif

#define ACE_MAX_MOD_PARMS	8
#define BOARD_IDX_STATIC	0
#define BOARD_IDX_OVERFLOW	-1

#include "acenic.h"

#define MAX_TEXT_LEN	96*1024
#define MAX_RODATA_LEN	8*1024
#define MAX_DATA_LEN	2*1024

#ifndef tigon2FwReleaseLocal
#define tigon2FwReleaseLocal 0
#endif

#define RX_RING_SIZE		72
#define RX_MINI_SIZE		64
#define RX_JUMBO_SIZE		48

#define RX_PANIC_STD_THRES	16
#define RX_PANIC_STD_REFILL	(3*RX_PANIC_STD_THRES)/2
#define RX_LOW_STD_THRES	(3*RX_RING_SIZE)/4
#define RX_PANIC_MINI_THRES	12
#define RX_PANIC_MINI_REFILL	(3*RX_PANIC_MINI_THRES)/2
#define RX_LOW_MINI_THRES	(3*RX_MINI_SIZE)/4
#define RX_PANIC_JUMBO_THRES	6
#define RX_PANIC_JUMBO_REFILL	(3*RX_PANIC_JUMBO_THRES)/2
#define RX_LOW_JUMBO_THRES	(3*RX_JUMBO_SIZE)/4
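
/*
 * Worked example of the thresholds above (added for illustration, not
 * from the original text): with RX_PANIC_STD_THRES = 16, a panic refill
 * tops the standard ring back up by RX_PANIC_STD_REFILL = (3*16)/2 = 24
 * buffers, and the low-water mark for the 72-entry standard ring is
 * RX_LOW_STD_THRES = (3*72)/4 = 54 buffers. See ace_interrupt() and
 * ace_tasklet() below for where these are checked.
 */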

#define ACE_MINI_SIZE		100

#define ACE_MINI_BUFSIZE	ACE_MINI_SIZE
#define ACE_STD_BUFSIZE		(ACE_STD_MTU + ETH_HLEN + 4)
#define ACE_JUMBO_BUFSIZE	(ACE_JUMBO_MTU + ETH_HLEN + 4)
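
/*
 * Illustration (an assumption, not from the original text): with the
 * conventional ACE_STD_MTU of 1500 from acenic.h, ACE_STD_BUFSIZE works
 * out to 1500 + 14 (Ethernet header) + 4 (VLAN tag) = 1518 bytes per
 * standard receive buffer.
 */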

#define DEF_TX_COAL		400
#define DEF_TX_MAX_DESC		60
#define DEF_RX_COAL		120
#define DEF_RX_MAX_DESC		25
#define DEF_TX_RATIO		21

#define DEF_JUMBO_TX_COAL	20
#define DEF_JUMBO_TX_MAX_DESC	60
#define DEF_JUMBO_RX_COAL	30
#define DEF_JUMBO_RX_MAX_DESC	6
#define DEF_JUMBO_TX_RATIO	21

#if tigon2FwReleaseLocal < 20001118
#define TX_COAL_INTS_ONLY	1
#else
#define TX_COAL_INTS_ONLY	1
#endif

#define DEF_TRACE		0
#define DEF_STAT		(2 * TICKS_PER_SEC)


static int link_state[ACE_MAX_MOD_PARMS];
static int trace[ACE_MAX_MOD_PARMS];
static int tx_coal_tick[ACE_MAX_MOD_PARMS];
static int rx_coal_tick[ACE_MAX_MOD_PARMS];
static int max_tx_desc[ACE_MAX_MOD_PARMS];
static int max_rx_desc[ACE_MAX_MOD_PARMS];
static int tx_ratio[ACE_MAX_MOD_PARMS];
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};

MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
MODULE_FIRMWARE("acenic/tg1.bin");
#endif
MODULE_FIRMWARE("acenic/tg2.bin");

module_param_array_named(link, link_state, int, NULL, 0);
module_param_array(trace, int, NULL, 0);
module_param_array(tx_coal_tick, int, NULL, 0);
module_param_array(max_tx_desc, int, NULL, 0);
module_param_array(rx_coal_tick, int, NULL, 0);
module_param_array(max_rx_desc, int, NULL, 0);
module_param_array(tx_ratio, int, NULL, 0);
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait for");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait for");
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
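
/*
 * Usage sketch (added for illustration, not from the original text):
 * each array takes one comma-separated value per board, up to
 * ACE_MAX_MOD_PARMS boards, e.g.
 *
 *	modprobe acenic trace=1 tx_coal_tick=400,400 max_rx_desc=25
 *
 * For link_state the option bits decoded in ace_init() apply, e.g.
 * link=0x40 selects forced 1000Mbit (see the option-bit handling
 * in ace_init() below).
 */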


static const char version[] =
  "acenic.c: v0.92 08/05/2002  Jes Sorensen, linux-acenic@SunSITE.dk\n"
  "                            http://home.cern.ch/~jes/gige/acenic.html\n";

static int ace_get_link_ksettings(struct net_device *,
				  struct ethtool_link_ksettings *);
static int ace_set_link_ksettings(struct net_device *,
				  const struct ethtool_link_ksettings *);
static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);

static const struct ethtool_ops ace_ethtool_ops = {
	.get_drvinfo = ace_get_drvinfo,
	.get_link_ksettings = ace_get_link_ksettings,
	.set_link_ksettings = ace_set_link_ksettings,
};

static void ace_watchdog(struct net_device *dev);

static const struct net_device_ops ace_netdev_ops = {
	.ndo_open		= ace_open,
	.ndo_stop		= ace_close,
	.ndo_tx_timeout		= ace_watchdog,
	.ndo_get_stats		= ace_get_stats,
	.ndo_start_xmit		= ace_start_xmit,
	.ndo_set_rx_mode	= ace_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ace_set_mac_addr,
	.ndo_change_mtu		= ace_change_mtu,
};
457
458static int acenic_probe_one(struct pci_dev *pdev,
459 const struct pci_device_id *id)
460{
461 struct net_device *dev;
462 struct ace_private *ap;
463 static int boards_found;
464
465 dev = alloc_etherdev(sizeof(struct ace_private));
466 if (dev == NULL)
467 return -ENOMEM;
468
469 SET_NETDEV_DEV(dev, &pdev->dev);
470
471 ap = netdev_priv(dev);
472 ap->pdev = pdev;
473 ap->name = pci_name(pdev);
474
475 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
476 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
477
478 dev->watchdog_timeo = 5*HZ;
479 dev->min_mtu = 0;
480 dev->max_mtu = ACE_JUMBO_MTU;
481
482 dev->netdev_ops = &ace_netdev_ops;
483 dev->ethtool_ops = &ace_ethtool_ops;
484
485
486 if (!boards_found)
487 printk(version);
488
489 if (pci_enable_device(pdev))
490 goto fail_free_netdev;
491
492
493
494
495
496
497 pci_set_master(pdev);
498
499 pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
500
501
502 if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
503 printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
504 "access - was not enabled by BIOS/Firmware\n",
505 ap->name);
506 ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
507 pci_write_config_word(ap->pdev, PCI_COMMAND,
508 ap->pci_command);
509 wmb();
510 }
511
512 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
513 if (ap->pci_latency <= 0x40) {
514 ap->pci_latency = 0x40;
515 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
516 }
517
518
519
520
521
522
523 dev->base_addr = pci_resource_start(pdev, 0);
524 ap->regs = ioremap(dev->base_addr, 0x4000);
525 if (!ap->regs) {
526 printk(KERN_ERR "%s: Unable to map I/O register, "
527 "AceNIC %i will be disabled.\n",
528 ap->name, boards_found);
529 goto fail_free_netdev;
530 }
531
532 switch(pdev->vendor) {
533 case PCI_VENDOR_ID_ALTEON:
534 if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
535 printk(KERN_INFO "%s: Farallon PN9100-T ",
536 ap->name);
537 } else {
538 printk(KERN_INFO "%s: Alteon AceNIC ",
539 ap->name);
540 }
541 break;
542 case PCI_VENDOR_ID_3COM:
543 printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
544 break;
545 case PCI_VENDOR_ID_NETGEAR:
546 printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
547 break;
548 case PCI_VENDOR_ID_DEC:
549 if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
550 printk(KERN_INFO "%s: Farallon PN9000-SX ",
551 ap->name);
552 break;
553 }
554
555 case PCI_VENDOR_ID_SGI:
556 printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
557 break;
558 default:
559 printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
560 break;
561 }
562
563 printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
564 printk("irq %d\n", pdev->irq);
565
566#ifdef CONFIG_ACENIC_OMIT_TIGON_I
567 if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
568 printk(KERN_ERR "%s: Driver compiled without Tigon I"
569 " support - NIC disabled\n", dev->name);
570 goto fail_uninit;
571 }
572#endif
573
574 if (ace_allocate_descriptors(dev))
575 goto fail_free_netdev;
576
577#ifdef MODULE
578 if (boards_found >= ACE_MAX_MOD_PARMS)
579 ap->board_idx = BOARD_IDX_OVERFLOW;
580 else
581 ap->board_idx = boards_found;
582#else
583 ap->board_idx = BOARD_IDX_STATIC;
584#endif
585
586 if (ace_init(dev))
587 goto fail_free_netdev;
588
589 if (register_netdev(dev)) {
590 printk(KERN_ERR "acenic: device registration failed\n");
591 goto fail_uninit;
592 }
593 ap->name = dev->name;
594
595 if (ap->pci_using_dac)
596 dev->features |= NETIF_F_HIGHDMA;
597
598 pci_set_drvdata(pdev, dev);
599
600 boards_found++;
601 return 0;
602
603 fail_uninit:
604 ace_init_cleanup(dev);
605 fail_free_netdev:
606 free_netdev(dev);
607 return -ENODEV;
608}
609
610static void acenic_remove_one(struct pci_dev *pdev)
611{
612 struct net_device *dev = pci_get_drvdata(pdev);
613 struct ace_private *ap = netdev_priv(dev);
614 struct ace_regs __iomem *regs = ap->regs;
615 short i;
616
617 unregister_netdev(dev);
618
619 writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl);
620 if (ap->version >= 2)
621 writel(readl(®s->CpuBCtrl) | CPU_HALT, ®s->CpuBCtrl);
622
623
624
625
626 writel(1, ®s->Mb0Lo);
627 readl(®s->CpuCtrl);
628
629
630
631
632
633
634
635
636
637
638 ace_sync_irq(dev->irq);
639
640 for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
641 struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
642
643 if (skb) {
644 struct ring_info *ringp;
645 dma_addr_t mapping;
646
647 ringp = &ap->skb->rx_std_skbuff[i];
648 mapping = dma_unmap_addr(ringp, mapping);
649 pci_unmap_page(ap->pdev, mapping,
650 ACE_STD_BUFSIZE,
651 PCI_DMA_FROMDEVICE);
652
653 ap->rx_std_ring[i].size = 0;
654 ap->skb->rx_std_skbuff[i].skb = NULL;
655 dev_kfree_skb(skb);
656 }
657 }
658
659 if (ap->version >= 2) {
660 for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
661 struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
662
663 if (skb) {
664 struct ring_info *ringp;
665 dma_addr_t mapping;
666
667 ringp = &ap->skb->rx_mini_skbuff[i];
668 mapping = dma_unmap_addr(ringp,mapping);
669 pci_unmap_page(ap->pdev, mapping,
670 ACE_MINI_BUFSIZE,
671 PCI_DMA_FROMDEVICE);
672
673 ap->rx_mini_ring[i].size = 0;
674 ap->skb->rx_mini_skbuff[i].skb = NULL;
675 dev_kfree_skb(skb);
676 }
677 }
678 }
679
680 for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
681 struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
682 if (skb) {
683 struct ring_info *ringp;
684 dma_addr_t mapping;
685
686 ringp = &ap->skb->rx_jumbo_skbuff[i];
687 mapping = dma_unmap_addr(ringp, mapping);
688 pci_unmap_page(ap->pdev, mapping,
689 ACE_JUMBO_BUFSIZE,
690 PCI_DMA_FROMDEVICE);
691
692 ap->rx_jumbo_ring[i].size = 0;
693 ap->skb->rx_jumbo_skbuff[i].skb = NULL;
694 dev_kfree_skb(skb);
695 }
696 }
697
698 ace_init_cleanup(dev);
699 free_netdev(dev);
700}
701
702static struct pci_driver acenic_pci_driver = {
703 .name = "acenic",
704 .id_table = acenic_pci_tbl,
705 .probe = acenic_probe_one,
706 .remove = acenic_remove_one,
707};

static void ace_free_descriptors(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	int size;

	if (ap->rx_std_ring != NULL) {
		size = (sizeof(struct rx_desc) *
			(RX_STD_RING_ENTRIES +
			 RX_JUMBO_RING_ENTRIES +
			 RX_MINI_RING_ENTRIES +
			 RX_RETURN_RING_ENTRIES));
		pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
				    ap->rx_ring_base_dma);
		ap->rx_std_ring = NULL;
		ap->rx_jumbo_ring = NULL;
		ap->rx_mini_ring = NULL;
		ap->rx_return_ring = NULL;
	}
	if (ap->evt_ring != NULL) {
		size = (sizeof(struct event) * EVT_RING_ENTRIES);
		pci_free_consistent(ap->pdev, size, ap->evt_ring,
				    ap->evt_ring_dma);
		ap->evt_ring = NULL;
	}
	if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
		pci_free_consistent(ap->pdev, size, ap->tx_ring,
				    ap->tx_ring_dma);
	}
	ap->tx_ring = NULL;

	if (ap->evt_prd != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->evt_prd, ap->evt_prd_dma);
		ap->evt_prd = NULL;
	}
	if (ap->rx_ret_prd != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->rx_ret_prd,
				    ap->rx_ret_prd_dma);
		ap->rx_ret_prd = NULL;
	}
	if (ap->tx_csm != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->tx_csm, ap->tx_csm_dma);
		ap->tx_csm = NULL;
	}
}


static int ace_allocate_descriptors(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	int size;

	size = (sizeof(struct rx_desc) *
		(RX_STD_RING_ENTRIES +
		 RX_JUMBO_RING_ENTRIES +
		 RX_MINI_RING_ENTRIES +
		 RX_RETURN_RING_ENTRIES));

	ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
					       &ap->rx_ring_base_dma);
	if (ap->rx_std_ring == NULL)
		goto fail;

	ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
	ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
	ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;

	size = (sizeof(struct event) * EVT_RING_ENTRIES);

	ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);

	if (ap->evt_ring == NULL)
		goto fail;

	/*
	 * The Tigon I keeps its TX ring in NIC-local memory (see
	 * ace_init()), so a host-side TX ring is only allocated for
	 * the Tigon II.
	 */
	if (!ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);

		ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
						   &ap->tx_ring_dma);

		if (ap->tx_ring == NULL)
			goto fail;
	}

	ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
					   &ap->evt_prd_dma);
	if (ap->evt_prd == NULL)
		goto fail;

	ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
					      &ap->rx_ret_prd_dma);
	if (ap->rx_ret_prd == NULL)
		goto fail;

	ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
					  &ap->tx_csm_dma);
	if (ap->tx_csm == NULL)
		goto fail;

	return 0;

fail:
	/* clean up whatever was allocated before the failure */
	ace_init_cleanup(dev);
	return 1;
}


/*
 * Free everything ace_init() and ace_allocate_descriptors() set up;
 * used both on module unload and on init errors.
 */
static void ace_init_cleanup(struct net_device *dev)
{
	struct ace_private *ap;

	ap = netdev_priv(dev);

	ace_free_descriptors(dev);

	if (ap->info)
		pci_free_consistent(ap->pdev, sizeof(struct ace_info),
				    ap->info, ap->info_dma);
	kfree(ap->skb);
	kfree(ap->trace_buf);

	if (dev->irq)
		free_irq(dev->irq, dev);

	iounmap(ap->regs);
}


/*
 * Post a command to the NIC's command ring (slow path).
 */
static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
{
	u32 idx;

	idx = readl(&regs->CmdPrd);

	writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
	idx = (idx + 1) % CMD_RING_ENTRIES;

	writel(idx, &regs->CmdPrd);
}
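
/*
 * Usage sketch (mirrors the real callers in ace_open()/ace_close()
 * below): a struct cmd is filled in on the stack and posted to the
 * NIC's command ring, e.g.
 *
 *	struct cmd cmd;
 *
 *	cmd.evt = C_CLEAR_STATS;
 *	cmd.code = 0;
 *	cmd.idx = 0;
 *	ace_issue_cmd(regs, &cmd);
 */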

static int ace_init(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs __iomem *regs;
	struct ace_info *info = NULL;
	struct pci_dev *pdev;
	unsigned long myjif;
	u64 tmp_ptr;
	u32 tig_ver, mac1, mac2, tmp, pci_state;
	int board_idx, ecode = 0;
	short i;
	unsigned char cache_size;

	ap = netdev_priv(dev);
	regs = ap->regs;

	board_idx = ap->board_idx;

	/*
	 * Start from a clean state: reset the NIC.
	 */
	writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
	readl(&regs->HostCtrl);		/* flush the posted write */
	udelay(5);

	/*
	 * Set the byte/word swapping mode; don't touch any other
	 * registers before this is done.
	 */
#ifdef __BIG_ENDIAN
	writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
	       &regs->HostCtrl);
#else
	writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
	       &regs->HostCtrl);
#endif
	readl(&regs->HostCtrl);		/* flush */

	/*
	 * Halt the NIC CPU and clear pending interrupts.
	 */
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	readl(&regs->CpuCtrl);		/* flush */
	writel(0, &regs->Mb0Lo);

	tig_ver = readl(&regs->HostCtrl) >> 28;

	switch(tig_ver){
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
	case 4:
	case 5:
		printk(KERN_INFO "  Tigon I  (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, ap->firmware_major, ap->firmware_minor,
		       ap->firmware_fix);
		writel(0, &regs->LocalCtrl);
		ap->version = 1;
		ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
		break;
#endif
	case 6:
		printk(KERN_INFO "  Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, ap->firmware_major, ap->firmware_minor,
		       ap->firmware_fix);
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
		readl(&regs->CpuBCtrl);		/* flush */
		/*
		 * SRAM_BANK_512K selects the SRAM bank size, not the
		 * total amount of memory on the card.
		 */
		writel(SRAM_BANK_512K, &regs->LocalCtrl);
		writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
		ap->version = 2;
		ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
		break;
	default:
		printk(KERN_WARNING "  Unsupported Tigon version detected "
		       "(%i)\n", tig_ver);
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * Configure DMA and buffer-descriptor byte ordering; this must
	 * come after the SRAM settings above.
	 */
#ifdef __BIG_ENDIAN
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
	readl(&regs->ModeStat);		/* flush */

	mac1 = 0;
	for(i = 0; i < 4; i++) {
		int t;

		mac1 = mac1 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac1 |= (t & 0xff);
	}
	mac2 = 0;
	for(i = 4; i < 8; i++) {
		int t;

		mac2 = mac2 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac2 |= (t & 0xff);
	}

	writel(mac1, &regs->MacAddrHi);
	writel(mac2, &regs->MacAddrLo);

	dev->dev_addr[0] = (mac1 >> 8) & 0xff;
	dev->dev_addr[1] = mac1 & 0xff;
	dev->dev_addr[2] = (mac2 >> 24) & 0xff;
	dev->dev_addr[3] = (mac2 >> 16) & 0xff;
	dev->dev_addr[4] = (mac2 >> 8) & 0xff;
	dev->dev_addr[5] = mac2 & 0xff;

	printk("MAC: %pM\n", dev->dev_addr);

	/*
	 * The PCI cache line size is not always set correctly by the
	 * BIOS/firmware; fix it up if it is too small.
	 */
	pdev = ap->pdev;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
	cache_size <<= 2;
	if (cache_size != SMP_CACHE_BYTES) {
		printk(KERN_INFO "  PCI cache line size set incorrectly "
		       "(%i bytes) by BIOS/FW, ", cache_size);
		if (cache_size > SMP_CACHE_BYTES)
			printk("expecting %i\n", SMP_CACHE_BYTES);
		else {
			printk("correcting to %i\n", SMP_CACHE_BYTES);
			pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
					      SMP_CACHE_BYTES >> 2);
		}
	}

	pci_state = readl(&regs->PciState);
	printk(KERN_INFO "  PCI bus width: %i bits, speed: %iMHz, "
	       "latency: %i clks\n",
	       (pci_state & PCI_32BIT) ? 32 : 64,
	       (pci_state & PCI_66MHZ) ? 66 : 33,
	       ap->pci_latency);

	/*
	 * Set the maximum DMA transfer sizes. When PCI memory write
	 * and invalidate is enabled, the DMA write burst has to match
	 * the cache line size (see the switch below).
	 */
	tmp = READ_CMD_MEM | WRITE_CMD_MEM;
	if (ap->version >= 2) {
		tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));

		if (board_idx == BOARD_IDX_OVERFLOW ||
		    dis_pci_mem_inval[board_idx]) {
			if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
				printk(KERN_INFO "  Disabling PCI memory "
				       "write and invalidate\n");
			}
		} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
			printk(KERN_INFO "  PCI memory write & invalidate "
			       "enabled by BIOS, enabling counter measures\n");

			switch(SMP_CACHE_BYTES) {
			case 16:
				tmp |= DMA_WRITE_MAX_16;
				break;
			case 32:
				tmp |= DMA_WRITE_MAX_32;
				break;
			case 64:
				tmp |= DMA_WRITE_MAX_64;
				break;
			case 128:
				tmp |= DMA_WRITE_MAX_128;
				break;
			default:
				printk(KERN_INFO "  Cache line size %i not "
				       "supported, PCI write and invalidate "
				       "disabled\n", SMP_CACHE_BYTES);
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
			}
		}
	}

#ifdef __sparc__
	/* platform-specific DMA burst limits */
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_64;
	tmp |= DMA_WRITE_MAX_64;
#endif
#ifdef __alpha__
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_128;

	tmp |= DMA_WRITE_MAX_128;
#endif
	writel(tmp, &regs->PciState);

#if 0
	/*
	 * Left disabled: enabling PCI Fast Back-to-Back is normally
	 * the job of the host bus controller, not individual devices.
	 */
	if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
		printk(KERN_INFO "  Enabling PCI Fast Back to Back\n");
		ap->pci_command |= PCI_COMMAND_FAST_BACK;
		pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
	}
#endif

	/*
	 * Configure DMA attributes.
	 */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		ap->pci_using_dac = 1;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ap->pci_using_dac = 0;
	} else {
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * Allocate the shared info block; it holds the command/event
	 * ring control blocks and the statistics area.
	 */
	if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
					  &ap->info_dma))) {
		ecode = -EAGAIN;
		goto init_error;
	}
	ap->info = info;

	/*
	 * Get the memory for the skb rings.
	 */
	if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
		ecode = -EAGAIN;
		goto init_error;
	}

	ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
			    DRV_NAME, dev);
	if (ecode) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       DRV_NAME, pdev->irq);
		goto init_error;
	} else
		dev->irq = pdev->irq;

#ifdef INDEX_DEBUG
	spin_lock_init(&ap->debug_lock);
	ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
	ap->last_std_rx = 0;
	ap->last_mini_rx = 0;
#endif

	memset(ap->info, 0, sizeof(struct ace_info));
	memset(ap->skb, 0, sizeof(struct ace_skb));

	ecode = ace_load_firmware(dev);
	if (ecode)
		goto init_error;

	ap->fw_running = 0;

	tmp_ptr = ap->info_dma;
	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
	writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);

	memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));

	set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
	info->evt_ctrl.flags = 0;

	*(ap->evt_prd) = 0;
	wmb();
	set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
	writel(0, &regs->EvtCsm);

	set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
	info->cmd_ctrl.flags = 0;
	info->cmd_ctrl.max_len = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRng[i]);

	writel(0, &regs->CmdPrd);
	writel(0, &regs->CmdCsm);

	tmp_ptr = ap->info_dma;
	tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
	set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);

	set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
	info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
	info->rx_std_ctrl.flags =
		RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;

	memset(ap->rx_std_ring, 0,
	       RX_STD_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_STD_RING_ENTRIES; i++)
		ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;

	ap->rx_std_skbprd = 0;
	atomic_set(&ap->cur_rx_bufs, 0);

	set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
	info->rx_jumbo_ctrl.max_len = 0;
	info->rx_jumbo_ctrl.flags =
		RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;

	memset(ap->rx_jumbo_ring, 0,
	       RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
		ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;

	ap->rx_jumbo_skbprd = 0;
	atomic_set(&ap->cur_jumbo_bufs, 0);

	memset(ap->rx_mini_ring, 0,
	       RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));

	if (ap->version >= 2) {
		set_aceaddr(&info->rx_mini_ctrl.rngptr,
			    (ap->rx_ring_base_dma +
			     (sizeof(struct rx_desc) *
			      (RX_STD_RING_ENTRIES +
			       RX_JUMBO_RING_ENTRIES))));
		info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
		info->rx_mini_ctrl.flags =
			RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;

		for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
			ap->rx_mini_ring[i].flags =
				BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
	} else {
		set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
		info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
		info->rx_mini_ctrl.max_len = 0;
	}

	ap->rx_mini_skbprd = 0;
	atomic_set(&ap->cur_mini_bufs, 0);

	set_aceaddr(&info->rx_return_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) *
		      (RX_STD_RING_ENTRIES +
		       RX_JUMBO_RING_ENTRIES +
		       RX_MINI_RING_ENTRIES))));
	info->rx_return_ctrl.flags = 0;
	info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;

	memset(ap->rx_return_ring, 0,
	       RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));

	set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
	*(ap->rx_ret_prd) = 0;

	writel(TX_RING_BASE, &regs->WinBase);

	if (ACE_IS_TIGON_I(ap)) {
		/*
		 * The Tigon I TX ring lives in NIC-local memory and is
		 * accessed through the register window.
		 */
		ap->tx_ring = (__force struct tx_desc *) regs->Window;
		for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
				 * sizeof(struct tx_desc)) / sizeof(u32); i++)
			writel(0, (__force void __iomem *)ap->tx_ring + i * 4);

		set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
	} else {
		memset(ap->tx_ring, 0,
		       MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));

		set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
	}

	info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
	tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;

	/*
	 * Only the Tigon II can use a TX ring in host memory.
	 */
	if (!ACE_IS_TIGON_I(ap))
		tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
	tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
	info->tx_ctrl.flags = tmp;

	set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);

	/*
	 * DMA thresholds; a possible tuning knob.
	 */
#if 0
	writel(DMA_THRESH_16W, &regs->DmaReadCfg);
	writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
	writel(DMA_THRESH_8W, &regs->DmaReadCfg);
	writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif

	writel(0, &regs->MaskInt);
	writel(1, &regs->IfIdx);
#if 0
	/* left disabled; AssistState is set later in ace_init() */
	writel(1, &regs->AssistState);
#endif

	writel(DEF_STAT, &regs->TuneStatTicks);
	writel(DEF_TRACE, &regs->TuneTrace);

	ace_set_rxtx_parms(dev, 0);

	if (board_idx == BOARD_IDX_OVERFLOW) {
		printk(KERN_WARNING "%s: more than %i NICs detected, "
		       "ignoring module parameters!\n",
		       ap->name, ACE_MAX_MOD_PARMS);
	} else if (board_idx >= 0) {
		if (tx_coal_tick[board_idx])
			writel(tx_coal_tick[board_idx],
			       &regs->TuneTxCoalTicks);
		if (max_tx_desc[board_idx])
			writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);

		if (rx_coal_tick[board_idx])
			writel(rx_coal_tick[board_idx],
			       &regs->TuneRxCoalTicks);
		if (max_rx_desc[board_idx])
			writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);

		if (trace[board_idx])
			writel(trace[board_idx], &regs->TuneTrace);

		if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
			writel(tx_ratio[board_idx], &regs->TxBufRat);
	}

	/*
	 * Default link parameters.
	 */
	tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
		LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
	if(ap->version >= 2)
		tmp |= LNK_TX_FLOW_CTL_Y;

	/*
	 * Override the link defaults from the per-board module parameter.
	 */
	if ((board_idx >= 0) && link_state[board_idx]) {
		int option = link_state[board_idx];

		tmp = LNK_ENABLE;

		if (option & 0x01) {
			printk(KERN_INFO "%s: Setting half duplex link\n",
			       ap->name);
			tmp &= ~LNK_FULL_DUPLEX;
		}
		if (option & 0x02)
			tmp &= ~LNK_NEGOTIATE;
		if (option & 0x10)
			tmp |= LNK_10MB;
		if (option & 0x20)
			tmp |= LNK_100MB;
		if (option & 0x40)
			tmp |= LNK_1000MB;
		if ((option & 0x70) == 0) {
			printk(KERN_WARNING "%s: No media speed specified, "
			       "forcing auto negotiation\n", ap->name);
			tmp |= LNK_NEGOTIATE | LNK_1000MB |
				LNK_100MB | LNK_10MB;
		}
		if ((option & 0x100) == 0)
			tmp |= LNK_NEG_FCTL;
		else
			printk(KERN_INFO "%s: Disabling flow control "
			       "negotiation\n", ap->name);
		if (option & 0x200)
			tmp |= LNK_RX_FLOW_CTL_Y;
		if ((option & 0x400) && (ap->version >= 2)) {
			printk(KERN_INFO "%s: Enabling TX flow control\n",
			       ap->name);
			tmp |= LNK_TX_FLOW_CTL_Y;
		}
	}

	ap->link = tmp;
	writel(tmp, &regs->TuneLink);
	if (ap->version >= 2)
		writel(tmp, &regs->TuneFastLink);

	writel(ap->firmware_start, &regs->Pc);

	writel(0, &regs->Mb0Lo);

	/*
	 * Initialize the producer/consumer indices before the firmware
	 * is started, so the interrupt handler never sees stale values.
	 */
	ap->cur_rx = 0;
	ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;

	wmb();
	ace_set_txprd(regs, ap, 0);
	writel(0, &regs->RxRetCsm);

	/*
	 * Enable DMA assist logic.
	 */
	writel(1, &regs->AssistState);

	/*
	 * Start the NIC CPU.
	 */
	writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
	readl(&regs->CpuCtrl);

	/*
	 * Wait up to three seconds for the firmware to come up.
	 */
	myjif = jiffies + 3 * HZ;
	while (time_before(jiffies, myjif) && !ap->fw_running)
		cpu_relax();

	if (!ap->fw_running) {
		printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);

		ace_dump_trace(ap);
		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
		readl(&regs->CpuCtrl);

		/* halt the second CPU as well and clear the mailbox */
		if (ap->version >= 2)
			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
			       &regs->CpuBCtrl);
		writel(0, &regs->Mb0Lo);
		readl(&regs->Mb0Lo);

		ecode = -EBUSY;
		goto init_error;
	}

	/*
	 * Pre-load the standard (and, on Tigon II, mini) RX rings.
	 */
	if (!test_and_set_bit(0, &ap->std_refill_busy))
		ace_load_std_rx_ring(dev, RX_RING_SIZE);
	else
		printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
		       ap->name);
	if (ap->version >= 2) {
		if (!test_and_set_bit(0, &ap->mini_refill_busy))
			ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
		else
			printk(KERN_ERR "%s: Someone is busy refilling "
			       "the RX mini ring\n", ap->name);
	}
	return 0;

 init_error:
	ace_init_cleanup(dev);
	return ecode;
}


static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	int board_idx = ap->board_idx;

	if (board_idx >= 0) {
		if (!jumbo) {
			if (!tx_coal_tick[board_idx])
				writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_TX_RATIO, &regs->TxBufRat);
		} else {
			if (!tx_coal_tick[board_idx])
				writel(DEF_JUMBO_TX_COAL,
				       &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_JUMBO_TX_MAX_DESC,
				       &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_JUMBO_RX_COAL,
				       &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_JUMBO_RX_MAX_DESC,
				       &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
		}
	}
}


static void ace_watchdog(struct net_device *data)
{
	struct net_device *dev = data;
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	/*
	 * TX descriptors are still outstanding, so the transmitter is
	 * presumably stuck (possibly flow-controlled); otherwise the
	 * queue died quietly and we just log it.
	 */
	if (*ap->tx_csm != ap->tx_ret_csm) {
		printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
		       dev->name, (unsigned int)readl(&regs->HostCtrl));
	} else {
		printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
		       dev->name);
#if 0
		netif_wake_queue(dev);
#endif
	}
}


static void ace_tasklet(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct ace_private *ap = netdev_priv(dev);
	int cur_size;

	cur_size = atomic_read(&ap->cur_rx_bufs);
	if ((cur_size < RX_LOW_STD_THRES) &&
	    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
		printk("refilling buffers (current %i)\n", cur_size);
#endif
		ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
	}

	if (ap->version >= 2) {
		cur_size = atomic_read(&ap->cur_mini_bufs);
		if ((cur_size < RX_LOW_MINI_THRES) &&
		    !test_and_set_bit(0, &ap->mini_refill_busy)) {
#ifdef DEBUG
			printk("refilling mini buffers (current %i)\n",
			       cur_size);
#endif
			ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
		}
	}

	cur_size = atomic_read(&ap->cur_jumbo_bufs);
	if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
#ifdef DEBUG
		printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
		ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
	}
	ap->tasklet_pending = 0;
}


/*
 * Copy the contents of the NIC's trace buffer to kernel memory.
 */
static void ace_dump_trace(struct ace_private *ap)
{
#if 0
	if (!ap->trace_buf)
		if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
			return;
#endif
}


/*
 * Load the standard rx ring.
 *
 * Refills are serialized by the std_refill_busy bit, so no extra
 * locking is needed here.
 */
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;


	prefetchw(&ap->cur_rx_bufs);

	idx = ap->rx_std_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
		if (!skb)
			break;

		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_STD_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_std_skbuff[idx].skb = skb;
		dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_std_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_STD_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_STD_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_rx_bufs);
	ap->rx_std_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_std_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxStdPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->std_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "standard receive buffers\n");
	goto out;
}


static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	prefetchw(&ap->cur_mini_bufs);

	idx = ap->rx_mini_skbprd;
	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
		if (!skb)
			break;

		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_MINI_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_mini_skbuff[idx].skb = skb;
		dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_mini_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_MINI_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_MINI_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_mini_bufs);

	ap->rx_mini_skbprd = idx;

	writel(idx, &regs->RxMiniPrd);
	wmb();

 out:
	clear_bit(0, &ap->mini_refill_busy);
	return;
 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "mini receive buffers\n");
	goto out;
}


/*
 * Load the jumbo rx ring - this may be called at any time while jumbo
 * frames are enabled (e.g. after an MTU change); refills are serialized
 * by the jumbo_refill_busy bit.
 */
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	idx = ap->rx_jumbo_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
		if (!skb)
			break;

		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_JUMBO_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
		dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_jumbo_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_JUMBO_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_jumbo_bufs);
	ap->rx_jumbo_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_jumbo_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxJumboPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->jumbo_refill_busy);
	return;
 error_out:
	if (net_ratelimit())
		printk(KERN_INFO "Out of memory when allocating "
		       "jumbo receive buffers\n");
	goto out;
}


/*
 * Handle the slow-path events the firmware posts on the event ring
 * and return the updated consumer index.
 */
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
	struct ace_private *ap;

	ap = netdev_priv(dev);

	while (evtcsm != evtprd) {
		switch (ap->evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			printk(KERN_INFO "%s: Firmware up and running\n",
			       ap->name);
			ap->fw_running = 1;
			wmb();
			break;
		case E_STATS_UPDATED:
			break;
		case E_LNK_STATE:
		{
			u16 code = ap->evt_ring[evtcsm].code;
			switch (code) {
			case E_C_LINK_UP:
			{
				u32 state = readl(&ap->regs->GigLnkState);
				printk(KERN_WARNING "%s: Optical link UP "
				       "(%s Duplex, Flow Control: %s%s)\n",
				       ap->name,
				       state & LNK_FULL_DUPLEX ? "Full":"Half",
				       state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
				       state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
				break;
			}
			case E_C_LINK_DOWN:
				printk(KERN_WARNING "%s: Optical link DOWN\n",
				       ap->name);
				break;
			case E_C_LINK_10_100:
				printk(KERN_WARNING "%s: 10/100BaseT link "
				       "UP\n", ap->name);
				break;
			default:
				printk(KERN_ERR "%s: Unknown optical link "
				       "state %02x\n", ap->name, code);
			}
			break;
		}
		case E_ERROR:
			switch(ap->evt_ring[evtcsm].code) {
			case E_C_ERR_INVAL_CMD:
				printk(KERN_ERR "%s: invalid command error\n",
				       ap->name);
				break;
			case E_C_ERR_UNIMP_CMD:
				printk(KERN_ERR "%s: unimplemented command "
				       "error\n", ap->name);
				break;
			case E_C_ERR_BAD_CFG:
				printk(KERN_ERR "%s: bad config error\n",
				       ap->name);
				break;
			default:
				printk(KERN_ERR "%s: unknown error %02x\n",
				       ap->name, ap->evt_ring[evtcsm].code);
			}
			break;
		case E_RESET_JUMBO_RNG:
		{
			int i;
			for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
				if (ap->skb->rx_jumbo_skbuff[i].skb) {
					ap->rx_jumbo_ring[i].size = 0;
					set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				}
			}

			if (ACE_IS_TIGON_I(ap)) {
				struct cmd cmd;
				cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
				cmd.code = 0;
				cmd.idx = 0;
				ace_issue_cmd(ap->regs, &cmd);
			} else {
				writel(0, &((ap->regs)->RxJumboPrd));
				wmb();
			}

			ap->jumbo = 0;
			ap->rx_jumbo_skbprd = 0;
			printk(KERN_INFO "%s: Jumbo ring flushed\n",
			       ap->name);
			clear_bit(0, &ap->jumbo_refill_busy);
			break;
		}
		default:
			printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
			       ap->name, ap->evt_ring[evtcsm].evt);
		}
		evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}

	return evtcsm;
}


static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
	struct ace_private *ap = netdev_priv(dev);
	u32 idx;
	int mini_count = 0, std_count = 0;

	idx = rxretcsm;

	prefetchw(&ap->cur_rx_bufs);
	prefetchw(&ap->cur_mini_bufs);

	while (idx != rxretprd) {
		struct ring_info *rip;
		struct sk_buff *skb;
		struct rx_desc *retdesc;
		u32 skbidx;
		int bd_flags, desc_type, mapsize;
		u16 csum;

		/* make sure the rx descriptor isn't read before rxretprd */
		if (idx == rxretcsm)
			rmb();

		retdesc = &ap->rx_return_ring[idx];
		skbidx = retdesc->idx;
		bd_flags = retdesc->flags;
		desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);

		switch(desc_type) {
			/*
			 * Normal frames have no type flags set. Standard
			 * and mini frames arrive frequently, so they are
			 * counted locally and subtracted in a single
			 * atomic operation at the end of the loop.
			 */
		case 0:
			rip = &ap->skb->rx_std_skbuff[skbidx];
			mapsize = ACE_STD_BUFSIZE;
			std_count++;
			break;
		case BD_FLG_JUMBO:
			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
			mapsize = ACE_JUMBO_BUFSIZE;
			atomic_dec(&ap->cur_jumbo_bufs);
			break;
		case BD_FLG_MINI:
			rip = &ap->skb->rx_mini_skbuff[skbidx];
			mapsize = ACE_MINI_BUFSIZE;
			mini_count++;
			break;
		default:
			printk(KERN_INFO "%s: unknown frame type (0x%02x) "
			       "returned by NIC\n", dev->name,
			       retdesc->flags);
			goto error;
		}

		skb = rip->skb;
		rip->skb = NULL;
		pci_unmap_page(ap->pdev,
			       dma_unmap_addr(rip, mapping),
			       mapsize,
			       PCI_DMA_FROMDEVICE);
		skb_put(skb, retdesc->size);

		csum = retdesc->tcp_udp_csum;

		skb->protocol = eth_type_trans(skb, dev);

		/*
		 * The NIC supplies a TCP/UDP checksum for the frame
		 * (computed without the pseudo header, as requested
		 * via RCB_FLG_NO_PSEUDO_HDR in ace_init()).
		 */
		if (bd_flags & BD_FLG_TCP_UDP_SUM) {
			skb->csum = htons(csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else {
			skb_checksum_none_assert(skb);
		}

		/* send it up */
		if ((bd_flags & BD_FLG_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += retdesc->size;

		idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
	}

	atomic_sub(std_count, &ap->cur_rx_bufs);
	if (!ACE_IS_TIGON_I(ap))
		atomic_sub(mini_count, &ap->cur_mini_bufs);

 out:
	/*
	 * The Tigon I still wants the return-ring consumer index
	 * written back to the NIC.
	 */
	if (ACE_IS_TIGON_I(ap)) {
		writel(idx, &ap->regs->RxRetCsm);
	}
	ap->cur_rx = idx;

	return;
 error:
	idx = rxretprd;
	goto out;
}


static inline void ace_tx_int(struct net_device *dev,
			      u32 txcsm, u32 idx)
{
	struct ace_private *ap = netdev_priv(dev);

	do {
		struct sk_buff *skb;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + idx;
		skb = info->skb;

		if (dma_unmap_len(info, maplen)) {
			pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
				       dma_unmap_len(info, maplen),
				       PCI_DMA_TODEVICE);
			dma_unmap_len_set(info, maplen, 0);
		}

		if (skb) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
			dev_kfree_skb_irq(skb);
			info->skb = NULL;
		}

		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
	} while (idx != txcsm);

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	wmb();
	ap->tx_ret_csm = txcsm;

	/*
	 * Note that tx_ret_csm is only advanced after the queue-wakeup
	 * check above; ace_start_xmit() re-tests the ring state to cope
	 * with the window this ordering opens.
	 */
}


static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	u32 idx;
	u32 txcsm, rxretcsm, rxretprd;
	u32 evtcsm, evtprd;

	/*
	 * The interrupt line is shared (IRQF_SHARED), so make sure the
	 * interrupt is actually ours before doing any work.
	 */
	if (!(readl(&regs->HostCtrl) & IN_INT))
		return IRQ_NONE;

	/*
	 * ACK the interrupt via mailbox 0 and flush the posted write.
	 */
	writel(0, &regs->Mb0Lo);
	readl(&regs->Mb0Lo);	/* flush */

	/*
	 * Handle RX completions first, then TX, then events.
	 */
	rxretprd = *ap->rx_ret_prd;
	rxretcsm = ap->cur_rx;

	if (rxretprd != rxretcsm)
		ace_rx_int(dev, rxretprd, rxretcsm);

	txcsm = *ap->tx_csm;
	idx = ap->tx_ret_csm;

	if (txcsm != idx) {
		/*
		 * Only reap TX descriptors while the ring is not
		 * completely full (see tx_ring_full() in acenic.h);
		 * otherwise leave it to ace_start_xmit().
		 */
		if (!tx_ring_full(ap, txcsm, ap->tx_prd))
			ace_tx_int(dev, txcsm, idx);
	}

	evtcsm = readl(&regs->EvtCsm);
	evtprd = *ap->evt_prd;

	if (evtcsm != evtprd) {
		evtcsm = ace_handle_event(dev, evtcsm, evtprd);
		writel(evtcsm, &regs->EvtCsm);
	}

	/*
	 * Refill RX rings that have run low; defer to the tasklet when
	 * a refill is already in progress.
	 */
	if (netif_running(dev)) {
		int cur_size;
		int run_tasklet = 0;

		cur_size = atomic_read(&ap->cur_rx_bufs);
		if (cur_size < RX_LOW_STD_THRES) {
			if ((cur_size < RX_PANIC_STD_THRES) &&
			    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
				printk("low on std buffers %i\n", cur_size);
#endif
				ace_load_std_rx_ring(dev,
						     RX_RING_SIZE - cur_size);
			} else
				run_tasklet = 1;
		}

		if (!ACE_IS_TIGON_I(ap)) {
			cur_size = atomic_read(&ap->cur_mini_bufs);
			if (cur_size < RX_LOW_MINI_THRES) {
				if ((cur_size < RX_PANIC_MINI_THRES) &&
				    !test_and_set_bit(0,
						      &ap->mini_refill_busy)) {
#ifdef DEBUG
					printk("low on mini buffers %i\n",
					       cur_size);
#endif
					ace_load_mini_rx_ring(dev,
							      RX_MINI_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}

		if (ap->jumbo) {
			cur_size = atomic_read(&ap->cur_jumbo_bufs);
			if (cur_size < RX_LOW_JUMBO_THRES) {
				if ((cur_size < RX_PANIC_JUMBO_THRES) &&
				    !test_and_set_bit(0,
						      &ap->jumbo_refill_busy)){
#ifdef DEBUG
					printk("low on jumbo buffers %i\n",
					       cur_size);
#endif
					ace_load_jumbo_rx_ring(dev,
							       RX_JUMBO_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}
		if (run_tasklet && !ap->tasklet_pending) {
			ap->tasklet_pending = 1;
			tasklet_schedule(&ap->ace_tasklet);
		}
	}

	return IRQ_HANDLED;
}

static int ace_open(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct cmd cmd;

	if (!(ap->fw_running)) {
		printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
		return -EBUSY;
	}

	writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);

	cmd.evt = C_CLEAR_STATS;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_UP;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	if (ap->jumbo &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy))
		ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);

	if (dev->flags & IFF_PROMISC) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);

		ap->promisc = 1;
	} else
		ap->promisc = 0;
	ap->mcast_all = 0;

#if 0
	cmd.evt = C_LNK_NEGOTIATION;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);
#endif

	netif_start_queue(dev);

	/*
	 * Set up the bottom-half RX ring refill handler.
	 */
	tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
	return 0;
}


static int ace_close(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct cmd cmd;
	unsigned long flags;
	short i;

	/*
	 * Stop new transmissions first; the hardware side is shut down
	 * below.
	 */
	netif_stop_queue(dev);

	if (ap->promisc) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 0;
	}

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_DOWN;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	tasklet_kill(&ap->ace_tasklet);

	/*
	 * Keep interrupts masked while the TX ring is being torn down,
	 * so the interrupt handler cannot see a half-cleared ring.
	 */
	local_irq_save(flags);
	ace_mask_irq(dev);

	for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
		struct sk_buff *skb;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + i;
		skb = info->skb;

		if (dma_unmap_len(info, maplen)) {
			if (ACE_IS_TIGON_I(ap)) {
				/* NIC-memory TX ring: clear via MMIO */
				struct tx_desc __iomem *tx;
				tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
				writel(0, &tx->addr.addrhi);
				writel(0, &tx->addr.addrlo);
				writel(0, &tx->flagsize);
			} else
				memset(ap->tx_ring + i, 0,
				       sizeof(struct tx_desc));
			pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
				       dma_unmap_len(info, maplen),
				       PCI_DMA_TODEVICE);
			dma_unmap_len_set(info, maplen, 0);
		}
		if (skb) {
			dev_kfree_skb(skb);
			info->skb = NULL;
		}
	}

	if (ap->jumbo) {
		cmd.evt = C_RESET_JUMBO_RNG;
		cmd.code = 0;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}

	ace_unmask_irq(dev);
	local_irq_restore(flags);

	return 0;
}


static inline dma_addr_t
ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
	       struct sk_buff *tail, u32 idx)
{
	dma_addr_t mapping;
	struct tx_ring_info *info;

	mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data),
			       skb->len, PCI_DMA_TODEVICE);

	info = ap->skb->tx_skbuff + idx;
	info->skb = tail;
	dma_unmap_addr_set(info, mapping, mapping);
	dma_unmap_len_set(info, maplen, skb->len);
	return mapping;
}


static inline void
ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
	       u32 flagsize, u32 vlan_tag)
{
#if !USE_TX_COAL_NOW
	flagsize &= ~BD_FLG_COAL_NOW;
#endif

	if (ACE_IS_TIGON_I(ap)) {
		struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc;
		writel(addr >> 32, &io->addr.addrhi);
		writel(addr & 0xffffffff, &io->addr.addrlo);
		writel(flagsize, &io->flagsize);
		writel(vlan_tag, &io->vlanres);
	} else {
		desc->addr.addrhi = addr >> 32;
		desc->addr.addrlo = addr;
		desc->flagsize = flagsize;
		desc->vlanres = vlan_tag;
	}
}
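
/*
 * Layout note (derived from the callers in ace_start_xmit() below, not
 * from the original text): flagsize packs the fragment length into the
 * upper 16 bits and the BD_FLG_* bits into the lower 16, e.g.
 *
 *	flagsize = (skb->len << 16) | BD_FLG_END;
 */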


static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct tx_desc *desc;
	u32 idx, flagsize;
	unsigned long maxjiff = jiffies + 3*HZ;

restart:
	idx = ap->tx_prd;

	if (tx_ring_full(ap, ap->tx_ret_csm, idx))
		goto overflow;

	if (!skb_shinfo(skb)->nr_frags) {
		dma_addr_t mapping;
		u32 vlan_tag = 0;

		mapping = ace_map_tx_skb(ap, skb, skb, idx);
		flagsize = (skb->len << 16) | (BD_FLG_END);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flagsize |= BD_FLG_TCP_UDP_SUM;
		if (skb_vlan_tag_present(skb)) {
			flagsize |= BD_FLG_VLAN_TAG;
			vlan_tag = skb_vlan_tag_get(skb);
		}
		desc = ap->tx_ring + idx;
		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

		/* request an immediate interrupt if this fills the ring */
		if (tx_ring_full(ap, ap->tx_ret_csm, idx))
			flagsize |= BD_FLG_COAL_NOW;

		ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
	} else {
		dma_addr_t mapping;
		u32 vlan_tag = 0;
		int i, len = 0;

		mapping = ace_map_tx_skb(ap, skb, NULL, idx);
		flagsize = (skb_headlen(skb) << 16);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flagsize |= BD_FLG_TCP_UDP_SUM;
		if (skb_vlan_tag_present(skb)) {
			flagsize |= BD_FLG_VLAN_TAG;
			vlan_tag = skb_vlan_tag_get(skb);
		}

		ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);

		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct tx_ring_info *info;

			len += skb_frag_size(frag);
			info = ap->skb->tx_skbuff + idx;
			desc = ap->tx_ring + idx;

			mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

			flagsize = skb_frag_size(frag) << 16;
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				flagsize |= BD_FLG_TCP_UDP_SUM;
			idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

			if (i == skb_shinfo(skb)->nr_frags - 1) {
				flagsize |= BD_FLG_END;
				if (tx_ring_full(ap, ap->tx_ret_csm, idx))
					flagsize |= BD_FLG_COAL_NOW;

				/*
				 * Only the last fragment carries the skb
				 * pointer, so only it frees the skb in
				 * ace_tx_int().
				 */
				info->skb = skb;
			} else {
				info->skb = NULL;
			}
			dma_unmap_addr_set(info, mapping, mapping);
			dma_unmap_len_set(info, maplen, skb_frag_size(frag));
			ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
		}
	}

	wmb();
	ap->tx_prd = idx;
	ace_set_txprd(regs, ap, idx);

	if (flagsize & BD_FLG_COAL_NOW) {
		netif_stop_queue(dev);

		/*
		 * The interrupt handler may have freed descriptors after
		 * the check above; re-test and wake the queue if there
		 * is room again.
		 */
		if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

overflow:
	/*
	 * The ring can look transiently full: the queue is woken before
	 * tx_ret_csm advances in ace_tx_int(), so another CPU may still
	 * be completing descriptors. Spin for up to three seconds
	 * waiting for space before declaring the ring stuck.
	 */
	if (time_before(jiffies, maxjiff)) {
		barrier();
		cpu_relax();
		goto restart;
	}

	printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
	return NETDEV_TX_BUSY;
}
2546
2547
2548static int ace_change_mtu(struct net_device *dev, int new_mtu)
2549{
2550 struct ace_private *ap = netdev_priv(dev);
2551 struct ace_regs __iomem *regs = ap->regs;
2552
2553 writel(new_mtu + ETH_HLEN + 4, ®s->IfMtu);
2554 dev->mtu = new_mtu;
2555
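	/* Crossing ACE_STD_MTU in either direction means switching the
	 * jumbo RX ring on or off. */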
	if (new_mtu > ACE_STD_MTU) {
		if (!(ap->jumbo)) {
			printk(KERN_INFO "%s: Enabling Jumbo frame "
			       "support\n", dev->name);
			ap->jumbo = 1;
			if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
				ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
			ace_set_rxtx_parms(dev, 1);
		}
	} else {
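		/* Wait for a jumbo ring refill that may already be in
		 * flight to finish before tearing the jumbo ring down. */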
		while (test_and_set_bit(0, &ap->jumbo_refill_busy));
		ace_sync_irq(dev->irq);
		ace_set_rxtx_parms(dev, 0);
		if (ap->jumbo) {
			struct cmd cmd;

			cmd.evt = C_RESET_JUMBO_RNG;
			cmd.code = 0;
			cmd.idx = 0;
			ace_issue_cmd(regs, &cmd);
		}
	}

	return 0;
}

static int ace_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	u32 link;
	u32 supported;

	memset(cmd, 0, sizeof(struct ethtool_link_ksettings));

	supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		     SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		     SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
		     SUPPORTED_Autoneg | SUPPORTED_FIBRE);

	cmd->base.port = PORT_FIBRE;

	link = readl(&regs->GigLnkState);
	if (link & LNK_1000MB) {
		cmd->base.speed = SPEED_1000;
	} else {
		link = readl(&regs->FastLnkState);
		if (link & LNK_100MB)
			cmd->base.speed = SPEED_100;
		else if (link & LNK_10MB)
			cmd->base.speed = SPEED_10;
		else
			cmd->base.speed = 0;
	}
	if (link & LNK_FULL_DUPLEX)
		cmd->base.duplex = DUPLEX_FULL;
	else
		cmd->base.duplex = DUPLEX_HALF;

	if (link & LNK_NEGOTIATE)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;

#if 0
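	/*
	 * The trace buffer and coalescing tick registers have no
	 * representation in struct ethtool_link_ksettings, so this
	 * block stays disabled.
	 */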
	ecmd->trace = readl(&regs->TuneTrace);

	ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
	ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
#endif

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	return 0;
}

static int ace_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	u32 link, speed;

	link = readl(&regs->GigLnkState);
	if (link & LNK_1000MB)
		speed = SPEED_1000;
	else {
		link = readl(&regs->FastLnkState);
		if (link & LNK_100MB)
			speed = SPEED_100;
		else if (link & LNK_10MB)
			speed = SPEED_10;
		else
			speed = SPEED_100;
	}

	link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
		LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
	if (!ACE_IS_TIGON_I(ap))
		link |= LNK_TX_FLOW_CTL_Y;
	if (cmd->base.autoneg == AUTONEG_ENABLE)
		link |= LNK_NEGOTIATE;
	if (cmd->base.speed != speed) {
		link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
		switch (cmd->base.speed) {
		case SPEED_1000:
			link |= LNK_1000MB;
			break;
		case SPEED_100:
			link |= LNK_100MB;
			break;
		case SPEED_10:
			link |= LNK_10MB;
			break;
		}
	}

	if (cmd->base.duplex == DUPLEX_FULL)
		link |= LNK_FULL_DUPLEX;

	if (link != ap->link) {
		struct cmd cmd;
		printk(KERN_INFO "%s: Renegotiating link state\n",
		       dev->name);

		ap->link = link;
		writel(link, &regs->TuneLink);
		if (!ACE_IS_TIGON_I(ap))
			writel(link, &regs->TuneFastLink);
		wmb();

		cmd.evt = C_LNK_NEGOTIATION;
		cmd.code = 0;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}
	return 0;
}

static void ace_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct ace_private *ap = netdev_priv(dev);

	strlcpy(info->driver, "acenic", sizeof(info->driver));
	snprintf(info->version, sizeof(info->version), "%i.%i.%i",
		 ap->firmware_major, ap->firmware_minor,
		 ap->firmware_fix);

	if (ap->pdev)
		strlcpy(info->bus_info, pci_name(ap->pdev),
			sizeof(info->bus_info));
}

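/*
 * Set the hardware MAC address.
 */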
static int ace_set_mac_addr(struct net_device *dev, void *p)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct sockaddr *addr = p;
	u8 *da;
	struct cmd cmd;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	da = (u8 *)dev->dev_addr;

	writel(da[0] << 8 | da[1], &regs->MacAddrHi);
	writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
	       &regs->MacAddrLo);

	cmd.evt = C_SET_MAC_ADDR;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	return 0;
}


static void ace_set_multicast_list(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct cmd cmd;

	if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->mcast_all = 1;
	} else if (ap->mcast_all) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->mcast_all = 0;
	}

	if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 1;
	} else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 0;
	}

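	/*
	 * For the time being multicast relies on the upper layers
	 * filtering it properly. The firmware does not allow setting
	 * the entire multicast list at once, so tracking it here would
	 * be messy.
	 */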
	if (!netdev_mc_empty(dev) && !ap->mcast_all) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	} else if (!ap->mcast_all) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}
}


static struct net_device_stats *ace_get_stats(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_mac_stats __iomem *mac_stats =
		(struct ace_mac_stats __iomem *)ap->regs->Stats;

	dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
	dev->stats.multicast = readl(&mac_stats->kept_mc);
	dev->stats.collisions = readl(&mac_stats->coll);

	return &dev->stats;
}

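/*
 * Copy a block of host memory into the NIC's SRAM through the shared
 * register window, 32 bits at a time.
 */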
static void ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
		     u32 dest, int size)
{
	void __iomem *tdest;
	short tsize, i;

	if (size <= 0)
		return;

	while (size > 0) {
		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
			      min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (void __iomem *) &regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
		for (i = 0; i < (tsize / 4); i++) {
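			/* Firmware is big endian */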
			writel(be32_to_cpup(src), tdest);
			src++;
			tdest += 4;
			dest += 4;
			size -= 4;
		}
	}
}

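/*
 * Zero a block of the NIC's SRAM through the same register window.
 */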
static void ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
{
	void __iomem *tdest;
	short tsize = 0, i;

	if (size <= 0)
		return;

	while (size > 0) {
		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
			      min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (void __iomem *) &regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);

		for (i = 0; i < (tsize / 4); i++)
			writel(0, tdest + i*4);

		dest += tsize;
		size -= tsize;
	}
}

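/*
 * Download the firmware into the SRAM on the NIC.
 *
 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled.
 */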
static int ace_load_firmware(struct net_device *dev)
{
	const struct firmware *fw;
	const char *fw_name = "acenic/tg2.bin";
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	const __be32 *fw_data;
	u32 load_addr;
	int ret;

	if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
		printk(KERN_ERR "%s: trying to download firmware while the "
		       "CPU is running!\n", ap->name);
		return -EFAULT;
	}

	if (ACE_IS_TIGON_I(ap))
		fw_name = "acenic/tg1.bin";

	ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
	if (ret) {
		printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
		       ap->name, fw_name);
		return ret;
	}

	fw_data = (void *)fw->data;

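	/* The firmware blob starts with version numbers, followed by the
	 * load and start addresses. The remainder is the image to be
	 * loaded contiguously from the load address; the whole SRAM is
	 * cleared first, so BSS/SBSS sections need no representation. */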
	ap->firmware_major = fw->data[0];
	ap->firmware_minor = fw->data[1];
	ap->firmware_fix = fw->data[2];

	ap->firmware_start = be32_to_cpu(fw_data[1]);
	if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
		printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
		       ap->name, ap->firmware_start, fw_name);
		ret = -EINVAL;
		goto out;
	}

	load_addr = be32_to_cpu(fw_data[2]);
	if (load_addr < 0x4000 || load_addr >= 0x80000) {
		printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
		       ap->name, load_addr, fw_name);
		ret = -EINVAL;
		goto out;
	}

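	/*
	 * Do not try to clear more than 512KiB or we end up seeing
	 * funny things on NICs with only 512KiB SRAM.
	 */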
	ace_clear(regs, 0x2000, 0x80000 - 0x2000);
	ace_copy(regs, &fw_data[3], load_addr, fw->size - 12);
 out:
	release_firmware(fw);
	return ret;
}

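/*
 * The EEPROM on the AceNIC is an Atmel I2C-style serial EEPROM. It is
 * accessed by bit-banging the clock and data lines through GPIO bits in
 * the LocalCtrl register, with explicit delays and read-backs to pace
 * the device; hence the repetitive helpers below.
 */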
static void eeprom_start(struct ace_regs __iomem *regs)
{
	u32 local;

	readl(&regs->LocalCtrl);
	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
}


static void eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
{
	short i;
	u32 local;

	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local &= ~EEPROM_DATA_OUT;
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	for (i = 0; i < 8; i++, magic <<= 1) {
		udelay(ACE_SHORT_DELAY);
		if (magic & 0x80)
			local |= EEPROM_DATA_OUT;
		else
			local &= ~EEPROM_DATA_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();

		udelay(ACE_SHORT_DELAY);
		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);
		local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
	}
}


static int eeprom_check_ack(struct ace_regs __iomem *regs)
{
	int state;
	u32 local;

	local = readl(&regs->LocalCtrl);
	local &= ~EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_LONG_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);

	state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
	udelay(ACE_SHORT_DELAY);
	mb();
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	return state;
}


static void eeprom_stop(struct ace_regs __iomem *regs)
{
	u32 local;

	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_LONG_DELAY);
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	mb();
}

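/*
 * Read a whole byte from the EEPROM.
 */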
static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	unsigned long flags;
	u32 local;
	int result = 0;
	short i;

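	/*
	 * Don't take interrupts on this CPU while bit-banging the I2C
	 * device; the protocol is timing sensitive.
	 */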
	local_irq_save(flags);

	eeprom_start(regs);

	eeprom_prep(regs, EEPROM_WRITE_SELECT);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_prep(regs, (offset >> 8) & 0xff);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set address byte 0\n",
		       ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_prep(regs, offset & 0xff);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set address byte 1\n",
		       ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_start(regs);
	eeprom_prep(regs, EEPROM_READ_SELECT);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
		       ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	for (i = 0; i < 8; i++) {
		local = readl(&regs->LocalCtrl);
		local &= ~EEPROM_WRITE_ENABLE;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_LONG_DELAY);
		mb();
		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);

		result = (result << 1) |
			((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
		udelay(ACE_SHORT_DELAY);
		mb();
		local = readl(&regs->LocalCtrl);
		local &= ~EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_SHORT_DELAY);
		mb();
		if (i == 7) {
			local |= EEPROM_WRITE_ENABLE;
			writel(local, &regs->LocalCtrl);
			readl(&regs->LocalCtrl);
			mb();
			udelay(ACE_SHORT_DELAY);
		}
	}

	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	udelay(ACE_LONG_DELAY);
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	eeprom_stop(regs);

	local_irq_restore(flags);
 out:
	return result;

 eeprom_read_error:
	printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
	       ap->name, offset);
	goto out;
}

module_pci_driver(acenic_pci_driver);