/*
 * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
 *           and other Tigon based boards (3Com 3C985, NetGear GA620/GA620T,
 *           Farallon PN9000-SX/PN9100-T, SGI AceNIC).
 *
 * Maintained by Jes Sorensen <jes@trained-monkey.org>.
 *
 * Released under the GNU General Public License (GPL).
 */
53#include <linux/module.h>
54#include <linux/moduleparam.h>
55#include <linux/types.h>
56#include <linux/errno.h>
57#include <linux/ioport.h>
58#include <linux/pci.h>
59#include <linux/dma-mapping.h>
60#include <linux/kernel.h>
61#include <linux/netdevice.h>
62#include <linux/etherdevice.h>
63#include <linux/skbuff.h>
64#include <linux/init.h>
65#include <linux/delay.h>
66#include <linux/mm.h>
67#include <linux/highmem.h>
68#include <linux/sockios.h>
69#include <linux/firmware.h>
70#include <linux/slab.h>
71#include <linux/prefetch.h>
72
73#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
74#include <linux/if_vlan.h>
75#endif
76
77#ifdef SIOCETHTOOL
78#include <linux/ethtool.h>
79#endif
80
81#include <net/sock.h>
82#include <net/ip.h>
83
84#include <asm/system.h>
85#include <asm/io.h>
86#include <asm/irq.h>
87#include <asm/byteorder.h>
88#include <asm/uaccess.h>
89
90
91#define DRV_NAME "acenic"
92
93#undef INDEX_DEBUG
94
95#ifdef CONFIG_ACENIC_OMIT_TIGON_I
96#define ACE_IS_TIGON_I(ap) 0
97#define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
98#else
99#define ACE_IS_TIGON_I(ap) (ap->version == 1)
100#define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
101#endif
102
103#ifndef PCI_VENDOR_ID_ALTEON
104#define PCI_VENDOR_ID_ALTEON 0x12ae
105#endif
106#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
107#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001
108#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
109#endif
110#ifndef PCI_DEVICE_ID_3COM_3C985
111#define PCI_DEVICE_ID_3COM_3C985 0x0001
112#endif
113#ifndef PCI_VENDOR_ID_NETGEAR
114#define PCI_VENDOR_ID_NETGEAR 0x1385
115#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
116#endif
117#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
118#define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a
119#endif

/*
 * Farallon boards do not carry their own PCI vendor ID: the PN9000-SX
 * is matched under the DEC vendor ID and the PN9100-T under Alteon's,
 * so only the device IDs are defined here.
 */
126#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
127#define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a
128#endif
129#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
130#define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa
131#endif
132#ifndef PCI_VENDOR_ID_SGI
133#define PCI_VENDOR_ID_SGI 0x10a9
134#endif
135#ifndef PCI_DEVICE_ID_SGI_ACENIC
136#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
137#endif
138
139static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
140 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
141 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
142 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
143 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
144 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
145 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
146 { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
147 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
148 { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
149 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	/*
	 * Farallon boards are matched via the DEC and Alteon vendor IDs
	 * (see the device ID definitions above).
	 */
154 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
155 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
156 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
157 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
158 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
159 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
160 { }
161};
162MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
163
164#define ace_sync_irq(irq) synchronize_irq(irq)
165
166#ifndef offset_in_page
167#define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK)
168#endif
169
170#define ACE_MAX_MOD_PARMS 8
171#define BOARD_IDX_STATIC 0
172#define BOARD_IDX_OVERFLOW -1
173
174#if (defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)) && \
175 defined(NETIF_F_HW_VLAN_RX)
176#define ACENIC_DO_VLAN 1
177#define ACE_RCB_VLAN_FLAG RCB_FLG_VLAN_ASSIST
178#else
179#define ACENIC_DO_VLAN 0
180#define ACE_RCB_VLAN_FLAG 0
181#endif
182
183#include "acenic.h"
184

/* Upper bounds on the size of the firmware image sections we will load. */
188#define MAX_TEXT_LEN 96*1024
189#define MAX_RODATA_LEN 8*1024
190#define MAX_DATA_LEN 2*1024
191
192#ifndef tigon2FwReleaseLocal
193#define tigon2FwReleaseLocal 0
194#endif
195

/*
 * RX ring fill targets and refill thresholds.
 *
 * The *_SIZE values are the number of buffers we try to keep posted in
 * each receive ring; the *_THRES values are the low-water and panic
 * marks that trigger a refill from the interrupt handler or the tasklet.
 */
344#define RX_RING_SIZE 72
345#define RX_MINI_SIZE 64
346#define RX_JUMBO_SIZE 48
347
348#define RX_PANIC_STD_THRES 16
349#define RX_PANIC_STD_REFILL (3*RX_PANIC_STD_THRES)/2
350#define RX_LOW_STD_THRES (3*RX_RING_SIZE)/4
351#define RX_PANIC_MINI_THRES 12
352#define RX_PANIC_MINI_REFILL (3*RX_PANIC_MINI_THRES)/2
353#define RX_LOW_MINI_THRES (3*RX_MINI_SIZE)/4
354#define RX_PANIC_JUMBO_THRES 6
355#define RX_PANIC_JUMBO_REFILL (3*RX_PANIC_JUMBO_THRES)/2
356#define RX_LOW_JUMBO_THRES (3*RX_JUMBO_SIZE)/4
357

/* Sizes of the receive buffers posted to each ring class. */
363#define ACE_MINI_SIZE 100
364
365#define ACE_MINI_BUFSIZE ACE_MINI_SIZE
366#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
367#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
368

/*
 * Default interrupt coalescing parameters written to the Tune* registers;
 * one set for standard MTU operation and one for jumbo frames
 * (see ace_set_rxtx_parms()).
 */
377#define DEF_TX_COAL 400
378#define DEF_TX_MAX_DESC 60
379#define DEF_RX_COAL 120
380#define DEF_RX_MAX_DESC 25
381#define DEF_TX_RATIO 21
382
383#define DEF_JUMBO_TX_COAL 20
384#define DEF_JUMBO_TX_MAX_DESC 60
385#define DEF_JUMBO_RX_COAL 30
386#define DEF_JUMBO_RX_MAX_DESC 6
387#define DEF_JUMBO_TX_RATIO 21
388
#if tigon2FwReleaseLocal < 20001118
/* Firmware older than 2000-11-18: only coalesced TX interrupts are used. */
#define TX_COAL_INTS_ONLY 1
#else
/* Newer firmware: the driver still relies on coalesced TX interrupts only. */
#define TX_COAL_INTS_ONLY 1
#endif
404
405#define DEF_TRACE 0
406#define DEF_STAT (2 * TICKS_PER_SEC)

/* Per-board tuning parameters, indexed by probe order (module use only). */
409static int link_state[ACE_MAX_MOD_PARMS];
410static int trace[ACE_MAX_MOD_PARMS];
411static int tx_coal_tick[ACE_MAX_MOD_PARMS];
412static int rx_coal_tick[ACE_MAX_MOD_PARMS];
413static int max_tx_desc[ACE_MAX_MOD_PARMS];
414static int max_rx_desc[ACE_MAX_MOD_PARMS];
415static int tx_ratio[ACE_MAX_MOD_PARMS];
416static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
417
418MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
419MODULE_LICENSE("GPL");
420MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
421#ifndef CONFIG_ACENIC_OMIT_TIGON_I
422MODULE_FIRMWARE("acenic/tg1.bin");
423#endif
424MODULE_FIRMWARE("acenic/tg2.bin");
425
426module_param_array_named(link, link_state, int, NULL, 0);
427module_param_array(trace, int, NULL, 0);
428module_param_array(tx_coal_tick, int, NULL, 0);
429module_param_array(max_tx_desc, int, NULL, 0);
430module_param_array(rx_coal_tick, int, NULL, 0);
431module_param_array(max_rx_desc, int, NULL, 0);
432module_param_array(tx_ratio, int, NULL, 0);
433MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
434MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
435MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
436MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
437MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
438MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
439MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
440
441
442static const char version[] __devinitconst =
443 "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
444 " http://home.cern.ch/~jes/gige/acenic.html\n";
445
446static int ace_get_settings(struct net_device *, struct ethtool_cmd *);
447static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
448static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
449
450static const struct ethtool_ops ace_ethtool_ops = {
451 .get_settings = ace_get_settings,
452 .set_settings = ace_set_settings,
453 .get_drvinfo = ace_get_drvinfo,
454};
455
456static void ace_watchdog(struct net_device *dev);
457
458static const struct net_device_ops ace_netdev_ops = {
459 .ndo_open = ace_open,
460 .ndo_stop = ace_close,
461 .ndo_tx_timeout = ace_watchdog,
462 .ndo_get_stats = ace_get_stats,
463 .ndo_start_xmit = ace_start_xmit,
464 .ndo_set_multicast_list = ace_set_multicast_list,
465 .ndo_validate_addr = eth_validate_addr,
466 .ndo_set_mac_address = ace_set_mac_addr,
467 .ndo_change_mtu = ace_change_mtu,
468#if ACENIC_DO_VLAN
469 .ndo_vlan_rx_register = ace_vlan_rx_register,
470#endif
471};
472
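/*
 * PCI probe: allocate and initialise the net_device, map the NIC
 * register window, allocate the descriptor rings and register the
 * interface with the networking core.
 */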
473static int __devinit acenic_probe_one(struct pci_dev *pdev,
474 const struct pci_device_id *id)
475{
476 struct net_device *dev;
477 struct ace_private *ap;
478 static int boards_found;
479
480 dev = alloc_etherdev(sizeof(struct ace_private));
481 if (dev == NULL) {
482 printk(KERN_ERR "acenic: Unable to allocate "
483 "net_device structure!\n");
484 return -ENOMEM;
485 }
486
487 SET_NETDEV_DEV(dev, &pdev->dev);
488
489 ap = netdev_priv(dev);
490 ap->pdev = pdev;
491 ap->name = pci_name(pdev);
492
493 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
494#if ACENIC_DO_VLAN
495 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
496#endif
497
498 dev->watchdog_timeo = 5*HZ;
499
500 dev->netdev_ops = &ace_netdev_ops;
501 SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
502
503
504 if (!boards_found)
505 printk(version);
506
507 if (pci_enable_device(pdev))
508 goto fail_free_netdev;
509
510
511
512
513
514
515 pci_set_master(pdev);
516
517 pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
518
519
520 if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
521 printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
522 "access - was not enabled by BIOS/Firmware\n",
523 ap->name);
524 ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
525 pci_write_config_word(ap->pdev, PCI_COMMAND,
526 ap->pci_command);
527 wmb();
528 }
529
530 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
531 if (ap->pci_latency <= 0x40) {
532 ap->pci_latency = 0x40;
533 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
534 }
535
536
537
538
539
540
541 dev->base_addr = pci_resource_start(pdev, 0);
542 ap->regs = ioremap(dev->base_addr, 0x4000);
543 if (!ap->regs) {
544 printk(KERN_ERR "%s: Unable to map I/O register, "
545 "AceNIC %i will be disabled.\n",
546 ap->name, boards_found);
547 goto fail_free_netdev;
548 }
549
550 switch(pdev->vendor) {
551 case PCI_VENDOR_ID_ALTEON:
552 if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
553 printk(KERN_INFO "%s: Farallon PN9100-T ",
554 ap->name);
555 } else {
556 printk(KERN_INFO "%s: Alteon AceNIC ",
557 ap->name);
558 }
559 break;
560 case PCI_VENDOR_ID_3COM:
561 printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
562 break;
563 case PCI_VENDOR_ID_NETGEAR:
564 printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
565 break;
566 case PCI_VENDOR_ID_DEC:
567 if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
568 printk(KERN_INFO "%s: Farallon PN9000-SX ",
569 ap->name);
570 break;
571 }
572 case PCI_VENDOR_ID_SGI:
573 printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
574 break;
575 default:
576 printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
577 break;
578 }
579
580 printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
581 printk("irq %d\n", pdev->irq);
582
583#ifdef CONFIG_ACENIC_OMIT_TIGON_I
584 if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
585 printk(KERN_ERR "%s: Driver compiled without Tigon I"
586 " support - NIC disabled\n", dev->name);
587 goto fail_uninit;
588 }
589#endif
590
591 if (ace_allocate_descriptors(dev))
592 goto fail_free_netdev;
593
594#ifdef MODULE
595 if (boards_found >= ACE_MAX_MOD_PARMS)
596 ap->board_idx = BOARD_IDX_OVERFLOW;
597 else
598 ap->board_idx = boards_found;
599#else
600 ap->board_idx = BOARD_IDX_STATIC;
601#endif
602
603 if (ace_init(dev))
604 goto fail_free_netdev;
605
606 if (register_netdev(dev)) {
607 printk(KERN_ERR "acenic: device registration failed\n");
608 goto fail_uninit;
609 }
610 ap->name = dev->name;
611
612 if (ap->pci_using_dac)
613 dev->features |= NETIF_F_HIGHDMA;
614
615 pci_set_drvdata(pdev, dev);
616
617 boards_found++;
618 return 0;
619
620 fail_uninit:
621 ace_init_cleanup(dev);
622 fail_free_netdev:
623 free_netdev(dev);
624 return -ENODEV;
625}
626
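/*
 * PCI remove: unregister the interface, halt both NIC CPUs, unmap and
 * free every receive buffer that is still posted, then release the
 * remaining driver resources via ace_init_cleanup().
 */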
627static void __devexit acenic_remove_one(struct pci_dev *pdev)
628{
629 struct net_device *dev = pci_get_drvdata(pdev);
630 struct ace_private *ap = netdev_priv(dev);
631 struct ace_regs __iomem *regs = ap->regs;
632 short i;
633
634 unregister_netdev(dev);
635
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	if (ap->version >= 2)
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);

	/*
	 * Clear any pending interrupts and flush the posted writes.
	 */
	writel(1, &regs->Mb0Lo);
	readl(&regs->CpuCtrl);
645
646
647
648
649
650
651
652
653
654
655 ace_sync_irq(dev->irq);
656
657 for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
658 struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
659
660 if (skb) {
661 struct ring_info *ringp;
662 dma_addr_t mapping;
663
664 ringp = &ap->skb->rx_std_skbuff[i];
665 mapping = dma_unmap_addr(ringp, mapping);
666 pci_unmap_page(ap->pdev, mapping,
667 ACE_STD_BUFSIZE,
668 PCI_DMA_FROMDEVICE);
669
670 ap->rx_std_ring[i].size = 0;
671 ap->skb->rx_std_skbuff[i].skb = NULL;
672 dev_kfree_skb(skb);
673 }
674 }
675
676 if (ap->version >= 2) {
677 for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
678 struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
679
680 if (skb) {
681 struct ring_info *ringp;
682 dma_addr_t mapping;
683
684 ringp = &ap->skb->rx_mini_skbuff[i];
685 mapping = dma_unmap_addr(ringp,mapping);
686 pci_unmap_page(ap->pdev, mapping,
687 ACE_MINI_BUFSIZE,
688 PCI_DMA_FROMDEVICE);
689
690 ap->rx_mini_ring[i].size = 0;
691 ap->skb->rx_mini_skbuff[i].skb = NULL;
692 dev_kfree_skb(skb);
693 }
694 }
695 }
696
697 for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
698 struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
699 if (skb) {
700 struct ring_info *ringp;
701 dma_addr_t mapping;
702
703 ringp = &ap->skb->rx_jumbo_skbuff[i];
704 mapping = dma_unmap_addr(ringp, mapping);
705 pci_unmap_page(ap->pdev, mapping,
706 ACE_JUMBO_BUFSIZE,
707 PCI_DMA_FROMDEVICE);
708
709 ap->rx_jumbo_ring[i].size = 0;
710 ap->skb->rx_jumbo_skbuff[i].skb = NULL;
711 dev_kfree_skb(skb);
712 }
713 }
714
715 ace_init_cleanup(dev);
716 free_netdev(dev);
717}
718
719static struct pci_driver acenic_pci_driver = {
720 .name = "acenic",
721 .id_table = acenic_pci_tbl,
722 .probe = acenic_probe_one,
723 .remove = __devexit_p(acenic_remove_one),
724};
725
726static int __init acenic_init(void)
727{
728 return pci_register_driver(&acenic_pci_driver);
729}
730
731static void __exit acenic_exit(void)
732{
733 pci_unregister_driver(&acenic_pci_driver);
734}
735
736module_init(acenic_init);
737module_exit(acenic_exit);
738
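/*
 * Free the DMA-coherent descriptor rings and the shared producer/consumer
 * index words allocated by ace_allocate_descriptors().
 */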
739static void ace_free_descriptors(struct net_device *dev)
740{
741 struct ace_private *ap = netdev_priv(dev);
742 int size;
743
744 if (ap->rx_std_ring != NULL) {
745 size = (sizeof(struct rx_desc) *
746 (RX_STD_RING_ENTRIES +
747 RX_JUMBO_RING_ENTRIES +
748 RX_MINI_RING_ENTRIES +
749 RX_RETURN_RING_ENTRIES));
750 pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
751 ap->rx_ring_base_dma);
752 ap->rx_std_ring = NULL;
753 ap->rx_jumbo_ring = NULL;
754 ap->rx_mini_ring = NULL;
755 ap->rx_return_ring = NULL;
756 }
757 if (ap->evt_ring != NULL) {
758 size = (sizeof(struct event) * EVT_RING_ENTRIES);
759 pci_free_consistent(ap->pdev, size, ap->evt_ring,
760 ap->evt_ring_dma);
761 ap->evt_ring = NULL;
762 }
763 if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
764 size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
765 pci_free_consistent(ap->pdev, size, ap->tx_ring,
766 ap->tx_ring_dma);
767 }
768 ap->tx_ring = NULL;
769
770 if (ap->evt_prd != NULL) {
771 pci_free_consistent(ap->pdev, sizeof(u32),
772 (void *)ap->evt_prd, ap->evt_prd_dma);
773 ap->evt_prd = NULL;
774 }
775 if (ap->rx_ret_prd != NULL) {
776 pci_free_consistent(ap->pdev, sizeof(u32),
777 (void *)ap->rx_ret_prd,
778 ap->rx_ret_prd_dma);
779 ap->rx_ret_prd = NULL;
780 }
781 if (ap->tx_csm != NULL) {
782 pci_free_consistent(ap->pdev, sizeof(u32),
783 (void *)ap->tx_csm, ap->tx_csm_dma);
784 ap->tx_csm = NULL;
785 }
786}
787
788
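/*
 * Allocate the RX (standard/jumbo/mini/return) and event rings plus the
 * shared index words in DMA-coherent memory. The TX ring is allocated in
 * host memory only on Tigon II; on Tigon I it lives in NIC SRAM.
 */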
789static int ace_allocate_descriptors(struct net_device *dev)
790{
791 struct ace_private *ap = netdev_priv(dev);
792 int size;
793
794 size = (sizeof(struct rx_desc) *
795 (RX_STD_RING_ENTRIES +
796 RX_JUMBO_RING_ENTRIES +
797 RX_MINI_RING_ENTRIES +
798 RX_RETURN_RING_ENTRIES));
799
800 ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
801 &ap->rx_ring_base_dma);
802 if (ap->rx_std_ring == NULL)
803 goto fail;
804
805 ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
806 ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
807 ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
808
809 size = (sizeof(struct event) * EVT_RING_ENTRIES);
810
811 ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);
812
813 if (ap->evt_ring == NULL)
814 goto fail;
815
816
817
818
819
820 if (!ACE_IS_TIGON_I(ap)) {
821 size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
822
823 ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
824 &ap->tx_ring_dma);
825
826 if (ap->tx_ring == NULL)
827 goto fail;
828 }
829
830 ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
831 &ap->evt_prd_dma);
832 if (ap->evt_prd == NULL)
833 goto fail;
834
835 ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
836 &ap->rx_ret_prd_dma);
837 if (ap->rx_ret_prd == NULL)
838 goto fail;
839
840 ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
841 &ap->tx_csm_dma);
842 if (ap->tx_csm == NULL)
843 goto fail;
844
845 return 0;
846
847fail:
848
849 ace_init_cleanup(dev);
850 return 1;
851}
852
853
854
855
856
857
858static void ace_init_cleanup(struct net_device *dev)
859{
860 struct ace_private *ap;
861
862 ap = netdev_priv(dev);
863
864 ace_free_descriptors(dev);
865
866 if (ap->info)
867 pci_free_consistent(ap->pdev, sizeof(struct ace_info),
868 ap->info, ap->info_dma);
869 kfree(ap->skb);
870 kfree(ap->trace_buf);
871
872 if (dev->irq)
873 free_irq(dev->irq, dev);
874
875 iounmap(ap->regs);
876}
877

/*
 * Commands are posted into the command ring in the NIC register block;
 * advancing the producer index tells the firmware a new command is there.
 */
static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
{
	u32 idx;

	idx = readl(&regs->CmdPrd);

	writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
	idx = (idx + 1) % CMD_RING_ENTRIES;

	writel(idx, &regs->CmdPrd);
}
893
894
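/*
 * One-time hardware initialisation: reset the NIC, read the MAC address
 * from the EEPROM, tune the PCI/DMA settings, load the firmware and set
 * up the rings described by the shared ace_info block.
 */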
895static int __devinit ace_init(struct net_device *dev)
896{
897 struct ace_private *ap;
898 struct ace_regs __iomem *regs;
899 struct ace_info *info = NULL;
900 struct pci_dev *pdev;
901 unsigned long myjif;
902 u64 tmp_ptr;
903 u32 tig_ver, mac1, mac2, tmp, pci_state;
904 int board_idx, ecode = 0;
905 short i;
906 unsigned char cache_size;
907
908 ap = netdev_priv(dev);
909 regs = ap->regs;
910
911 board_idx = ap->board_idx;
912
	/*
	 * Take the NIC out of whatever state the BIOS/firmware left it in
	 * by issuing a full hardware reset first.
	 */
	writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
	readl(&regs->HostCtrl);		/* flush posted PCI write */
	udelay(5);

	/*
	 * Set byte/word swapping and clear pending interrupts. The value is
	 * replicated into the top byte so the write is valid regardless of
	 * the byte ordering the NIC is currently using.
	 */
#ifdef __BIG_ENDIAN
	writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
	       &regs->HostCtrl);
#else
	writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
	       &regs->HostCtrl);
#endif
	readl(&regs->HostCtrl);		/* flush */

	/*
	 * Halt the NIC CPU before poking at it any further.
	 */
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	readl(&regs->CpuCtrl);
	writel(0, &regs->Mb0Lo);

	tig_ver = readl(&regs->HostCtrl) >> 28;
946
947 switch(tig_ver){
948#ifndef CONFIG_ACENIC_OMIT_TIGON_I
949 case 4:
950 case 5:
951 printk(KERN_INFO " Tigon I (Rev. %i), Firmware: %i.%i.%i, ",
952 tig_ver, ap->firmware_major, ap->firmware_minor,
953 ap->firmware_fix);
		writel(0, &regs->LocalCtrl);
955 ap->version = 1;
956 ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
957 break;
958#endif
959 case 6:
960 printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
961 tig_ver, ap->firmware_major, ap->firmware_minor,
962 ap->firmware_fix);
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
		readl(&regs->CpuBCtrl);

		/*
		 * Tigon II has a second CPU (halted above); set up the
		 * local SRAM bank size and timing.
		 */
		writel(SRAM_BANK_512K, &regs->LocalCtrl);
		writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
972 ap->version = 2;
973 ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
974 break;
975 default:
976 printk(KERN_WARNING " Unsupported Tigon version detected "
977 "(%i)\n", tig_ver);
978 ecode = -ENODEV;
979 goto init_error;
980 }
981

	/*
	 * Select the DMA/descriptor byte ordering and the warning/fatal
	 * event reporting bits in the ModeStat register.
	 */
#ifdef __BIG_ENDIAN
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
	readl(&regs->ModeStat);		/* flush */
997
998 mac1 = 0;
999 for(i = 0; i < 4; i++) {
1000 int t;
1001
1002 mac1 = mac1 << 8;
1003 t = read_eeprom_byte(dev, 0x8c+i);
1004 if (t < 0) {
1005 ecode = -EIO;
1006 goto init_error;
1007 } else
1008 mac1 |= (t & 0xff);
1009 }
1010 mac2 = 0;
1011 for(i = 4; i < 8; i++) {
1012 int t;
1013
1014 mac2 = mac2 << 8;
1015 t = read_eeprom_byte(dev, 0x8c+i);
1016 if (t < 0) {
1017 ecode = -EIO;
1018 goto init_error;
1019 } else
1020 mac2 |= (t & 0xff);
1021 }
1022
	writel(mac1, &regs->MacAddrHi);
	writel(mac2, &regs->MacAddrLo);
1025
1026 dev->dev_addr[0] = (mac1 >> 8) & 0xff;
1027 dev->dev_addr[1] = mac1 & 0xff;
1028 dev->dev_addr[2] = (mac2 >> 24) & 0xff;
1029 dev->dev_addr[3] = (mac2 >> 16) & 0xff;
1030 dev->dev_addr[4] = (mac2 >> 8) & 0xff;
1031 dev->dev_addr[5] = mac2 & 0xff;
1032
1033 printk("MAC: %pM\n", dev->dev_addr);
1034
1035
1036
1037
1038
1039
1040
1041 pdev = ap->pdev;
1042 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
1043 cache_size <<= 2;
1044 if (cache_size != SMP_CACHE_BYTES) {
1045 printk(KERN_INFO " PCI cache line size set incorrectly "
1046 "(%i bytes) by BIOS/FW, ", cache_size);
1047 if (cache_size > SMP_CACHE_BYTES)
1048 printk("expecting %i\n", SMP_CACHE_BYTES);
1049 else {
1050 printk("correcting to %i\n", SMP_CACHE_BYTES);
1051 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
1052 SMP_CACHE_BYTES >> 2);
1053 }
1054 }
1055
	pci_state = readl(&regs->PciState);
1057 printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
1058 "latency: %i clks\n",
1059 (pci_state & PCI_32BIT) ? 32 : 64,
1060 (pci_state & PCI_66MHZ) ? 66 : 33,
1061 ap->pci_latency);
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073 tmp = READ_CMD_MEM | WRITE_CMD_MEM;
1074 if (ap->version >= 2) {
1075 tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
1076
1077
1078
1079 if (board_idx == BOARD_IDX_OVERFLOW ||
1080 dis_pci_mem_inval[board_idx]) {
1081 if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
1082 ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
1083 pci_write_config_word(pdev, PCI_COMMAND,
1084 ap->pci_command);
1085 printk(KERN_INFO " Disabling PCI memory "
1086 "write and invalidate\n");
1087 }
1088 } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
1089 printk(KERN_INFO " PCI memory write & invalidate "
1090 "enabled by BIOS, enabling counter measures\n");
1091
1092 switch(SMP_CACHE_BYTES) {
1093 case 16:
1094 tmp |= DMA_WRITE_MAX_16;
1095 break;
1096 case 32:
1097 tmp |= DMA_WRITE_MAX_32;
1098 break;
1099 case 64:
1100 tmp |= DMA_WRITE_MAX_64;
1101 break;
1102 case 128:
1103 tmp |= DMA_WRITE_MAX_128;
1104 break;
1105 default:
1106 printk(KERN_INFO " Cache line size %i not "
1107 "supported, PCI write and invalidate "
1108 "disabled\n", SMP_CACHE_BYTES);
1109 ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
1110 pci_write_config_word(pdev, PCI_COMMAND,
1111 ap->pci_command);
1112 }
1113 }
1114 }
1115
1116#ifdef __sparc__
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128 tmp &= ~DMA_READ_WRITE_MASK;
1129 tmp |= DMA_READ_MAX_64;
1130 tmp |= DMA_WRITE_MAX_64;
1131#endif
1132#ifdef __alpha__
1133 tmp &= ~DMA_READ_WRITE_MASK;
1134 tmp |= DMA_READ_MAX_128;
1135
1136
1137
1138
1139
1140 tmp |= DMA_WRITE_MAX_128;
1141#endif
	writel(tmp, &regs->PciState);
1143
1144#if 0
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156 if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
1157 printk(KERN_INFO " Enabling PCI Fast Back to Back\n");
1158 ap->pci_command |= PCI_COMMAND_FAST_BACK;
1159 pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
1160 }
1161#endif
1162
1163
1164
1165
1166 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1167 ap->pci_using_dac = 1;
1168 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
1169 ap->pci_using_dac = 0;
1170 } else {
1171 ecode = -ENODEV;
1172 goto init_error;
1173 }
1174
1175
1176
1177
1178
1179
1180 if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
1181 &ap->info_dma))) {
1182 ecode = -EAGAIN;
1183 goto init_error;
1184 }
1185 ap->info = info;
1186
1187
1188
1189
1190 if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
1191 ecode = -EAGAIN;
1192 goto init_error;
1193 }
1194
1195 ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
1196 DRV_NAME, dev);
1197 if (ecode) {
1198 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
1199 DRV_NAME, pdev->irq);
1200 goto init_error;
1201 } else
1202 dev->irq = pdev->irq;
1203
1204#ifdef INDEX_DEBUG
1205 spin_lock_init(&ap->debug_lock);
1206 ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
1207 ap->last_std_rx = 0;
1208 ap->last_mini_rx = 0;
1209#endif
1210
1211 memset(ap->info, 0, sizeof(struct ace_info));
1212 memset(ap->skb, 0, sizeof(struct ace_skb));
1213
1214 ecode = ace_load_firmware(dev);
1215 if (ecode)
1216 goto init_error;
1217
1218 ap->fw_running = 0;
1219
1220 tmp_ptr = ap->info_dma;
	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
	writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
1223
1224 memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
1225
1226 set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
1227 info->evt_ctrl.flags = 0;
1228
1229 *(ap->evt_prd) = 0;
1230 wmb();
1231 set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
	writel(0, &regs->EvtCsm);
1233
1234 set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
1235 info->cmd_ctrl.flags = 0;
1236 info->cmd_ctrl.max_len = 0;
1237
	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRng[i]);

	writel(0, &regs->CmdPrd);
	writel(0, &regs->CmdCsm);
1243
1244 tmp_ptr = ap->info_dma;
1245 tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
1246 set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
1247
1248 set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
1249 info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
1250 info->rx_std_ctrl.flags =
1251 RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
1252
1253 memset(ap->rx_std_ring, 0,
1254 RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
1255
1256 for (i = 0; i < RX_STD_RING_ENTRIES; i++)
1257 ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
1258
1259 ap->rx_std_skbprd = 0;
1260 atomic_set(&ap->cur_rx_bufs, 0);
1261
1262 set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
1263 (ap->rx_ring_base_dma +
1264 (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
1265 info->rx_jumbo_ctrl.max_len = 0;
1266 info->rx_jumbo_ctrl.flags =
1267 RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
1268
1269 memset(ap->rx_jumbo_ring, 0,
1270 RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
1271
1272 for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
1273 ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
1274
1275 ap->rx_jumbo_skbprd = 0;
1276 atomic_set(&ap->cur_jumbo_bufs, 0);
1277
1278 memset(ap->rx_mini_ring, 0,
1279 RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
1280
1281 if (ap->version >= 2) {
1282 set_aceaddr(&info->rx_mini_ctrl.rngptr,
1283 (ap->rx_ring_base_dma +
1284 (sizeof(struct rx_desc) *
1285 (RX_STD_RING_ENTRIES +
1286 RX_JUMBO_RING_ENTRIES))));
1287 info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
1288 info->rx_mini_ctrl.flags =
1289 RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|ACE_RCB_VLAN_FLAG;
1290
1291 for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
1292 ap->rx_mini_ring[i].flags =
1293 BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
1294 } else {
1295 set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
1296 info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
1297 info->rx_mini_ctrl.max_len = 0;
1298 }
1299
1300 ap->rx_mini_skbprd = 0;
1301 atomic_set(&ap->cur_mini_bufs, 0);
1302
1303 set_aceaddr(&info->rx_return_ctrl.rngptr,
1304 (ap->rx_ring_base_dma +
1305 (sizeof(struct rx_desc) *
1306 (RX_STD_RING_ENTRIES +
1307 RX_JUMBO_RING_ENTRIES +
1308 RX_MINI_RING_ENTRIES))));
1309 info->rx_return_ctrl.flags = 0;
1310 info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
1311
1312 memset(ap->rx_return_ring, 0,
1313 RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
1314
1315 set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
1316 *(ap->rx_ret_prd) = 0;
1317
	writel(TX_RING_BASE, &regs->WinBase);
1319
1320 if (ACE_IS_TIGON_I(ap)) {
1321 ap->tx_ring = (__force struct tx_desc *) regs->Window;
1322 for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
1323 * sizeof(struct tx_desc)) / sizeof(u32); i++)
1324 writel(0, (__force void __iomem *)ap->tx_ring + i * 4);
1325
1326 set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
1327 } else {
1328 memset(ap->tx_ring, 0,
1329 MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
1330
1331 set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
1332 }
1333
1334 info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
1335 tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
1336
1337
1338
1339
1340 if (!ACE_IS_TIGON_I(ap))
1341 tmp |= RCB_FLG_TX_HOST_RING;
1342#if TX_COAL_INTS_ONLY
1343 tmp |= RCB_FLG_COAL_INT_ONLY;
1344#endif
1345 info->tx_ctrl.flags = tmp;
1346
1347 set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
1348
1349
1350
1351
#if 0
	writel(DMA_THRESH_16W, &regs->DmaReadCfg);
	writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
	writel(DMA_THRESH_8W, &regs->DmaReadCfg);
	writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif
1359
	writel(0, &regs->MaskInt);
	writel(1, &regs->IfIdx);
#if 0
	writel(1, &regs->AssistState);
#endif
1369
	writel(DEF_STAT, &regs->TuneStatTicks);
	writel(DEF_TRACE, &regs->TuneTrace);
1372
1373 ace_set_rxtx_parms(dev, 0);
1374
1375 if (board_idx == BOARD_IDX_OVERFLOW) {
1376 printk(KERN_WARNING "%s: more than %i NICs detected, "
1377 "ignoring module parameters!\n",
1378 ap->name, ACE_MAX_MOD_PARMS);
1379 } else if (board_idx >= 0) {
		if (tx_coal_tick[board_idx])
			writel(tx_coal_tick[board_idx],
			       &regs->TuneTxCoalTicks);
		if (max_tx_desc[board_idx])
			writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);

		if (rx_coal_tick[board_idx])
			writel(rx_coal_tick[board_idx],
			       &regs->TuneRxCoalTicks);
		if (max_rx_desc[board_idx])
			writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);

		if (trace[board_idx])
			writel(trace[board_idx], &regs->TuneTrace);

		if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
			writel(tx_ratio[board_idx], &regs->TxBufRat);
1397 }
1398
1399
1400
1401
1402 tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
1403 LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
1404 if(ap->version >= 2)
1405 tmp |= LNK_TX_FLOW_CTL_Y;
1406
1407
1408
1409
1410 if ((board_idx >= 0) && link_state[board_idx]) {
1411 int option = link_state[board_idx];
1412
1413 tmp = LNK_ENABLE;
1414
1415 if (option & 0x01) {
1416 printk(KERN_INFO "%s: Setting half duplex link\n",
1417 ap->name);
1418 tmp &= ~LNK_FULL_DUPLEX;
1419 }
1420 if (option & 0x02)
1421 tmp &= ~LNK_NEGOTIATE;
1422 if (option & 0x10)
1423 tmp |= LNK_10MB;
1424 if (option & 0x20)
1425 tmp |= LNK_100MB;
1426 if (option & 0x40)
1427 tmp |= LNK_1000MB;
1428 if ((option & 0x70) == 0) {
1429 printk(KERN_WARNING "%s: No media speed specified, "
1430 "forcing auto negotiation\n", ap->name);
1431 tmp |= LNK_NEGOTIATE | LNK_1000MB |
1432 LNK_100MB | LNK_10MB;
1433 }
1434 if ((option & 0x100) == 0)
1435 tmp |= LNK_NEG_FCTL;
1436 else
1437 printk(KERN_INFO "%s: Disabling flow control "
1438 "negotiation\n", ap->name);
1439 if (option & 0x200)
1440 tmp |= LNK_RX_FLOW_CTL_Y;
1441 if ((option & 0x400) && (ap->version >= 2)) {
1442 printk(KERN_INFO "%s: Enabling TX flow control\n",
1443 ap->name);
1444 tmp |= LNK_TX_FLOW_CTL_Y;
1445 }
1446 }
1447
	ap->link = tmp;
	writel(tmp, &regs->TuneLink);
	if (ap->version >= 2)
		writel(tmp, &regs->TuneFastLink);

	writel(ap->firmware_start, &regs->Pc);

	writel(0, &regs->Mb0Lo);
1456
1457
1458
1459
1460
1461
1462
1463 ap->cur_rx = 0;
1464 ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
1465
	wmb();
	ace_set_txprd(regs, ap, 0);
	writel(0, &regs->RxRetCsm);

	/*
	 * Enable the assist state before starting the firmware.
	 */
	writel(1, &regs->AssistState);

	/*
	 * Start the NIC CPU by clearing the halt and trace bits.
	 */
	writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
	readl(&regs->CpuCtrl);
1483
1484
1485
1486
1487 myjif = jiffies + 3 * HZ;
1488 while (time_before(jiffies, myjif) && !ap->fw_running)
1489 cpu_relax();
1490
	if (!ap->fw_running) {
		printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);

		ace_dump_trace(ap);
		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
		readl(&regs->CpuCtrl);

		/*
		 * Halt the second CPU as well on Tigon II and clear the
		 * mailbox so no further interrupts are generated.
		 */
		if (ap->version >= 2)
			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
			       &regs->CpuBCtrl);
		writel(0, &regs->Mb0Lo);
		readl(&regs->Mb0Lo);
1512
1513 ecode = -EBUSY;
1514 goto init_error;
1515 }
1516
1517
1518
1519
1520
1521 if (!test_and_set_bit(0, &ap->std_refill_busy))
1522 ace_load_std_rx_ring(ap, RX_RING_SIZE);
1523 else
1524 printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
1525 ap->name);
1526 if (ap->version >= 2) {
1527 if (!test_and_set_bit(0, &ap->mini_refill_busy))
1528 ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
1529 else
1530 printk(KERN_ERR "%s: Someone is busy refilling "
1531 "the RX mini ring\n", ap->name);
1532 }
1533 return 0;
1534
1535 init_error:
1536 ace_init_cleanup(dev);
1537 return ecode;
1538}
1539
1540
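/*
 * Program the default interrupt coalescing and TX buffer ratio values,
 * using the jumbo defaults when a jumbo MTU is in use; explicit module
 * parameters take precedence and are left untouched.
 */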
1541static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
1542{
1543 struct ace_private *ap = netdev_priv(dev);
1544 struct ace_regs __iomem *regs = ap->regs;
1545 int board_idx = ap->board_idx;
1546
	if (board_idx >= 0) {
		if (!jumbo) {
			if (!tx_coal_tick[board_idx])
				writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_TX_RATIO, &regs->TxBufRat);
		} else {
			if (!tx_coal_tick[board_idx])
				writel(DEF_JUMBO_TX_COAL,
				       &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_JUMBO_TX_MAX_DESC,
				       &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_JUMBO_RX_COAL,
				       &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_JUMBO_RX_MAX_DESC,
				       &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
		}
	}
1576}
1577
1578
1579static void ace_watchdog(struct net_device *data)
1580{
1581 struct net_device *dev = data;
1582 struct ace_private *ap = netdev_priv(dev);
1583 struct ace_regs __iomem *regs = ap->regs;
1584
1585
1586
1587
1588
1589
1590 if (*ap->tx_csm != ap->tx_ret_csm) {
1591 printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
		       dev->name, (unsigned int)readl(&regs->HostCtrl));
1593
1594 } else {
1595 printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
1596 dev->name);
1597#if 0
1598 netif_wake_queue(dev);
1599#endif
1600 }
1601}
1602
1603
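/*
 * Deferred RX refill: scheduled from the interrupt handler when a ring
 * is low but another refill was already in progress.
 */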
1604static void ace_tasklet(unsigned long dev)
1605{
1606 struct ace_private *ap = netdev_priv((struct net_device *)dev);
1607 int cur_size;
1608
1609 cur_size = atomic_read(&ap->cur_rx_bufs);
1610 if ((cur_size < RX_LOW_STD_THRES) &&
1611 !test_and_set_bit(0, &ap->std_refill_busy)) {
1612#ifdef DEBUG
1613 printk("refilling buffers (current %i)\n", cur_size);
1614#endif
1615 ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
1616 }
1617
1618 if (ap->version >= 2) {
1619 cur_size = atomic_read(&ap->cur_mini_bufs);
1620 if ((cur_size < RX_LOW_MINI_THRES) &&
1621 !test_and_set_bit(0, &ap->mini_refill_busy)) {
1622#ifdef DEBUG
1623 printk("refilling mini buffers (current %i)\n",
1624 cur_size);
1625#endif
1626 ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
1627 }
1628 }
1629
1630 cur_size = atomic_read(&ap->cur_jumbo_bufs);
1631 if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
1632 !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
1633#ifdef DEBUG
1634 printk("refilling jumbo buffers (current %i)\n", cur_size);
1635#endif
1636 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
1637 }
1638 ap->tasklet_pending = 0;
1639}
1640
1641
1642
1643
1644
1645static void ace_dump_trace(struct ace_private *ap)
1646{
1647#if 0
1648 if (!ap->trace_buf)
1649 if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
1650 return;
1651#endif
1652}
1653

/*
 * Load up the standard receive ring with fresh skbs. The buffers are
 * DMA mapped and handed to the NIC by advancing the ring's producer
 * index (via a command on Tigon I, a register write on Tigon II).
 */
1662static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
1663{
1664 struct ace_regs __iomem *regs = ap->regs;
1665 short i, idx;
1666
1667
1668 prefetchw(&ap->cur_rx_bufs);
1669
1670 idx = ap->rx_std_skbprd;
1671
1672 for (i = 0; i < nr_bufs; i++) {
1673 struct sk_buff *skb;
1674 struct rx_desc *rd;
1675 dma_addr_t mapping;
1676
1677 skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
1678 if (!skb)
1679 break;
1680
1681 skb_reserve(skb, NET_IP_ALIGN);
1682 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1683 offset_in_page(skb->data),
1684 ACE_STD_BUFSIZE,
1685 PCI_DMA_FROMDEVICE);
1686 ap->skb->rx_std_skbuff[idx].skb = skb;
1687 dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
1688 mapping, mapping);
1689
1690 rd = &ap->rx_std_ring[idx];
1691 set_aceaddr(&rd->addr, mapping);
1692 rd->size = ACE_STD_BUFSIZE;
1693 rd->idx = idx;
1694 idx = (idx + 1) % RX_STD_RING_ENTRIES;
1695 }
1696
1697 if (!i)
1698 goto error_out;
1699
1700 atomic_add(i, &ap->cur_rx_bufs);
1701 ap->rx_std_skbprd = idx;
1702
1703 if (ACE_IS_TIGON_I(ap)) {
1704 struct cmd cmd;
1705 cmd.evt = C_SET_RX_PRD_IDX;
1706 cmd.code = 0;
1707 cmd.idx = ap->rx_std_skbprd;
1708 ace_issue_cmd(regs, &cmd);
1709 } else {
		writel(idx, &regs->RxStdPrd);
1711 wmb();
1712 }
1713
1714 out:
1715 clear_bit(0, &ap->std_refill_busy);
1716 return;
1717
1718 error_out:
1719 printk(KERN_INFO "Out of memory when allocating "
1720 "standard receive buffers\n");
1721 goto out;
1722}
1723
1724
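/*
 * Refill the mini receive ring (used on Tigon II only) with nr_bufs
 * freshly allocated buffers.
 */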
1725static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
1726{
1727 struct ace_regs __iomem *regs = ap->regs;
1728 short i, idx;
1729
1730 prefetchw(&ap->cur_mini_bufs);
1731
1732 idx = ap->rx_mini_skbprd;
1733 for (i = 0; i < nr_bufs; i++) {
1734 struct sk_buff *skb;
1735 struct rx_desc *rd;
1736 dma_addr_t mapping;
1737
1738 skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
1739 if (!skb)
1740 break;
1741
1742 skb_reserve(skb, NET_IP_ALIGN);
1743 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1744 offset_in_page(skb->data),
1745 ACE_MINI_BUFSIZE,
1746 PCI_DMA_FROMDEVICE);
1747 ap->skb->rx_mini_skbuff[idx].skb = skb;
1748 dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
1749 mapping, mapping);
1750
1751 rd = &ap->rx_mini_ring[idx];
1752 set_aceaddr(&rd->addr, mapping);
1753 rd->size = ACE_MINI_BUFSIZE;
1754 rd->idx = idx;
1755 idx = (idx + 1) % RX_MINI_RING_ENTRIES;
1756 }
1757
1758 if (!i)
1759 goto error_out;
1760
1761 atomic_add(i, &ap->cur_mini_bufs);
1762
1763 ap->rx_mini_skbprd = idx;
1764
	writel(idx, &regs->RxMiniPrd);
1766 wmb();
1767
1768 out:
1769 clear_bit(0, &ap->mini_refill_busy);
1770 return;
1771 error_out:
1772 printk(KERN_INFO "Out of memory when allocating "
1773 "mini receive buffers\n");
1774 goto out;
1775}
1776

/*
 * Load nr_bufs freshly allocated jumbo buffers into the jumbo receive
 * ring and tell the NIC about them.
 */
1782static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
1783{
1784 struct ace_regs __iomem *regs = ap->regs;
1785 short i, idx;
1786
1787 idx = ap->rx_jumbo_skbprd;
1788
1789 for (i = 0; i < nr_bufs; i++) {
1790 struct sk_buff *skb;
1791 struct rx_desc *rd;
1792 dma_addr_t mapping;
1793
1794 skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
1795 if (!skb)
1796 break;
1797
1798 skb_reserve(skb, NET_IP_ALIGN);
1799 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1800 offset_in_page(skb->data),
1801 ACE_JUMBO_BUFSIZE,
1802 PCI_DMA_FROMDEVICE);
1803 ap->skb->rx_jumbo_skbuff[idx].skb = skb;
1804 dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
1805 mapping, mapping);
1806
1807 rd = &ap->rx_jumbo_ring[idx];
1808 set_aceaddr(&rd->addr, mapping);
1809 rd->size = ACE_JUMBO_BUFSIZE;
1810 rd->idx = idx;
1811 idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
1812 }
1813
1814 if (!i)
1815 goto error_out;
1816
1817 atomic_add(i, &ap->cur_jumbo_bufs);
1818 ap->rx_jumbo_skbprd = idx;
1819
1820 if (ACE_IS_TIGON_I(ap)) {
1821 struct cmd cmd;
1822 cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
1823 cmd.code = 0;
1824 cmd.idx = ap->rx_jumbo_skbprd;
1825 ace_issue_cmd(regs, &cmd);
1826 } else {
		writel(idx, &regs->RxJumboPrd);
1828 wmb();
1829 }
1830
1831 out:
1832 clear_bit(0, &ap->jumbo_refill_busy);
1833 return;
1834 error_out:
1835 if (net_ratelimit())
1836 printk(KERN_INFO "Out of memory when allocating "
1837 "jumbo receive buffers\n");
1838 goto out;
1839}
1840

/*
 * Process the events the firmware has posted in the event ring, starting
 * at the current consumer index and stopping at the producer index;
 * returns the new consumer index.
 */
1847static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
1848{
1849 struct ace_private *ap;
1850
1851 ap = netdev_priv(dev);
1852
1853 while (evtcsm != evtprd) {
1854 switch (ap->evt_ring[evtcsm].evt) {
1855 case E_FW_RUNNING:
1856 printk(KERN_INFO "%s: Firmware up and running\n",
1857 ap->name);
1858 ap->fw_running = 1;
1859 wmb();
1860 break;
1861 case E_STATS_UPDATED:
1862 break;
1863 case E_LNK_STATE:
1864 {
1865 u16 code = ap->evt_ring[evtcsm].code;
1866 switch (code) {
1867 case E_C_LINK_UP:
1868 {
1869 u32 state = readl(&ap->regs->GigLnkState);
1870 printk(KERN_WARNING "%s: Optical link UP "
1871 "(%s Duplex, Flow Control: %s%s)\n",
1872 ap->name,
1873 state & LNK_FULL_DUPLEX ? "Full":"Half",
1874 state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
1875 state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
1876 break;
1877 }
1878 case E_C_LINK_DOWN:
1879 printk(KERN_WARNING "%s: Optical link DOWN\n",
1880 ap->name);
1881 break;
1882 case E_C_LINK_10_100:
1883 printk(KERN_WARNING "%s: 10/100BaseT link "
1884 "UP\n", ap->name);
1885 break;
1886 default:
1887 printk(KERN_ERR "%s: Unknown optical link "
1888 "state %02x\n", ap->name, code);
1889 }
1890 break;
1891 }
1892 case E_ERROR:
1893 switch(ap->evt_ring[evtcsm].code) {
1894 case E_C_ERR_INVAL_CMD:
1895 printk(KERN_ERR "%s: invalid command error\n",
1896 ap->name);
1897 break;
1898 case E_C_ERR_UNIMP_CMD:
1899 printk(KERN_ERR "%s: unimplemented command "
1900 "error\n", ap->name);
1901 break;
1902 case E_C_ERR_BAD_CFG:
1903 printk(KERN_ERR "%s: bad config error\n",
1904 ap->name);
1905 break;
1906 default:
1907 printk(KERN_ERR "%s: unknown error %02x\n",
1908 ap->name, ap->evt_ring[evtcsm].code);
1909 }
1910 break;
1911 case E_RESET_JUMBO_RNG:
1912 {
1913 int i;
1914 for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
1915 if (ap->skb->rx_jumbo_skbuff[i].skb) {
1916 ap->rx_jumbo_ring[i].size = 0;
1917 set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
1918 dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
1919 ap->skb->rx_jumbo_skbuff[i].skb = NULL;
1920 }
1921 }
1922
1923 if (ACE_IS_TIGON_I(ap)) {
1924 struct cmd cmd;
1925 cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
1926 cmd.code = 0;
1927 cmd.idx = 0;
1928 ace_issue_cmd(ap->regs, &cmd);
1929 } else {
1930 writel(0, &((ap->regs)->RxJumboPrd));
1931 wmb();
1932 }
1933
1934 ap->jumbo = 0;
1935 ap->rx_jumbo_skbprd = 0;
1936 printk(KERN_INFO "%s: Jumbo ring flushed\n",
1937 ap->name);
1938 clear_bit(0, &ap->jumbo_refill_busy);
1939 break;
1940 }
1941 default:
1942 printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
1943 ap->name, ap->evt_ring[evtcsm].evt);
1944 }
1945 evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
1946 }
1947
1948 return evtcsm;
1949}
1950
1951
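/*
 * Process received frames from the return ring between the consumer and
 * producer indices, unmapping each buffer and handing the skb to the
 * stack; the checksum supplied by the NIC is used when available.
 */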
1952static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
1953{
1954 struct ace_private *ap = netdev_priv(dev);
1955 u32 idx;
1956 int mini_count = 0, std_count = 0;
1957
1958 idx = rxretcsm;
1959
1960 prefetchw(&ap->cur_rx_bufs);
1961 prefetchw(&ap->cur_mini_bufs);
1962
1963 while (idx != rxretprd) {
1964 struct ring_info *rip;
1965 struct sk_buff *skb;
1966 struct rx_desc *rxdesc, *retdesc;
1967 u32 skbidx;
1968 int bd_flags, desc_type, mapsize;
1969 u16 csum;
1970
1971
1972
1973 if (idx == rxretcsm)
1974 rmb();
1975
1976 retdesc = &ap->rx_return_ring[idx];
1977 skbidx = retdesc->idx;
1978 bd_flags = retdesc->flags;
1979 desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);
1980
1981 switch(desc_type) {
1982
1983
1984
1985
1986
1987
1988
1989 case 0:
1990 rip = &ap->skb->rx_std_skbuff[skbidx];
1991 mapsize = ACE_STD_BUFSIZE;
1992 rxdesc = &ap->rx_std_ring[skbidx];
1993 std_count++;
1994 break;
1995 case BD_FLG_JUMBO:
1996 rip = &ap->skb->rx_jumbo_skbuff[skbidx];
1997 mapsize = ACE_JUMBO_BUFSIZE;
1998 rxdesc = &ap->rx_jumbo_ring[skbidx];
1999 atomic_dec(&ap->cur_jumbo_bufs);
2000 break;
2001 case BD_FLG_MINI:
2002 rip = &ap->skb->rx_mini_skbuff[skbidx];
2003 mapsize = ACE_MINI_BUFSIZE;
2004 rxdesc = &ap->rx_mini_ring[skbidx];
2005 mini_count++;
2006 break;
2007 default:
2008 printk(KERN_INFO "%s: unknown frame type (0x%02x) "
2009 "returned by NIC\n", dev->name,
2010 retdesc->flags);
2011 goto error;
2012 }
2013
2014 skb = rip->skb;
2015 rip->skb = NULL;
2016 pci_unmap_page(ap->pdev,
2017 dma_unmap_addr(rip, mapping),
2018 mapsize,
2019 PCI_DMA_FROMDEVICE);
2020 skb_put(skb, retdesc->size);
2021
2022
2023
2024
2025 csum = retdesc->tcp_udp_csum;
2026
2027 skb->protocol = eth_type_trans(skb, dev);
2028
2029
2030
2031
2032
2033 if (bd_flags & BD_FLG_TCP_UDP_SUM) {
2034 skb->csum = htons(csum);
2035 skb->ip_summed = CHECKSUM_COMPLETE;
2036 } else {
2037 skb_checksum_none_assert(skb);
2038 }
2039
2040
2041#if ACENIC_DO_VLAN
2042 if (ap->vlgrp && (bd_flags & BD_FLG_VLAN_TAG)) {
2043 vlan_hwaccel_rx(skb, ap->vlgrp, retdesc->vlan);
2044 } else
2045#endif
2046 netif_rx(skb);
2047
2048 dev->stats.rx_packets++;
2049 dev->stats.rx_bytes += retdesc->size;
2050
2051 idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
2052 }
2053
2054 atomic_sub(std_count, &ap->cur_rx_bufs);
2055 if (!ACE_IS_TIGON_I(ap))
2056 atomic_sub(mini_count, &ap->cur_mini_bufs);
2057
2058 out:
2059
2060
2061
2062
2063 if (ACE_IS_TIGON_I(ap)) {
2064 writel(idx, &ap->regs->RxRetCsm);
2065 }
2066 ap->cur_rx = idx;
2067
2068 return;
2069 error:
2070 idx = rxretprd;
2071 goto out;
2072}
2073
2074
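/*
 * Reclaim transmitted buffers up to the consumer index reported by the
 * NIC, updating the statistics and waking the queue if it was stopped.
 */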
2075static inline void ace_tx_int(struct net_device *dev,
2076 u32 txcsm, u32 idx)
2077{
2078 struct ace_private *ap = netdev_priv(dev);
2079
2080 do {
2081 struct sk_buff *skb;
2082 struct tx_ring_info *info;
2083
2084 info = ap->skb->tx_skbuff + idx;
2085 skb = info->skb;
2086
2087 if (dma_unmap_len(info, maplen)) {
2088 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
2089 dma_unmap_len(info, maplen),
2090 PCI_DMA_TODEVICE);
2091 dma_unmap_len_set(info, maplen, 0);
2092 }
2093
2094 if (skb) {
2095 dev->stats.tx_packets++;
2096 dev->stats.tx_bytes += skb->len;
2097 dev_kfree_skb_irq(skb);
2098 info->skb = NULL;
2099 }
2100
2101 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2102 } while (idx != txcsm);
2103
2104 if (netif_queue_stopped(dev))
2105 netif_wake_queue(dev);
2106
2107 wmb();
2108 ap->tx_ret_csm = txcsm;
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137}
2138
2139
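/*
 * Interrupt handler: acknowledge the interrupt, process RX return and TX
 * completion work, handle firmware events, and top up any receive rings
 * that have run low (deferring to the tasklet if a refill is busy).
 */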
2140static irqreturn_t ace_interrupt(int irq, void *dev_id)
2141{
2142 struct net_device *dev = (struct net_device *)dev_id;
2143 struct ace_private *ap = netdev_priv(dev);
2144 struct ace_regs __iomem *regs = ap->regs;
2145 u32 idx;
2146 u32 txcsm, rxretcsm, rxretprd;
2147 u32 evtcsm, evtprd;
2148
2149
2150
2151
2152
2153
	if (!(readl(&regs->HostCtrl) & IN_INT))
2155 return IRQ_NONE;

	/*
	 * ACK the interrupt by clearing mailbox 0; the read back flushes
	 * the posted PCI write before we start processing.
	 */
	writel(0, &regs->Mb0Lo);
	readl(&regs->Mb0Lo);
2167
2168
2169
2170
2171
2172
2173
2174
2175 rxretprd = *ap->rx_ret_prd;
2176 rxretcsm = ap->cur_rx;
2177
2178 if (rxretprd != rxretcsm)
2179 ace_rx_int(dev, rxretprd, rxretcsm);
2180
2181 txcsm = *ap->tx_csm;
2182 idx = ap->tx_ret_csm;
2183
2184 if (txcsm != idx) {
2185
2186
2187
2188
2189
2190
2191
2192 if (!tx_ring_full(ap, txcsm, ap->tx_prd))
2193 ace_tx_int(dev, txcsm, idx);
2194 }
2195
	evtcsm = readl(&regs->EvtCsm);
2197 evtprd = *ap->evt_prd;
2198
2199 if (evtcsm != evtprd) {
2200 evtcsm = ace_handle_event(dev, evtcsm, evtprd);
		writel(evtcsm, &regs->EvtCsm);
2202 }
2203
2204
2205
2206
2207
2208 if (netif_running(dev)) {
2209 int cur_size;
2210 int run_tasklet = 0;
2211
2212 cur_size = atomic_read(&ap->cur_rx_bufs);
2213 if (cur_size < RX_LOW_STD_THRES) {
2214 if ((cur_size < RX_PANIC_STD_THRES) &&
2215 !test_and_set_bit(0, &ap->std_refill_busy)) {
2216#ifdef DEBUG
2217 printk("low on std buffers %i\n", cur_size);
2218#endif
2219 ace_load_std_rx_ring(ap,
2220 RX_RING_SIZE - cur_size);
2221 } else
2222 run_tasklet = 1;
2223 }
2224
2225 if (!ACE_IS_TIGON_I(ap)) {
2226 cur_size = atomic_read(&ap->cur_mini_bufs);
2227 if (cur_size < RX_LOW_MINI_THRES) {
2228 if ((cur_size < RX_PANIC_MINI_THRES) &&
2229 !test_and_set_bit(0,
2230 &ap->mini_refill_busy)) {
2231#ifdef DEBUG
2232 printk("low on mini buffers %i\n",
2233 cur_size);
2234#endif
2235 ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
2236 } else
2237 run_tasklet = 1;
2238 }
2239 }
2240
2241 if (ap->jumbo) {
2242 cur_size = atomic_read(&ap->cur_jumbo_bufs);
2243 if (cur_size < RX_LOW_JUMBO_THRES) {
2244 if ((cur_size < RX_PANIC_JUMBO_THRES) &&
2245 !test_and_set_bit(0,
2246 &ap->jumbo_refill_busy)){
2247#ifdef DEBUG
2248 printk("low on jumbo buffers %i\n",
2249 cur_size);
2250#endif
2251 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
2252 } else
2253 run_tasklet = 1;
2254 }
2255 }
2256 if (run_tasklet && !ap->tasklet_pending) {
2257 ap->tasklet_pending = 1;
2258 tasklet_schedule(&ap->ace_tasklet);
2259 }
2260 }
2261
2262 return IRQ_HANDLED;
2263}
2264
2265
2266#if ACENIC_DO_VLAN
2267static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2268{
2269 struct ace_private *ap = netdev_priv(dev);
2270 unsigned long flags;
2271
2272 local_irq_save(flags);
2273 ace_mask_irq(dev);
2274
2275 ap->vlgrp = grp;
2276
2277 ace_unmask_irq(dev);
2278 local_irq_restore(flags);
2279}
2280#endif
2281
2282
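/*
 * Bring the interface up: program the MTU, clear the statistics, tell the
 * firmware the host stack is up, and start the refill tasklet and queue.
 */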
2283static int ace_open(struct net_device *dev)
2284{
2285 struct ace_private *ap = netdev_priv(dev);
2286 struct ace_regs __iomem *regs = ap->regs;
2287 struct cmd cmd;
2288
2289 if (!(ap->fw_running)) {
2290 printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
2291 return -EBUSY;
2292 }
2293
	writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
2295
2296 cmd.evt = C_CLEAR_STATS;
2297 cmd.code = 0;
2298 cmd.idx = 0;
2299 ace_issue_cmd(regs, &cmd);
2300
2301 cmd.evt = C_HOST_STATE;
2302 cmd.code = C_C_STACK_UP;
2303 cmd.idx = 0;
2304 ace_issue_cmd(regs, &cmd);
2305
2306 if (ap->jumbo &&
2307 !test_and_set_bit(0, &ap->jumbo_refill_busy))
2308 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
2309
2310 if (dev->flags & IFF_PROMISC) {
2311 cmd.evt = C_SET_PROMISC_MODE;
2312 cmd.code = C_C_PROMISC_ENABLE;
2313 cmd.idx = 0;
2314 ace_issue_cmd(regs, &cmd);
2315
2316 ap->promisc = 1;
2317 }else
2318 ap->promisc = 0;
2319 ap->mcast_all = 0;
2320
2321#if 0
2322 cmd.evt = C_LNK_NEGOTIATION;
2323 cmd.code = 0;
2324 cmd.idx = 0;
2325 ace_issue_cmd(regs, &cmd);
2326#endif
2327
2328 netif_start_queue(dev);
2329
2330
2331
2332
2333 tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
2334 return 0;
2335}
2336
2337
2338static int ace_close(struct net_device *dev)
2339{
2340 struct ace_private *ap = netdev_priv(dev);
2341 struct ace_regs __iomem *regs = ap->regs;
2342 struct cmd cmd;
2343 unsigned long flags;
2344 short i;
2345
2346
2347
2348
2349
2350
2351 netif_stop_queue(dev);
2352
2353
2354 if (ap->promisc) {
2355 cmd.evt = C_SET_PROMISC_MODE;
2356 cmd.code = C_C_PROMISC_DISABLE;
2357 cmd.idx = 0;
2358 ace_issue_cmd(regs, &cmd);
2359 ap->promisc = 0;
2360 }
2361
2362 cmd.evt = C_HOST_STATE;
2363 cmd.code = C_C_STACK_DOWN;
2364 cmd.idx = 0;
2365 ace_issue_cmd(regs, &cmd);
2366
2367 tasklet_kill(&ap->ace_tasklet);
2368
2369
2370
2371
2372
2373
2374 local_irq_save(flags);
2375 ace_mask_irq(dev);
2376
2377 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
2378 struct sk_buff *skb;
2379 struct tx_ring_info *info;
2380
2381 info = ap->skb->tx_skbuff + i;
2382 skb = info->skb;
2383
2384 if (dma_unmap_len(info, maplen)) {
2385 if (ACE_IS_TIGON_I(ap)) {
2386
2387 struct tx_desc __iomem *tx;
2388 tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
2389 writel(0, &tx->addr.addrhi);
2390 writel(0, &tx->addr.addrlo);
2391 writel(0, &tx->flagsize);
2392 } else
2393 memset(ap->tx_ring + i, 0,
2394 sizeof(struct tx_desc));
2395 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
2396 dma_unmap_len(info, maplen),
2397 PCI_DMA_TODEVICE);
2398 dma_unmap_len_set(info, maplen, 0);
2399 }
2400 if (skb) {
2401 dev_kfree_skb(skb);
2402 info->skb = NULL;
2403 }
2404 }
2405
2406 if (ap->jumbo) {
2407 cmd.evt = C_RESET_JUMBO_RNG;
2408 cmd.code = 0;
2409 cmd.idx = 0;
2410 ace_issue_cmd(regs, &cmd);
2411 }
2412
2413 ace_unmask_irq(dev);
2414 local_irq_restore(flags);
2415
2416 return 0;
2417}
2418
2419
2420static inline dma_addr_t
2421ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
2422 struct sk_buff *tail, u32 idx)
2423{
2424 dma_addr_t mapping;
2425 struct tx_ring_info *info;
2426
2427 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
2428 offset_in_page(skb->data),
2429 skb->len, PCI_DMA_TODEVICE);
2430
2431 info = ap->skb->tx_skbuff + idx;
2432 info->skb = tail;
2433 dma_unmap_addr_set(info, mapping, mapping);
2434 dma_unmap_len_set(info, maplen, skb->len);
2435 return mapping;
2436}
2437
2438
2439static inline void
2440ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
2441 u32 flagsize, u32 vlan_tag)
2442{
2443#if !USE_TX_COAL_NOW
2444 flagsize &= ~BD_FLG_COAL_NOW;
2445#endif
2446
2447 if (ACE_IS_TIGON_I(ap)) {
2448 struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc;
2449 writel(addr >> 32, &io->addr.addrhi);
2450 writel(addr & 0xffffffff, &io->addr.addrlo);
2451 writel(flagsize, &io->flagsize);
2452#if ACENIC_DO_VLAN
2453 writel(vlan_tag, &io->vlanres);
2454#endif
2455 } else {
2456 desc->addr.addrhi = addr >> 32;
2457 desc->addr.addrlo = addr;
2458 desc->flagsize = flagsize;
2459#if ACENIC_DO_VLAN
2460 desc->vlanres = vlan_tag;
2461#endif
2462 }
2463}
2464
2465
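/*
 * Hard start transmit: map the skb (head and any page fragments), fill in
 * one TX descriptor per piece and advance the producer index. If the ring
 * is full, spin for up to three seconds before reporting NETDEV_TX_BUSY.
 */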
2466static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
2467 struct net_device *dev)
2468{
2469 struct ace_private *ap = netdev_priv(dev);
2470 struct ace_regs __iomem *regs = ap->regs;
2471 struct tx_desc *desc;
2472 u32 idx, flagsize;
2473 unsigned long maxjiff = jiffies + 3*HZ;
2474
2475restart:
2476 idx = ap->tx_prd;
2477
2478 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2479 goto overflow;
2480
2481 if (!skb_shinfo(skb)->nr_frags) {
2482 dma_addr_t mapping;
2483 u32 vlan_tag = 0;
2484
2485 mapping = ace_map_tx_skb(ap, skb, skb, idx);
2486 flagsize = (skb->len << 16) | (BD_FLG_END);
2487 if (skb->ip_summed == CHECKSUM_PARTIAL)
2488 flagsize |= BD_FLG_TCP_UDP_SUM;
2489#if ACENIC_DO_VLAN
2490 if (vlan_tx_tag_present(skb)) {
2491 flagsize |= BD_FLG_VLAN_TAG;
2492 vlan_tag = vlan_tx_tag_get(skb);
2493 }
2494#endif
2495 desc = ap->tx_ring + idx;
2496 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2497
2498
2499 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2500 flagsize |= BD_FLG_COAL_NOW;
2501
2502 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2503 } else {
2504 dma_addr_t mapping;
2505 u32 vlan_tag = 0;
2506 int i, len = 0;
2507
2508 mapping = ace_map_tx_skb(ap, skb, NULL, idx);
2509 flagsize = (skb_headlen(skb) << 16);
2510 if (skb->ip_summed == CHECKSUM_PARTIAL)
2511 flagsize |= BD_FLG_TCP_UDP_SUM;
2512#if ACENIC_DO_VLAN
2513 if (vlan_tx_tag_present(skb)) {
2514 flagsize |= BD_FLG_VLAN_TAG;
2515 vlan_tag = vlan_tx_tag_get(skb);
2516 }
2517#endif
2518
2519 ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
2520
2521 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2522
2523 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2524 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2525 struct tx_ring_info *info;
2526
2527 len += frag->size;
2528 info = ap->skb->tx_skbuff + idx;
2529 desc = ap->tx_ring + idx;
2530
2531 mapping = pci_map_page(ap->pdev, frag->page,
2532 frag->page_offset, frag->size,
2533 PCI_DMA_TODEVICE);
2534
2535 flagsize = (frag->size << 16);
2536 if (skb->ip_summed == CHECKSUM_PARTIAL)
2537 flagsize |= BD_FLG_TCP_UDP_SUM;
2538 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2539
2540 if (i == skb_shinfo(skb)->nr_frags - 1) {
2541 flagsize |= BD_FLG_END;
2542 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2543 flagsize |= BD_FLG_COAL_NOW;
2544
2545
2546
2547
2548
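				/*
				 * Only the last fragment's ring entry keeps
				 * the skb pointer; the buffer must be freed
				 * exactly once, when that final descriptor
				 * is reclaimed.
				 */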
2549 info->skb = skb;
2550 } else {
2551 info->skb = NULL;
2552 }
2553 dma_unmap_addr_set(info, mapping, mapping);
2554 dma_unmap_len_set(info, maplen, frag->size);
2555 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2556 }
2557 }
2558
2559 wmb();
2560 ap->tx_prd = idx;
2561 ace_set_txprd(regs, ap, idx);
2562
2563 if (flagsize & BD_FLG_COAL_NOW) {
2564 netif_stop_queue(dev);
2565
2566
2567
2568
2569
2570
2571
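		/*
		 * The interrupt handler may have reclaimed descriptors
		 * between the ring-full test above and stopping the queue,
		 * so re-check and wake the queue if space is free again.
		 */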
2572 if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
2573 netif_wake_queue(dev);
2574 }
2575
2576 return NETDEV_TX_OK;
2577
2578overflow:
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
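	/*
	 * The ring is full.  Retry for up to ~3 seconds while the NIC
	 * reclaims descriptors; if it stays full, warn and report the
	 * queue as busy.
	 */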
2595 if (time_before(jiffies, maxjiff)) {
2596 barrier();
2597 cpu_relax();
2598 goto restart;
2599 }
2600
2601
2602 printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
2603 return NETDEV_TX_BUSY;
2604}
2605
2606
2607static int ace_change_mtu(struct net_device *dev, int new_mtu)
2608{
2609 struct ace_private *ap = netdev_priv(dev);
2610 struct ace_regs __iomem *regs = ap->regs;
2611
2612 if (new_mtu > ACE_JUMBO_MTU)
2613 return -EINVAL;
2614
2615	writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
2616 dev->mtu = new_mtu;
2617
2618 if (new_mtu > ACE_STD_MTU) {
2619 if (!(ap->jumbo)) {
2620 printk(KERN_INFO "%s: Enabling Jumbo frame "
2621 "support\n", dev->name);
2622 ap->jumbo = 1;
2623 if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
2624 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
2625 ace_set_rxtx_parms(dev, 1);
2626 }
2627 } else {
2628 while (test_and_set_bit(0, &ap->jumbo_refill_busy));
2629 ace_sync_irq(dev->irq);
2630 ace_set_rxtx_parms(dev, 0);
2631 if (ap->jumbo) {
2632 struct cmd cmd;
2633
2634 cmd.evt = C_RESET_JUMBO_RNG;
2635 cmd.code = 0;
2636 cmd.idx = 0;
2637 ace_issue_cmd(regs, &cmd);
2638 }
2639 }
2640
2641 return 0;
2642}
2643
2644static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2645{
2646 struct ace_private *ap = netdev_priv(dev);
2647 struct ace_regs __iomem *regs = ap->regs;
2648 u32 link;
2649
2650 memset(ecmd, 0, sizeof(struct ethtool_cmd));
2651 ecmd->supported =
2652 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2653 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2654 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
2655 SUPPORTED_Autoneg | SUPPORTED_FIBRE);
2656
2657 ecmd->port = PORT_FIBRE;
2658 ecmd->transceiver = XCVR_INTERNAL;
2659
2660	link = readl(&regs->GigLnkState);
2661 if (link & LNK_1000MB)
2662 ethtool_cmd_speed_set(ecmd, SPEED_1000);
2663 else {
2664		link = readl(&regs->FastLnkState);
2665 if (link & LNK_100MB)
2666 ethtool_cmd_speed_set(ecmd, SPEED_100);
2667 else if (link & LNK_10MB)
2668 ethtool_cmd_speed_set(ecmd, SPEED_10);
2669 else
2670 ethtool_cmd_speed_set(ecmd, 0);
2671 }
2672 if (link & LNK_FULL_DUPLEX)
2673 ecmd->duplex = DUPLEX_FULL;
2674 else
2675 ecmd->duplex = DUPLEX_HALF;
2676
2677 if (link & LNK_NEGOTIATE)
2678 ecmd->autoneg = AUTONEG_ENABLE;
2679 else
2680 ecmd->autoneg = AUTONEG_DISABLE;
2681
2682#if 0
2683
2684
2685
2686	ecmd->trace = readl(&regs->TuneTrace);
2687
2688	ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
2689	ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
2690#endif
2691	ecmd->maxtxpkt = readl(&regs->TuneMaxTxDesc);
2692	ecmd->maxrxpkt = readl(&regs->TuneMaxRxDesc);
2693
2694 return 0;
2695}
2696
2697static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2698{
2699 struct ace_private *ap = netdev_priv(dev);
2700 struct ace_regs __iomem *regs = ap->regs;
2701 u32 link, speed;
2702
2703	link = readl(&regs->GigLnkState);
2704 if (link & LNK_1000MB)
2705 speed = SPEED_1000;
2706 else {
2707		link = readl(&regs->FastLnkState);
2708 if (link & LNK_100MB)
2709 speed = SPEED_100;
2710 else if (link & LNK_10MB)
2711 speed = SPEED_10;
2712 else
2713 speed = SPEED_100;
2714 }
2715
2716 link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
2717 LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
2718 if (!ACE_IS_TIGON_I(ap))
2719 link |= LNK_TX_FLOW_CTL_Y;
2720 if (ecmd->autoneg == AUTONEG_ENABLE)
2721 link |= LNK_NEGOTIATE;
2722 if (ethtool_cmd_speed(ecmd) != speed) {
2723 link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
2724 switch (ethtool_cmd_speed(ecmd)) {
2725 case SPEED_1000:
2726 link |= LNK_1000MB;
2727 break;
2728 case SPEED_100:
2729 link |= LNK_100MB;
2730 break;
2731 case SPEED_10:
2732 link |= LNK_10MB;
2733 break;
2734 }
2735 }
2736
2737 if (ecmd->duplex == DUPLEX_FULL)
2738 link |= LNK_FULL_DUPLEX;
2739
2740 if (link != ap->link) {
2741 struct cmd cmd;
2742 printk(KERN_INFO "%s: Renegotiating link state\n",
2743 dev->name);
2744
2745 ap->link = link;
2746		writel(link, &regs->TuneLink);
2747		if (!ACE_IS_TIGON_I(ap))
2748			writel(link, &regs->TuneFastLink);
2749 wmb();
2750
2751 cmd.evt = C_LNK_NEGOTIATION;
2752 cmd.code = 0;
2753 cmd.idx = 0;
2754 ace_issue_cmd(regs, &cmd);
2755 }
2756 return 0;
2757}
2758
2759static void ace_get_drvinfo(struct net_device *dev,
2760 struct ethtool_drvinfo *info)
2761{
2762 struct ace_private *ap = netdev_priv(dev);
2763
2764 strlcpy(info->driver, "acenic", sizeof(info->driver));
2765 snprintf(info->version, sizeof(info->version), "%i.%i.%i",
2766 ap->firmware_major, ap->firmware_minor,
2767 ap->firmware_fix);
2768
2769 if (ap->pdev)
2770 strlcpy(info->bus_info, pci_name(ap->pdev),
2771 sizeof(info->bus_info));
2772
2773}
2774
2775
2776
2777
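/*
 * Program a new station MAC address into the MacAddrHi/MacAddrLo
 * registers and notify the firmware with C_SET_MAC_ADDR.
 */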
2778static int ace_set_mac_addr(struct net_device *dev, void *p)
2779{
2780 struct ace_private *ap = netdev_priv(dev);
2781 struct ace_regs __iomem *regs = ap->regs;
2782	struct sockaddr *addr = p;
2783 u8 *da;
2784 struct cmd cmd;
2785
2786	if (netif_running(dev))
2787 return -EBUSY;
2788
2789	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2790
2791 da = (u8 *)dev->dev_addr;
2792
2793	writel(da[0] << 8 | da[1], &regs->MacAddrHi);
2794	writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
2795	       &regs->MacAddrLo);
2796
2797 cmd.evt = C_SET_MAC_ADDR;
2798 cmd.code = 0;
2799 cmd.idx = 0;
2800 ace_issue_cmd(regs, &cmd);
2801
2802 return 0;
2803}
2804
2805
2806static void ace_set_multicast_list(struct net_device *dev)
2807{
2808 struct ace_private *ap = netdev_priv(dev);
2809 struct ace_regs __iomem *regs = ap->regs;
2810 struct cmd cmd;
2811
2812 if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
2813 cmd.evt = C_SET_MULTICAST_MODE;
2814 cmd.code = C_C_MCAST_ENABLE;
2815 cmd.idx = 0;
2816 ace_issue_cmd(regs, &cmd);
2817 ap->mcast_all = 1;
2818 } else if (ap->mcast_all) {
2819 cmd.evt = C_SET_MULTICAST_MODE;
2820 cmd.code = C_C_MCAST_DISABLE;
2821 cmd.idx = 0;
2822 ace_issue_cmd(regs, &cmd);
2823 ap->mcast_all = 0;
2824 }
2825
2826 if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
2827 cmd.evt = C_SET_PROMISC_MODE;
2828 cmd.code = C_C_PROMISC_ENABLE;
2829 cmd.idx = 0;
2830 ace_issue_cmd(regs, &cmd);
2831 ap->promisc = 1;
2832	} else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
2833 cmd.evt = C_SET_PROMISC_MODE;
2834 cmd.code = C_C_PROMISC_DISABLE;
2835 cmd.idx = 0;
2836 ace_issue_cmd(regs, &cmd);
2837 ap->promisc = 0;
2838 }
2839
2840
2841
2842
2843
2844
2845
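	/*
	 * The firmware offers only an all-or-nothing multicast filter:
	 * enable multicast reception if any addresses are configured and
	 * allmulti has not already turned it on, otherwise disable it.
	 */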
2846 if (!netdev_mc_empty(dev) && !ap->mcast_all) {
2847 cmd.evt = C_SET_MULTICAST_MODE;
2848 cmd.code = C_C_MCAST_ENABLE;
2849 cmd.idx = 0;
2850 ace_issue_cmd(regs, &cmd);
2851	} else if (!ap->mcast_all) {
2852 cmd.evt = C_SET_MULTICAST_MODE;
2853 cmd.code = C_C_MCAST_DISABLE;
2854 cmd.idx = 0;
2855 ace_issue_cmd(regs, &cmd);
2856 }
2857}
2858
2859
2860static struct net_device_stats *ace_get_stats(struct net_device *dev)
2861{
2862 struct ace_private *ap = netdev_priv(dev);
2863 struct ace_mac_stats __iomem *mac_stats =
2864 (struct ace_mac_stats __iomem *)ap->regs->Stats;
2865
2866 dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
2867 dev->stats.multicast = readl(&mac_stats->kept_mc);
2868 dev->stats.collisions = readl(&mac_stats->coll);
2869
2870 return &dev->stats;
2871}
2872
2873
2874static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
2875 u32 dest, int size)
2876{
2877 void __iomem *tdest;
2878 short tsize, i;
2879
2880 if (size <= 0)
2881 return;
2882
2883 while (size > 0) {
2884 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
2885 min_t(u32, size, ACE_WINDOW_SIZE));
2886		tdest = (void __iomem *) &regs->Window +
2887			(dest & (ACE_WINDOW_SIZE - 1));
2888		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
2889 for (i = 0; i < (tsize / 4); i++) {
2890
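		/* Firmware words are big-endian; convert before the MMIO write. */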
2891 writel(be32_to_cpup(src), tdest);
2892 src++;
2893 tdest += 4;
2894 dest += 4;
2895 size -= 4;
2896 }
2897 }
2898}
2899
2900
2901static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
2902{
2903 void __iomem *tdest;
2904 short tsize = 0, i;
2905
2906 if (size <= 0)
2907 return;
2908
2909 while (size > 0) {
2910 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
2911 min_t(u32, size, ACE_WINDOW_SIZE));
2912		tdest = (void __iomem *) &regs->Window +
2913			(dest & (ACE_WINDOW_SIZE - 1));
2914		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
2915
2916 for (i = 0; i < (tsize / 4); i++) {
2917 writel(0, tdest + i*4);
2918 }
2919
2920 dest += tsize;
2921 size -= tsize;
2922 }
2923}
2924
2925
2926
2927
2928
2929
2930
2931
2932static int __devinit ace_load_firmware(struct net_device *dev)
2933{
2934 const struct firmware *fw;
2935 const char *fw_name = "acenic/tg2.bin";
2936 struct ace_private *ap = netdev_priv(dev);
2937 struct ace_regs __iomem *regs = ap->regs;
2938 const __be32 *fw_data;
2939 u32 load_addr;
2940 int ret;
2941
2942	if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
2943 printk(KERN_ERR "%s: trying to download firmware while the "
2944 "CPU is running!\n", ap->name);
2945 return -EFAULT;
2946 }
2947
2948 if (ACE_IS_TIGON_I(ap))
2949 fw_name = "acenic/tg1.bin";
2950
2951 ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
2952 if (ret) {
2953 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
2954 ap->name, fw_name);
2955 return ret;
2956 }
2957
2958 fw_data = (void *)fw->data;
2959
2960
2961
2962
2963
2964
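	/*
	 * Firmware blob layout as used below: three version bytes in the
	 * first word, big-endian start and load addresses in words 1 and
	 * 2, and the image itself beginning at word 3.
	 */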
2965 ap->firmware_major = fw->data[0];
2966 ap->firmware_minor = fw->data[1];
2967 ap->firmware_fix = fw->data[2];
2968
2969 ap->firmware_start = be32_to_cpu(fw_data[1]);
2970 if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
2971 printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
2972 ap->name, ap->firmware_start, fw_name);
2973 ret = -EINVAL;
2974 goto out;
2975 }
2976
2977 load_addr = be32_to_cpu(fw_data[2]);
2978 if (load_addr < 0x4000 || load_addr >= 0x80000) {
2979 printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
2980 ap->name, load_addr, fw_name);
2981 ret = -EINVAL;
2982 goto out;
2983 }
2984
2985
2986
2987
2988
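	/*
	 * Clear the NIC SRAM above the scratchpad, then copy the image
	 * (minus its 12 byte header) to the load address.
	 */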
2989 ace_clear(regs, 0x2000, 0x80000-0x2000);
2990 ace_copy(regs, &fw_data[3], load_addr, fw->size-12);
2991 out:
2992 release_firmware(fw);
2993 return ret;
2994}
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
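/*
 * The on-board serial EEPROM is accessed by bit-banging the EEPROM_*
 * bits of the LocalCtrl register.  The helpers below generate the
 * start/stop conditions, shift out command and address bytes, and
 * sample the acknowledge bit of that protocol.
 */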
3012static void __devinit eeprom_start(struct ace_regs __iomem *regs)
3013{
3014 u32 local;
3015
3016	readl(&regs->LocalCtrl);
3017	udelay(ACE_SHORT_DELAY);
3018	local = readl(&regs->LocalCtrl);
3019	local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
3020	writel(local, &regs->LocalCtrl);
3021	readl(&regs->LocalCtrl);
3022	mb();
3023	udelay(ACE_SHORT_DELAY);
3024	local |= EEPROM_CLK_OUT;
3025	writel(local, &regs->LocalCtrl);
3026	readl(&regs->LocalCtrl);
3027	mb();
3028	udelay(ACE_SHORT_DELAY);
3029	local &= ~EEPROM_DATA_OUT;
3030	writel(local, &regs->LocalCtrl);
3031	readl(&regs->LocalCtrl);
3032	mb();
3033	udelay(ACE_SHORT_DELAY);
3034	local &= ~EEPROM_CLK_OUT;
3035	writel(local, &regs->LocalCtrl);
3036	readl(&regs->LocalCtrl);
3037 mb();
3038}
3039
3040
3041static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
3042{
3043 short i;
3044 u32 local;
3045
3046 udelay(ACE_SHORT_DELAY);
3047	local = readl(&regs->LocalCtrl);
3048	local &= ~EEPROM_DATA_OUT;
3049	local |= EEPROM_WRITE_ENABLE;
3050	writel(local, &regs->LocalCtrl);
3051	readl(&regs->LocalCtrl);
3052 mb();
3053
3054 for (i = 0; i < 8; i++, magic <<= 1) {
3055 udelay(ACE_SHORT_DELAY);
3056 if (magic & 0x80)
3057 local |= EEPROM_DATA_OUT;
3058 else
3059 local &= ~EEPROM_DATA_OUT;
3060		writel(local, &regs->LocalCtrl);
3061		readl(&regs->LocalCtrl);
3062		mb();
3063
3064		udelay(ACE_SHORT_DELAY);
3065		local |= EEPROM_CLK_OUT;
3066		writel(local, &regs->LocalCtrl);
3067		readl(&regs->LocalCtrl);
3068		mb();
3069		udelay(ACE_SHORT_DELAY);
3070		local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
3071		writel(local, &regs->LocalCtrl);
3072		readl(&regs->LocalCtrl);
3073 mb();
3074 }
3075}
3076
3077
3078static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs)
3079{
3080 int state;
3081 u32 local;
3082
3083	local = readl(&regs->LocalCtrl);
3084	local &= ~EEPROM_WRITE_ENABLE;
3085	writel(local, &regs->LocalCtrl);
3086	readl(&regs->LocalCtrl);
3087	mb();
3088	udelay(ACE_LONG_DELAY);
3089	local |= EEPROM_CLK_OUT;
3090	writel(local, &regs->LocalCtrl);
3091	readl(&regs->LocalCtrl);
3092	mb();
3093	udelay(ACE_SHORT_DELAY);
3094
3095	state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
3096	udelay(ACE_SHORT_DELAY);
3097	mb();
3098	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
3099	readl(&regs->LocalCtrl);
3100 mb();
3101
3102 return state;
3103}
3104
3105
3106static void __devinit eeprom_stop(struct ace_regs __iomem *regs)
3107{
3108 u32 local;
3109
3110 udelay(ACE_SHORT_DELAY);
3111	local = readl(&regs->LocalCtrl);
3112	local |= EEPROM_WRITE_ENABLE;
3113	writel(local, &regs->LocalCtrl);
3114	readl(&regs->LocalCtrl);
3115	mb();
3116	udelay(ACE_SHORT_DELAY);
3117	local &= ~EEPROM_DATA_OUT;
3118	writel(local, &regs->LocalCtrl);
3119	readl(&regs->LocalCtrl);
3120	mb();
3121	udelay(ACE_SHORT_DELAY);
3122	local |= EEPROM_CLK_OUT;
3123	writel(local, &regs->LocalCtrl);
3124	readl(&regs->LocalCtrl);
3125	mb();
3126	udelay(ACE_SHORT_DELAY);
3127	local |= EEPROM_DATA_OUT;
3128	writel(local, &regs->LocalCtrl);
3129	readl(&regs->LocalCtrl);
3130	mb();
3131	udelay(ACE_LONG_DELAY);
3132	local &= ~EEPROM_CLK_OUT;
3133	writel(local, &regs->LocalCtrl);
3134 mb();
3135}
3136
3137
3138
3139
3140
3141static int __devinit read_eeprom_byte(struct net_device *dev,
3142 unsigned long offset)
3143{
3144 struct ace_private *ap = netdev_priv(dev);
3145 struct ace_regs __iomem *regs = ap->regs;
3146 unsigned long flags;
3147 u32 local;
3148 int result = 0;
3149 short i;
3150
3151
3152
3153
3154
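	/*
	 * The bit-banged transfer is timing sensitive, so run the whole
	 * transaction with local interrupts disabled.
	 */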
3155 local_irq_save(flags);
3156
3157 eeprom_start(regs);
3158
3159 eeprom_prep(regs, EEPROM_WRITE_SELECT);
3160 if (eeprom_check_ack(regs)) {
3161 local_irq_restore(flags);
3162 printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
3163 result = -EIO;
3164 goto eeprom_read_error;
3165 }
3166
3167 eeprom_prep(regs, (offset >> 8) & 0xff);
3168 if (eeprom_check_ack(regs)) {
3169 local_irq_restore(flags);
3170 printk(KERN_ERR "%s: Unable to set address byte 0\n",
3171 ap->name);
3172 result = -EIO;
3173 goto eeprom_read_error;
3174 }
3175
3176 eeprom_prep(regs, offset & 0xff);
3177 if (eeprom_check_ack(regs)) {
3178 local_irq_restore(flags);
3179 printk(KERN_ERR "%s: Unable to set address byte 1\n",
3180 ap->name);
3181 result = -EIO;
3182 goto eeprom_read_error;
3183 }
3184
3185 eeprom_start(regs);
3186 eeprom_prep(regs, EEPROM_READ_SELECT);
3187 if (eeprom_check_ack(regs)) {
3188 local_irq_restore(flags);
3189 printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
3190 ap->name);
3191 result = -EIO;
3192 goto eeprom_read_error;
3193 }
3194
3195 for (i = 0; i < 8; i++) {
3196		local = readl(&regs->LocalCtrl);
3197		local &= ~EEPROM_WRITE_ENABLE;
3198		writel(local, &regs->LocalCtrl);
3199		readl(&regs->LocalCtrl);
3200		udelay(ACE_LONG_DELAY);
3201		mb();
3202		local |= EEPROM_CLK_OUT;
3203		writel(local, &regs->LocalCtrl);
3204		readl(&regs->LocalCtrl);
3205 mb();
3206 udelay(ACE_SHORT_DELAY);
3207
3208 result = (result << 1) |
3209			((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
3210 udelay(ACE_SHORT_DELAY);
3211 mb();
3212		local = readl(&regs->LocalCtrl);
3213		local &= ~EEPROM_CLK_OUT;
3214		writel(local, &regs->LocalCtrl);
3215		readl(&regs->LocalCtrl);
3216 udelay(ACE_SHORT_DELAY);
3217 mb();
3218 if (i == 7) {
3219 local |= EEPROM_WRITE_ENABLE;
3220			writel(local, &regs->LocalCtrl);
3221			readl(&regs->LocalCtrl);
3222 mb();
3223 udelay(ACE_SHORT_DELAY);
3224 }
3225 }
3226
3227 local |= EEPROM_DATA_OUT;
3228	writel(local, &regs->LocalCtrl);
3229	readl(&regs->LocalCtrl);
3230 mb();
3231 udelay(ACE_SHORT_DELAY);
3232	writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
3233 readl(®s->LocalCtrl);
3234 udelay(ACE_LONG_DELAY);
3235	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
3236	readl(&regs->LocalCtrl);
3237 mb();
3238 udelay(ACE_SHORT_DELAY);
3239 eeprom_stop(regs);
3240
3241 local_irq_restore(flags);
3242 out:
3243 return result;
3244
3245 eeprom_read_error:
3246 printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
3247 ap->name, offset);
3248 goto out;
3249}
3250