/*
 * et131x.c - Driver for the Agere Systems ET1310 10/100/1000 Base-T
 * Ethernet controller.
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
		   "for the ET1310 by Agere Systems");

/* Register polling and EEPROM write retry limits */
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

/* Wrap values for the 16-bit and 12-bit hardware counters */
#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

/* Internal packet memory size and RX/TX split point */
#define INTERNAL_MEM_SIZE	0x400
#define INTERNAL_MEM_RX_OFFSET	0x1FF

/* Interrupt mask values: a set bit masks (disables) the corresponding
 * interrupt source, a clear bit enables it.
 */
#define INT_MASK_DISABLE	0xffffffff

#define INT_MASK_ENABLE		0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7

/* General packet and multicast limits */
#define NIC_MIN_PACKET_SIZE	60

#define NIC_MAX_MCAST_LIST	128

/* Packet filter flags */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx timeout handling */
#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

/* MP_TCB flags */
#define fMP_DEST_MULTI		0x00000001
#define fMP_DEST_BROAD		0x00000002

/* MP_ADAPTER flags */
#define fMP_ADAPTER_RECV_LOOKASIDE	0x00000004
#define fMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define fMP_ADAPTER_LOWER_POWER		0x00200000

#define fMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define fMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define fMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Offsets into the device's PCI configuration space */
#define ET1310_PCI_MAX_PYLD		0x4C
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI device IDs supported by this driver */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00
#define ET131X_PCI_DEVICE_ID_FAST	0xED01

/* Conversion factor and default parameter values */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define USE_FBR0	1

#define FBR_CHUNKS	32

#define MAX_DESC_PER_RING_RX	1024

/* Number of receive frame descriptors and free buffer rings */
#ifdef USE_FBR0
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2
#else
#define RFD_LOW_WATER_MARK	20
#define NIC_DEFAULT_NUM_RFD	256
#define NUM_FBRS		1
#endif

#define NIC_MIN_NUM_RFD		64

#define NUM_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* Free buffer ring descriptor: the buffer DMA address split into low and
 * high 32-bit words, plus a word carrying the buffer index.
 */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;		/* Buffer index */
};

/* Packet status ring descriptor, written back by the hardware for each
 * received frame.  word1 carries the frame length, buffer index and free
 * buffer ring index; word0 carries the receive status bits.
 */
struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};

/* Receive status block, written back by the DMA engine.  The upper half of
 * word1 holds the packet status ring full offset that the driver compares
 * against its local copy to detect newly completed packets.
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};

/* Structure for look-up table holding free buffer ring pointers, DMA
 * addresses and state.
 */
struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];
	void *buffer1[MAX_DESC_PER_RING_RX];
	void *buffer2[MAX_DESC_PER_RING_RX];
	u32 bus_high[MAX_DESC_PER_RING_RX];
	u32 bus_low[MAX_DESC_PER_RING_RX];
	void *ring_virtaddr;
	dma_addr_t ring_physaddr;
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	uint64_t real_physaddr;
	uint64_t offset;
	u32 local_full;
	u32 num_entries;
	u32 buffsize;
};

/* rx_ring: the host-side state for the receive path - both free buffer
 * rings, the packet status ring, the write-back status block and the list
 * of receive frame descriptors (RFDs).
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_num_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	/* Receive frame descriptor list */
	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;

	/* lookaside list for RFD allocations */
	struct kmem_cache *recv_lookaside;
};

/* tx_desc: a transmit descriptor as seen by the hardware - buffer DMA
 * address (high/low), length plus VLAN field, and control flags.
 */
struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;
	u32 flags;
};

/* tcb: transmit control block, the driver's per-packet bookkeeping for a
 * frame handed to the transmit DMA engine.
 */
struct tcb {
	struct tcb *next;	/* next TCB in the free/send list */
	u32 flags;
	u32 count;
	u32 stale;
	struct sk_buff *skb;	/* the socket buffer we are tied to */
	u32 index;
	u32 index_start;
};

/* tx_ring: the transmit descriptor ring and the TCB queues */
struct tx_ring {
	/* The actual array of TCBs */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* List of TCBs that have been handed to the hardware and are
	 * waiting for completion
	 */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	/* The actual descriptor ring and its DMA address */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ */
	int since_irq;
};

/* Number of transmit descriptors in the ring and number of transmit
 * control blocks available.
 */
#define NUM_DESC_PER_RING_TX	512
#define NUM_TCB			64

/* Error-timer period and low-water marks (in percent) for the packet
 * status ring and the receive buffers.
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15

/* rfd: receive frame descriptor - tracks one receive buffer as it moves
 * between the hardware rings and the driver's receive list.
 */
struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;		/* total size of receive frame */
	u16 bufferindex;
	u8 ringindex;
};

/* Flow control settings */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3

/* ce_stats: driver-maintained statistics, accumulated from the MAC-STAT
 * block and from the transmit/receive paths.  The transmit packet counts
 * are atomic because they are updated from the completion path.
 */
struct ce_stats {
	u32 unicast_pkts_rcvd;
	atomic_t unicast_pkts_xmtd;
	u32 multicast_pkts_rcvd;
	atomic_t multicast_pkts_xmtd;
	u32 broadcast_pkts_rcvd;
	atomic_t broadcast_pkts_xmtd;
	u32 rcvd_pkts_dropped;

	/* Tx Statistics */
	u32 tx_underflows;

	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	/* Rx Statistics */
	u32 rx_overflows;

	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 synchronous_iterations;
	u32 interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct work_struct task;

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	/* Spinlocks */
	spinlock_t lock;

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t send_hw_lock;

	spinlock_t rcv_lock;
	spinlock_t rcv_pend_lock;
	spinlock_t fbr_lock;

	spinlock_t phy_lock;

	/* Packet Filter */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's mapped register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;			/* flow control requested locally */
	u32 registry_jumbo_packet;	/* max supported ethernet packet size */

	/* Flow control negotiated with the link partner */
	u8 flowcontrol;

	/* Error-recovery timer */
	struct timer_list error_timer;

	/* Set when the PHY is put into coma mode at boot */
	u8 boot_coma;

	/* Link speed and duplex saved at power-down, restored at power-up */
	u16 pdown_speed;
	u8 pdown_duplex;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	/* Stats */
	struct ce_stats stats;

	struct net_device_stats net_stats;
};

/* eeprom_wait_ready - poll the LBCIF status in PCI config space until the
 * EEPROM interface is ready.
 * @pdev: the PCI device whose EEPROM is being accessed
 * @status: if non-NULL, receives the full LBCIF dword that was read
 *
 * Returns the low byte of the status register on success, -EIO if the
 * config read fails, or -ETIMEDOUT if the interface never becomes ready.
 */
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read the registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* Both ready bits set? */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

/* eeprom_write - write one byte to the EEPROM via the LBCIF interface
 * @adapter: pointer to our private adapter structure
 * @addr: the EEPROM address to write
 * @data: the value to write
 *
 * Returns 0 on success, -EIO otherwise.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/* The EEPROM is written one byte at a time through the LBCIF
	 * registers in PCI config space; wait for the interface to become
	 * ready before starting.
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/* Enable the LBCIF and select I2C write mode */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the target EEPROM address... */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;

		/* ...then the data byte; this starts the I2C write */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;

		/* Wait for the I2C transaction to complete */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/* A general error on revision 0 parts means the write cannot
		 * succeed; give up.  Later revisions ignore it and do a
		 * blind write.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
			&& adapter->pdev->revision == 0)
			break;

		/* An ACK error means the EEPROM may still be busy with a
		 * previous internal write cycle; back off and retry.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	udelay(10);

	/* Drop back out of I2C write mode and poll the data register until
	 * the EEPROM's internal write cycle has finished.
	 */
	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
					LBCIF_DATA_REGISTER, &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/* eeprom_read - read one byte from the EEPROM via the LBCIF interface
 * @adapter: pointer to our private adapter structure
 * @addr: the EEPROM address to read
 * @pdata: where to store the byte that was read
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/* A single byte read follows the same sequence as a write, with the
	 * data flowing in the other direction.
	 */
	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/* Enable the LBCIF; leaving the I2C write bit clear selects a read */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;

	/* Write the address to read from; this starts the I2C read cycle */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;

	/* Wait for the read to complete; on success eeprom_wait_ready()
	 * returns the data byte as its return value.
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;

	*pdata = err;

	/* An ACK error means the EEPROM never acknowledged the read */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM status code at offset 0xB2 of
	 * config space.  The read is deliberately done twice: the first read
	 * after power-up has been seen to return stale data on some systems.
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
				      &eestatus);

	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
		       "Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present.  If they are,
	 * we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;
		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* On revision 1 silicon, rewrite the first EEPROM
			 * bytes with known-good values.
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
			    "Fatal EEPROM Status Error - 0x%04x\n", eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address.
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM data used to configure LED behaviour; see
	 * et131x_xcvr_init() for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

/* et131x_rx_dma_enable - program the receive DMA CSR with the free buffer
 * ring sizes and take the engine out of the halt state.
 * @adapter: pointer to our adapter structure
 */
void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = 0x2000;	/* FBR1 enable */

	if (adapter->rx_ring.fbr[0]->buffsize == 4096)
		csr |= 0x0800;
	else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
		csr |= 0x1000;
	else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
		csr |= 0x1800;
#ifdef USE_FBR0
	csr |= 0x0400;		/* FBR0 enable */
	if (adapter->rx_ring.fbr[1]->buffsize == 256)
		csr |= 0x0100;
	else if (adapter->rx_ring.fbr[1]->buffsize == 512)
		csr |= 0x0200;
	else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
		csr |= 0x0300;
#endif
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) != 0) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) != 0) {
			dev_err(&adapter->pdev->dev,
			    "RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

/* et131x_rx_dma_disable - halt the receive DMA engine and wait for it to
 * report the halted state.
 * @adapter: pointer to our adapter structure
 */
void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;

	/* Setup the receive dma configuration register */
	writel(0x00002001, &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) == 0) {	/* Check halt status (bit 17) */
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) == 0)
			dev_err(&adapter->pdev->dev,
			"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

/* et131x_tx_dma_enable - start the transmit DMA engine in its normal
 * operating mode.
 * @adapter: pointer to our adapter structure
 */
void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
					&adapter->regs->txdma.csr);
}

/* Helpers for advancing the 10-bit and 12-bit ring offsets while
 * preserving the wrap bit used by the hardware.
 */
static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}

/* et1310_config_mac_regs1 - initial MAC configuration: reset the MAC core,
 * set the inter-packet gap, half-duplex parameters, MII management clock,
 * station address and maximum frame length.
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything.  Write to MAC configuration
	 * register 1 to perform the reset.
	 */
	writel(0xC00F0000, &macregs->cfg1);

	/* Next lets configure the MAC Inter-packet gap register */
	ipg = 0x38005860;
	ipg |= 0x50 << 8;
	writel(ipg, &macregs->ipg);

	/* Next lets configure the MAC Half Duplex register */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next lets configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(0x07, &macregs->mii_mgmt_cfg);

	/* Next lets configure the MAC Station Address register.  We write
	 * the address stored in the adapter structure to the station
	 * address high and low registers; it is also used for generating
	 * and checking pause control packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Maximum ethernet packet size the MAC will pass without truncating.
	 * Allow 4 bytes more than our max packet size to account for the
	 * Ethernet CRC.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/* et1310_config_mac_regs2 - speed- and duplex-dependent MAC configuration,
 * applied once the PHY has negotiated a link.
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the interface mode bits */
	cfg2 &= ~0x300;
	if (phydev && phydev->speed == SPEED_1000) {
		cfg2 |= 0x200;
		/* Phy mode bit */
		ifctrl &= ~(1 << 24);
	} else {
		cfg2 |= 0x100;
		ifctrl |= (1 << 24);
	}

	/* We need to enable Rx/Tx */
	cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
				adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	cfg2 |= 0x7016;
	cfg2 &= ~0x0021;

	/* Turn on duplex if needed */
	if (phydev && phydev->duplex == DUPLEX_FULL)
		cfg2 |= 0x01;

	ifctrl &= ~(1 << 26);
	if (phydev && phydev->duplex == DUPLEX_HALF)
		ifctrl |= (1<<26);

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	/* Wait for the MAC to acknowledge the configuration */
	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
		    "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			cfg1);
	}

	/* Enable the TX MAC */
	ctl |= 0x09;
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}
1101
1102
1103
1104
1105
1106
1107
1108int et1310_in_phy_coma(struct et131x_adapter *adapter)
1109{
1110 u32 pmcsr;
1111
1112 pmcsr = readl(&adapter->regs->global.pm_csr);
1113
1114 return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
1115}
1116
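/* et1310_setup_device_for_multicast - program the RX MAC multicast hash
 * registers from the adapter's multicast list.  Each address is hashed
 * with ether_crc() and the corresponding bit is set in one of the four
 * 32-bit hash registers; the registers are only written while the PHY is
 * not in coma mode.
 */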
1117void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
1118{
1119 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1120 uint32_t nIndex;
1121 uint32_t result;
1122 uint32_t hash1 = 0;
1123 uint32_t hash2 = 0;
1124 uint32_t hash3 = 0;
1125 uint32_t hash4 = 0;
1126 u32 pm_csr;
1127
1128
1129
1130
1131
1132
1133 if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
1134
1135 for (nIndex = 0; nIndex < adapter->multicast_addr_count;
1136 nIndex++) {
1137 result = ether_crc(6, adapter->multicast_list[nIndex]);
1138
1139 result = (result & 0x3F800000) >> 23;
1140
1141 if (result < 32) {
1142 hash1 |= (1 << result);
1143 } else if ((31 < result) && (result < 64)) {
1144 result -= 32;
1145 hash2 |= (1 << result);
1146 } else if ((63 < result) && (result < 96)) {
1147 result -= 64;
1148 hash3 |= (1 << result);
1149 } else {
1150 result -= 96;
1151 hash4 |= (1 << result);
1152 }
1153 }
1154 }
1155
1156
1157 pm_csr = readl(&adapter->regs->global.pm_csr);
1158 if (!et1310_in_phy_coma(adapter)) {
1159 writel(hash1, &rxmac->multi_hash1);
1160 writel(hash2, &rxmac->multi_hash2);
1161 writel(hash3, &rxmac->multi_hash3);
1162 writel(hash4, &rxmac->multi_hash4);
1163 }
1164}
1165
1166void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
1167{
1168 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1169 u32 uni_pf1;
1170 u32 uni_pf2;
1171 u32 uni_pf3;
1172 u32 pm_csr;
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183 uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
1184 (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
1185 (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
1186 adapter->addr[1];
1187
1188 uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
1189 (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
1190 (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
1191 adapter->addr[5];
1192
1193 uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
1194 (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
1195 (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
1196 adapter->addr[5];
1197
1198 pm_csr = readl(&adapter->regs->global.pm_csr);
1199 if (!et1310_in_phy_coma(adapter)) {
1200 writel(uni_pf1, &rxmac->uni_pf_addr1);
1201 writel(uni_pf2, &rxmac->uni_pf_addr2);
1202 writel(uni_pf3, &rxmac->uni_pf_addr3);
1203 }
1204}
1205
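/* et1310_config_rxmac_regs - configure the receive MAC: disable it while
 * the WOL/CRC registers, packet filter masks, station address, packet
 * filter control and memory-interface settings are programmed, then
 * re-enable it.
 */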
1206void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
1207{
1208 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1209 struct phy_device *phydev = adapter->phydev;
1210 u32 sa_lo;
1211 u32 sa_hi = 0;
1212 u32 pf_ctrl = 0;
1213
1214
1215 writel(0x8, &rxmac->ctrl);
1216
1217
1218 writel(0, &rxmac->crc0);
1219 writel(0, &rxmac->crc12);
1220 writel(0, &rxmac->crc34);
1221
1222
1223
1224
1225
1226 writel(0, &rxmac->mask0_word0);
1227 writel(0, &rxmac->mask0_word1);
1228 writel(0, &rxmac->mask0_word2);
1229 writel(0, &rxmac->mask0_word3);
1230
1231 writel(0, &rxmac->mask1_word0);
1232 writel(0, &rxmac->mask1_word1);
1233 writel(0, &rxmac->mask1_word2);
1234 writel(0, &rxmac->mask1_word3);
1235
1236 writel(0, &rxmac->mask2_word0);
1237 writel(0, &rxmac->mask2_word1);
1238 writel(0, &rxmac->mask2_word2);
1239 writel(0, &rxmac->mask2_word3);
1240
1241 writel(0, &rxmac->mask3_word0);
1242 writel(0, &rxmac->mask3_word1);
1243 writel(0, &rxmac->mask3_word2);
1244 writel(0, &rxmac->mask3_word3);
1245
1246 writel(0, &rxmac->mask4_word0);
1247 writel(0, &rxmac->mask4_word1);
1248 writel(0, &rxmac->mask4_word2);
1249 writel(0, &rxmac->mask4_word3);
1250
1251
1252 sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
1253 (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
1254 (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
1255 adapter->addr[5];
1256 writel(sa_lo, &rxmac->sa_lo);
1257
1258 sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
1259 adapter->addr[1];
1260 writel(sa_hi, &rxmac->sa_hi);
1261
1262
1263 writel(0, &rxmac->pf_ctrl);
1264
1265
1266 if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1267 et1310_setup_device_for_unicast(adapter);
1268 pf_ctrl |= 4;
1269 } else {
1270 writel(0, &rxmac->uni_pf_addr1);
1271 writel(0, &rxmac->uni_pf_addr2);
1272 writel(0, &rxmac->uni_pf_addr3);
1273 }
1274
1275
1276 if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1277 pf_ctrl |= 2;
1278 et1310_setup_device_for_multicast(adapter);
1279 }
1280
1281
1282 pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
1283 pf_ctrl |= 8;
1284
1285 if (adapter->registry_jumbo_packet > 8192)
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296 writel(0x41, &rxmac->mcif_ctrl_max_seg);
1297 else
1298 writel(0, &rxmac->mcif_ctrl_max_seg);
1299
1300
1301 writel(0, &rxmac->mcif_water_mark);
1302
1303
1304 writel(0, &rxmac->mif_ctrl);
1305
1306
1307 writel(0, &rxmac->space_avail);
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322 if (phydev && phydev->speed == SPEED_100)
1323 writel(0x30038, &rxmac->mif_ctrl);
1324 else
1325 writel(0x30030, &rxmac->mif_ctrl);
1326
1327
1328
1329
1330
1331
1332
1333 writel(pf_ctrl, &rxmac->pf_ctrl);
1334 writel(0x9, &rxmac->ctrl);
1335}
1336
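/* et1310_config_txmac_regs - program the transmit MAC control-frame
 * parameter register: 0 when flow control is disabled, otherwise the
 * default pause value.
 */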
1337void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1338{
1339 struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1340
1341
1342
1343
1344
1345 if (adapter->flowcontrol == FLOW_NONE)
1346 writel(0, &txmac->cf_param);
1347 else
1348 writel(0x40, &txmac->cf_param);
1349}
1350
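/* et1310_config_macstat_regs - clear all of the MAC-STAT hardware counters
 * and unmask the carry bits for the counters the driver tracks, so that
 * counter rollovers can be detected and accounted for.
 */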
1351void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1352{
1353 struct macstat_regs __iomem *macstat =
1354 &adapter->regs->macstat;
1355
1356
1357
1358
1359 writel(0, &macstat->txrx_0_64_byte_frames);
1360 writel(0, &macstat->txrx_65_127_byte_frames);
1361 writel(0, &macstat->txrx_128_255_byte_frames);
1362 writel(0, &macstat->txrx_256_511_byte_frames);
1363 writel(0, &macstat->txrx_512_1023_byte_frames);
1364 writel(0, &macstat->txrx_1024_1518_byte_frames);
1365 writel(0, &macstat->txrx_1519_1522_gvln_frames);
1366
1367 writel(0, &macstat->rx_bytes);
1368 writel(0, &macstat->rx_packets);
1369 writel(0, &macstat->rx_fcs_errs);
1370 writel(0, &macstat->rx_multicast_packets);
1371 writel(0, &macstat->rx_broadcast_packets);
1372 writel(0, &macstat->rx_control_frames);
1373 writel(0, &macstat->rx_pause_frames);
1374 writel(0, &macstat->rx_unknown_opcodes);
1375 writel(0, &macstat->rx_align_errs);
1376 writel(0, &macstat->rx_frame_len_errs);
1377 writel(0, &macstat->rx_code_errs);
1378 writel(0, &macstat->rx_carrier_sense_errs);
1379 writel(0, &macstat->rx_undersize_packets);
1380 writel(0, &macstat->rx_oversize_packets);
1381 writel(0, &macstat->rx_fragment_packets);
1382 writel(0, &macstat->rx_jabbers);
1383 writel(0, &macstat->rx_drops);
1384
1385 writel(0, &macstat->tx_bytes);
1386 writel(0, &macstat->tx_packets);
1387 writel(0, &macstat->tx_multicast_packets);
1388 writel(0, &macstat->tx_broadcast_packets);
1389 writel(0, &macstat->tx_pause_frames);
1390 writel(0, &macstat->tx_deferred);
1391 writel(0, &macstat->tx_excessive_deferred);
1392 writel(0, &macstat->tx_single_collisions);
1393 writel(0, &macstat->tx_multiple_collisions);
1394 writel(0, &macstat->tx_late_collisions);
1395 writel(0, &macstat->tx_excessive_collisions);
1396 writel(0, &macstat->tx_total_collisions);
1397 writel(0, &macstat->tx_pause_honored_frames);
1398 writel(0, &macstat->tx_drops);
1399 writel(0, &macstat->tx_jabbers);
1400 writel(0, &macstat->tx_fcs_errs);
1401 writel(0, &macstat->tx_control_frames);
1402 writel(0, &macstat->tx_oversize_frames);
1403 writel(0, &macstat->tx_undersize_frames);
1404 writel(0, &macstat->tx_fragments);
1405 writel(0, &macstat->carry_reg1);
1406 writel(0, &macstat->carry_reg2);
1407
1408
1409
1410
1411
1412 writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1413 writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1414}
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
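/* et131x_phy_mii_read - read a PHY register through the MAC's MII
 * management interface.
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the PHY on the MII bus
 * @reg: the register to read
 * @value: where to store the 16-bit value read
 *
 * Saves and restores the MII management address/command registers around
 * the transaction.  Returns 0 on success or -EIO on timeout.
 */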
1425int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1426 u8 reg, u16 *value)
1427{
1428 struct mac_regs __iomem *mac = &adapter->regs->mac;
1429 int status = 0;
1430 u32 delay = 0;
1431 u32 mii_addr;
1432 u32 mii_cmd;
1433 u32 mii_indicator;
1434
1435
1436
1437
1438 mii_addr = readl(&mac->mii_mgmt_addr);
1439 mii_cmd = readl(&mac->mii_mgmt_cmd);
1440
1441
1442 writel(0, &mac->mii_mgmt_cmd);
1443
1444
1445 writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1446
1447 writel(0x1, &mac->mii_mgmt_cmd);
1448
1449 do {
1450 udelay(50);
1451 delay++;
1452 mii_indicator = readl(&mac->mii_mgmt_indicator);
1453 } while ((mii_indicator & MGMT_WAIT) && delay < 50);
1454
1455
1456 if (delay == 50) {
1457 dev_warn(&adapter->pdev->dev,
1458 "reg 0x%08x could not be read\n", reg);
1459 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1460 mii_indicator);
1461
1462 status = -EIO;
1463 }
1464
1465
1466
1467 *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
1468
1469
1470 writel(0, &mac->mii_mgmt_cmd);
1471
1472
1473
1474
1475 writel(mii_addr, &mac->mii_mgmt_addr);
1476 writel(mii_cmd, &mac->mii_mgmt_cmd);
1477
1478 return status;
1479}
1480
1481int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1482{
1483 struct phy_device *phydev = adapter->phydev;
1484
1485 if (!phydev)
1486 return -EIO;
1487
1488 return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
1489}
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
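/* et131x_mii_write - write a 16-bit value to a register of the attached
 * PHY through the MII management interface.
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write
 * @value: the value to write
 *
 * Returns 0 on success or -EIO if the write does not complete in time.
 */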
1501int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
1502{
1503 struct mac_regs __iomem *mac = &adapter->regs->mac;
1504 struct phy_device *phydev = adapter->phydev;
1505 int status = 0;
1506 u8 addr;
1507 u32 delay = 0;
1508 u32 mii_addr;
1509 u32 mii_cmd;
1510 u32 mii_indicator;
1511
1512 if (!phydev)
1513 return -EIO;
1514
1515 addr = phydev->addr;
1516
1517
1518
1519
1520 mii_addr = readl(&mac->mii_mgmt_addr);
1521 mii_cmd = readl(&mac->mii_mgmt_cmd);
1522
1523
1524 writel(0, &mac->mii_mgmt_cmd);
1525
1526
1527 writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1528
1529
1530 writel(value, &mac->mii_mgmt_ctrl);
1531
1532 do {
1533 udelay(50);
1534 delay++;
1535 mii_indicator = readl(&mac->mii_mgmt_indicator);
1536 } while ((mii_indicator & MGMT_BUSY) && delay < 100);
1537
1538
1539 if (delay == 100) {
1540 u16 tmp;
1541
1542 dev_warn(&adapter->pdev->dev,
1543 "reg 0x%08x could not be written", reg);
1544 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1545 mii_indicator);
1546 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
1547 readl(&mac->mii_mgmt_cmd));
1548
1549 et131x_mii_read(adapter, reg, &tmp);
1550
1551 status = -EIO;
1552 }
1553
1554 writel(0, &mac->mii_mgmt_cmd);
1555
1556
1557
1558
1559
1560 writel(mii_addr, &mac->mii_mgmt_addr);
1561 writel(mii_cmd, &mac->mii_mgmt_cmd);
1562
1563 return status;
1564}

/* et1310_phy_access_mii_bit - read, set or clear a single bit of a PHY
 * register via the MII management interface.
 */
void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
			       u16 regnum, u16 bitnum, u8 *value)
{
	u16 reg;
	u16 mask = 0x0001 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	switch (action) {
	case TRUEPHY_BIT_READ:
		*value = (reg & mask) >> bitnum;
		break;

	case TRUEPHY_BIT_SET:
		et131x_mii_write(adapter, regnum, reg | mask);
		break;

	case TRUEPHY_BIT_CLEAR:
		et131x_mii_write(adapter, regnum, reg & ~mask);
		break;

	default:
		break;
	}
}
1593
1594void et1310_config_flow_control(struct et131x_adapter *adapter)
1595{
1596 struct phy_device *phydev = adapter->phydev;
1597
1598 if (phydev->duplex == DUPLEX_HALF) {
1599 adapter->flowcontrol = FLOW_NONE;
1600 } else {
1601 char remote_pause, remote_async_pause;
1602
1603 et1310_phy_access_mii_bit(adapter,
1604 TRUEPHY_BIT_READ, 5, 10, &remote_pause);
1605 et1310_phy_access_mii_bit(adapter,
1606 TRUEPHY_BIT_READ, 5, 11,
1607 &remote_async_pause);
1608
1609 if ((remote_pause == TRUEPHY_BIT_SET) &&
1610 (remote_async_pause == TRUEPHY_BIT_SET)) {
1611 adapter->flowcontrol = adapter->wanted_flow;
1612 } else if ((remote_pause == TRUEPHY_BIT_SET) &&
1613 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1614 if (adapter->wanted_flow == FLOW_BOTH)
1615 adapter->flowcontrol = FLOW_BOTH;
1616 else
1617 adapter->flowcontrol = FLOW_NONE;
1618 } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
1619 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1620 adapter->flowcontrol = FLOW_NONE;
1621 } else {
1622
1623 if (adapter->wanted_flow == FLOW_BOTH)
1624 adapter->flowcontrol = FLOW_RXONLY;
1625 else
1626 adapter->flowcontrol = FLOW_NONE;
1627 }
1628 }
1629}
1630
1631
1632
1633
1634
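/* et1310_update_macstat_host_counters - fold the hardware MAC-STAT
 * counters into the driver-maintained statistics in adapter->stats.
 */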
1635void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
1636{
1637 struct ce_stats *stats = &adapter->stats;
1638 struct macstat_regs __iomem *macstat =
1639 &adapter->regs->macstat;
1640
1641 stats->tx_collisions += readl(&macstat->tx_total_collisions);
1642 stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
1643 stats->tx_deferred += readl(&macstat->tx_deferred);
1644 stats->tx_excessive_collisions +=
1645 readl(&macstat->tx_multiple_collisions);
1646 stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
1647 stats->tx_underflows += readl(&macstat->tx_undersize_frames);
1648 stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);
1649
1650 stats->rx_align_errs += readl(&macstat->rx_align_errs);
1651 stats->rx_crc_errs += readl(&macstat->rx_code_errs);
1652 stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
1653 stats->rx_overflows += readl(&macstat->rx_oversize_packets);
1654 stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
1655 stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
1656 stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
1657}
1658
1659
1660
1661
1662
1663
1664
1665
1666
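/* et1310_handle_macstat_interrupt - handle a MAC-STAT carry interrupt:
 * read and acknowledge the carry registers, then add the appropriate
 * wrap value (2^16 or 2^12) to each statistic whose hardware counter
 * rolled over.
 */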
1667void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1668{
1669 u32 carry_reg1;
1670 u32 carry_reg2;
1671
1672
1673
1674
1675 carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1676 carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1677
1678 writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1679 writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1680
1681
1682
1683
1684
1685
1686
1687 if (carry_reg1 & (1 << 14))
1688 adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
1689 if (carry_reg1 & (1 << 8))
1690 adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
1691 if (carry_reg1 & (1 << 7))
1692 adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
1693 if (carry_reg1 & (1 << 2))
1694 adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
1695 if (carry_reg1 & (1 << 6))
1696 adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
1697 if (carry_reg1 & (1 << 3))
1698 adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
1699 if (carry_reg1 & (1 << 0))
1700 adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
1701 if (carry_reg2 & (1 << 16))
1702 adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
1703 if (carry_reg2 & (1 << 15))
1704 adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
1705 if (carry_reg2 & (1 << 6))
1706 adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1707 if (carry_reg2 & (1 << 8))
1708 adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
1709 if (carry_reg2 & (1 << 5))
1710 adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1711 if (carry_reg2 & (1 << 4))
1712 adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
1713 if (carry_reg2 & (1 << 2))
1714 adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
1715}
1716
1717
1718
1719int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1720{
1721 struct net_device *netdev = bus->priv;
1722 struct et131x_adapter *adapter = netdev_priv(netdev);
1723 u16 value;
1724 int ret;
1725
1726 ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1727
1728 if (ret < 0)
1729 return ret;
1730 else
1731 return value;
1732}
1733
1734int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
1735{
1736 struct net_device *netdev = bus->priv;
1737 struct et131x_adapter *adapter = netdev_priv(netdev);
1738
1739 return et131x_mii_write(adapter, reg, value);
1740}
1741
1742int et131x_mdio_reset(struct mii_bus *bus)
1743{
1744 struct net_device *netdev = bus->priv;
1745 struct et131x_adapter *adapter = netdev_priv(netdev);
1746
1747 et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
1748
1749 return 0;
1750}
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
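/* et1310_phy_power_down - set or clear the PHY power-down bit in BMCR.
 * @adapter: pointer to our private adapter structure
 * @down: true to power the PHY down, false to power it up
 */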
1762void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
1763{
1764 u16 data;
1765
1766 et131x_mii_read(adapter, MII_BMCR, &data);
1767 data &= ~BMCR_PDOWN;
1768 if (down)
1769 data |= BMCR_PDOWN;
1770 et131x_mii_write(adapter, MII_BMCR, data);
1771}
1772
1773
1774
1775
1776
1777
1778void et131x_xcvr_init(struct et131x_adapter *adapter)
1779{
1780 u16 imr;
1781 u16 isr;
1782 u16 lcr2;
1783
1784 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
1785 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
1786
1787
1788
1789
1790 imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
1791 ET_PHY_INT_MASK_LINKSTAT &
1792 ET_PHY_INT_MASK_ENABLE);
1793
1794 et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804 if ((adapter->eeprom_data[1] & 0x4) == 0) {
1805 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1806
1807 lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T);
1808 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1809
1810 if ((adapter->eeprom_data[1] & 0x8) == 0)
1811 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1812 else
1813 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1814
1815 et131x_mii_write(adapter, PHY_LED_2, lcr2);
1816 }
1817}
1818
/* et131x_configure_global_regs - configure the device's global registers:
 * internal RX/TX queue boundaries, loopback, MSI configuration and the
 * watchdog timer.
 * @adapter: pointer to our adapter structure
 */
void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Standard frames: use the default RX/TX queue split of the
		 * internal packet memory.
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* Mid-size jumbo frames: split the internal memory 50/50
		 * between RX and TX.
		 */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* The largest jumbo frames get a larger TX queue */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	/* Initialize the loopback register. Disable all loopbacks. */
	writel(0, &regs->loopback);

	/* MSI Register */
	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer.  It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}
1865
1866
1867
1868
1869
1870
1871
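/* et131x_config_rx_dma_regs - program the receive DMA engine: the status
 * block and packet status ring addresses, the base address, size and
 * low-water mark of each free buffer ring, and the interrupt coalescing
 * parameters (packets-done count and maximum packet time).
 * @adapter: pointer to our adapter structure
 */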
1872void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1873{
1874 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1875 struct rx_ring *rx_local = &adapter->rx_ring;
1876 struct fbr_desc *fbr_entry;
1877 u32 entry;
1878 u32 psr_num_des;
1879 unsigned long flags;
1880
1881
1882 et131x_rx_dma_disable(adapter);
1883
1884
1885
1886
1887
1888
1889
1890
1891 writel((u32) ((u64)rx_local->rx_status_bus >> 32),
1892 &rx_dma->dma_wb_base_hi);
1893 writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
1894
1895 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1896
1897
1898
1899
1900 writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
1901 &rx_dma->psr_base_hi);
1902 writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
1903 writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1904 writel(0, &rx_dma->psr_full_offset);
1905
1906 psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
1907 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1908 &rx_dma->psr_min_des);
1909
1910 spin_lock_irqsave(&adapter->rcv_lock, flags);
1911
1912
1913 rx_local->local_psr_full = 0;
1914
1915
1916 fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
1917 for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
1918 fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
1919 fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
1920 fbr_entry->word2 = entry;
1921 fbr_entry++;
1922 }
1923
1924
1925
1926
1927 writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
1928 &rx_dma->fbr1_base_hi);
1929 writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
1930 writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
1931 writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
1932
1933
1934
1935
1936 rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
1937 writel(
1938 ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1939 &rx_dma->fbr1_min_des);
1940
1941#ifdef USE_FBR0
1942
1943 fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
1944 for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
1945 fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
1946 fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
1947 fbr_entry->word2 = entry;
1948 fbr_entry++;
1949 }
1950
1951 writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
1952 &rx_dma->fbr0_base_hi);
1953 writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
1954 writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
1955 writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
1956
1957
1958
1959
1960 rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
1961 writel(
1962 ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1963 &rx_dma->fbr0_min_des);
1964#endif
1965
1966
1967
1968
1969
1970
1971 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1972
1973
1974
1975
1976
1977
1978 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1979
1980 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1981}
1982
1983
1984
1985
1986
1987
1988
1989
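/* et131x_config_tx_dma_regs - program the transmit DMA engine with the
 * descriptor ring and write-back status block addresses, and reset the
 * ring index.
 * @adapter: pointer to our private adapter structure
 */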
1990void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1991{
1992 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1993
1994
1995 writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
1996 &txdma->pr_base_hi);
1997 writel((u32) adapter->tx_ring.tx_desc_ring_pa,
1998 &txdma->pr_base_lo);
1999
2000
2001 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
2002
2003
2004 writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
2005 &txdma->dma_wb_base_hi);
2006 writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
2007
2008 *adapter->tx_ring.tx_status = 0;
2009
2010 writel(0, &txdma->service_request);
2011 adapter->tx_ring.send_idx = 0;
2012}
2013
2014
2015
2016
2017
2018
2019
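/* et131x_adapter_setup - bring the hardware to a configured state by
 * programming the global, MAC, MMC, RX/TX MAC, RX/TX DMA and MAC-STAT
 * blocks, then powering up the PHY and initialising the transceiver.
 * @adapter: pointer to our private adapter structure
 */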
2020void et131x_adapter_setup(struct et131x_adapter *adapter)
2021{
2022
2023 et131x_configure_global_regs(adapter);
2024
2025 et1310_config_mac_regs1(adapter);
2026
2027
2028
2029 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
2030
2031 et1310_config_rxmac_regs(adapter);
2032 et1310_config_txmac_regs(adapter);
2033
2034 et131x_config_rx_dma_regs(adapter);
2035 et131x_config_tx_dma_regs(adapter);
2036
2037 et1310_config_macstat_regs(adapter);
2038
2039 et1310_phy_power_down(adapter, 0);
2040 et131x_xcvr_init(adapter);
2041}
2042
2043
2044
2045
2046
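/* et131x_soft_reset - reset the MAC core and issue a software reset of the
 * device via the global sw_reset register, then clear the MAC
 * configuration register.
 * @adapter: pointer to our adapter structure
 */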
2047void et131x_soft_reset(struct et131x_adapter *adapter)
2048{
2049
2050 writel(0xc00f0000, &adapter->regs->mac.cfg1);
2051
2052
2053 writel(0x7F, &adapter->regs->global.sw_reset);
2054 writel(0x000f0000, &adapter->regs->mac.cfg1);
2055 writel(0x00000000, &adapter->regs->mac.cfg1);
2056}
2057
2058
2059
2060
2061
2062
2063
2064
2065void et131x_enable_interrupts(struct et131x_adapter *adapter)
2066{
2067 u32 mask;
2068
2069
2070 if (adapter->flowcontrol == FLOW_TXONLY ||
2071 adapter->flowcontrol == FLOW_BOTH)
2072 mask = INT_MASK_ENABLE;
2073 else
2074 mask = INT_MASK_ENABLE_NO_FLOW;
2075
2076 writel(mask, &adapter->regs->global.int_mask);
2077}
2078
2079
2080
2081
2082
2083
2084
2085void et131x_disable_interrupts(struct et131x_adapter *adapter)
2086{
2087
2088 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
2089}
2090
2091
2092
2093
2094
2095void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2096{
2097
2098 writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
2099 &adapter->regs->txdma.csr);
2100}
2101
2102
2103
2104
2105
2106void et131x_enable_txrx(struct net_device *netdev)
2107{
2108 struct et131x_adapter *adapter = netdev_priv(netdev);
2109
2110
2111 et131x_rx_dma_enable(adapter);
2112 et131x_tx_dma_enable(adapter);
2113
2114
2115 if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
2116 et131x_enable_interrupts(adapter);
2117
2118
2119 netif_start_queue(netdev);
2120}
2121
2122
2123
2124
2125
2126void et131x_disable_txrx(struct net_device *netdev)
2127{
2128 struct et131x_adapter *adapter = netdev_priv(netdev);
2129
2130
2131 netif_stop_queue(netdev);
2132
2133
2134 et131x_rx_dma_disable(adapter);
2135 et131x_tx_dma_disable(adapter);
2136
2137
2138 et131x_disable_interrupts(adapter);
2139}
2140
2141
2142
2143
2144
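/* et131x_init_send - initialise the transmit control block free list:
 * zero all TCBs, chain them together as the ready queue, and empty the
 * in-flight send list.
 * @adapter: pointer to our private adapter structure
 */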
2145void et131x_init_send(struct et131x_adapter *adapter)
2146{
2147 struct tcb *tcb;
2148 u32 ct;
2149 struct tx_ring *tx_ring;
2150
2151
2152 tx_ring = &adapter->tx_ring;
2153 tcb = adapter->tx_ring.tcb_ring;
2154
2155 tx_ring->tcb_qhead = tcb;
2156
2157 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2158
2159
2160 for (ct = 0; ct++ < NUM_TCB; tcb++)
2161
2162
2163
2164 tcb->next = tcb + 1;
2165
2166
2167 tcb--;
2168 tx_ring->tcb_qtail = tcb;
2169 tcb->next = NULL;
2170
2171 tx_ring->send_head = NULL;
2172 tx_ring->send_tail = NULL;
2173}
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2196{
2197 unsigned long flags;
2198 u32 pmcsr;
2199
2200 pmcsr = readl(&adapter->regs->global.pm_csr);
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213 spin_lock_irqsave(&adapter->send_hw_lock, flags);
2214 adapter->flags |= fMP_ADAPTER_LOWER_POWER;
2215 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2216
2217
2218
2219 et131x_disable_txrx(adapter->netdev);
2220
2221
2222 pmcsr &= ~ET_PMCSR_INIT;
2223 writel(pmcsr, &adapter->regs->global.pm_csr);
2224
2225
2226 pmcsr |= ET_PM_PHY_SW_COMA;
2227 writel(pmcsr, &adapter->regs->global.pm_csr);
2228}
2229
2230
2231
2232
2233
2234void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2235{
2236 u32 pmcsr;
2237
2238 pmcsr = readl(&adapter->regs->global.pm_csr);
2239
2240
2241 pmcsr |= ET_PMCSR_INIT;
2242 pmcsr &= ~ET_PM_PHY_SW_COMA;
2243 writel(pmcsr, &adapter->regs->global.pm_csr);
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255 et131x_init_send(adapter);
2256
2257
2258
2259
2260
2261 et131x_soft_reset(adapter);
2262
2263
2264 et131x_adapter_setup(adapter);
2265
2266
2267 adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
2268
2269 et131x_enable_txrx(adapter->netdev);
2270}
2271
2272
2273
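/* bump_free_buff_ring - advance a 10-bit free buffer ring offset by one,
 * toggling the wrap bit when the index passes the given limit, and return
 * the new value for the caller to write to the full-offset register.
 */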
2274static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2275{
2276 u32 tmp_free_buff_ring = *free_buff_ring;
2277 tmp_free_buff_ring++;
2278
2279
2280
2281
2282 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2283 tmp_free_buff_ring &= ~ET_DMA10_MASK;
2284 tmp_free_buff_ring ^= ET_DMA10_WRAP;
2285 }
2286
2287 tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2288 *free_buff_ring = tmp_free_buff_ring;
2289 return tmp_free_buff_ring;
2290}
2291
2292
2293
2294
2295
2296
2297
2298
2299void et131x_align_allocated_memory(struct et131x_adapter *adapter,
2300 uint64_t *phys_addr,
2301 uint64_t *offset, uint64_t mask)
2302{
2303 uint64_t new_addr;
2304
2305 *offset = 0;
2306
2307 new_addr = *phys_addr & ~mask;
2308
2309 if (new_addr != *phys_addr) {
2310
2311 new_addr += mask + 1;
2312
2313 *offset = new_addr - *phys_addr;
2314
2315 *phys_addr = new_addr;
2316 }
2317}
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
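/* et131x_rx_dma_memory_alloc - allocate all receive-side DMA memory: the
 * fbr_lookup tables, the free buffer descriptor rings and their data
 * buffers (allocated in FBR_CHUNKS-sized groups), the packet status ring,
 * the receive status block and the RFD lookaside cache.  Ring and buffer
 * sizes are chosen from the configured jumbo packet size.
 *
 * Returns 0 on success or -ENOMEM on any allocation failure.
 */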
2328int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2329{
2330 u32 i, j;
2331 u32 bufsize;
2332 u32 pktstat_ringsize, fbr_chunksize;
2333 struct rx_ring *rx_ring;
2334
2335
2336 rx_ring = &adapter->rx_ring;
2337
2338
2339#ifdef USE_FBR0
2340 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2341#endif
2342 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362 if (adapter->registry_jumbo_packet < 2048) {
2363#ifdef USE_FBR0
2364 rx_ring->fbr[1]->buffsize = 256;
2365 rx_ring->fbr[1]->num_entries = 512;
2366#endif
2367 rx_ring->fbr[0]->buffsize = 2048;
2368 rx_ring->fbr[0]->num_entries = 512;
2369 } else if (adapter->registry_jumbo_packet < 4096) {
2370#ifdef USE_FBR0
2371 rx_ring->fbr[1]->buffsize = 512;
2372 rx_ring->fbr[1]->num_entries = 1024;
2373#endif
2374 rx_ring->fbr[0]->buffsize = 4096;
2375 rx_ring->fbr[0]->num_entries = 512;
2376 } else {
2377#ifdef USE_FBR0
2378 rx_ring->fbr[1]->buffsize = 1024;
2379 rx_ring->fbr[1]->num_entries = 768;
2380#endif
2381 rx_ring->fbr[0]->buffsize = 16384;
2382 rx_ring->fbr[0]->num_entries = 128;
2383 }
2384
2385#ifdef USE_FBR0
2386 adapter->rx_ring.psr_num_entries =
2387 adapter->rx_ring.fbr[1]->num_entries +
2388 adapter->rx_ring.fbr[0]->num_entries;
2389#else
2390 adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
2391#endif
2392
2393
2394 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2395 0xfff;
2396 rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2397 bufsize,
2398 &rx_ring->fbr[0]->ring_physaddr,
2399 GFP_KERNEL);
2400 if (!rx_ring->fbr[0]->ring_virtaddr) {
2401 dev_err(&adapter->pdev->dev,
2402 "Cannot alloc memory for Free Buffer Ring 1\n");
2403 return -ENOMEM;
2404 }
2405
2406
2407
2408
2409
2410
2411
2412
2413 rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
2414
2415
2416 et131x_align_allocated_memory(adapter,
2417 &rx_ring->fbr[0]->real_physaddr,
2418 &rx_ring->fbr[0]->offset, 0x0FFF);
2419
2420 rx_ring->fbr[0]->ring_virtaddr =
2421 (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
2422 rx_ring->fbr[0]->offset);
2423
2424#ifdef USE_FBR0
2425
2426 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2427 0xfff;
2428 rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2429 bufsize,
2430 &rx_ring->fbr[1]->ring_physaddr,
2431 GFP_KERNEL);
2432 if (!rx_ring->fbr[1]->ring_virtaddr) {
2433 dev_err(&adapter->pdev->dev,
2434 "Cannot alloc memory for Free Buffer Ring 0\n");
2435 return -ENOMEM;
2436 }
2437
2438
2439
2440
2441
2442
2443
2444
2445 rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
2446
2447
2448 et131x_align_allocated_memory(adapter,
2449 &rx_ring->fbr[1]->real_physaddr,
2450 &rx_ring->fbr[1]->offset, 0x0FFF);
2451
2452 rx_ring->fbr[1]->ring_virtaddr =
2453 (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
2454 rx_ring->fbr[1]->offset);
2455#endif
2456 for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2457 u64 fbr1_offset;
2458 u64 fbr1_tmp_physaddr;
2459 u32 fbr1_align;
2460
2461
2462
2463
2464
2465
2466
2467
2468 if (rx_ring->fbr[0]->buffsize > 4096)
2469 fbr1_align = 4096;
2470 else
2471 fbr1_align = rx_ring->fbr[0]->buffsize;
2472
2473 fbr_chunksize =
2474 (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2475 rx_ring->fbr[0]->mem_virtaddrs[i] =
2476 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2477 &rx_ring->fbr[0]->mem_physaddrs[i],
2478 GFP_KERNEL);
2479
2480 if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2481 dev_err(&adapter->pdev->dev,
2482 "Could not alloc memory\n");
2483 return -ENOMEM;
2484 }
2485
2486
2487 fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2488
2489 et131x_align_allocated_memory(adapter,
2490 &fbr1_tmp_physaddr,
2491 &fbr1_offset, (fbr1_align - 1));
2492
2493 for (j = 0; j < FBR_CHUNKS; j++) {
2494 u32 index = (i * FBR_CHUNKS) + j;
2495
2496
2497
2498
2499 rx_ring->fbr[0]->virt[index] =
2500 (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2501 (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2502
2503
2504
2505
2506 rx_ring->fbr[0]->bus_high[index] =
2507 (u32) (fbr1_tmp_physaddr >> 32);
2508 rx_ring->fbr[0]->bus_low[index] =
2509 (u32) fbr1_tmp_physaddr;
2510
2511 fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2512
2513 rx_ring->fbr[0]->buffer1[index] =
2514 rx_ring->fbr[0]->virt[index];
2515 rx_ring->fbr[0]->buffer2[index] =
2516 rx_ring->fbr[0]->virt[index] - 4;
2517 }
2518 }
2519
2520#ifdef USE_FBR0
2521
2522 for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2523 u64 fbr0_offset;
2524 u64 fbr0_tmp_physaddr;
2525
2526 fbr_chunksize =
2527 ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2528 rx_ring->fbr[1]->mem_virtaddrs[i] =
2529 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2530 &rx_ring->fbr[1]->mem_physaddrs[i],
2531 GFP_KERNEL);
2532
2533 if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2534 dev_err(&adapter->pdev->dev,
2535 "Could not alloc memory\n");
2536 return -ENOMEM;
2537 }
2538
2539
2540 fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2541
2542 et131x_align_allocated_memory(adapter,
2543 &fbr0_tmp_physaddr,
2544 &fbr0_offset,
2545 rx_ring->fbr[1]->buffsize - 1);
2546
2547 for (j = 0; j < FBR_CHUNKS; j++) {
2548 u32 index = (i * FBR_CHUNKS) + j;
2549
2550 rx_ring->fbr[1]->virt[index] =
2551 (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2552 (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2553
2554 rx_ring->fbr[1]->bus_high[index] =
2555 (u32) (fbr0_tmp_physaddr >> 32);
2556 rx_ring->fbr[1]->bus_low[index] =
2557 (u32) fbr0_tmp_physaddr;
2558
2559 fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2560
2561 rx_ring->fbr[1]->buffer1[index] =
2562 rx_ring->fbr[1]->virt[index];
2563 rx_ring->fbr[1]->buffer2[index] =
2564 rx_ring->fbr[1]->virt[index] - 4;
2565 }
2566 }
2567#endif
2568
2569
2570 pktstat_ringsize =
2571 sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2572
2573 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2574 pktstat_ringsize,
2575 &rx_ring->ps_ring_physaddr,
2576 GFP_KERNEL);
2577
2578 if (!rx_ring->ps_ring_virtaddr) {
2579 dev_err(&adapter->pdev->dev,
2580 "Cannot alloc memory for Packet Status Ring\n");
2581 return -ENOMEM;
2582 }
2583 printk(KERN_INFO "Packet Status Ring %lx\n",
2584 (unsigned long) rx_ring->ps_ring_physaddr);
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2595 sizeof(struct rx_status_block),
2596 &rx_ring->rx_status_bus,
2597 GFP_KERNEL);
2598 if (!rx_ring->rx_status_block) {
2599 dev_err(&adapter->pdev->dev,
2600 "Cannot alloc memory for Status Block\n");
2601 return -ENOMEM;
2602 }
2603 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2604 printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
2605
2606
2607
2608
2609
2610
2611
2612 rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
2613 sizeof(struct rfd),
2614 0,
2615 SLAB_CACHE_DMA |
2616 SLAB_HWCACHE_ALIGN,
2617 NULL);
2618
2619 adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2620
2621
2622
2623
2624 INIT_LIST_HEAD(&rx_ring->recv_list);
2625 return 0;
2626}
2627
2628
2629
2630
2631
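/* et131x_rx_dma_memory_free - undo et131x_rx_dma_memory_alloc(): return
 * queued RFDs to the lookaside cache, free the free buffer ring buffers
 * and descriptor rings, the packet status ring, the status block, the
 * lookaside cache itself and the fbr_lookup tables.
 */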
2632void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2633{
2634 u32 index;
2635 u32 bufsize;
2636 u32 pktstat_ringsize;
2637 struct rfd *rfd;
2638 struct rx_ring *rx_ring;
2639
2640
2641 rx_ring = &adapter->rx_ring;
2642
2643
2644 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2645
2646 while (!list_empty(&rx_ring->recv_list)) {
2647 rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2648 struct rfd, list_node);
2649
2650 list_del(&rfd->list_node);
2651 rfd->skb = NULL;
2652 kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2653 }
2654
2655
2656 if (rx_ring->fbr[0]->ring_virtaddr) {
2657
2658 for (index = 0; index <
2659 (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2660 if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2661 u32 fbr1_align;
2662
2663 if (rx_ring->fbr[0]->buffsize > 4096)
2664 fbr1_align = 4096;
2665 else
2666 fbr1_align = rx_ring->fbr[0]->buffsize;
2667
2668 bufsize =
2669 (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2670 fbr1_align - 1;
2671
2672 dma_free_coherent(&adapter->pdev->dev,
2673 bufsize,
2674 rx_ring->fbr[0]->mem_virtaddrs[index],
2675 rx_ring->fbr[0]->mem_physaddrs[index]);
2676
2677 rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2678 }
2679 }
2680
2681
2682 rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2683 rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2684
2685 bufsize =
2686 (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2687 0xfff;
2688
2689 dma_free_coherent(&adapter->pdev->dev, bufsize,
2690 rx_ring->fbr[0]->ring_virtaddr,
2691 rx_ring->fbr[0]->ring_physaddr);
2692
2693 rx_ring->fbr[0]->ring_virtaddr = NULL;
2694 }
2695
2696#ifdef USE_FBR0
2697
2698 if (rx_ring->fbr[1]->ring_virtaddr) {
2699
2700 for (index = 0; index <
2701 (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2702 if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2703 bufsize =
2704 (rx_ring->fbr[1]->buffsize *
2705 (FBR_CHUNKS + 1)) - 1;
2706
2707 dma_free_coherent(&adapter->pdev->dev,
2708 bufsize,
2709 rx_ring->fbr[1]->mem_virtaddrs[index],
2710 rx_ring->fbr[1]->mem_physaddrs[index]);
2711
2712 rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2713 }
2714 }
2715
2716
2717 rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2718 rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2719
2720 bufsize =
2721 (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2722 0xfff;
2723
2724 dma_free_coherent(&adapter->pdev->dev,
2725 bufsize,
2726 rx_ring->fbr[1]->ring_virtaddr,
2727 rx_ring->fbr[1]->ring_physaddr);
2728
2729 rx_ring->fbr[1]->ring_virtaddr = NULL;
2730 }
2731#endif
2732
2733
2734 if (rx_ring->ps_ring_virtaddr) {
2735 pktstat_ringsize =
2736 sizeof(struct pkt_stat_desc) *
2737 adapter->rx_ring.psr_num_entries;
2738
2739 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2740 rx_ring->ps_ring_virtaddr,
2741 rx_ring->ps_ring_physaddr);
2742
2743 rx_ring->ps_ring_virtaddr = NULL;
2744 }
2745
2746
2747 if (rx_ring->rx_status_block) {
2748 dma_free_coherent(&adapter->pdev->dev,
2749 sizeof(struct rx_status_block),
2750 rx_ring->rx_status_block, rx_ring->rx_status_bus);
2751 rx_ring->rx_status_block = NULL;
2752 }
2753
2754
2755 if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2756 kmem_cache_destroy(rx_ring->recv_lookaside);
2757 adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2758 }
2759
2760
2761#ifdef USE_FBR0
2762 kfree(rx_ring->fbr[1]);
2763#endif
2764
2765 kfree(rx_ring->fbr[0]);
2766
2767
2768 rx_ring->num_ready_recv = 0;
2769}
2770
2771
2772
2773
2774
2775
2776
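/* et131x_init_recv - allocate the receive frame descriptors (RFDs) from
 * the lookaside cache and queue them on the adapter's receive list.
 * Returns 0 if more than the minimum number of RFDs could be allocated,
 * otherwise -ENOMEM.
 */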
2777int et131x_init_recv(struct et131x_adapter *adapter)
2778{
2779 int status = -ENOMEM;
2780 struct rfd *rfd = NULL;
2781 u32 rfdct;
2782 u32 numrfd = 0;
2783 struct rx_ring *rx_ring;
2784
2785
2786 rx_ring = &adapter->rx_ring;
2787
2788
2789 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2790 rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2791 GFP_ATOMIC | GFP_DMA);
2792
2793 if (!rfd) {
2794 dev_err(&adapter->pdev->dev,
2795 "Couldn't alloc RFD out of kmem_cache\n");
2796 status = -ENOMEM;
2797 continue;
2798 }
2799
2800 rfd->skb = NULL;
2801
2802
2803 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2804
2805
2806 rx_ring->num_ready_recv++;
2807 numrfd++;
2808 }
2809
2810 if (numrfd > NIC_MIN_NUM_RFD)
2811 status = 0;
2812
2813 rx_ring->num_rfd = numrfd;
2814
2815 if (status != 0) {
2816 kmem_cache_free(rx_ring->recv_lookaside, rfd);
2817 dev_err(&adapter->pdev->dev,
2818 "Allocation problems in et131x_init_recv\n");
2819 }
2820 return status;
2821}
2822
2823
2824
2825
2826
2827void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2828{
2829 struct phy_device *phydev = adapter->phydev;
2830
2831 if (!phydev)
2832 return;
2833
2834
2835
2836
2837 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2838 writel(0, &adapter->regs->rxdma.max_pkt_time);
2839 writel(1, &adapter->regs->rxdma.num_pkt_done);
2840 }
2841}
2842
2843
2844
2845
2846
2847
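/* nic_return_rfd - recycle a receive frame descriptor: re-arm its buffer
 * on the appropriate free buffer ring (advancing that ring's full offset),
 * then put the RFD back on the adapter's receive list.
 */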
2848static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2849{
2850 struct rx_ring *rx_local = &adapter->rx_ring;
2851 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2852 u16 buff_index = rfd->bufferindex;
2853 u8 ring_index = rfd->ringindex;
2854 unsigned long flags;
2855
2856
2857
2858
2859 if (
2860#ifdef USE_FBR0
2861 (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2862#endif
2863 (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2864 spin_lock_irqsave(&adapter->fbr_lock, flags);
2865
2866 if (ring_index == 1) {
2867 struct fbr_desc *next = (struct fbr_desc *)
2868 (rx_local->fbr[0]->ring_virtaddr) +
2869 INDEX10(rx_local->fbr[0]->local_full);
2870
2871
2872
2873
2874
2875 next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2876 next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2877 next->word2 = buff_index;
2878
2879 writel(bump_free_buff_ring(
2880 &rx_local->fbr[0]->local_full,
2881 rx_local->fbr[0]->num_entries - 1),
2882 &rx_dma->fbr1_full_offset);
2883 }
2884#ifdef USE_FBR0
2885 else {
2886 struct fbr_desc *next = (struct fbr_desc *)
2887 rx_local->fbr[1]->ring_virtaddr +
2888 INDEX10(rx_local->fbr[1]->local_full);
2889
2890
2891
2892
2893
2894 next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2895 next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2896 next->word2 = buff_index;
2897
2898 writel(bump_free_buff_ring(
2899 &rx_local->fbr[1]->local_full,
2900 rx_local->fbr[1]->num_entries - 1),
2901 &rx_dma->fbr0_full_offset);
2902 }
2903#endif
2904 spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2905 } else {
2906 dev_err(&adapter->pdev->dev,
2907 "%s illegal Buffer Index returned\n", __func__);
2908 }
2909
2910
2911
2912
2913 spin_lock_irqsave(&adapter->rcv_lock, flags);
2914 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2915 rx_local->num_ready_recv++;
2916 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2917
2918 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2919}
2920
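/**
 * nic_rx_pkts - Check the packet status ring for newly received packets
 * @adapter: pointer to our adapter
 *
 * Validates the packet status ring entry, copies an accepted frame into a
 * freshly allocated skb and hands it to netif_rx().  Returns the RFD that
 * described the packet, or NULL if no packet was available or it was bad.
 */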
2921static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2922{
2923 struct rx_ring *rx_local = &adapter->rx_ring;
2924 struct rx_status_block *status;
2925 struct pkt_stat_desc *psr;
2926 struct rfd *rfd;
2927 u32 i;
2928 u8 *buf;
2929 unsigned long flags;
2930 struct list_head *element;
2931 u8 ring_index;
2932 u16 buff_index;
2933 u32 len;
2934 u32 word0;
2935 u32 word1;
2936
2937
2938
2939
2940
2941 status = rx_local->rx_status_block;
2942 word1 = status->word1 >> 16;
2943
2944
2945 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2946
2947 return NULL;
2948
2949
2950 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2951 (rx_local->local_psr_full & 0xFFF);
2952
2953
2954
2955
2956
2957 len = psr->word1 & 0xFFFF;
2958 ring_index = (psr->word1 >> 26) & 0x03;
2959 buff_index = (psr->word1 >> 16) & 0x3FF;
2960 word0 = psr->word0;
2961
2962
2963
2964 add_12bit(&rx_local->local_psr_full, 1);
	if ((rx_local->local_psr_full & 0xFFF) >
	    rx_local->psr_num_entries - 1) {
2967
2968 rx_local->local_psr_full &= ~0xFFF;
2969 rx_local->local_psr_full ^= 0x1000;
2970 }
2971
2972 writel(rx_local->local_psr_full,
2973 &adapter->regs->rxdma.psr_full_offset);
2974
2975#ifndef USE_FBR0
2976 if (ring_index != 1)
2977 return NULL;
2978#endif
2979
2980#ifdef USE_FBR0
2981 if (ring_index > 1 ||
2982 (ring_index == 0 &&
2983 buff_index > rx_local->fbr[1]->num_entries - 1) ||
2984 (ring_index == 1 &&
2985 buff_index > rx_local->fbr[0]->num_entries - 1))
2986#else
2987 if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1)
2988#endif
2989 {
2990
2991 dev_err(&adapter->pdev->dev,
2992 "NICRxPkts PSR Entry %d indicates "
2993 "length of %d and/or bad bi(%d)\n",
2994 rx_local->local_psr_full & 0xFFF,
2995 len, buff_index);
2996 return NULL;
2997 }
2998
2999
3000 spin_lock_irqsave(&adapter->rcv_lock, flags);
3001
3002 rfd = NULL;
3003 element = rx_local->recv_list.next;
3004 rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
3005
3006 if (rfd == NULL) {
3007 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
3008 return NULL;
3009 }
3010
3011 list_del(&rfd->list_node);
3012 rx_local->num_ready_recv--;
3013
3014 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
3015
3016 rfd->bufferindex = buff_index;
3017 rfd->ringindex = ring_index;
3018
3019
3020
3021
3022
3023
3024 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
3025 adapter->stats.rx_other_errs++;
3026 len = 0;
3027 }
3028
3029 if (len) {
3030
3031 if ((word0 & ALCATEL_MULTICAST_PKT) &&
3032 !(word0 & ALCATEL_BROADCAST_PKT)) {
3033
3034
3035
3036
3037
3038
3039
3040 if ((adapter->packet_filter &
3041 ET131X_PACKET_TYPE_MULTICAST)
3042 && !(adapter->packet_filter &
3043 ET131X_PACKET_TYPE_PROMISCUOUS)
3044 && !(adapter->packet_filter &
3045 ET131X_PACKET_TYPE_ALL_MULTICAST)) {
3046
3047
3048
3049
3050 buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
3051 virt[buff_index];
3052
3053
3054
3055
3056
3057 for (i = 0; i < adapter->multicast_addr_count;
3058 i++) {
3059 if (buf[0] ==
3060 adapter->multicast_list[i][0]
3061 && buf[1] ==
3062 adapter->multicast_list[i][1]
3063 && buf[2] ==
3064 adapter->multicast_list[i][2]
3065 && buf[3] ==
3066 adapter->multicast_list[i][3]
3067 && buf[4] ==
3068 adapter->multicast_list[i][4]
3069 && buf[5] ==
3070 adapter->multicast_list[i][5]) {
3071 break;
3072 }
3073 }
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083 if (i == adapter->multicast_addr_count)
3084 len = 0;
3085 }
3086
3087 if (len > 0)
3088 adapter->stats.multicast_pkts_rcvd++;
3089 } else if (word0 & ALCATEL_BROADCAST_PKT)
3090 adapter->stats.broadcast_pkts_rcvd++;
3091 else
3092
3093
3094
3095
3096
3097 adapter->stats.unicast_pkts_rcvd++;
3098 }
3099
3100 if (len > 0) {
3101 struct sk_buff *skb = NULL;
3102
3103
3104 rfd->len = len;
3105
3106 skb = dev_alloc_skb(rfd->len + 2);
3107 if (!skb) {
3108 dev_err(&adapter->pdev->dev,
3109 "Couldn't alloc an SKB for Rx\n");
3110 return NULL;
3111 }
3112
3113 adapter->net_stats.rx_bytes += rfd->len;
3114
3115
3116
3117
3118
3119 memcpy(skb_put(skb, rfd->len),
3120 rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
3121 rfd->len);
3122
3123 skb->dev = adapter->netdev;
3124 skb->protocol = eth_type_trans(skb, adapter->netdev);
3125 skb->ip_summed = CHECKSUM_NONE;
3126
3127 netif_rx(skb);
3128 } else {
3129 rfd->len = 0;
3130 }
3131
3132 nic_return_rfd(adapter, rfd);
3133 return rfd;
3134}
3135
3136
3137
3138
3139
3140
3141
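/**
 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
 * @adapter: pointer to our adapter
 *
 * Processes up to NUM_PACKETS_HANDLED packets per call; if more work
 * remains, the watchdog timer is re-armed so processing resumes later.
 */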
3142void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
3143{
3144 struct rfd *rfd = NULL;
3145 u32 count = 0;
3146 bool done = true;
3147
3148
3149 while (count < NUM_PACKETS_HANDLED) {
3150 if (list_empty(&adapter->rx_ring.recv_list)) {
3151 WARN_ON(adapter->rx_ring.num_ready_recv != 0);
3152 done = false;
3153 break;
3154 }
3155
3156 rfd = nic_rx_pkts(adapter);
3157
3158 if (rfd == NULL)
3159 break;
3160
3161
3162
3163
3164
3165
3166 if (!adapter->packet_filter ||
3167 !netif_carrier_ok(adapter->netdev) ||
3168 rfd->len == 0)
3169 continue;
3170
3171
3172 adapter->net_stats.rx_packets++;
3173
3174
3175 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
3176 dev_warn(&adapter->pdev->dev,
3177 "RFD's are running out\n");
3178 }
3179 count++;
3180 }
3181
3182 if (count == NUM_PACKETS_HANDLED || !done) {
3183 adapter->rx_ring.unfinished_receives = true;
3184 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3185 &adapter->regs->global.watchdog_timer);
3186 } else
3187
3188 adapter->rx_ring.unfinished_receives = false;
3189}
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
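/**
 * et131x_tx_dma_memory_alloc - Allocate memory for transmit
 * @adapter: pointer to our private adapter structure
 *
 * Allocates the TCB array, the shared transmit descriptor ring and the
 * transmit status block.  Returns 0 on success, -ENOMEM on failure.
 */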
3205int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
3206{
3207 int desc_size = 0;
3208 struct tx_ring *tx_ring = &adapter->tx_ring;
3209
3210
3211 adapter->tx_ring.tcb_ring =
3212 kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
3213 if (!adapter->tx_ring.tcb_ring) {
3214 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
3215 return -ENOMEM;
3216 }
3217
3218
3219
3220
3221 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
3222 tx_ring->tx_desc_ring =
3223 (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
3224 desc_size,
3225 &tx_ring->tx_desc_ring_pa,
3226 GFP_KERNEL);
3227 if (!adapter->tx_ring.tx_desc_ring) {
3228 dev_err(&adapter->pdev->dev,
3229 "Cannot alloc memory for Tx Ring\n");
3230 return -ENOMEM;
3231 }
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3242 sizeof(u32),
3243 &tx_ring->tx_status_pa,
3244 GFP_KERNEL);
	if (!adapter->tx_ring.tx_status) {
3246 dev_err(&adapter->pdev->dev,
3247 "Cannot alloc memory for Tx status block\n");
3248 return -ENOMEM;
3249 }
3250 return 0;
3251}
3252
3253
3254
3255
3256
3257
3258
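/**
 * et131x_tx_dma_memory_free - Free the transmit memory allocated above
 * @adapter: pointer to our private adapter structure
 */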
3259void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3260{
3261 int desc_size = 0;
3262
3263 if (adapter->tx_ring.tx_desc_ring) {
3264
3265 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3266 + 4096 - 1;
3267 dma_free_coherent(&adapter->pdev->dev,
3268 desc_size,
3269 adapter->tx_ring.tx_desc_ring,
3270 adapter->tx_ring.tx_desc_ring_pa);
3271 adapter->tx_ring.tx_desc_ring = NULL;
3272 }
3273
3274
3275 if (adapter->tx_ring.tx_status) {
3276 dma_free_coherent(&adapter->pdev->dev,
3277 sizeof(u32),
3278 adapter->tx_ring.tx_status,
3279 adapter->tx_ring.tx_status_pa);
3280
3281 adapter->tx_ring.tx_status = NULL;
3282 }
3283
3284 kfree(adapter->tx_ring.tcb_ring);
3285}
3286
3287
3288
3289
3290
3291
3292
3293
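/**
 * nic_send_packet - NIC specific send handler
 * @adapter: pointer to our adapter
 * @tcb: pointer to the TCB describing the packet to send
 *
 * Maps the skb fragments, builds the transmit descriptors, copies them
 * into the descriptor ring and tells the hardware about the new work.
 * Returns 0 on success, a negative errno on failure.
 */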
3294static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3295{
3296 u32 i;
3297 struct tx_desc desc[24];
3298 u32 frag = 0;
3299 u32 thiscopy, remainder;
3300 struct sk_buff *skb = tcb->skb;
3301 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3302 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3303 unsigned long flags;
3304 struct phy_device *phydev = adapter->phydev;
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314 if (nr_frags > 23)
3315 return -EIO;
3316
3317 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
3318
3319 for (i = 0; i < nr_frags; i++) {
3320
3321
3322
3323 if (i == 0) {
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333 if ((skb->len - skb->data_len) <= 1514) {
3334 desc[frag].addr_hi = 0;
3335
3336
3337 desc[frag].len_vlan =
3338 skb->len - skb->data_len;
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348 desc[frag++].addr_lo =
3349 dma_map_single(&adapter->pdev->dev,
3350 skb->data,
3351 skb->len -
3352 skb->data_len,
3353 DMA_TO_DEVICE);
3354 } else {
3355 desc[frag].addr_hi = 0;
3356 desc[frag].len_vlan =
3357 (skb->len - skb->data_len) / 2;
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367 desc[frag++].addr_lo =
3368 dma_map_single(&adapter->pdev->dev,
3369 skb->data,
3370 ((skb->len -
3371 skb->data_len) / 2),
3372 DMA_TO_DEVICE);
3373 desc[frag].addr_hi = 0;
3374
3375 desc[frag].len_vlan =
3376 (skb->len - skb->data_len) / 2;
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386 desc[frag++].addr_lo =
3387 dma_map_single(&adapter->pdev->dev,
3388 skb->data +
3389 ((skb->len -
3390 skb->data_len) / 2),
3391 ((skb->len -
3392 skb->data_len) / 2),
3393 DMA_TO_DEVICE);
3394 }
3395 } else {
3396 desc[frag].addr_hi = 0;
3397 desc[frag].len_vlan =
3398 frags[i - 1].size;
3399
3400
3401
3402
3403
3404
3405
3406 desc[frag++].addr_lo = skb_frag_dma_map(
3407 &adapter->pdev->dev,
3408 &frags[i - 1],
3409 0,
3410 frags[i - 1].size,
3411 DMA_TO_DEVICE);
3412 }
3413 }
3414
3415 if (phydev && phydev->speed == SPEED_1000) {
3416 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3417
3418 desc[frag - 1].flags = 0x5;
3419 adapter->tx_ring.since_irq = 0;
3420 } else {
3421 desc[frag - 1].flags = 0x1;
3422 }
3423 } else
3424 desc[frag - 1].flags = 0x5;
3425
3426 desc[0].flags |= 2;
3427
3428 tcb->index_start = adapter->tx_ring.send_idx;
3429 tcb->stale = 0;
3430
3431 spin_lock_irqsave(&adapter->send_hw_lock, flags);
3432
3433 thiscopy = NUM_DESC_PER_RING_TX -
3434 INDEX10(adapter->tx_ring.send_idx);
3435
3436 if (thiscopy >= frag) {
3437 remainder = 0;
3438 thiscopy = frag;
3439 } else {
3440 remainder = frag - thiscopy;
3441 }
3442
3443 memcpy(adapter->tx_ring.tx_desc_ring +
3444 INDEX10(adapter->tx_ring.send_idx), desc,
3445 sizeof(struct tx_desc) * thiscopy);
3446
3447 add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3448
3449 if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3450 INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3451 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3452 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3453 }
3454
3455 if (remainder) {
3456 memcpy(adapter->tx_ring.tx_desc_ring,
3457 desc + thiscopy,
3458 sizeof(struct tx_desc) * remainder);
3459
3460 add_10bit(&adapter->tx_ring.send_idx, remainder);
3461 }
3462
3463 if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3464 if (adapter->tx_ring.send_idx)
3465 tcb->index = NUM_DESC_PER_RING_TX - 1;
3466 else
3467 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
3468 } else
3469 tcb->index = adapter->tx_ring.send_idx - 1;
3470
3471 spin_lock(&adapter->tcb_send_qlock);
3472
3473 if (adapter->tx_ring.send_tail)
3474 adapter->tx_ring.send_tail->next = tcb;
3475 else
3476 adapter->tx_ring.send_head = tcb;
3477
3478 adapter->tx_ring.send_tail = tcb;
3479
3480 WARN_ON(tcb->next != NULL);
3481
3482 adapter->tx_ring.used++;
3483
3484 spin_unlock(&adapter->tcb_send_qlock);
3485
3486
3487 writel(adapter->tx_ring.send_idx,
3488 &adapter->regs->txdma.service_request);
3489
3490
3491
3492
3493 if (phydev && phydev->speed == SPEED_1000) {
3494 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3495 &adapter->regs->global.watchdog_timer);
3496 }
3497 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3498
3499 return 0;
3500}
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
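/**
 * send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @adapter: a pointer to the device's private adapter structure
 *
 * Grabs a free TCB, notes broadcast/multicast destinations and hands the
 * packet to nic_send_packet().  Returns 0 on success, errno on failure.
 */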
3511static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3512{
3513 int status;
3514 struct tcb *tcb = NULL;
3515 u16 *shbufva;
3516 unsigned long flags;
3517
3518
3519 if (skb->len < ETH_HLEN)
3520 return -EIO;
3521
3522
3523 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3524
3525 tcb = adapter->tx_ring.tcb_qhead;
3526
3527 if (tcb == NULL) {
3528 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3529 return -ENOMEM;
3530 }
3531
3532 adapter->tx_ring.tcb_qhead = tcb->next;
3533
3534 if (adapter->tx_ring.tcb_qhead == NULL)
3535 adapter->tx_ring.tcb_qtail = NULL;
3536
3537 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3538
3539 tcb->skb = skb;
3540
3541 if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3542 shbufva = (u16 *) skb->data;
3543
3544 if ((shbufva[0] == 0xffff) &&
3545 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3546 tcb->flags |= fMP_DEST_BROAD;
3547 } else if ((shbufva[0] & 0x3) == 0x0001) {
3548 tcb->flags |= fMP_DEST_MULTI;
3549 }
3550 }
3551
3552 tcb->next = NULL;
3553
3554
3555 status = nic_send_packet(adapter, tcb);
3556
3557 if (status != 0) {
3558 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3559
3560 if (adapter->tx_ring.tcb_qtail)
3561 adapter->tx_ring.tcb_qtail->next = tcb;
3562 else
3563
3564 adapter->tx_ring.tcb_qhead = tcb;
3565
3566 adapter->tx_ring.tcb_qtail = tcb;
3567 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3568 return status;
3569 }
3570 WARN_ON(adapter->tx_ring.used > NUM_TCB);
3571 return 0;
3572}
3573
3574
3575
3576
3577
3578
3579
3580
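/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Returns 0 in almost all cases; non-zero only on extreme hard failure.
 */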
3581int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3582{
3583 int status = 0;
3584 struct et131x_adapter *adapter = netdev_priv(netdev);
3585
3586
3587
3588
3589
3590
3591
3592
3593 if (adapter->tx_ring.used >= NUM_TCB) {
3594
3595
3596
3597
3598 status = -ENOMEM;
3599 } else {
3600
3601
3602
3603 if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3604 !netif_carrier_ok(netdev)) {
3605 dev_kfree_skb_any(skb);
3606 skb = NULL;
3607
3608 adapter->net_stats.tx_dropped++;
3609 } else {
3610 status = send_packet(skb, adapter);
3611 if (status != 0 && status != -ENOMEM) {
3612
3613
3614
3615 dev_kfree_skb_any(skb);
3616 skb = NULL;
3617 adapter->net_stats.tx_dropped++;
3618 }
3619 }
3620 }
3621 return status;
3622}
3623
3624
3625
3626
3627
3628
3629
3630
3631
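/**
 * free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to the TCB being recycled
 *
 * Updates the transmit counters, unmaps the packet's descriptors, frees
 * the skb and returns the TCB to the ready queue.
 */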
3632static inline void free_send_packet(struct et131x_adapter *adapter,
3633 struct tcb *tcb)
3634{
3635 unsigned long flags;
3636 struct tx_desc *desc = NULL;
3637 struct net_device_stats *stats = &adapter->net_stats;
3638
3639 if (tcb->flags & fMP_DEST_BROAD)
3640 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3641 else if (tcb->flags & fMP_DEST_MULTI)
3642 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3643 else
3644 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3645
3646 if (tcb->skb) {
3647 stats->tx_bytes += tcb->skb->len;
3648
3649
3650
3651
3652
3653 do {
3654 desc = (struct tx_desc *)
3655 (adapter->tx_ring.tx_desc_ring +
3656 INDEX10(tcb->index_start));
3657
3658 dma_unmap_single(&adapter->pdev->dev,
3659 desc->addr_lo,
3660 desc->len_vlan, DMA_TO_DEVICE);
3661
3662 add_10bit(&tcb->index_start, 1);
3663 if (INDEX10(tcb->index_start) >=
3664 NUM_DESC_PER_RING_TX) {
3665 tcb->index_start &= ~ET_DMA10_MASK;
3666 tcb->index_start ^= ET_DMA10_WRAP;
3667 }
3668 } while (desc != (adapter->tx_ring.tx_desc_ring +
3669 INDEX10(tcb->index)));
3670
3671 dev_kfree_skb_any(tcb->skb);
3672 }
3673
3674 memset(tcb, 0, sizeof(struct tcb));
3675
3676
3677 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3678
3679 adapter->net_stats.tx_packets++;
3680
3681 if (adapter->tx_ring.tcb_qtail)
3682 adapter->tx_ring.tcb_qtail->next = tcb;
3683 else
3684
3685 adapter->tx_ring.tcb_qhead = tcb;
3686
3687 adapter->tx_ring.tcb_qtail = tcb;
3688
3689 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3690 WARN_ON(adapter->tx_ring.used < 0);
3691}
3692
3693
3694
3695
3696
3697
3698
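/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @adapter: pointer to our adapter
 *
 * Walks the send list, freeing every TCB that is still outstanding.
 */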
3699void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3700{
3701 struct tcb *tcb;
3702 unsigned long flags;
3703 u32 freed = 0;
3704
3705
3706 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3707
3708 tcb = adapter->tx_ring.send_head;
3709
3710 while (tcb != NULL && freed < NUM_TCB) {
3711 struct tcb *next = tcb->next;
3712
3713 adapter->tx_ring.send_head = next;
3714
3715 if (next == NULL)
3716 adapter->tx_ring.send_tail = NULL;
3717
3718 adapter->tx_ring.used--;
3719
3720 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3721
3722 freed++;
3723 free_send_packet(adapter, tcb);
3724
3725 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3726
3727 tcb = adapter->tx_ring.send_head;
3728 }
3729
3730 WARN_ON(freed == NUM_TCB);
3731
3732 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3733
3734 adapter->tx_ring.used = 0;
3735}
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
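/**
 * et131x_handle_send_interrupt - Interrupt handler for send processing
 * @adapter: pointer to our adapter
 *
 * Reclaims the TCBs for descriptors the hardware has finished with and
 * wakes the transmit queue once enough TCBs are free again.
 */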
3746void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3747{
3748 unsigned long flags;
3749 u32 serviced;
3750 struct tcb *tcb;
3751 u32 index;
3752
3753 serviced = readl(&adapter->regs->txdma.new_service_complete);
3754 index = INDEX10(serviced);
3755
3756
3757
3758
3759 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3760
3761 tcb = adapter->tx_ring.send_head;
3762
3763 while (tcb &&
3764 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3765 index < INDEX10(tcb->index)) {
3766 adapter->tx_ring.used--;
3767 adapter->tx_ring.send_head = tcb->next;
3768 if (tcb->next == NULL)
3769 adapter->tx_ring.send_tail = NULL;
3770
3771 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3772 free_send_packet(adapter, tcb);
3773 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3774
3775
3776 tcb = adapter->tx_ring.send_head;
3777 }
3778 while (tcb &&
3779 !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3780 && index > (tcb->index & ET_DMA10_MASK)) {
3781 adapter->tx_ring.used--;
3782 adapter->tx_ring.send_head = tcb->next;
3783 if (tcb->next == NULL)
3784 adapter->tx_ring.send_tail = NULL;
3785
3786 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3787 free_send_packet(adapter, tcb);
3788 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3789
3790
3791 tcb = adapter->tx_ring.send_head;
3792 }
3793
3794
3795 if (adapter->tx_ring.used <= NUM_TCB / 3)
3796 netif_wake_queue(adapter->netdev);
3797
3798 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3799}
3800
3801
3802
3803static int et131x_get_settings(struct net_device *netdev,
3804 struct ethtool_cmd *cmd)
3805{
3806 struct et131x_adapter *adapter = netdev_priv(netdev);
3807
3808 return phy_ethtool_gset(adapter->phydev, cmd);
3809}
3810
3811static int et131x_set_settings(struct net_device *netdev,
3812 struct ethtool_cmd *cmd)
3813{
3814 struct et131x_adapter *adapter = netdev_priv(netdev);
3815
3816 return phy_ethtool_sset(adapter->phydev, cmd);
3817}
3818
3819static int et131x_get_regs_len(struct net_device *netdev)
3820{
3821#define ET131X_REGS_LEN 256
3822 return ET131X_REGS_LEN * sizeof(u32);
3823}
3824
3825static void et131x_get_regs(struct net_device *netdev,
3826 struct ethtool_regs *regs, void *regs_data)
3827{
3828 struct et131x_adapter *adapter = netdev_priv(netdev);
3829 struct address_map __iomem *aregs = adapter->regs;
3830 u32 *regs_buff = regs_data;
3831 u32 num = 0;
3832
3833 memset(regs_data, 0, et131x_get_regs_len(netdev));
3834
3835 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3836 adapter->pdev->device;
3837
3838
	et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);

	et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);

	et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3871
3872
3873 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3874 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3875 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3876 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3877 regs_buff[num++] = readl(&aregs->global.pm_csr);
3878 regs_buff[num++] = adapter->stats.interrupt_status;
3879 regs_buff[num++] = readl(&aregs->global.int_mask);
3880 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3881 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3882 regs_buff[num++] = readl(&aregs->global.sw_reset);
3883 regs_buff[num++] = readl(&aregs->global.slv_timer);
3884 regs_buff[num++] = readl(&aregs->global.msi_config);
3885 regs_buff[num++] = readl(&aregs->global.loopback);
3886 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3887
3888
3889 regs_buff[num++] = readl(&aregs->txdma.csr);
3890 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3891 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3892 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3893 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3894 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3895 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3896 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3897 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3898 regs_buff[num++] = readl(&aregs->txdma.service_request);
3899 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3900 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3901 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3902 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3903 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3904 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3905 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3906 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3907 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3908 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3909 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3910 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3911 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3912 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3913 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3914 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3915
3916
3917 regs_buff[num++] = readl(&aregs->rxdma.csr);
3918 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3919 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3920 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3921 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3922 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3923 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3924 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3925 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3926 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3927 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3928 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3929 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3930 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3931 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3932 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3933 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3934 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3935 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3936 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3937 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3938 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3939 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3940 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3941 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3942 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3943 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3944 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3945 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3946}
3947
3948#define ET131X_DRVINFO_LEN 32
3949static void et131x_get_drvinfo(struct net_device *netdev,
3950 struct ethtool_drvinfo *info)
3951{
3952 struct et131x_adapter *adapter = netdev_priv(netdev);
3953
3954 strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3955 strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3956 strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3957}
3958
3959static struct ethtool_ops et131x_ethtool_ops = {
3960 .get_settings = et131x_get_settings,
3961 .set_settings = et131x_set_settings,
3962 .get_drvinfo = et131x_get_drvinfo,
3963 .get_regs_len = et131x_get_regs_len,
3964 .get_regs = et131x_get_regs,
3965 .get_link = ethtool_op_get_link,
3966};
3967
3968void et131x_set_ethtool_ops(struct net_device *netdev)
3969{
3970 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
3971}
3972
3973
3974
3975
3976
3977
3978
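/**
 * et131x_hwaddr_init - set up the MAC address on the ET1310
 * @adapter: pointer to our private adapter structure
 *
 * If the EEPROM-supplied address is blank, the default MAC is kept and its
 * last byte is randomised; otherwise the EEPROM address is used as-is.
 */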
3979void et131x_hwaddr_init(struct et131x_adapter *adapter)
3980{
3981
3982
3983
3984
3985 if (adapter->rom_addr[0] == 0x00 &&
3986 adapter->rom_addr[1] == 0x00 &&
3987 adapter->rom_addr[2] == 0x00 &&
3988 adapter->rom_addr[3] == 0x00 &&
3989 adapter->rom_addr[4] == 0x00 &&
3990 adapter->rom_addr[5] == 0x00) {
3991
3992
3993
3994
3995
3996 get_random_bytes(&adapter->addr[5], 1);
3997
3998
3999
4000
4001
4002 memcpy(adapter->rom_addr,
4003 adapter->addr, ETH_ALEN);
4004 } else {
4005
4006
4007
4008
4009 memcpy(adapter->addr,
4010 adapter->rom_addr, ETH_ALEN);
4011 }
4012}
4013
4014
4015
4016
4017
4018
4019
4020
4021
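/**
 * et131x_pci_init - initial PCI setup
 * @adapter: pointer to our private adapter structure
 * @pdev: our PCI device
 *
 * Initialises the EEPROM, tunes the ACK/NAK and replay timers, the L0/L1
 * latency and the max read size, then obtains the MAC address from PCI
 * config space (or synthesises one).  Returns 0 on success, -EIO on failure.
 */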
4022static int et131x_pci_init(struct et131x_adapter *adapter,
4023 struct pci_dev *pdev)
4024{
4025 int i;
4026 u8 max_payload;
4027 u8 read_size_reg;
4028
4029 if (et131x_init_eeprom(adapter) < 0)
4030 return -EIO;
4031
4032
4033
4034
4035 if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
4036 dev_err(&pdev->dev,
4037 "Could not read PCI config space for Max Payload Size\n");
4038 return -EIO;
4039 }
4040
4041
4042 max_payload &= 0x07;
4043
4044 if (max_payload < 2) {
4045 static const u16 acknak[2] = { 0x76, 0xD0 };
4046 static const u16 replay[2] = { 0x1E0, 0x2ED };
4047
4048 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
4049 acknak[max_payload])) {
4050 dev_err(&pdev->dev,
4051 "Could not write PCI config space for ACK/NAK\n");
4052 return -EIO;
4053 }
4054 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
4055 replay[max_payload])) {
4056 dev_err(&pdev->dev,
4057 "Could not write PCI config space for Replay Timer\n");
4058 return -EIO;
4059 }
4060 }
4061
4062
4063
4064
4065 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
4066 dev_err(&pdev->dev,
4067 "Could not write PCI config space for Latency Timers\n");
4068 return -EIO;
4069 }
4070
4071
4072 if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
4073 dev_err(&pdev->dev,
4074 "Could not read PCI config space for Max read size\n");
4075 return -EIO;
4076 }
4077
4078 read_size_reg &= 0x8f;
4079 read_size_reg |= 0x40;
4080
4081 if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
4082 dev_err(&pdev->dev,
4083 "Could not write PCI config space for Max read size\n");
4084 return -EIO;
4085 }
4086
4087
4088
4089
4090 if (!adapter->has_eeprom) {
4091 et131x_hwaddr_init(adapter);
4092 return 0;
4093 }
4094
4095 for (i = 0; i < ETH_ALEN; i++) {
4096 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
4097 adapter->rom_addr + i)) {
4098 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
4099 return -EIO;
4100 }
4101 }
4102 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
4103 return 0;
4104}
4105
4106
4107
4108
4109
4110
4111
4112
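/**
 * et131x_error_timer_handler - The periodic error timer routine
 * @data: timer-specific variable; here a pointer to our adapter structure
 *
 * Updates the MAC statistics, manages PHY coma mode while the link stays
 * down and re-arms itself.
 */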
4113void et131x_error_timer_handler(unsigned long data)
4114{
4115 struct et131x_adapter *adapter = (struct et131x_adapter *) data;
4116 struct phy_device *phydev = adapter->phydev;
4117
4118 if (et1310_in_phy_coma(adapter)) {
4119
4120
4121
4122 et1310_disable_phy_coma(adapter);
4123 adapter->boot_coma = 20;
4124 } else {
4125 et1310_update_macstat_host_counters(adapter);
4126 }
4127
4128 if (!phydev->link && adapter->boot_coma < 11)
4129 adapter->boot_coma++;
4130
4131 if (adapter->boot_coma == 10) {
4132 if (!phydev->link) {
4133 if (!et1310_in_phy_coma(adapter)) {
4134
4135
4136
4137 et131x_enable_interrupts(adapter);
4138 et1310_enable_phy_coma(adapter);
4139 }
4140 }
4141 }
4142
4143
4144 mod_timer(&adapter->error_timer, jiffies +
4145 TX_ERROR_PERIOD * HZ / 1000);
4146}
4147
4148
4149
4150
4151
4152
4153
4154
4155
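/**
 * et131x_adapter_memory_alloc - Allocate all memory blocks used by the driver
 * @adapter: pointer to our private adapter structure
 *
 * Allocates transmit and receive DMA memory and initializes the receive
 * structures.  Returns 0 on success, errno on failure.
 */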
4156int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
4157{
4158 int status;
4159
4160
4161 status = et131x_tx_dma_memory_alloc(adapter);
4162 if (status != 0) {
4163 dev_err(&adapter->pdev->dev,
4164 "et131x_tx_dma_memory_alloc FAILED\n");
4165 return status;
4166 }
4167
4168 status = et131x_rx_dma_memory_alloc(adapter);
4169 if (status != 0) {
4170 dev_err(&adapter->pdev->dev,
4171 "et131x_rx_dma_memory_alloc FAILED\n");
4172 et131x_tx_dma_memory_free(adapter);
4173 return status;
4174 }
4175
4176
4177 status = et131x_init_recv(adapter);
4178 if (status != 0) {
4179 dev_err(&adapter->pdev->dev,
4180 "et131x_init_recv FAILED\n");
4181 et131x_tx_dma_memory_free(adapter);
4182 et131x_rx_dma_memory_free(adapter);
4183 }
4184 return status;
4185}
4186
4187
4188
4189
4190
4191void et131x_adapter_memory_free(struct et131x_adapter *adapter)
4192{
4193
4194 et131x_tx_dma_memory_free(adapter);
4195 et131x_rx_dma_memory_free(adapter);
4196}
4197
4198static void et131x_adjust_link(struct net_device *netdev)
4199{
4200 struct et131x_adapter *adapter = netdev_priv(netdev);
4201 struct phy_device *phydev = adapter->phydev;
4202
4203 if (netif_carrier_ok(netdev)) {
4204 adapter->boot_coma = 20;
4205
4206 if (phydev && phydev->speed == SPEED_10) {
4207
4208
4209
4210
4211
4212
4213 u16 register18;
4214
			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
4217 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4218 register18 | 0x4);
4219 et131x_mii_write(adapter, PHY_INDEX_REG,
4220 register18 | 0x8402);
4221 et131x_mii_write(adapter, PHY_DATA_REG,
4222 register18 | 511);
4223 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4224 register18);
4225 }
4226
4227 et1310_config_flow_control(adapter);
4228
4229 if (phydev && phydev->speed == SPEED_1000 &&
4230 adapter->registry_jumbo_packet > 2048) {
4231 u16 reg;
4232
			et131x_mii_read(adapter, PHY_CONFIG, &reg);
4234 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
4235 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
4236 et131x_mii_write(adapter, PHY_CONFIG, reg);
4237 }
4238
4239 et131x_set_rx_dma_timer(adapter);
4240 et1310_config_mac_regs2(adapter);
4241 }
4242
4243 if (phydev && phydev->link != adapter->link) {
4244
4245
4246
4247
4248
4249 if (et1310_in_phy_coma(adapter))
4250 et1310_disable_phy_coma(adapter);
4251
4252 if (phydev->link) {
4253 adapter->boot_coma = 20;
4254 } else {
4255 dev_warn(&adapter->pdev->dev,
4256 "Link down - cable problem ?\n");
4257 adapter->boot_coma = 0;
4258
4259 if (phydev->speed == SPEED_10) {
4260
4261
4262
4263
4264
4265 u16 register18;
4266
				et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
						&register18);
4269 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4270 register18 | 0x4);
4271 et131x_mii_write(adapter, PHY_INDEX_REG,
4272 register18 | 0x8402);
4273 et131x_mii_write(adapter, PHY_DATA_REG,
4274 register18 | 511);
4275 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4276 register18);
4277 }
4278
4279
4280 et131x_free_busy_send_packets(adapter);
4281
4282
4283 et131x_init_send(adapter);
4284
4285
4286
4287
4288
4289
4290
4291 et131x_soft_reset(adapter);
4292
4293
4294 et131x_adapter_setup(adapter);
4295
4296
4297 et131x_disable_txrx(netdev);
4298 et131x_enable_txrx(netdev);
4299 }
4300
4301 adapter->link = phydev->link;
4302
4303 phy_print_status(phydev);
4304 }
4305}
4306
4307static int et131x_mii_probe(struct net_device *netdev)
4308{
4309 struct et131x_adapter *adapter = netdev_priv(netdev);
4310 struct phy_device *phydev = NULL;
4311
4312 phydev = phy_find_first(adapter->mii_bus);
4313 if (!phydev) {
4314 dev_err(&adapter->pdev->dev, "no PHY found\n");
4315 return -ENODEV;
4316 }
4317
4318 phydev = phy_connect(netdev, dev_name(&phydev->dev),
4319 &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
4320
4321 if (IS_ERR(phydev)) {
4322 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
4323 return PTR_ERR(phydev);
4324 }
4325
4326 phydev->supported &= (SUPPORTED_10baseT_Half
4327 | SUPPORTED_10baseT_Full
4328 | SUPPORTED_100baseT_Half
4329 | SUPPORTED_100baseT_Full
4330 | SUPPORTED_Autoneg
4331 | SUPPORTED_MII
4332 | SUPPORTED_TP);
4333
4334 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
4335 phydev->supported |= SUPPORTED_1000baseT_Full;
4336
4337 phydev->advertising = phydev->supported;
4338 adapter->phydev = phydev;
4339
4340 dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
4341 "(mii_bus:phy_addr=%s)\n",
4342 phydev->drv->name, dev_name(&phydev->dev));
4343
4344 return 0;
4345}
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
4356 struct pci_dev *pdev)
4357{
4358 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
4359
4360 struct et131x_adapter *adapter;
4361
4362
4363 adapter = netdev_priv(netdev);
4364 adapter->pdev = pci_dev_get(pdev);
4365 adapter->netdev = netdev;
4366
4367
4368 netdev->irq = pdev->irq;
4369 netdev->base_addr = pci_resource_start(pdev, 0);
4370
4371
4372 spin_lock_init(&adapter->lock);
4373 spin_lock_init(&adapter->tcb_send_qlock);
4374 spin_lock_init(&adapter->tcb_ready_qlock);
4375 spin_lock_init(&adapter->send_hw_lock);
4376 spin_lock_init(&adapter->rcv_lock);
4377 spin_lock_init(&adapter->rcv_pend_lock);
4378 spin_lock_init(&adapter->fbr_lock);
4379 spin_lock_init(&adapter->phy_lock);
4380
4381 adapter->registry_jumbo_packet = 1514;
4382
4383
4384 memcpy(adapter->addr, default_mac, ETH_ALEN);
4385
4386 return adapter;
4387}
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397static void __devexit et131x_pci_remove(struct pci_dev *pdev)
4398{
4399 struct net_device *netdev = pci_get_drvdata(pdev);
4400 struct et131x_adapter *adapter = netdev_priv(netdev);
4401
4402 unregister_netdev(netdev);
4403 mdiobus_unregister(adapter->mii_bus);
4404 kfree(adapter->mii_bus->irq);
4405 mdiobus_free(adapter->mii_bus);
4406
4407 et131x_adapter_memory_free(adapter);
4408 iounmap(adapter->regs);
4409 pci_dev_put(pdev);
4410
4411 free_netdev(netdev);
4412 pci_release_regions(pdev);
4413 pci_disable_device(pdev);
4414}
4415
4416
4417
4418
4419
4420void et131x_up(struct net_device *netdev)
4421{
4422 struct et131x_adapter *adapter = netdev_priv(netdev);
4423
4424 et131x_enable_txrx(netdev);
4425 phy_start(adapter->phydev);
4426}
4427
4428
4429
4430
4431
4432void et131x_down(struct net_device *netdev)
4433{
4434 struct et131x_adapter *adapter = netdev_priv(netdev);
4435
4436
4437 netdev->trans_start = jiffies;
4438
4439 phy_stop(adapter->phydev);
4440 et131x_disable_txrx(netdev);
4441}
4442
4443#ifdef CONFIG_PM_SLEEP
4444static int et131x_suspend(struct device *dev)
4445{
4446 struct pci_dev *pdev = to_pci_dev(dev);
4447 struct net_device *netdev = pci_get_drvdata(pdev);
4448
4449 if (netif_running(netdev)) {
4450 netif_device_detach(netdev);
4451 et131x_down(netdev);
4452 pci_save_state(pdev);
4453 }
4454
4455 return 0;
4456}
4457
4458static int et131x_resume(struct device *dev)
4459{
4460 struct pci_dev *pdev = to_pci_dev(dev);
4461 struct net_device *netdev = pci_get_drvdata(pdev);
4462
4463 if (netif_running(netdev)) {
4464 pci_restore_state(pdev);
4465 et131x_up(netdev);
4466 netif_device_attach(netdev);
4467 }
4468
4469 return 0;
4470}
4471
4472static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4473#define ET131X_PM_OPS (&et131x_pm_ops)
4474#else
4475#define ET131X_PM_OPS NULL
4476#endif
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
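/**
 * et131x_isr - The Interrupt Service Routine for the driver
 * @irq: the IRQ on which the interrupt was received
 * @dev_id: device-specific info (here a pointer to our net_device)
 *
 * Disables further interrupts, records the interrupt status and schedules
 * the bottom-half worker.  Returns IRQ_HANDLED if the interrupt was ours,
 * IRQ_NONE otherwise.
 */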
4487irqreturn_t et131x_isr(int irq, void *dev_id)
4488{
4489 bool handled = true;
4490 struct net_device *netdev = (struct net_device *)dev_id;
4491 struct et131x_adapter *adapter = NULL;
4492 u32 status;
4493
4494 if (!netif_device_present(netdev)) {
4495 handled = false;
4496 goto out;
4497 }
4498
4499 adapter = netdev_priv(netdev);
4500
4501
4502
4503
4504
4505
4506 et131x_disable_interrupts(adapter);
4507
4508
4509
4510
4511 status = readl(&adapter->regs->global.int_status);
4512
4513 if (adapter->flowcontrol == FLOW_TXONLY ||
4514 adapter->flowcontrol == FLOW_BOTH) {
4515 status &= ~INT_MASK_ENABLE;
4516 } else {
4517 status &= ~INT_MASK_ENABLE_NO_FLOW;
4518 }
4519
4520
4521 if (!status) {
4522 handled = false;
4523 et131x_enable_interrupts(adapter);
4524 goto out;
4525 }
4526
4527
4528
4529 if (status & ET_INTR_WATCHDOG) {
4530 struct tcb *tcb = adapter->tx_ring.send_head;
4531
4532 if (tcb)
4533 if (++tcb->stale > 1)
4534 status |= ET_INTR_TXDMA_ISR;
4535
4536 if (adapter->rx_ring.unfinished_receives)
4537 status |= ET_INTR_RXDMA_XFR_DONE;
4538 else if (tcb == NULL)
4539 writel(0, &adapter->regs->global.watchdog_timer);
4540
4541 status &= ~ET_INTR_WATCHDOG;
4542 }
4543
4544 if (status == 0) {
4545
4546
4547
4548
4549
4550 et131x_enable_interrupts(adapter);
4551 goto out;
4552 }
4553
4554
4555
4556
4557
4558 adapter->stats.interrupt_status = status;
4559
4560
4561
4562
4563
4564 schedule_work(&adapter->task);
4565out:
4566 return IRQ_RETVAL(handled);
4567}
4568
4569
4570
4571
4572
4573
4574
4575
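/**
 * et131x_isr_handler - The ISR handler (bottom half)
 * @work: pointer to the work_struct embedded in our adapter
 *
 * Services the interrupt causes recorded by et131x_isr() outside interrupt
 * context, then re-enables interrupts.
 */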
4576void et131x_isr_handler(struct work_struct *work)
4577{
4578 struct et131x_adapter *adapter =
4579 container_of(work, struct et131x_adapter, task);
4580 u32 status = adapter->stats.interrupt_status;
4581 struct address_map __iomem *iomem = adapter->regs;
4582
4583
4584
4585
4586
4587
4588
4589 if (status & ET_INTR_TXDMA_ISR)
4590 et131x_handle_send_interrupt(adapter);
4591
4592
4593 if (status & ET_INTR_RXDMA_XFR_DONE)
4594 et131x_handle_recv_interrupt(adapter);
4595
4596 status &= 0xffffffd7;
4597
4598 if (status) {
4599
4600 if (status & ET_INTR_TXDMA_ERR) {
4601 u32 txdma_err;
4602
4603
4604 txdma_err = readl(&iomem->txdma.tx_dma_error);
4605
4606 dev_warn(&adapter->pdev->dev,
4607 "TXDMA_ERR interrupt, error = %d\n",
4608 txdma_err);
4609 }
4610
4611
4612 if (status &
4613 (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632 if (adapter->flowcontrol == FLOW_TXONLY ||
4633 adapter->flowcontrol == FLOW_BOTH) {
4634 u32 pm_csr;
4635
4636
4637
4638
4639
4640 pm_csr = readl(&iomem->global.pm_csr);
4641 if (!et1310_in_phy_coma(adapter))
4642 writel(3, &iomem->txmac.bp_ctrl);
4643 }
4644 }
4645
4646
4647 if (status & ET_INTR_RXDMA_STAT_LOW) {
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658 }
4659
4660
4661 if (status & ET_INTR_RXDMA_ERR) {
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682 dev_warn(&adapter->pdev->dev,
4683 "RxDMA_ERR interrupt, error %x\n",
4684 readl(&iomem->txmac.tx_test));
4685 }
4686
4687
4688 if (status & ET_INTR_WOL) {
4689
4690
4691
4692
4693
4694
4695
4696 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4697 }
4698
4699
4700 if (status & ET_INTR_TXMAC) {
4701 u32 err = readl(&iomem->txmac.err);
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713 dev_warn(&adapter->pdev->dev,
4714 "TXMAC interrupt, error 0x%08x\n",
4715 err);
4716
4717
4718
4719
4720
4721 }
4722
4723
4724 if (status & ET_INTR_RXMAC) {
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734 dev_warn(&adapter->pdev->dev,
4735 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
4736 readl(&iomem->rxmac.err_reg));
4737
4738 dev_warn(&adapter->pdev->dev,
4739 "Enable 0x%08x, Diag 0x%08x\n",
4740 readl(&iomem->rxmac.ctrl),
4741 readl(&iomem->rxmac.rxq_diag));
4742
4743
4744
4745
4746
4747
4748 }
4749
4750
4751 if (status & ET_INTR_MAC_STAT) {
4752
4753
4754
4755
4756
4757
4758 et1310_handle_macstat_interrupt(adapter);
4759 }
4760
4761
4762 if (status & ET_INTR_SLV_TIMEOUT) {
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772 }
4773 }
4774 et131x_enable_interrupts(adapter);
4775}
4776
4777
4778
4779
4780
4781
4782
4783
4784
4785static struct net_device_stats *et131x_stats(struct net_device *netdev)
4786{
4787 struct et131x_adapter *adapter = netdev_priv(netdev);
4788 struct net_device_stats *stats = &adapter->net_stats;
4789 struct ce_stats *devstat = &adapter->stats;
4790
4791 stats->rx_errors = devstat->rx_length_errs +
4792 devstat->rx_align_errs +
4793 devstat->rx_crc_errs +
4794 devstat->rx_code_violations +
4795 devstat->rx_other_errs;
4796 stats->tx_errors = devstat->tx_max_pkt_errs;
4797 stats->multicast = devstat->multicast_pkts_rcvd;
4798 stats->collisions = devstat->tx_collisions;
4799
4800 stats->rx_length_errors = devstat->rx_length_errs;
4801 stats->rx_over_errors = devstat->rx_overflows;
4802 stats->rx_crc_errors = devstat->rx_crc_errs;
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823 return stats;
4824}
4825
4826
4827
4828
4829
4830
4831
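/**
 * et131x_open - Open the device for use
 * @netdev: device to be opened
 *
 * Starts the error timer, registers the IRQ and brings the interface up.
 * Returns 0 on success, errno on failure.
 */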
4832int et131x_open(struct net_device *netdev)
4833{
4834 int result = 0;
4835 struct et131x_adapter *adapter = netdev_priv(netdev);
4836
4837
4838 init_timer(&adapter->error_timer);
4839 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4840 adapter->error_timer.function = et131x_error_timer_handler;
4841 adapter->error_timer.data = (unsigned long)adapter;
4842 add_timer(&adapter->error_timer);
4843
4844
4845 result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
4846 netdev->name, netdev);
4847 if (result) {
4848 dev_err(&adapter->pdev->dev, "could not register IRQ %d\n",
4849 netdev->irq);
4850 return result;
4851 }
4852
4853 adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
4854
4855 et131x_up(netdev);
4856
4857 return result;
4858}
4859
4860
4861
4862
4863
4864
4865
4866int et131x_close(struct net_device *netdev)
4867{
4868 struct et131x_adapter *adapter = netdev_priv(netdev);
4869
4870 et131x_down(netdev);
4871
4872 adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
4873 free_irq(netdev->irq, netdev);
4874
4875
4876 return del_timer_sync(&adapter->error_timer);
4877}
4878
4879
4880
4881
4882
4883
4884
4885
4886
4887static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4888 int cmd)
4889{
4890 struct et131x_adapter *adapter = netdev_priv(netdev);
4891
4892 if (!adapter->phydev)
4893 return -EINVAL;
4894
4895 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4896}
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4907{
4908 int status = 0;
4909 uint32_t filter = adapter->packet_filter;
4910 u32 ctrl;
4911 u32 pf_ctrl;
4912
4913 ctrl = readl(&adapter->regs->rxmac.ctrl);
4914 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
4915
4916
4917
4918
4919 ctrl |= 0x04;
4920
4921
4922
4923
4924 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4925 pf_ctrl &= ~7;
4926 else {
4927
4928
4929
4930
4931
4932 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4933 pf_ctrl &= ~2;
4934 else {
4935 et1310_setup_device_for_multicast(adapter);
4936 pf_ctrl |= 2;
4937 ctrl &= ~0x04;
4938 }
4939
4940
4941 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4942 et1310_setup_device_for_unicast(adapter);
4943 pf_ctrl |= 4;
4944 ctrl &= ~0x04;
4945 }
4946
4947
4948 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4949 pf_ctrl |= 1;
4950 ctrl &= ~0x04;
4951 } else
4952 pf_ctrl &= ~1;
4953
4954
4955
4956
4957
4958 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4959 writel(ctrl, &adapter->regs->rxmac.ctrl);
4960 }
4961 return status;
4962}
4963
4964
4965
4966
4967
4968static void et131x_multicast(struct net_device *netdev)
4969{
4970 struct et131x_adapter *adapter = netdev_priv(netdev);
4971 uint32_t packet_filter = 0;
4972 unsigned long flags;
4973 struct netdev_hw_addr *ha;
4974 int i;
4975
4976 spin_lock_irqsave(&adapter->lock, flags);
4977
4978
4979
4980
4981
4982 packet_filter = adapter->packet_filter;
4983
4984
4985
4986
4987
4988
4989 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4990
4991
4992
4993
4994
4995 if (netdev->flags & IFF_PROMISC)
4996 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4997 else
4998 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4999
5000 if (netdev->flags & IFF_ALLMULTI)
5001 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
5002
5003 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
5004 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
5005
5006 if (netdev_mc_count(netdev) < 1) {
5007 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
5008 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
5009 } else
5010 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
5011
5012
5013 i = 0;
5014 netdev_for_each_mc_addr(ha, netdev) {
5015 if (i == NIC_MAX_MCAST_LIST)
5016 break;
5017 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
5018 }
5019 adapter->multicast_addr_count = i;
5020
5021
5022
5023
5024
5025
5026
5027 if (packet_filter != adapter->packet_filter) {
5028
5029 et131x_set_packet_filter(adapter);
5030 }
5031 spin_unlock_irqrestore(&adapter->lock, flags);
5032}
5033
5034
5035
5036
5037
5038
5039
5040
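/**
 * et131x_tx - The handler to tx a packet on the device
 * @skb: data to be transmitted
 * @netdev: device on which data is to be transmitted
 *
 * Stops the queue when the TCB pool is nearly exhausted and translates the
 * send status into NETDEV_TX_* return codes.
 */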
5041static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
5042{
5043 int status = 0;
5044 struct et131x_adapter *adapter = netdev_priv(netdev);
5045
5046
5047 if (adapter->tx_ring.used >= NUM_TCB - 1 &&
5048 !netif_queue_stopped(netdev))
5049 netif_stop_queue(netdev);
5050
5051
5052 netdev->trans_start = jiffies;
5053
5054
5055 status = et131x_send_packets(skb, netdev);
5056
5057
5058 if (status != 0) {
5059 if (status == -ENOMEM)
5060 status = NETDEV_TX_BUSY;
5061 else
5062 status = NETDEV_TX_OK;
5063 }
5064 return status;
5065}
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075static void et131x_tx_timeout(struct net_device *netdev)
5076{
5077 struct et131x_adapter *adapter = netdev_priv(netdev);
5078 struct tcb *tcb;
5079 unsigned long flags;
5080
5081
	if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
5083 return;
5084
5085
5086
5087
5088 if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
5089 return;
5090
5091
5092 if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
5093 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
5094 return;
5095 }
5096
5097
5098 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
5099
5100 tcb = adapter->tx_ring.send_head;
5101
5102 if (tcb != NULL) {
5103 tcb->count++;
5104
5105 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
5106 spin_unlock_irqrestore(&adapter->tcb_send_qlock,
5107 flags);
5108
5109 dev_warn(&adapter->pdev->dev,
5110 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
5111 tcb->index,
5112 tcb->flags);
5113
5114 adapter->net_stats.tx_errors++;
5115
5116
5117 et131x_disable_txrx(netdev);
5118 et131x_enable_txrx(netdev);
5119 return;
5120 }
5121 }
5122
5123 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
5124}
5125
5126
5127
5128
5129
5130
5131
5132
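/**
 * et131x_change_mtu - The handler called to change the MTU for the device
 * @netdev: device whose MTU is to be changed
 * @new_mtu: the desired MTU
 *
 * Re-allocates the DMA memory and reconfigures the adapter for the new
 * jumbo packet size.  Returns 0 on success, errno on failure.
 */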
5133static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
5134{
5135 int result = 0;
5136 struct et131x_adapter *adapter = netdev_priv(netdev);
5137
5138
5139 if (new_mtu < 64 || new_mtu > 9216)
5140 return -EINVAL;
5141
5142 et131x_disable_txrx(netdev);
5143 et131x_handle_send_interrupt(adapter);
5144 et131x_handle_recv_interrupt(adapter);
5145
5146
5147 netdev->mtu = new_mtu;
5148
5149
5150 et131x_adapter_memory_free(adapter);
5151
5152
5153 adapter->registry_jumbo_packet = new_mtu + 14;
5154 et131x_soft_reset(adapter);
5155
5156
5157 result = et131x_adapter_memory_alloc(adapter);
5158 if (result != 0) {
5159 dev_warn(&adapter->pdev->dev,
5160 "Change MTU failed; couldn't re-alloc DMA memory\n");
5161 return result;
5162 }
5163
5164 et131x_init_send(adapter);
5165
5166 et131x_hwaddr_init(adapter);
5167 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5168
5169
5170 et131x_adapter_setup(adapter);
5171
5172 et131x_enable_txrx(netdev);
5173
5174 return result;
5175}
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
5187{
5188 int result = 0;
5189 struct et131x_adapter *adapter = netdev_priv(netdev);
5190 struct sockaddr *address = new_mac;
5191
5192
5193
5194 if (adapter == NULL)
5195 return -ENODEV;
5196
5197
5198 if (!is_valid_ether_addr(address->sa_data))
5199 return -EINVAL;
5200
5201 et131x_disable_txrx(netdev);
5202 et131x_handle_send_interrupt(adapter);
5203 et131x_handle_recv_interrupt(adapter);
5204
5205
5206
5207
5208 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
5209
5210 printk(KERN_INFO "%s: Setting MAC address to %pM\n",
5211 netdev->name, netdev->dev_addr);
5212
5213
5214 et131x_adapter_memory_free(adapter);
5215
5216 et131x_soft_reset(adapter);
5217
5218
5219 result = et131x_adapter_memory_alloc(adapter);
5220 if (result != 0) {
5221 dev_err(&adapter->pdev->dev,
5222 "Change MAC failed; couldn't re-alloc DMA memory\n");
5223 return result;
5224 }
5225
5226 et131x_init_send(adapter);
5227
5228 et131x_hwaddr_init(adapter);
5229
5230
5231 et131x_adapter_setup(adapter);
5232
5233 et131x_enable_txrx(netdev);
5234
5235 return result;
5236}
5237
5238static const struct net_device_ops et131x_netdev_ops = {
5239 .ndo_open = et131x_open,
5240 .ndo_stop = et131x_close,
5241 .ndo_start_xmit = et131x_tx,
5242 .ndo_set_rx_mode = et131x_multicast,
5243 .ndo_tx_timeout = et131x_tx_timeout,
5244 .ndo_change_mtu = et131x_change_mtu,
5245 .ndo_set_mac_address = et131x_set_mac_addr,
5246 .ndo_validate_addr = eth_validate_addr,
5247 .ndo_get_stats = et131x_stats,
5248 .ndo_do_ioctl = et131x_ioctl,
5249};
5250
5251
5252
5253
5254
5255
5256
5257
5258
5259
5260struct net_device *et131x_device_alloc(void)
5261{
5262 struct net_device *netdev;
5263
5264
5265 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
5266
5267 if (!netdev) {
5268 printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
5269 return NULL;
5270 }
5271
5272
5273
5274
5275
5276 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
5277 netdev->netdev_ops = &et131x_netdev_ops;
5278
5279
5280
5281
5282 return netdev;
5283}
5284
5285
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
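/**
 * et131x_pci_setup - Perform device initialization
 * @pdev: a pointer to the device's pci_dev structure
 * @ent: this device's entry in the pci_device_id table
 *
 * Called when a PCI device matching the et131x device table is found.
 * Maps the registers, allocates adapter memory, registers the MII bus and
 * registers the net device.  Returns 0 on success, negative errno on failure.
 */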
5297static int __devinit et131x_pci_setup(struct pci_dev *pdev,
5298 const struct pci_device_id *ent)
5299{
5300 int result;
5301 struct net_device *netdev;
5302 struct et131x_adapter *adapter;
5303 int ii;
5304
5305 result = pci_enable_device(pdev);
5306 if (result) {
5307 dev_err(&pdev->dev, "pci_enable_device() failed\n");
5308 goto err_out;
5309 }
5310
5311
5312 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5313 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
5314 goto err_disable;
5315 }
5316
5317 if (pci_request_regions(pdev, DRIVER_NAME)) {
5318 dev_err(&pdev->dev, "Can't get PCI resources\n");
5319 goto err_disable;
5320 }
5321
5322 pci_set_master(pdev);
5323
5324
5325 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
5326 result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5327 if (result) {
5328 dev_err(&pdev->dev,
5329 "Unable to obtain 64 bit DMA for consistent allocations\n");
5330 goto err_release_res;
5331 }
5332 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
5333 result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5334 if (result) {
5335 dev_err(&pdev->dev,
5336 "Unable to obtain 32 bit DMA for consistent allocations\n");
5337 goto err_release_res;
5338 }
5339 } else {
5340 dev_err(&pdev->dev, "No usable DMA addressing method\n");
5341 result = -EIO;
5342 goto err_release_res;
5343 }
5344
5345
5346 netdev = et131x_device_alloc();
5347 if (!netdev) {
5348 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
5349 result = -ENOMEM;
5350 goto err_release_res;
5351 }
5352
5353 SET_NETDEV_DEV(netdev, &pdev->dev);
5354 et131x_set_ethtool_ops(netdev);
5355
5356 adapter = et131x_adapter_init(netdev, pdev);
5357
5358
5359 et131x_pci_init(adapter, pdev);
5360
5361
5362 adapter->regs = pci_ioremap_bar(pdev, 0);
5363 if (!adapter->regs) {
5364 dev_err(&pdev->dev, "Cannot map device registers\n");
5365 result = -ENOMEM;
5366 goto err_free_dev;
5367 }
5368
5369
5370 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
5371
5372
5373 et131x_soft_reset(adapter);
5374
5375
5376 et131x_disable_interrupts(adapter);
5377
5378
5379 result = et131x_adapter_memory_alloc(adapter);
5380 if (result) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
5382 goto err_iounmap;
5383 }
5384
5385
5386 et131x_init_send(adapter);
5387
5388
5389 INIT_WORK(&adapter->task, et131x_isr_handler);
5390
5391
5392 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5393
5394
5395 adapter->boot_coma = 0;
5396 et1310_disable_phy_coma(adapter);
5397
5398
5399 adapter->mii_bus = mdiobus_alloc();
5400 if (!adapter->mii_bus) {
5401 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
5402 goto err_mem_free;
5403 }
5404
5405 adapter->mii_bus->name = "et131x_eth_mii";
5406 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
5407 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
5408 adapter->mii_bus->priv = netdev;
5409 adapter->mii_bus->read = et131x_mdio_read;
5410 adapter->mii_bus->write = et131x_mdio_write;
5411 adapter->mii_bus->reset = et131x_mdio_reset;
5412 adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
5413 if (!adapter->mii_bus->irq) {
5414 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
5415 goto err_mdio_free;
5416 }
5417
5418 for (ii = 0; ii < PHY_MAX_ADDR; ii++)
5419 adapter->mii_bus->irq[ii] = PHY_POLL;
5420
5421 if (mdiobus_register(adapter->mii_bus)) {
5422 dev_err(&pdev->dev, "failed to register MII bus\n");
5423 mdiobus_free(adapter->mii_bus);
5424 goto err_mdio_free_irq;
5425 }
5426
5427 if (et131x_mii_probe(netdev)) {
5428 dev_err(&pdev->dev, "failed to probe MII bus\n");
5429 goto err_mdio_unregister;
5430 }
5431
5432
5433 et131x_adapter_setup(adapter);
5434
5435
5436
5437
5438
5439
5440
5441
5442
5443 result = register_netdev(netdev);
5444 if (result != 0) {
5445 dev_err(&pdev->dev, "register_netdev() failed\n");
5446 goto err_mdio_unregister;
5447 }
5448
5449
5450
5451
5452
5453 pci_set_drvdata(pdev, netdev);
5454 pci_save_state(adapter->pdev);
5455
5456 return result;
5457
5458err_mdio_unregister:
5459 mdiobus_unregister(adapter->mii_bus);
5460err_mdio_free_irq:
5461 kfree(adapter->mii_bus->irq);
5462err_mdio_free:
5463 mdiobus_free(adapter->mii_bus);
5464err_mem_free:
5465 et131x_adapter_memory_free(adapter);
5466err_iounmap:
5467 iounmap(adapter->regs);
5468err_free_dev:
5469 pci_dev_put(pdev);
5470 free_netdev(netdev);
5471err_release_res:
5472 pci_release_regions(pdev);
5473err_disable:
5474 pci_disable_device(pdev);
5475err_out:
5476 return result;
5477}
5478
5479static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
5480 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
5481 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
5482 {0,}
5483};
5484MODULE_DEVICE_TABLE(pci, et131x_pci_table);
5485
5486static struct pci_driver et131x_driver = {
5487 .name = DRIVER_NAME,
5488 .id_table = et131x_pci_table,
5489 .probe = et131x_pci_setup,
5490 .remove = __devexit_p(et131x_pci_remove),
5491 .driver.pm = ET131X_PM_OPS,
5492};
5493
5494
5495
5496
5497
5498
5499static int __init et131x_init_module(void)
5500{
5501 return pci_register_driver(&et131x_driver);
5502}
5503
5504
5505
5506
5507static void __exit et131x_cleanup_module(void)
5508{
5509 pci_unregister_driver(&et131x_driver);
5510}
5511
5512module_init(et131x_init_module);
5513module_exit(et131x_cleanup_module);
5514
5515