/* et131x.c - Driver for the Agere Systems ET1310 10/100/1000 Base-T
 * PCI-Express Ethernet controller.
 */
56#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
57
58#include <linux/pci.h>
59#include <linux/init.h>
60#include <linux/module.h>
61#include <linux/types.h>
62#include <linux/kernel.h>
63
64#include <linux/sched.h>
65#include <linux/ptrace.h>
66#include <linux/slab.h>
67#include <linux/ctype.h>
68#include <linux/string.h>
69#include <linux/timer.h>
70#include <linux/interrupt.h>
71#include <linux/in.h>
72#include <linux/delay.h>
73#include <linux/bitops.h>
74#include <linux/io.h>
75
76#include <linux/netdevice.h>
77#include <linux/etherdevice.h>
78#include <linux/skbuff.h>
79#include <linux/if_arp.h>
80#include <linux/ioport.h>
81#include <linux/crc32.h>
82#include <linux/random.h>
83#include <linux/phy.h>
84
85#include "et131x.h"
86
87MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
88MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
89MODULE_LICENSE("Dual BSD/GPL");
90MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");
91
92
93#define MAX_NUM_REGISTER_POLLS 1000
94#define MAX_NUM_WRITE_RETRIES 2
95
96
97#define COUNTER_WRAP_16_BIT 0x10000
98#define COUNTER_WRAP_12_BIT 0x1000
99
100
101#define INTERNAL_MEM_SIZE 0x400
102#define INTERNAL_MEM_RX_OFFSET 0x1FF
103
104
105
106
107
108
109
110
111
112
113
114#define INT_MASK_DISABLE 0xffffffff
115
116
117
118
119
120#define INT_MASK_ENABLE 0xfffebf17
121#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7
122
123
124
125#define NIC_MIN_PACKET_SIZE 60
126
127
128#define NIC_MAX_MCAST_LIST 128
129
130
131#define ET131X_PACKET_TYPE_DIRECTED 0x0001
132#define ET131X_PACKET_TYPE_MULTICAST 0x0002
133#define ET131X_PACKET_TYPE_BROADCAST 0x0004
134#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
135#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010
136
137
138#define ET131X_TX_TIMEOUT (1 * HZ)
139#define NIC_SEND_HANG_THRESHOLD 0
140
141
142#define fMP_DEST_MULTI 0x00000001
143#define fMP_DEST_BROAD 0x00000002
144
145
146#define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004
147#define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
148
149
150#define fMP_ADAPTER_LOWER_POWER 0x00200000
151
152#define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
153#define fMP_ADAPTER_HARDWARE_ERROR 0x04000000
154
155#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
156
157
158#define ET1310_PCI_MAC_ADDRESS 0xA4
159#define ET1310_PCI_EEPROM_STATUS 0xB2
160#define ET1310_PCI_ACK_NACK 0xC0
161#define ET1310_PCI_REPLAY 0xC2
162#define ET1310_PCI_L0L1LATENCY 0xCF
163
164
165#define ET131X_PCI_DEVICE_ID_GIG 0xED00
166#define ET131X_PCI_DEVICE_ID_FAST 0xED01
167
168
169#define NANO_IN_A_MICRO 1000
170
171#define PARM_RX_NUM_BUFS_DEF 4
172#define PARM_RX_TIME_INT_DEF 10
173#define PARM_RX_MEM_END_DEF 0x2bc
174#define PARM_TX_TIME_INT_DEF 40
175#define PARM_TX_NUM_BUFS_DEF 4
176#define PARM_DMA_CACHE_DEF 0
177
178
179#define USE_FBR0 1
180#define FBR_CHUNKS 32
181#define MAX_DESC_PER_RING_RX 1024
182
183
184#ifdef USE_FBR0
185#define RFD_LOW_WATER_MARK 40
186#define NIC_DEFAULT_NUM_RFD 1024
187#define NUM_FBRS 2
188#else
189#define RFD_LOW_WATER_MARK 20
190#define NIC_DEFAULT_NUM_RFD 256
191#define NUM_FBRS 1
192#endif
193
194#define NIC_MIN_NUM_RFD 64
195#define NUM_PACKETS_HANDLED 256
196
197#define ALCATEL_MULTICAST_PKT 0x01000000
198#define ALCATEL_BROADCAST_PKT 0x02000000
199
200
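/* Free buffer ring descriptor, as laid out in host memory for the RX DMA
 * engine: a 64-bit buffer address split into low/high halves plus the
 * buffer index in word2.
 */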
201struct fbr_desc {
202 u32 addr_lo;
203 u32 addr_hi;
204 u32 word2;
205};
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250struct pkt_stat_desc {
251 u32 word0;
252 u32 word1;
253};
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283struct rx_status_block {
284 u32 word0;
285 u32 word1;
286};
287
288
289
290
291
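/* Host-side bookkeeping for one free buffer ring: per-entry virtual and
 * bus addresses, the descriptor ring itself, the chunked buffer
 * allocations behind it, and the local full-offset/size information.
 */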
292struct fbr_lookup {
293 void *virt[MAX_DESC_PER_RING_RX];
294 void *buffer1[MAX_DESC_PER_RING_RX];
295 void *buffer2[MAX_DESC_PER_RING_RX];
296 u32 bus_high[MAX_DESC_PER_RING_RX];
297 u32 bus_low[MAX_DESC_PER_RING_RX];
298 void *ring_virtaddr;
299 dma_addr_t ring_physaddr;
300 void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
301 dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
302 u64 real_physaddr;
303 u64 offset;
304 u32 local_full;
305 u32 num_entries;
306 u32 buffsize;
307};
308
309
310
311
312
313
314
315
316
317
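/* RX-side state: the free buffer rings, the packet status ring, the
 * status block written back by the hardware, and the list of RFDs
 * available for received packets.
 */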
318struct rx_ring {
319 struct fbr_lookup *fbr[NUM_FBRS];
320 void *ps_ring_virtaddr;
321 dma_addr_t ps_ring_physaddr;
322 u32 local_psr_full;
323 u32 psr_num_entries;
324
325 struct rx_status_block *rx_status_block;
326 dma_addr_t rx_status_bus;
327
328
329 struct list_head recv_list;
330 u32 num_ready_recv;
331
332 u32 num_rfd;
333
334 bool unfinished_receives;
335
336
337 struct kmem_cache *recv_lookaside;
338};
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369struct tx_desc {
370 u32 addr_hi;
371 u32 addr_lo;
372 u32 len_vlan;
373 u32 flags;
374};
375
376
377
378
379
380
381
382struct tcb {
383 struct tcb *next;
384 u32 flags;
385 u32 count;
386 u32 stale;
387 struct sk_buff *skb;
388 u32 index;
389 u32 index_start;
390};
391
392
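/* TX-side state: the TCB array and its ready queue, the list of TCBs
 * currently in flight, the TX descriptor ring and the write-back TX
 * status word.
 */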
393struct tx_ring {
394
395 struct tcb *tcb_ring;
396
397
398 struct tcb *tcb_qhead;
399 struct tcb *tcb_qtail;
400
401
402
403
404
405
406
407 struct tcb *send_head;
408 struct tcb *send_tail;
409 int used;
410
411
412 struct tx_desc *tx_desc_ring;
413 dma_addr_t tx_desc_ring_pa;
414
415
416 u32 send_idx;
417
418
419 u32 *tx_status;
420 dma_addr_t tx_status_pa;
421
422
423 int since_irq;
424};
425
426
427
428
429
430#define NUM_DESC_PER_RING_TX 512
431#define NUM_TCB 64
432
433
434
435
436
437
438#define TX_ERROR_PERIOD 1000
439
440#define LO_MARK_PERCENT_FOR_PSR 15
441#define LO_MARK_PERCENT_FOR_RX 15
442
443
444struct rfd {
445 struct list_head list_node;
446 struct sk_buff *skb;
447 u32 len;
448 u16 bufferindex;
449 u8 ringindex;
450};
451
452
453#define FLOW_BOTH 0
454#define FLOW_TXONLY 1
455#define FLOW_RXONLY 2
456#define FLOW_NONE 3
457
458
459struct ce_stats {
460
461
462
463
464
465
466 u32 unicast_pkts_rcvd;
467 atomic_t unicast_pkts_xmtd;
468 u32 multicast_pkts_rcvd;
469 atomic_t multicast_pkts_xmtd;
470 u32 broadcast_pkts_rcvd;
471 atomic_t broadcast_pkts_xmtd;
472 u32 rcvd_pkts_dropped;
473
474
475 u32 tx_underflows;
476
477 u32 tx_collisions;
478 u32 tx_excessive_collisions;
479 u32 tx_first_collisions;
480 u32 tx_late_collisions;
481 u32 tx_max_pkt_errs;
482 u32 tx_deferred;
483
484
485 u32 rx_overflows;
486
487 u32 rx_length_errs;
488 u32 rx_align_errs;
489 u32 rx_crc_errs;
490 u32 rx_code_violations;
491 u32 rx_other_errs;
492
493 u32 synchronous_iterations;
494 u32 interrupt_status;
495};
496
497
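/* The private per-device structure: net/PCI/PHY handles, flags, MAC
 * address, locks, register mapping, flow-control and jumbo-packet
 * settings, the TX and RX rings, and statistics.
 */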
498struct et131x_adapter {
499 struct net_device *netdev;
500 struct pci_dev *pdev;
501 struct mii_bus *mii_bus;
502 struct phy_device *phydev;
503 struct work_struct task;
504
505
506 u32 flags;
507
508
509 int link;
510
511
512 u8 rom_addr[ETH_ALEN];
513 u8 addr[ETH_ALEN];
514 bool has_eeprom;
515 u8 eeprom_data[2];
516
517
518 spinlock_t lock;
519
520 spinlock_t tcb_send_qlock;
521 spinlock_t tcb_ready_qlock;
522 spinlock_t send_hw_lock;
523
524 spinlock_t rcv_lock;
525 spinlock_t rcv_pend_lock;
526 spinlock_t fbr_lock;
527
528 spinlock_t phy_lock;
529
530
531 u32 packet_filter;
532
533
534 u32 multicast_addr_count;
535 u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];
536
537
538 struct address_map __iomem *regs;
539
540
541 u8 wanted_flow;
542 u32 registry_jumbo_packet;
543
544
545 u8 flowcontrol;
546
547
548 struct timer_list error_timer;
549
550
551
552
553 u8 boot_coma;
554
555
556
557
558
559 u16 pdown_speed;
560 u8 pdown_duplex;
561
562
563 struct tx_ring tx_ring;
564
565
566 struct rx_ring rx_ring;
567
568
569 struct ce_stats stats;
570
571 struct net_device_stats net_stats;
572};
573
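/**
 * eeprom_wait_ready - wait for the EEPROM/LBCIF interface to become ready
 * @pdev: our PCI device
 * @status: if non-NULL, the final LBCIF status dword is returned here
 *
 * Polls the LBCIF status in PCI config space until the interface reports
 * ready.  Returns the low status byte on success, -EIO on a config-space
 * read failure or -ETIMEDOUT if the poll limit is reached.
 */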
574static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
575{
576 u32 reg;
577 int i;
578
579
580
581
582
583
584
585
586 for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
587
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
589 return -EIO;
590
591
592 if ((reg & 0x3000) == 0x3000) {
593 if (status)
594 *status = reg;
595 return reg & 0xFF;
596 }
597 }
598 return -ETIMEDOUT;
599}
600
601
602
603
604
605
606
607
608
609
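/**
 * eeprom_write - write one byte to the EEPROM via the LBCIF interface
 * @adapter: pointer to our private adapter structure
 * @addr: EEPROM address of the byte to write
 * @data: byte to write
 *
 * Enables the LBCIF for I2C writes, retries the address/data cycle up to
 * MAX_NUM_WRITE_RETRIES times on an ACK error, then polls until the write
 * cycle completes.  Returns 0 on success or a negative errno otherwise.
 */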
610static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
611{
612 struct pci_dev *pdev = adapter->pdev;
613 int index = 0;
614 int retries;
615 int err = 0;
616 int i2c_wack = 0;
617 int writeok = 0;
618 u32 status;
619 u32 val = 0;
620
621
622
623
624
625
626
627
628
629
630 err = eeprom_wait_ready(pdev, NULL);
631 if (err)
632 return err;
633
634
635
636
637
638
639
640 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
641 LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
642 return -EIO;
643
644 i2c_wack = 1;
645
646
647
648 for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
649
650 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
651 break;
652
653
654
655
656 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
657 break;
658
659
660
661
662
663
664
665
666 err = eeprom_wait_ready(pdev, &status);
667 if (err < 0)
668 return 0;
669
670
671
672
673
674
675 if ((status & LBCIF_STATUS_GENERAL_ERROR)
676 && adapter->pdev->revision == 0)
677 break;
678
679
680
681
682
683
684
685
686
687 if (status & LBCIF_STATUS_ACK_ERROR) {
688
689
690
691
692
693
694 udelay(10);
695 continue;
696 }
697
698 writeok = 1;
699 break;
700 }
701
702
703
704
705 udelay(10);
706
707 while (i2c_wack) {
708 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
709 LBCIF_CONTROL_LBCIF_ENABLE))
710 writeok = 0;
711
712
713
714
715 do {
716 pci_write_config_dword(pdev,
717 LBCIF_ADDRESS_REGISTER,
718 addr);
719 do {
720 pci_read_config_dword(pdev,
721 LBCIF_DATA_REGISTER, &val);
722 } while ((val & 0x00010000) == 0);
723 } while (val & 0x00040000);
724
725 if ((val & 0xFF00) != 0xC000 || index == 10000)
726 break;
727 index++;
728 }
729 return writeok ? 0 : -EIO;
730}
731
732
733
734
735
736
737
738
739
740
741
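/**
 * eeprom_read - read one byte from the EEPROM via the LBCIF interface
 * @adapter: pointer to our private adapter structure
 * @addr: EEPROM address of the byte to read
 * @pdata: where to store the byte read
 *
 * Returns 0 on success or a negative errno on failure.
 */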
742static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
743{
744 struct pci_dev *pdev = adapter->pdev;
745 int err;
746 u32 status;
747
748
749
750
751
752
753 err = eeprom_wait_ready(pdev, NULL);
754 if (err)
755 return err;
756
757
758
759
760
761
762 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
763 LBCIF_CONTROL_LBCIF_ENABLE))
764 return -EIO;
765
766
767
768
769 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
770 return -EIO;
771
772
773
774
775
776 err = eeprom_wait_ready(pdev, &status);
777 if (err < 0)
778 return err;
779
780
781
782
783 *pdata = err;
784
785
786
787
788 return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
789}
790
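/**
 * et131x_init_eeprom - check EEPROM status and cache the LED settings
 * @adapter: pointer to our private adapter structure
 *
 * Reads the EEPROM status from PCI config space; on rev 0x01 silicon a
 * fixup write is attempted before declaring the EEPROM bad.  The bytes at
 * offsets 0x70/0x71 are cached in adapter->eeprom_data for later use by
 * et131x_xcvr_init().  Returns 0 on success or -EIO on failure.
 */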
791static int et131x_init_eeprom(struct et131x_adapter *adapter)
792{
793 struct pci_dev *pdev = adapter->pdev;
794 u8 eestatus;
795
796
797
798
799 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
800 &eestatus);
801
802
803
804
805
806
807
808 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
809 dev_err(&pdev->dev,
810 "Could not read PCI config space for EEPROM Status\n");
811 return -EIO;
812 }
813
814
815
816
817 if (eestatus & 0x4C) {
818 int write_failed = 0;
819 if (pdev->revision == 0x01) {
820 int i;
821 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
822
823
824
825
826
827 for (i = 0; i < 3; i++)
828 if (eeprom_write(adapter, i, eedata[i]) < 0)
829 write_failed = 1;
830 }
831 if (pdev->revision != 0x01 || write_failed) {
832 dev_err(&pdev->dev,
833 "Fatal EEPROM Status Error - 0x%04x\n", eestatus);
834
835
836
837
838
839
840
841 adapter->has_eeprom = 0;
842 return -EIO;
843 }
844 }
845 adapter->has_eeprom = 1;
846
847
848
849
850 eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
851 eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
852
853 if (adapter->eeprom_data[0] != 0xcd)
854
855 adapter->eeprom_data[1] = 0x00;
856
857 return 0;
858}
859
860
861
862
863
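/**
 * et131x_rx_dma_enable - re-start the RX DMA engine
 * @adapter: pointer to our adapter structure
 *
 * Programs the rxdma CSR with the free-buffer-ring sizes currently in use
 * and clears the halt bit, then verifies that the engine has left the
 * halted state.
 */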
864static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
865{
866
867 u32 csr = 0x2000;
868
869 if (adapter->rx_ring.fbr[0]->buffsize == 4096)
870 csr |= 0x0800;
871 else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
872 csr |= 0x1000;
873 else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
874 csr |= 0x1800;
875#ifdef USE_FBR0
876 csr |= 0x0400;
877 if (adapter->rx_ring.fbr[1]->buffsize == 256)
878 csr |= 0x0100;
879 else if (adapter->rx_ring.fbr[1]->buffsize == 512)
880 csr |= 0x0200;
881 else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
882 csr |= 0x0300;
883#endif
884 writel(csr, &adapter->regs->rxdma.csr);
885
886 csr = readl(&adapter->regs->rxdma.csr);
887 if ((csr & 0x00020000) != 0) {
888 udelay(5);
889 csr = readl(&adapter->regs->rxdma.csr);
890 if ((csr & 0x00020000) != 0) {
891 dev_err(&adapter->pdev->dev,
892 "RX Dma failed to exit halt state. CSR 0x%08x\n",
893 csr);
894 }
895 }
896}
897
898
899
900
901
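/**
 * et131x_rx_dma_disable - halt the RX DMA engine
 * @adapter: pointer to our adapter structure
 */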
902static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
903{
904 u32 csr;
905
906 writel(0x00002001, &adapter->regs->rxdma.csr);
907 csr = readl(&adapter->regs->rxdma.csr);
908 if ((csr & 0x00020000) == 0) {
909 udelay(5);
910 csr = readl(&adapter->regs->rxdma.csr);
911 if ((csr & 0x00020000) == 0)
912 dev_err(&adapter->pdev->dev,
913 "RX Dma failed to enter halt state. CSR 0x%08x\n",
914 csr);
915 }
916}
917
918
919
920
921
922
923
924static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
925{
926
927
928
929 writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
930 &adapter->regs->txdma.csr);
931}
932
933static inline void add_10bit(u32 *v, int n)
934{
935 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
936}
937
938static inline void add_12bit(u32 *v, int n)
939{
940 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
941}
942
943
944
945
946
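/**
 * et1310_config_mac_regs1 - initial MAC register configuration
 * @adapter: pointer to our adapter structure
 *
 * Resets the MAC core, programs the inter-packet gap and half-duplex
 * parameters, loads the station (MAC) address and maximum frame length,
 * then takes the MAC back out of reset with TX/RX still disabled.
 */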
947static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
948{
949 struct mac_regs __iomem *macregs = &adapter->regs->mac;
950 u32 station1;
951 u32 station2;
952 u32 ipg;
953
954
955
956
	writel(0xC00F0000, &macregs->cfg1);

	ipg = 0x38005860;
	ipg |= 0x50 << 8;
	writel(ipg, &macregs->ipg);

	writel(0x00A1F037, &macregs->hfdp);

	writel(0, &macregs->if_ctrl);

	writel(0x07, &macregs->mii_mgmt_cfg);
973
974
975
976
977
978
979
980
981 station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
982 (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
983 station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
984 (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
985 (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
986 adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);
989
990
991
992
993
994
995
996
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	writel(0, &macregs->cfg1);
1001}
1002
1003
1004
1005
1006
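/**
 * et1310_config_mac_regs2 - speed/duplex dependent MAC configuration
 * @adapter: pointer to our adapter structure
 *
 * Called once the PHY has a negotiated link; sets the MAC interface mode
 * for gigabit or 10/100 operation, enables TX/RX and flow control as
 * requested, and waits for the configuration to be acknowledged.
 */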
1007static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
1008{
1009 int32_t delay = 0;
1010 struct mac_regs __iomem *mac = &adapter->regs->mac;
1011 struct phy_device *phydev = adapter->phydev;
1012 u32 cfg1;
1013 u32 cfg2;
1014 u32 ifctrl;
1015 u32 ctl;
1016
1017 ctl = readl(&adapter->regs->txmac.ctl);
1018 cfg1 = readl(&mac->cfg1);
1019 cfg2 = readl(&mac->cfg2);
1020 ifctrl = readl(&mac->if_ctrl);
1021
1022
1023 cfg2 &= ~0x300;
1024 if (phydev && phydev->speed == SPEED_1000) {
1025 cfg2 |= 0x200;
1026
1027 ifctrl &= ~(1 << 24);
1028 } else {
1029 cfg2 |= 0x100;
1030 ifctrl |= (1 << 24);
1031 }
1032
1033
1034 cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
1035
1036 cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
1037 if (adapter->flowcontrol == FLOW_RXONLY ||
1038 adapter->flowcontrol == FLOW_BOTH)
1039 cfg1 |= CFG1_RX_FLOW;
1040 writel(cfg1, &mac->cfg1);
1041
1042
1043
1044
1045 cfg2 |= 0x7016;
1046 cfg2 &= ~0x0021;
1047
1048
1049 if (phydev && phydev->duplex == DUPLEX_FULL)
1050 cfg2 |= 0x01;
1051
1052 ifctrl &= ~(1 << 26);
1053 if (phydev && phydev->duplex == DUPLEX_HALF)
1054 ifctrl |= (1<<26);
1055
1056 writel(ifctrl, &mac->if_ctrl);
1057 writel(cfg2, &mac->cfg2);
1058
1059 do {
1060 udelay(10);
1061 delay++;
1062 cfg1 = readl(&mac->cfg1);
1063 } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);
1064
1065 if (delay == 100) {
1066 dev_warn(&adapter->pdev->dev,
1067 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
1068 cfg1);
1069 }
1070
1071
1072 ctl |= 0x09;
1073 writel(ctl, &adapter->regs->txmac.ctl);
1074
1075
1076 if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
1077 et131x_rx_dma_enable(adapter);
1078 et131x_tx_dma_enable(adapter);
1079 }
1080}
1081
1082
1083
1084
1085
1086
1087
1088static int et1310_in_phy_coma(struct et131x_adapter *adapter)
1089{
1090 u32 pmcsr;
1091
1092 pmcsr = readl(&adapter->regs->global.pm_csr);
1093
1094 return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
1095}
1096
1097static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
1098{
1099 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1100 u32 hash1 = 0;
1101 u32 hash2 = 0;
1102 u32 hash3 = 0;
1103 u32 hash4 = 0;
1104 u32 pm_csr;
1105
1106
1107
1108
1109
1110
1111 if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
1112 int i;
1113
1114
1115 for (i = 0; i < adapter->multicast_addr_count; i++) {
1116 u32 result;
1117
1118 result = ether_crc(6, adapter->multicast_list[i]);
1119
1120 result = (result & 0x3F800000) >> 23;
1121
1122 if (result < 32) {
1123 hash1 |= (1 << result);
1124 } else if ((31 < result) && (result < 64)) {
1125 result -= 32;
1126 hash2 |= (1 << result);
1127 } else if ((63 < result) && (result < 96)) {
1128 result -= 64;
1129 hash3 |= (1 << result);
1130 } else {
1131 result -= 96;
1132 hash4 |= (1 << result);
1133 }
1134 }
1135 }
1136
1137
1138 pm_csr = readl(&adapter->regs->global.pm_csr);
1139 if (!et1310_in_phy_coma(adapter)) {
1140 writel(hash1, &rxmac->multi_hash1);
1141 writel(hash2, &rxmac->multi_hash2);
1142 writel(hash3, &rxmac->multi_hash3);
1143 writel(hash4, &rxmac->multi_hash4);
1144 }
1145}
1146
1147static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
1148{
1149 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1150 u32 uni_pf1;
1151 u32 uni_pf2;
1152 u32 uni_pf3;
1153 u32 pm_csr;
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164 uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
1165 (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
1166 (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
1167 adapter->addr[1];
1168
1169 uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
1170 (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
1171 (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
1172 adapter->addr[5];
1173
1174 uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
1175 (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
1176 (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
1177 adapter->addr[5];
1178
1179 pm_csr = readl(&adapter->regs->global.pm_csr);
1180 if (!et1310_in_phy_coma(adapter)) {
1181 writel(uni_pf1, &rxmac->uni_pf_addr1);
1182 writel(uni_pf2, &rxmac->uni_pf_addr2);
1183 writel(uni_pf3, &rxmac->uni_pf_addr3);
1184 }
1185}
1186
1187static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
1188{
1189 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1190 struct phy_device *phydev = adapter->phydev;
1191 u32 sa_lo;
1192 u32 sa_hi = 0;
1193 u32 pf_ctrl = 0;
1194
1195
1196 writel(0x8, &rxmac->ctrl);
1197
1198
1199 writel(0, &rxmac->crc0);
1200 writel(0, &rxmac->crc12);
1201 writel(0, &rxmac->crc34);
1202
1203
1204
1205
1206
1207 writel(0, &rxmac->mask0_word0);
1208 writel(0, &rxmac->mask0_word1);
1209 writel(0, &rxmac->mask0_word2);
1210 writel(0, &rxmac->mask0_word3);
1211
1212 writel(0, &rxmac->mask1_word0);
1213 writel(0, &rxmac->mask1_word1);
1214 writel(0, &rxmac->mask1_word2);
1215 writel(0, &rxmac->mask1_word3);
1216
1217 writel(0, &rxmac->mask2_word0);
1218 writel(0, &rxmac->mask2_word1);
1219 writel(0, &rxmac->mask2_word2);
1220 writel(0, &rxmac->mask2_word3);
1221
1222 writel(0, &rxmac->mask3_word0);
1223 writel(0, &rxmac->mask3_word1);
1224 writel(0, &rxmac->mask3_word2);
1225 writel(0, &rxmac->mask3_word3);
1226
1227 writel(0, &rxmac->mask4_word0);
1228 writel(0, &rxmac->mask4_word1);
1229 writel(0, &rxmac->mask4_word2);
1230 writel(0, &rxmac->mask4_word3);
1231
1232
1233 sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
1234 (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
1235 (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
1236 adapter->addr[5];
1237 writel(sa_lo, &rxmac->sa_lo);
1238
1239 sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
1240 adapter->addr[1];
1241 writel(sa_hi, &rxmac->sa_hi);
1242
1243
1244 writel(0, &rxmac->pf_ctrl);
1245
1246
1247 if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1248 et1310_setup_device_for_unicast(adapter);
1249 pf_ctrl |= 4;
1250 } else {
1251 writel(0, &rxmac->uni_pf_addr1);
1252 writel(0, &rxmac->uni_pf_addr2);
1253 writel(0, &rxmac->uni_pf_addr3);
1254 }
1255
1256
1257 if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1258 pf_ctrl |= 2;
1259 et1310_setup_device_for_multicast(adapter);
1260 }
1261
1262
1263 pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
1264 pf_ctrl |= 8;
1265
1266 if (adapter->registry_jumbo_packet > 8192)
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277 writel(0x41, &rxmac->mcif_ctrl_max_seg);
1278 else
1279 writel(0, &rxmac->mcif_ctrl_max_seg);
1280
1281
1282 writel(0, &rxmac->mcif_water_mark);
1283
1284
1285 writel(0, &rxmac->mif_ctrl);
1286
1287
1288 writel(0, &rxmac->space_avail);
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303 if (phydev && phydev->speed == SPEED_100)
1304 writel(0x30038, &rxmac->mif_ctrl);
1305 else
1306 writel(0x30030, &rxmac->mif_ctrl);
1307
1308
1309
1310
1311
1312
1313
1314 writel(pf_ctrl, &rxmac->pf_ctrl);
1315 writel(0x9, &rxmac->ctrl);
1316}
1317
1318static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1319{
1320 struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1321
1322
1323
1324
1325
1326 if (adapter->flowcontrol == FLOW_NONE)
1327 writel(0, &txmac->cf_param);
1328 else
1329 writel(0x40, &txmac->cf_param);
1330}
1331
1332static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1333{
1334 struct macstat_regs __iomem *macstat =
1335 &adapter->regs->macstat;
1336
1337
1338
1339
1340 writel(0, &macstat->txrx_0_64_byte_frames);
1341 writel(0, &macstat->txrx_65_127_byte_frames);
1342 writel(0, &macstat->txrx_128_255_byte_frames);
1343 writel(0, &macstat->txrx_256_511_byte_frames);
1344 writel(0, &macstat->txrx_512_1023_byte_frames);
1345 writel(0, &macstat->txrx_1024_1518_byte_frames);
1346 writel(0, &macstat->txrx_1519_1522_gvln_frames);
1347
1348 writel(0, &macstat->rx_bytes);
1349 writel(0, &macstat->rx_packets);
1350 writel(0, &macstat->rx_fcs_errs);
1351 writel(0, &macstat->rx_multicast_packets);
1352 writel(0, &macstat->rx_broadcast_packets);
1353 writel(0, &macstat->rx_control_frames);
1354 writel(0, &macstat->rx_pause_frames);
1355 writel(0, &macstat->rx_unknown_opcodes);
1356 writel(0, &macstat->rx_align_errs);
1357 writel(0, &macstat->rx_frame_len_errs);
1358 writel(0, &macstat->rx_code_errs);
1359 writel(0, &macstat->rx_carrier_sense_errs);
1360 writel(0, &macstat->rx_undersize_packets);
1361 writel(0, &macstat->rx_oversize_packets);
1362 writel(0, &macstat->rx_fragment_packets);
1363 writel(0, &macstat->rx_jabbers);
1364 writel(0, &macstat->rx_drops);
1365
1366 writel(0, &macstat->tx_bytes);
1367 writel(0, &macstat->tx_packets);
1368 writel(0, &macstat->tx_multicast_packets);
1369 writel(0, &macstat->tx_broadcast_packets);
1370 writel(0, &macstat->tx_pause_frames);
1371 writel(0, &macstat->tx_deferred);
1372 writel(0, &macstat->tx_excessive_deferred);
1373 writel(0, &macstat->tx_single_collisions);
1374 writel(0, &macstat->tx_multiple_collisions);
1375 writel(0, &macstat->tx_late_collisions);
1376 writel(0, &macstat->tx_excessive_collisions);
1377 writel(0, &macstat->tx_total_collisions);
1378 writel(0, &macstat->tx_pause_honored_frames);
1379 writel(0, &macstat->tx_drops);
1380 writel(0, &macstat->tx_jabbers);
1381 writel(0, &macstat->tx_fcs_errs);
1382 writel(0, &macstat->tx_control_frames);
1383 writel(0, &macstat->tx_oversize_frames);
1384 writel(0, &macstat->tx_undersize_frames);
1385 writel(0, &macstat->tx_fragments);
1386 writel(0, &macstat->carry_reg1);
1387 writel(0, &macstat->carry_reg2);
1388
1389
1390
1391
1392
1393 writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1394 writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1395}
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
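/**
 * et131x_phy_mii_read - read a PHY register via the MII management interface
 * @adapter: pointer to our adapter structure
 * @addr: PHY address on the MII bus
 * @reg: register number to read
 * @value: where to store the 16-bit value read
 *
 * The previous state of the MII management registers is saved and
 * restored.  Returns 0 on success or -EIO if the interface stays busy.
 */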
1406static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1407 u8 reg, u16 *value)
1408{
1409 struct mac_regs __iomem *mac = &adapter->regs->mac;
1410 int status = 0;
1411 u32 delay = 0;
1412 u32 mii_addr;
1413 u32 mii_cmd;
1414 u32 mii_indicator;
1415
1416
1417
1418
1419 mii_addr = readl(&mac->mii_mgmt_addr);
1420 mii_cmd = readl(&mac->mii_mgmt_cmd);
1421
1422
1423 writel(0, &mac->mii_mgmt_cmd);
1424
1425
1426 writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1427
1428 writel(0x1, &mac->mii_mgmt_cmd);
1429
1430 do {
1431 udelay(50);
1432 delay++;
1433 mii_indicator = readl(&mac->mii_mgmt_indicator);
1434 } while ((mii_indicator & MGMT_WAIT) && delay < 50);
1435
1436
1437 if (delay == 50) {
1438 dev_warn(&adapter->pdev->dev,
1439 "reg 0x%08x could not be read\n", reg);
1440 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1441 mii_indicator);
1442
1443 status = -EIO;
1444 }
1445
1446
1447
1448 *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
1449
1450
1451 writel(0, &mac->mii_mgmt_cmd);
1452
1453
1454
1455
1456 writel(mii_addr, &mac->mii_mgmt_addr);
1457 writel(mii_cmd, &mac->mii_mgmt_cmd);
1458
1459 return status;
1460}
1461
1462static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1463{
1464 struct phy_device *phydev = adapter->phydev;
1465
1466 if (!phydev)
1467 return -EIO;
1468
1469 return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
1470}
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
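/**
 * et131x_mii_write - write a register on the attached PHY
 * @adapter: pointer to our adapter structure
 * @reg: register number to write
 * @value: 16-bit value to write
 *
 * Uses the MAC's MII management interface; the PHY address comes from
 * adapter->phydev.  Returns 0 on success or -EIO on timeout.
 */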
1482static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
1483{
1484 struct mac_regs __iomem *mac = &adapter->regs->mac;
1485 struct phy_device *phydev = adapter->phydev;
1486 int status = 0;
1487 u8 addr;
1488 u32 delay = 0;
1489 u32 mii_addr;
1490 u32 mii_cmd;
1491 u32 mii_indicator;
1492
1493 if (!phydev)
1494 return -EIO;
1495
1496 addr = phydev->addr;
1497
1498
1499
1500
1501 mii_addr = readl(&mac->mii_mgmt_addr);
1502 mii_cmd = readl(&mac->mii_mgmt_cmd);
1503
1504
1505 writel(0, &mac->mii_mgmt_cmd);
1506
1507
1508 writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1509
1510
1511 writel(value, &mac->mii_mgmt_ctrl);
1512
1513 do {
1514 udelay(50);
1515 delay++;
1516 mii_indicator = readl(&mac->mii_mgmt_indicator);
1517 } while ((mii_indicator & MGMT_BUSY) && delay < 100);
1518
1519
1520 if (delay == 100) {
1521 u16 tmp;
1522
1523 dev_warn(&adapter->pdev->dev,
1524 "reg 0x%08x could not be written", reg);
1525 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1526 mii_indicator);
1527 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
1528 readl(&mac->mii_mgmt_cmd));
1529
1530 et131x_mii_read(adapter, reg, &tmp);
1531
1532 status = -EIO;
1533 }
1534
1535 writel(0, &mac->mii_mgmt_cmd);
1536
1537
1538
1539
1540
1541 writel(mii_addr, &mac->mii_mgmt_addr);
1542 writel(mii_cmd, &mac->mii_mgmt_cmd);
1543
1544 return status;
1545}
1546
1547
1548static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
1549 u16 action, u16 regnum, u16 bitnum,
1550 u8 *value)
1551{
1552 u16 reg;
1553 u16 mask = 0x0001 << bitnum;
1554
1555
	et131x_mii_read(adapter, regnum, &reg);
1557
1558 switch (action) {
1559 case TRUEPHY_BIT_READ:
1560 *value = (reg & mask) >> bitnum;
1561 break;
1562
1563 case TRUEPHY_BIT_SET:
1564 et131x_mii_write(adapter, regnum, reg | mask);
1565 break;
1566
1567 case TRUEPHY_BIT_CLEAR:
1568 et131x_mii_write(adapter, regnum, reg & ~mask);
1569 break;
1570
1571 default:
1572 break;
1573 }
1574}
1575
1576static void et1310_config_flow_control(struct et131x_adapter *adapter)
1577{
1578 struct phy_device *phydev = adapter->phydev;
1579
1580 if (phydev->duplex == DUPLEX_HALF) {
1581 adapter->flowcontrol = FLOW_NONE;
1582 } else {
1583 char remote_pause, remote_async_pause;
1584
1585 et1310_phy_access_mii_bit(adapter,
1586 TRUEPHY_BIT_READ, 5, 10, &remote_pause);
1587 et1310_phy_access_mii_bit(adapter,
1588 TRUEPHY_BIT_READ, 5, 11,
1589 &remote_async_pause);
1590
1591 if ((remote_pause == TRUEPHY_BIT_SET) &&
1592 (remote_async_pause == TRUEPHY_BIT_SET)) {
1593 adapter->flowcontrol = adapter->wanted_flow;
1594 } else if ((remote_pause == TRUEPHY_BIT_SET) &&
1595 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1596 if (adapter->wanted_flow == FLOW_BOTH)
1597 adapter->flowcontrol = FLOW_BOTH;
1598 else
1599 adapter->flowcontrol = FLOW_NONE;
1600 } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
1601 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1602 adapter->flowcontrol = FLOW_NONE;
1603 } else {
1604
1605 if (adapter->wanted_flow == FLOW_BOTH)
1606 adapter->flowcontrol = FLOW_RXONLY;
1607 else
1608 adapter->flowcontrol = FLOW_NONE;
1609 }
1610 }
1611}
1612
1613
1614
1615
1616
1617static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
1618{
1619 struct ce_stats *stats = &adapter->stats;
1620 struct macstat_regs __iomem *macstat =
1621 &adapter->regs->macstat;
1622
1623 stats->tx_collisions += readl(&macstat->tx_total_collisions);
1624 stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
1625 stats->tx_deferred += readl(&macstat->tx_deferred);
1626 stats->tx_excessive_collisions +=
1627 readl(&macstat->tx_multiple_collisions);
1628 stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
1629 stats->tx_underflows += readl(&macstat->tx_undersize_frames);
1630 stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);
1631
1632 stats->rx_align_errs += readl(&macstat->rx_align_errs);
1633 stats->rx_crc_errs += readl(&macstat->rx_code_errs);
1634 stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
1635 stats->rx_overflows += readl(&macstat->rx_oversize_packets);
1636 stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
1637 stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
1638 stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
1639}
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1650{
1651 u32 carry_reg1;
1652 u32 carry_reg2;
1653
1654
1655
1656
1657 carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1658 carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1659
1660 writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1661 writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1662
1663
1664
1665
1666
1667
1668
1669 if (carry_reg1 & (1 << 14))
1670 adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
1671 if (carry_reg1 & (1 << 8))
1672 adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
1673 if (carry_reg1 & (1 << 7))
1674 adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
1675 if (carry_reg1 & (1 << 2))
1676 adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
1677 if (carry_reg1 & (1 << 6))
1678 adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
1679 if (carry_reg1 & (1 << 3))
1680 adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
1681 if (carry_reg1 & (1 << 0))
1682 adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
1683 if (carry_reg2 & (1 << 16))
1684 adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
1685 if (carry_reg2 & (1 << 15))
1686 adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
1687 if (carry_reg2 & (1 << 6))
1688 adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1689 if (carry_reg2 & (1 << 8))
1690 adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
1691 if (carry_reg2 & (1 << 5))
1692 adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1693 if (carry_reg2 & (1 << 4))
1694 adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
1695 if (carry_reg2 & (1 << 2))
1696 adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
1697}
1698
1699static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1700{
1701 struct net_device *netdev = bus->priv;
1702 struct et131x_adapter *adapter = netdev_priv(netdev);
1703 u16 value;
1704 int ret;
1705
1706 ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1707
1708 if (ret < 0)
1709 return ret;
1710 else
1711 return value;
1712}
1713
1714static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
1715 int reg, u16 value)
1716{
1717 struct net_device *netdev = bus->priv;
1718 struct et131x_adapter *adapter = netdev_priv(netdev);
1719
1720 return et131x_mii_write(adapter, reg, value);
1721}
1722
1723static int et131x_mdio_reset(struct mii_bus *bus)
1724{
1725 struct net_device *netdev = bus->priv;
1726 struct et131x_adapter *adapter = netdev_priv(netdev);
1727
1728 et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
1729
1730 return 0;
1731}
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
1744{
1745 u16 data;
1746
1747 et131x_mii_read(adapter, MII_BMCR, &data);
1748 data &= ~BMCR_PDOWN;
1749 if (down)
1750 data |= BMCR_PDOWN;
1751 et131x_mii_write(adapter, MII_BMCR, data);
1752}
1753
1754
1755
1756
1757
1758
1759static void et131x_xcvr_init(struct et131x_adapter *adapter)
1760{
1761 u16 imr;
1762 u16 isr;
1763 u16 lcr2;
1764
1765 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
1766 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
1767
1768
1769
1770
1771 imr |= (ET_PHY_INT_MASK_AUTONEGSTAT |
1772 ET_PHY_INT_MASK_LINKSTAT |
1773 ET_PHY_INT_MASK_ENABLE);
1774
1775 et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785 if ((adapter->eeprom_data[1] & 0x4) == 0) {
1786 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1787
1788 lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
1789 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1790
1791 if ((adapter->eeprom_data[1] & 0x8) == 0)
1792 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1793 else
1794 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1795
1796 et131x_mii_write(adapter, PHY_LED_2, lcr2);
1797 }
1798}
1799
1800
1801
1802
1803
1804
1805
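/**
 * et131x_configure_global_regs - configure the device's global registers
 * @adapter: pointer to our adapter structure
 *
 * Splits the internal packet memory between the RX and TX queues
 * according to the configured jumbo packet size, and disables loopback,
 * MSI and the watchdog timer.
 */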
1806static void et131x_configure_global_regs(struct et131x_adapter *adapter)
1807{
1808 struct global_regs __iomem *regs = &adapter->regs->global;
1809
	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	writel(0, &regs->loopback);

	writel(0, &regs->msi_config);

	writel(0, &regs->watchdog_timer);
1845}
1846
1847
1848
1849
1850
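/**
 * et131x_config_rx_dma_regs - program the RX DMA engine
 * @adapter: pointer to our adapter structure
 *
 * Loads the status-block, packet status ring and free buffer ring
 * addresses and sizes into the rxdma block, initialises the rings'
 * full/min offsets, and sets the interrupt coalescing parameters
 * (num_pkt_done / max_pkt_time).
 */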
1851static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1852{
1853 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1854 struct rx_ring *rx_local = &adapter->rx_ring;
1855 struct fbr_desc *fbr_entry;
1856 u32 entry;
1857 u32 psr_num_des;
1858 unsigned long flags;
1859
1860
1861 et131x_rx_dma_disable(adapter);
1862
1863
1864
1865
1866
1867
1868
1869
1870 writel((u32) ((u64)rx_local->rx_status_bus >> 32),
1871 &rx_dma->dma_wb_base_hi);
1872 writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
1873
1874 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1875
1876
1877
1878
1879 writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
1880 &rx_dma->psr_base_hi);
1881 writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
1882 writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1883 writel(0, &rx_dma->psr_full_offset);
1884
1885 psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
1886 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1887 &rx_dma->psr_min_des);
1888
1889 spin_lock_irqsave(&adapter->rcv_lock, flags);
1890
1891
1892 rx_local->local_psr_full = 0;
1893
1894
1895 fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
1896 for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
1897 fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
1898 fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
1899 fbr_entry->word2 = entry;
1900 fbr_entry++;
1901 }
1902
1903
1904
1905
1906 writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
1907 &rx_dma->fbr1_base_hi);
1908 writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
1909 writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
1910 writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
1911
1912
1913
1914
1915 rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
1916 writel(
1917 ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1918 &rx_dma->fbr1_min_des);
1919
1920#ifdef USE_FBR0
1921
1922 fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
1923 for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
1924 fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
1925 fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
1926 fbr_entry->word2 = entry;
1927 fbr_entry++;
1928 }
1929
1930 writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
1931 &rx_dma->fbr0_base_hi);
1932 writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
1933 writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
1934 writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
1935
1936
1937
1938
1939 rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
1940 writel(
1941 ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1942 &rx_dma->fbr0_min_des);
1943#endif
1944
1945
1946
1947
1948
1949
1950 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1951
1952
1953
1954
1955
1956
1957 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1958
1959 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1960}
1961
1962
1963
1964
1965
1966
1967
1968
1969static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1970{
1971 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1972
1973
1974 writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
1975 &txdma->pr_base_hi);
1976 writel((u32) adapter->tx_ring.tx_desc_ring_pa,
1977 &txdma->pr_base_lo);
1978
1979
1980 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1981
1982
1983 writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
1984 &txdma->dma_wb_base_hi);
1985 writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
1986
1987 *adapter->tx_ring.tx_status = 0;
1988
1989 writel(0, &txdma->service_request);
1990 adapter->tx_ring.send_idx = 0;
1991}
1992
1993
1994
1995
1996
1997
1998
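/**
 * et131x_adapter_setup - set the adapter up as per cached parameters
 * @adapter: pointer to our adapter structure
 *
 * Configures the global, MAC, MAC-stat, RX MAC, TX MAC and RX/TX DMA
 * blocks, then powers up and initialises the PHY.
 */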
1999static void et131x_adapter_setup(struct et131x_adapter *adapter)
2000{
2001
2002 et131x_configure_global_regs(adapter);
2003
2004 et1310_config_mac_regs1(adapter);
2005
2006
2007
2008 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
2009
2010 et1310_config_rxmac_regs(adapter);
2011 et1310_config_txmac_regs(adapter);
2012
2013 et131x_config_rx_dma_regs(adapter);
2014 et131x_config_tx_dma_regs(adapter);
2015
2016 et1310_config_macstat_regs(adapter);
2017
2018 et1310_phy_power_down(adapter, 0);
2019 et131x_xcvr_init(adapter);
2020}
2021
2022
2023
2024
2025
2026static void et131x_soft_reset(struct et131x_adapter *adapter)
2027{
2028
2029 writel(0xc00f0000, &adapter->regs->mac.cfg1);
2030
2031
2032 writel(0x7F, &adapter->regs->global.sw_reset);
2033 writel(0x000f0000, &adapter->regs->mac.cfg1);
2034 writel(0x00000000, &adapter->regs->mac.cfg1);
2035}
2036
2037
2038
2039
2040
2041
2042
2043
2044static void et131x_enable_interrupts(struct et131x_adapter *adapter)
2045{
2046 u32 mask;
2047
2048
2049 if (adapter->flowcontrol == FLOW_TXONLY ||
2050 adapter->flowcontrol == FLOW_BOTH)
2051 mask = INT_MASK_ENABLE;
2052 else
2053 mask = INT_MASK_ENABLE_NO_FLOW;
2054
2055 writel(mask, &adapter->regs->global.int_mask);
2056}
2057
2058
2059
2060
2061
2062
2063
2064static void et131x_disable_interrupts(struct et131x_adapter *adapter)
2065{
2066
2067 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
2068}
2069
2070
2071
2072
2073
2074static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2075{
2076
2077 writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
2078 &adapter->regs->txdma.csr);
2079}
2080
2081
2082
2083
2084
2085static void et131x_enable_txrx(struct net_device *netdev)
2086{
2087 struct et131x_adapter *adapter = netdev_priv(netdev);
2088
2089
2090 et131x_rx_dma_enable(adapter);
2091 et131x_tx_dma_enable(adapter);
2092
2093
2094 if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
2095 et131x_enable_interrupts(adapter);
2096
2097
2098 netif_start_queue(netdev);
2099}
2100
2101
2102
2103
2104
2105static void et131x_disable_txrx(struct net_device *netdev)
2106{
2107 struct et131x_adapter *adapter = netdev_priv(netdev);
2108
2109
2110 netif_stop_queue(netdev);
2111
2112
2113 et131x_rx_dma_disable(adapter);
2114 et131x_tx_dma_disable(adapter);
2115
2116
2117 et131x_disable_interrupts(adapter);
2118}
2119
2120
2121
2122
2123
2124static void et131x_init_send(struct et131x_adapter *adapter)
2125{
2126 struct tcb *tcb;
2127 u32 ct;
2128 struct tx_ring *tx_ring;
2129
2130
2131 tx_ring = &adapter->tx_ring;
2132 tcb = adapter->tx_ring.tcb_ring;
2133
2134 tx_ring->tcb_qhead = tcb;
2135
2136 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2137
2138
2139 for (ct = 0; ct++ < NUM_TCB; tcb++)
2140
2141
2142
2143 tcb->next = tcb + 1;
2144
2145
2146 tcb--;
2147 tx_ring->tcb_qtail = tcb;
2148 tcb->next = NULL;
2149
2150 tx_ring->send_head = NULL;
2151 tx_ring->send_tail = NULL;
2152}
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2175{
2176 unsigned long flags;
2177 u32 pmcsr;
2178
2179 pmcsr = readl(&adapter->regs->global.pm_csr);
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192 spin_lock_irqsave(&adapter->send_hw_lock, flags);
2193 adapter->flags |= fMP_ADAPTER_LOWER_POWER;
2194 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2195
2196
2197
2198 et131x_disable_txrx(adapter->netdev);
2199
2200
2201 pmcsr &= ~ET_PMCSR_INIT;
2202 writel(pmcsr, &adapter->regs->global.pm_csr);
2203
2204
2205 pmcsr |= ET_PM_PHY_SW_COMA;
2206 writel(pmcsr, &adapter->regs->global.pm_csr);
2207}
2208
2209
2210
2211
2212
2213static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2214{
2215 u32 pmcsr;
2216
2217 pmcsr = readl(&adapter->regs->global.pm_csr);
2218
2219
2220 pmcsr |= ET_PMCSR_INIT;
2221 pmcsr &= ~ET_PM_PHY_SW_COMA;
2222 writel(pmcsr, &adapter->regs->global.pm_csr);
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234 et131x_init_send(adapter);
2235
2236
2237
2238
2239
2240 et131x_soft_reset(adapter);
2241
2242
2243 et131x_adapter_setup(adapter);
2244
2245
2246 adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
2247
2248 et131x_enable_txrx(adapter->netdev);
2249}
2250
2251static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2252{
2253 u32 tmp_free_buff_ring = *free_buff_ring;
2254 tmp_free_buff_ring++;
2255
2256
2257
2258
2259 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2260 tmp_free_buff_ring &= ~ET_DMA10_MASK;
2261 tmp_free_buff_ring ^= ET_DMA10_WRAP;
2262 }
2263
2264 tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2265 *free_buff_ring = tmp_free_buff_ring;
2266 return tmp_free_buff_ring;
2267}
2268
2269
2270
2271
2272
2273
2274
2275
2276static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
2277 u64 *phys_addr, u64 *offset,
2278 u64 mask)
2279{
2280 u64 new_addr = *phys_addr & ~mask;
2281
2282 *offset = 0;
2283
2284 if (new_addr != *phys_addr) {
2285
2286 new_addr += mask + 1;
2287
2288 *offset = new_addr - *phys_addr;
2289
2290 *phys_addr = new_addr;
2291 }
2292}
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
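/**
 * et131x_rx_dma_memory_alloc - allocate RX DMA memory
 * @adapter: pointer to our adapter structure
 *
 * Sizes the free buffer rings from the jumbo-packet setting, then
 * allocates the descriptor rings, the buffer chunks behind them, the
 * packet status ring, the status block and the RFD lookaside cache.
 * Returns 0 on success or -ENOMEM if any allocation fails.
 */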
2303static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2304{
2305 u32 i, j;
2306 u32 bufsize;
2307 u32 pktstat_ringsize, fbr_chunksize;
2308 struct rx_ring *rx_ring;
2309
2310
2311 rx_ring = &adapter->rx_ring;
2312
2313
2314#ifdef USE_FBR0
2315 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2316#endif
2317 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337 if (adapter->registry_jumbo_packet < 2048) {
2338#ifdef USE_FBR0
2339 rx_ring->fbr[1]->buffsize = 256;
2340 rx_ring->fbr[1]->num_entries = 512;
2341#endif
2342 rx_ring->fbr[0]->buffsize = 2048;
2343 rx_ring->fbr[0]->num_entries = 512;
2344 } else if (adapter->registry_jumbo_packet < 4096) {
2345#ifdef USE_FBR0
2346 rx_ring->fbr[1]->buffsize = 512;
2347 rx_ring->fbr[1]->num_entries = 1024;
2348#endif
2349 rx_ring->fbr[0]->buffsize = 4096;
2350 rx_ring->fbr[0]->num_entries = 512;
2351 } else {
2352#ifdef USE_FBR0
2353 rx_ring->fbr[1]->buffsize = 1024;
2354 rx_ring->fbr[1]->num_entries = 768;
2355#endif
2356 rx_ring->fbr[0]->buffsize = 16384;
2357 rx_ring->fbr[0]->num_entries = 128;
2358 }
2359
2360#ifdef USE_FBR0
2361 adapter->rx_ring.psr_num_entries =
2362 adapter->rx_ring.fbr[1]->num_entries +
2363 adapter->rx_ring.fbr[0]->num_entries;
2364#else
2365 adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
2366#endif
2367
2368
2369 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2370 0xfff;
2371 rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2372 bufsize,
2373 &rx_ring->fbr[0]->ring_physaddr,
2374 GFP_KERNEL);
2375 if (!rx_ring->fbr[0]->ring_virtaddr) {
2376 dev_err(&adapter->pdev->dev,
2377 "Cannot alloc memory for Free Buffer Ring 1\n");
2378 return -ENOMEM;
2379 }
2380
2381
2382
2383
2384
2385
2386
2387
2388 rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
2389
2390
2391 et131x_align_allocated_memory(adapter,
2392 &rx_ring->fbr[0]->real_physaddr,
2393 &rx_ring->fbr[0]->offset, 0x0FFF);
2394
2395 rx_ring->fbr[0]->ring_virtaddr =
2396 (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
2397 rx_ring->fbr[0]->offset);
2398
2399#ifdef USE_FBR0
2400
2401 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2402 0xfff;
2403 rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2404 bufsize,
2405 &rx_ring->fbr[1]->ring_physaddr,
2406 GFP_KERNEL);
2407 if (!rx_ring->fbr[1]->ring_virtaddr) {
2408 dev_err(&adapter->pdev->dev,
2409 "Cannot alloc memory for Free Buffer Ring 0\n");
2410 return -ENOMEM;
2411 }
2412
2413
2414
2415
2416
2417
2418
2419
2420 rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
2421
2422
2423 et131x_align_allocated_memory(adapter,
2424 &rx_ring->fbr[1]->real_physaddr,
2425 &rx_ring->fbr[1]->offset, 0x0FFF);
2426
2427 rx_ring->fbr[1]->ring_virtaddr =
2428 (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
2429 rx_ring->fbr[1]->offset);
2430#endif
2431 for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2432 u64 fbr1_tmp_physaddr;
2433 u64 fbr1_offset;
2434 u32 fbr1_align;
2435
2436
2437
2438
2439
2440
2441
2442
2443 if (rx_ring->fbr[0]->buffsize > 4096)
2444 fbr1_align = 4096;
2445 else
2446 fbr1_align = rx_ring->fbr[0]->buffsize;
2447
2448 fbr_chunksize =
2449 (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2450 rx_ring->fbr[0]->mem_virtaddrs[i] =
2451 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2452 &rx_ring->fbr[0]->mem_physaddrs[i],
2453 GFP_KERNEL);
2454
2455 if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2456 dev_err(&adapter->pdev->dev,
2457 "Could not alloc memory\n");
2458 return -ENOMEM;
2459 }
2460
2461
2462 fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2463
2464 et131x_align_allocated_memory(adapter,
2465 &fbr1_tmp_physaddr,
2466 &fbr1_offset, (fbr1_align - 1));
2467
2468 for (j = 0; j < FBR_CHUNKS; j++) {
2469 u32 index = (i * FBR_CHUNKS) + j;
2470
2471
2472
2473
2474 rx_ring->fbr[0]->virt[index] =
2475 (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2476 (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2477
2478
2479
2480
2481 rx_ring->fbr[0]->bus_high[index] =
2482 (u32) (fbr1_tmp_physaddr >> 32);
2483 rx_ring->fbr[0]->bus_low[index] =
2484 (u32) fbr1_tmp_physaddr;
2485
2486 fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2487
2488 rx_ring->fbr[0]->buffer1[index] =
2489 rx_ring->fbr[0]->virt[index];
2490 rx_ring->fbr[0]->buffer2[index] =
2491 rx_ring->fbr[0]->virt[index] - 4;
2492 }
2493 }
2494
2495#ifdef USE_FBR0
2496
2497 for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2498 u64 fbr0_tmp_physaddr;
2499 u64 fbr0_offset;
2500
2501 fbr_chunksize =
2502 ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2503 rx_ring->fbr[1]->mem_virtaddrs[i] =
2504 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2505 &rx_ring->fbr[1]->mem_physaddrs[i],
2506 GFP_KERNEL);
2507
2508 if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2509 dev_err(&adapter->pdev->dev,
2510 "Could not alloc memory\n");
2511 return -ENOMEM;
2512 }
2513
2514
2515 fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2516
2517 et131x_align_allocated_memory(adapter,
2518 &fbr0_tmp_physaddr,
2519 &fbr0_offset,
2520 rx_ring->fbr[1]->buffsize - 1);
2521
2522 for (j = 0; j < FBR_CHUNKS; j++) {
2523 u32 index = (i * FBR_CHUNKS) + j;
2524
2525 rx_ring->fbr[1]->virt[index] =
2526 (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2527 (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2528
2529 rx_ring->fbr[1]->bus_high[index] =
2530 (u32) (fbr0_tmp_physaddr >> 32);
2531 rx_ring->fbr[1]->bus_low[index] =
2532 (u32) fbr0_tmp_physaddr;
2533
2534 fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2535
2536 rx_ring->fbr[1]->buffer1[index] =
2537 rx_ring->fbr[1]->virt[index];
2538 rx_ring->fbr[1]->buffer2[index] =
2539 rx_ring->fbr[1]->virt[index] - 4;
2540 }
2541 }
2542#endif
2543
2544
2545 pktstat_ringsize =
2546 sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2547
2548 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2549 pktstat_ringsize,
2550 &rx_ring->ps_ring_physaddr,
2551 GFP_KERNEL);
2552
2553 if (!rx_ring->ps_ring_virtaddr) {
2554 dev_err(&adapter->pdev->dev,
2555 "Cannot alloc memory for Packet Status Ring\n");
2556 return -ENOMEM;
2557 }
2558 pr_info("Packet Status Ring %llx\n",
2559 (unsigned long long) rx_ring->ps_ring_physaddr);
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2570 sizeof(struct rx_status_block),
2571 &rx_ring->rx_status_bus,
2572 GFP_KERNEL);
2573 if (!rx_ring->rx_status_block) {
2574 dev_err(&adapter->pdev->dev,
2575 "Cannot alloc memory for Status Block\n");
2576 return -ENOMEM;
2577 }
2578 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2579 pr_info("PRS %llx\n", (unsigned long long)rx_ring->rx_status_bus);
2580
2581
2582
2583
2584
2585
2586
2587 rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
2588 sizeof(struct rfd),
2589 0,
2590 SLAB_CACHE_DMA |
2591 SLAB_HWCACHE_ALIGN,
2592 NULL);
2593
2594 adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2595
2596
2597
2598
2599 INIT_LIST_HEAD(&rx_ring->recv_list);
2600 return 0;
2601}
2602
2603
2604
2605
2606
2607static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2608{
2609 u32 index;
2610 u32 bufsize;
2611 u32 pktstat_ringsize;
2612 struct rfd *rfd;
2613 struct rx_ring *rx_ring;
2614
2615
2616 rx_ring = &adapter->rx_ring;
2617
2618
2619 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2620
2621 while (!list_empty(&rx_ring->recv_list)) {
2622 rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2623 struct rfd, list_node);
2624
2625 list_del(&rfd->list_node);
2626 rfd->skb = NULL;
2627 kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2628 }
2629
2630
2631 if (rx_ring->fbr[0]->ring_virtaddr) {
2632
2633 for (index = 0; index <
2634 (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2635 if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2636 u32 fbr1_align;
2637
2638 if (rx_ring->fbr[0]->buffsize > 4096)
2639 fbr1_align = 4096;
2640 else
2641 fbr1_align = rx_ring->fbr[0]->buffsize;
2642
2643 bufsize =
2644 (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2645 fbr1_align - 1;
2646
2647 dma_free_coherent(&adapter->pdev->dev,
2648 bufsize,
2649 rx_ring->fbr[0]->mem_virtaddrs[index],
2650 rx_ring->fbr[0]->mem_physaddrs[index]);
2651
2652 rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2653 }
2654 }
2655
2656
2657 rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2658 rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2659
2660 bufsize =
2661 (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2662 0xfff;
2663
2664 dma_free_coherent(&adapter->pdev->dev, bufsize,
2665 rx_ring->fbr[0]->ring_virtaddr,
2666 rx_ring->fbr[0]->ring_physaddr);
2667
2668 rx_ring->fbr[0]->ring_virtaddr = NULL;
2669 }
2670
2671#ifdef USE_FBR0
2672
2673 if (rx_ring->fbr[1]->ring_virtaddr) {
2674
2675 for (index = 0; index <
2676 (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2677 if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2678 bufsize =
2679 (rx_ring->fbr[1]->buffsize *
2680 (FBR_CHUNKS + 1)) - 1;
2681
2682 dma_free_coherent(&adapter->pdev->dev,
2683 bufsize,
2684 rx_ring->fbr[1]->mem_virtaddrs[index],
2685 rx_ring->fbr[1]->mem_physaddrs[index]);
2686
2687 rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2688 }
2689 }
2690
2691
2692 rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2693 rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2694
2695 bufsize =
2696 (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2697 0xfff;
2698
2699 dma_free_coherent(&adapter->pdev->dev,
2700 bufsize,
2701 rx_ring->fbr[1]->ring_virtaddr,
2702 rx_ring->fbr[1]->ring_physaddr);
2703
2704 rx_ring->fbr[1]->ring_virtaddr = NULL;
2705 }
2706#endif
2707
2708
2709 if (rx_ring->ps_ring_virtaddr) {
2710 pktstat_ringsize =
2711 sizeof(struct pkt_stat_desc) *
2712 adapter->rx_ring.psr_num_entries;
2713
2714 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2715 rx_ring->ps_ring_virtaddr,
2716 rx_ring->ps_ring_physaddr);
2717
2718 rx_ring->ps_ring_virtaddr = NULL;
2719 }
2720
2721
2722 if (rx_ring->rx_status_block) {
2723 dma_free_coherent(&adapter->pdev->dev,
2724 sizeof(struct rx_status_block),
2725 rx_ring->rx_status_block, rx_ring->rx_status_bus);
2726 rx_ring->rx_status_block = NULL;
2727 }
2728
2729
2730 if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2731 kmem_cache_destroy(rx_ring->recv_lookaside);
2732 adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2733 }
2734
2735
2736#ifdef USE_FBR0
2737 kfree(rx_ring->fbr[1]);
2738#endif
2739
2740 kfree(rx_ring->fbr[0]);
2741
2742
2743 rx_ring->num_ready_recv = 0;
2744}
2745
2746
2747
2748
2749
2750
2751
2752static int et131x_init_recv(struct et131x_adapter *adapter)
2753{
2754 int status = -ENOMEM;
2755 struct rfd *rfd = NULL;
2756 u32 rfdct;
2757 u32 numrfd = 0;
2758 struct rx_ring *rx_ring;
2759
2760
2761 rx_ring = &adapter->rx_ring;
2762
2763
2764 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2765 rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2766 GFP_ATOMIC | GFP_DMA);
2767
2768 if (!rfd) {
2769 dev_err(&adapter->pdev->dev,
2770 "Couldn't alloc RFD out of kmem_cache\n");
2771 status = -ENOMEM;
2772 continue;
2773 }
2774
2775 rfd->skb = NULL;
2776
2777
2778 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2779
2780
2781 rx_ring->num_ready_recv++;
2782 numrfd++;
2783 }
2784
2785 if (numrfd > NIC_MIN_NUM_RFD)
2786 status = 0;
2787
2788 rx_ring->num_rfd = numrfd;
2789
2790 if (status != 0) {
2791 kmem_cache_free(rx_ring->recv_lookaside, rfd);
2792 dev_err(&adapter->pdev->dev,
2793 "Allocation problems in et131x_init_recv\n");
2794 }
2795 return status;
2796}
2797
2798
2799
2800
2801
2802static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2803{
2804 struct phy_device *phydev = adapter->phydev;
2805
2806 if (!phydev)
2807 return;
2808
2809
2810
2811
2812 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2813 writel(0, &adapter->regs->rxdma.max_pkt_time);
2814 writel(1, &adapter->regs->rxdma.num_pkt_done);
2815 }
2816}
2817
2818
2819
2820
2821
2822
2823static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2824{
2825 struct rx_ring *rx_local = &adapter->rx_ring;
2826 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2827 u16 buff_index = rfd->bufferindex;
2828 u8 ring_index = rfd->ringindex;
2829 unsigned long flags;
2830
2831
2832
2833
2834 if (
2835#ifdef USE_FBR0
2836 (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2837#endif
2838 (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2839 spin_lock_irqsave(&adapter->fbr_lock, flags);
2840
2841 if (ring_index == 1) {
2842 struct fbr_desc *next = (struct fbr_desc *)
2843 (rx_local->fbr[0]->ring_virtaddr) +
2844 INDEX10(rx_local->fbr[0]->local_full);
2845
2846
2847
2848
2849
2850 next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2851 next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2852 next->word2 = buff_index;
2853
2854 writel(bump_free_buff_ring(
2855 &rx_local->fbr[0]->local_full,
2856 rx_local->fbr[0]->num_entries - 1),
2857 &rx_dma->fbr1_full_offset);
2858 }
2859#ifdef USE_FBR0
2860 else {
2861 struct fbr_desc *next = (struct fbr_desc *)
2862 rx_local->fbr[1]->ring_virtaddr +
2863 INDEX10(rx_local->fbr[1]->local_full);
2864
2865
2866
2867
2868
2869 next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2870 next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2871 next->word2 = buff_index;
2872
2873 writel(bump_free_buff_ring(
2874 &rx_local->fbr[1]->local_full,
2875 rx_local->fbr[1]->num_entries - 1),
2876 &rx_dma->fbr0_full_offset);
2877 }
2878#endif
2879 spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2880 } else {
2881 dev_err(&adapter->pdev->dev,
2882 "%s illegal Buffer Index returned\n", __func__);
2883 }
2884
2885
2886
2887
2888 spin_lock_irqsave(&adapter->rcv_lock, flags);
2889 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2890 rx_local->num_ready_recv++;
2891 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2892
2893 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2894}
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
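/**
 * nic_rx_pkts - Check the hardware for available packets
 * @adapter: pointer to our adapter
 *
 * Walks the packet status ring, copies a received frame into a freshly
 * allocated skb, hands it to the network stack and recycles the RFD.
 *
 * Returns the RFD that was processed, or NULL if nothing was available.
 */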
2907static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2908{
2909 struct rx_ring *rx_local = &adapter->rx_ring;
2910 struct rx_status_block *status;
2911 struct pkt_stat_desc *psr;
2912 struct rfd *rfd;
2913 u32 i;
2914 u8 *buf;
2915 unsigned long flags;
2916 struct list_head *element;
2917 u8 ring_index;
2918 u16 buff_index;
2919 u32 len;
2920 u32 word0;
2921 u32 word1;
2922
2923
2924
2925
2926
2927 status = rx_local->rx_status_block;
2928 word1 = status->word1 >> 16;
2929
2930
	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		return NULL;
2934
2935
2936 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2937 (rx_local->local_psr_full & 0xFFF);
2938
2939
2940
2941
2942
2943 len = psr->word1 & 0xFFFF;
2944 ring_index = (psr->word1 >> 26) & 0x03;
2945 buff_index = (psr->word1 >> 16) & 0x3FF;
2946 word0 = psr->word0;
2947
2948
2949
2950 add_12bit(&rx_local->local_psr_full, 1);
	if ((rx_local->local_psr_full & 0xFFF) >
	    rx_local->psr_num_entries - 1) {
2953
2954 rx_local->local_psr_full &= ~0xFFF;
2955 rx_local->local_psr_full ^= 0x1000;
2956 }
2957
2958 writel(rx_local->local_psr_full,
2959 &adapter->regs->rxdma.psr_full_offset);
2960
2961#ifndef USE_FBR0
2962 if (ring_index != 1)
2963 return NULL;
2964#endif
2965
2966#ifdef USE_FBR0
2967 if (ring_index > 1 ||
2968 (ring_index == 0 &&
2969 buff_index > rx_local->fbr[1]->num_entries - 1) ||
2970 (ring_index == 1 &&
2971 buff_index > rx_local->fbr[0]->num_entries - 1)) {
2972#else
2973 if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1) {
2974#endif
2975
		dev_err(&adapter->pdev->dev,
			"NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF,
			len, buff_index);
2981 return NULL;
2982 }
2983
2984
2985 spin_lock_irqsave(&adapter->rcv_lock, flags);
2986
2987 rfd = NULL;
2988 element = rx_local->recv_list.next;
2989 rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2990
2991 if (rfd == NULL) {
2992 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2993 return NULL;
2994 }
2995
2996 list_del(&rfd->list_node);
2997 rx_local->num_ready_recv--;
2998
2999 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
3000
3001 rfd->bufferindex = buff_index;
3002 rfd->ringindex = ring_index;
3003
3004
3005
3006
3007
3008
3009 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
3010 adapter->stats.rx_other_errs++;
3011 len = 0;
3012 }
3013
3014 if (len) {
3015
3016 if ((word0 & ALCATEL_MULTICAST_PKT) &&
3017 !(word0 & ALCATEL_BROADCAST_PKT)) {
3018
3019
3020
3021
3022
3023
3024
3025 if ((adapter->packet_filter &
3026 ET131X_PACKET_TYPE_MULTICAST)
3027 && !(adapter->packet_filter &
3028 ET131X_PACKET_TYPE_PROMISCUOUS)
3029 && !(adapter->packet_filter &
3030 ET131X_PACKET_TYPE_ALL_MULTICAST)) {
3031
3032
3033
3034
3035 buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
3036 virt[buff_index];
3037
3038
3039
3040
3041
3042 for (i = 0; i < adapter->multicast_addr_count;
3043 i++) {
3044 if (buf[0] ==
3045 adapter->multicast_list[i][0]
3046 && buf[1] ==
3047 adapter->multicast_list[i][1]
3048 && buf[2] ==
3049 adapter->multicast_list[i][2]
3050 && buf[3] ==
3051 adapter->multicast_list[i][3]
3052 && buf[4] ==
3053 adapter->multicast_list[i][4]
3054 && buf[5] ==
3055 adapter->multicast_list[i][5]) {
3056 break;
3057 }
3058 }
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068 if (i == adapter->multicast_addr_count)
3069 len = 0;
3070 }
3071
3072 if (len > 0)
3073 adapter->stats.multicast_pkts_rcvd++;
3074 } else if (word0 & ALCATEL_BROADCAST_PKT)
3075 adapter->stats.broadcast_pkts_rcvd++;
		else
			adapter->stats.unicast_pkts_rcvd++;
3083 }
3084
3085 if (len > 0) {
3086 struct sk_buff *skb = NULL;
3087
3088
3089 rfd->len = len;
3090
3091 skb = dev_alloc_skb(rfd->len + 2);
3092 if (!skb) {
3093 dev_err(&adapter->pdev->dev,
3094 "Couldn't alloc an SKB for Rx\n");
3095 return NULL;
3096 }
3097
3098 adapter->net_stats.rx_bytes += rfd->len;
3099
3100
3101
3102
3103
3104 memcpy(skb_put(skb, rfd->len),
3105 rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
3106 rfd->len);
3107
3108 skb->dev = adapter->netdev;
3109 skb->protocol = eth_type_trans(skb, adapter->netdev);
3110 skb->ip_summed = CHECKSUM_NONE;
3111
3112 netif_rx_ni(skb);
3113 } else {
3114 rfd->len = 0;
3115 }
3116
3117 nic_return_rfd(adapter, rfd);
3118 return rfd;
3119}
3120
3121
3122
3123
3124
3125
3126
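/**
 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
 * @adapter: pointer to our adapter
 *
 * Processes up to NUM_PACKETS_HANDLED packets per invocation and re-arms
 * the watchdog timer if receives are left unfinished.
 */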
3127static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
3128{
3129 struct rfd *rfd = NULL;
3130 u32 count = 0;
3131 bool done = true;
3132
3133
3134 while (count < NUM_PACKETS_HANDLED) {
3135 if (list_empty(&adapter->rx_ring.recv_list)) {
3136 WARN_ON(adapter->rx_ring.num_ready_recv != 0);
3137 done = false;
3138 break;
3139 }
3140
3141 rfd = nic_rx_pkts(adapter);
3142
3143 if (rfd == NULL)
3144 break;
3145
3146
3147
3148
3149
3150
3151 if (!adapter->packet_filter ||
3152 !netif_carrier_ok(adapter->netdev) ||
3153 rfd->len == 0)
3154 continue;
3155
3156
3157 adapter->net_stats.rx_packets++;
3158
3159
3160 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
			dev_warn(&adapter->pdev->dev,
				 "RFDs are running out\n");
3163 }
3164 count++;
3165 }
3166
3167 if (count == NUM_PACKETS_HANDLED || !done) {
3168 adapter->rx_ring.unfinished_receives = true;
3169 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3170 &adapter->regs->global.watchdog_timer);
	} else
		adapter->rx_ring.unfinished_receives = false;
3174}
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
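/**
 * et131x_tx_dma_memory_alloc - Allocate Tx descriptor and status memory
 * @adapter: pointer to our private adapter structure
 *
 * Allocates the TCB array, the Tx descriptor ring and the Tx status block
 * as DMA-coherent memory shared between the CPU and the device.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */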
3188static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
3189{
3190 int desc_size = 0;
3191 struct tx_ring *tx_ring = &adapter->tx_ring;
3192
3193
3194 adapter->tx_ring.tcb_ring =
3195 kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
3196 if (!adapter->tx_ring.tcb_ring) {
3197 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
3198 return -ENOMEM;
3199 }
3200
3201
3202
3203
3204 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
3205 tx_ring->tx_desc_ring =
3206 (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
3207 desc_size,
3208 &tx_ring->tx_desc_ring_pa,
3209 GFP_KERNEL);
3210 if (!adapter->tx_ring.tx_desc_ring) {
3211 dev_err(&adapter->pdev->dev,
3212 "Cannot alloc memory for Tx Ring\n");
3213 return -ENOMEM;
3214 }
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3225 sizeof(u32),
3226 &tx_ring->tx_status_pa,
3227 GFP_KERNEL);
	/* dma_alloc_coherent() returns NULL on failure, so test the returned
	 * virtual address rather than the DMA handle.
	 */
	if (!adapter->tx_ring.tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
3233 return 0;
3234}
3235
3236
3237
3238
3239
3240
3241
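/**
 * et131x_tx_dma_memory_free - Free all Tx memory allocated by this module
 * @adapter: pointer to our private adapter structure
 */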
3242static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3243{
3244 int desc_size = 0;
3245
3246 if (adapter->tx_ring.tx_desc_ring) {
3247
3248 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3249 + 4096 - 1;
3250 dma_free_coherent(&adapter->pdev->dev,
3251 desc_size,
3252 adapter->tx_ring.tx_desc_ring,
3253 adapter->tx_ring.tx_desc_ring_pa);
3254 adapter->tx_ring.tx_desc_ring = NULL;
3255 }
3256
3257
3258 if (adapter->tx_ring.tx_status) {
3259 dma_free_coherent(&adapter->pdev->dev,
3260 sizeof(u32),
3261 adapter->tx_ring.tx_status,
3262 adapter->tx_ring.tx_status_pa);
3263
3264 adapter->tx_ring.tx_status = NULL;
3265 }
3266
3267 kfree(adapter->tx_ring.tcb_ring);
3268}
3269
3270
3271
3272
3273
3274
3275
3276
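/**
 * nic_send_packet - NIC-specific send handler
 * @adapter: pointer to our adapter
 * @tcb: pointer to the struct tcb describing the packet
 *
 * Builds the Tx descriptors for the skb, copies them into the descriptor
 * ring and kicks the TXDMA engine.
 *
 * Returns 0 on success, -EIO if the skb has too many fragments.
 */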
3277static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3278{
3279 u32 i;
3280 struct tx_desc desc[24];
3281 u32 frag = 0;
3282 u32 thiscopy, remainder;
3283 struct sk_buff *skb = tcb->skb;
3284 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3285 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3286 unsigned long flags;
3287 struct phy_device *phydev = adapter->phydev;
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297 if (nr_frags > 23)
3298 return -EIO;
3299
3300 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
3301
3302 for (i = 0; i < nr_frags; i++) {
3303
3304
3305
3306 if (i == 0) {
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316 if ((skb->len - skb->data_len) <= 1514) {
3317 desc[frag].addr_hi = 0;
3318
3319
3320 desc[frag].len_vlan =
3321 skb->len - skb->data_len;
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331 desc[frag++].addr_lo =
3332 dma_map_single(&adapter->pdev->dev,
3333 skb->data,
3334 skb->len -
3335 skb->data_len,
3336 DMA_TO_DEVICE);
3337 } else {
3338 desc[frag].addr_hi = 0;
3339 desc[frag].len_vlan =
3340 (skb->len - skb->data_len) / 2;
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350 desc[frag++].addr_lo =
3351 dma_map_single(&adapter->pdev->dev,
3352 skb->data,
3353 ((skb->len -
3354 skb->data_len) / 2),
3355 DMA_TO_DEVICE);
3356 desc[frag].addr_hi = 0;
3357
3358 desc[frag].len_vlan =
3359 (skb->len - skb->data_len) / 2;
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369 desc[frag++].addr_lo =
3370 dma_map_single(&adapter->pdev->dev,
3371 skb->data +
3372 ((skb->len -
3373 skb->data_len) / 2),
3374 ((skb->len -
3375 skb->data_len) / 2),
3376 DMA_TO_DEVICE);
3377 }
3378 } else {
3379 desc[frag].addr_hi = 0;
3380 desc[frag].len_vlan =
3381 frags[i - 1].size;
3382
3383
3384
3385
3386
3387
3388
3389 desc[frag++].addr_lo = skb_frag_dma_map(
3390 &adapter->pdev->dev,
3391 &frags[i - 1],
3392 0,
3393 frags[i - 1].size,
3394 DMA_TO_DEVICE);
3395 }
3396 }
3397
3398 if (phydev && phydev->speed == SPEED_1000) {
3399 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3400
3401 desc[frag - 1].flags = 0x5;
3402 adapter->tx_ring.since_irq = 0;
3403 } else {
3404 desc[frag - 1].flags = 0x1;
3405 }
3406 } else
3407 desc[frag - 1].flags = 0x5;
3408
3409 desc[0].flags |= 2;
3410
3411 tcb->index_start = adapter->tx_ring.send_idx;
3412 tcb->stale = 0;
3413
3414 spin_lock_irqsave(&adapter->send_hw_lock, flags);
3415
3416 thiscopy = NUM_DESC_PER_RING_TX -
3417 INDEX10(adapter->tx_ring.send_idx);
3418
3419 if (thiscopy >= frag) {
3420 remainder = 0;
3421 thiscopy = frag;
3422 } else {
3423 remainder = frag - thiscopy;
3424 }
3425
3426 memcpy(adapter->tx_ring.tx_desc_ring +
3427 INDEX10(adapter->tx_ring.send_idx), desc,
3428 sizeof(struct tx_desc) * thiscopy);
3429
3430 add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3431
3432 if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3433 INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3434 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3435 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3436 }
3437
3438 if (remainder) {
3439 memcpy(adapter->tx_ring.tx_desc_ring,
3440 desc + thiscopy,
3441 sizeof(struct tx_desc) * remainder);
3442
3443 add_10bit(&adapter->tx_ring.send_idx, remainder);
3444 }
3445
3446 if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3447 if (adapter->tx_ring.send_idx)
3448 tcb->index = NUM_DESC_PER_RING_TX - 1;
3449 else
3450 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
3451 } else
3452 tcb->index = adapter->tx_ring.send_idx - 1;
3453
3454 spin_lock(&adapter->tcb_send_qlock);
3455
3456 if (adapter->tx_ring.send_tail)
3457 adapter->tx_ring.send_tail->next = tcb;
3458 else
3459 adapter->tx_ring.send_head = tcb;
3460
3461 adapter->tx_ring.send_tail = tcb;
3462
3463 WARN_ON(tcb->next != NULL);
3464
3465 adapter->tx_ring.used++;
3466
3467 spin_unlock(&adapter->tcb_send_qlock);
3468
3469
3470 writel(adapter->tx_ring.send_idx,
3471 &adapter->regs->txdma.service_request);
3472
3473
3474
3475
3476 if (phydev && phydev->speed == SPEED_1000) {
3477 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3478 &adapter->regs->global.watchdog_timer);
3479 }
3480 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3481
3482 return 0;
3483}
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
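/**
 * send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @adapter: a pointer to the device's private adapter structure
 *
 * Claims a TCB from the ready queue, notes broadcast/multicast destinations
 * and passes the packet on to nic_send_packet(). On failure the TCB is
 * returned to the ready queue.
 */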
3494static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3495{
3496 int status;
3497 struct tcb *tcb = NULL;
3498 u16 *shbufva;
3499 unsigned long flags;
3500
3501
3502 if (skb->len < ETH_HLEN)
3503 return -EIO;
3504
3505
3506 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3507
3508 tcb = adapter->tx_ring.tcb_qhead;
3509
3510 if (tcb == NULL) {
3511 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3512 return -ENOMEM;
3513 }
3514
3515 adapter->tx_ring.tcb_qhead = tcb->next;
3516
3517 if (adapter->tx_ring.tcb_qhead == NULL)
3518 adapter->tx_ring.tcb_qtail = NULL;
3519
3520 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3521
3522 tcb->skb = skb;
3523
3524 if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3525 shbufva = (u16 *) skb->data;
3526
3527 if ((shbufva[0] == 0xffff) &&
3528 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3529 tcb->flags |= fMP_DEST_BROAD;
3530 } else if ((shbufva[0] & 0x3) == 0x0001) {
3531 tcb->flags |= fMP_DEST_MULTI;
3532 }
3533 }
3534
3535 tcb->next = NULL;
3536
3537
3538 status = nic_send_packet(adapter, tcb);
3539
3540 if (status != 0) {
3541 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3542
3543 if (adapter->tx_ring.tcb_qtail)
3544 adapter->tx_ring.tcb_qtail->next = tcb;
		else
			adapter->tx_ring.tcb_qhead = tcb;
3548
3549 adapter->tx_ring.tcb_qtail = tcb;
3550 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3551 return status;
3552 }
3553 WARN_ON(adapter->tx_ring.used > NUM_TCB);
3554 return 0;
3555}
3556
3557
3558
3559
3560
3561
3562
3563
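/**
 * et131x_send_packets - Queue a packet for transmission
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Returns 0 in almost all cases; a non-zero value only on hard failure,
 * for example when all TCBs are in use.
 */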
3564static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3565{
3566 int status = 0;
3567 struct et131x_adapter *adapter = netdev_priv(netdev);
3568
3569
3570
3571
3572
3573
3574
3575
3576 if (adapter->tx_ring.used >= NUM_TCB) {
3577
3578
3579
3580
3581 status = -ENOMEM;
3582 } else {
3583
3584
3585
3586 if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3587 !netif_carrier_ok(netdev)) {
3588 dev_kfree_skb_any(skb);
3589 skb = NULL;
3590
3591 adapter->net_stats.tx_dropped++;
3592 } else {
3593 status = send_packet(skb, adapter);
3594 if (status != 0 && status != -ENOMEM) {
3595
3596
3597
3598 dev_kfree_skb_any(skb);
3599 skb = NULL;
3600 adapter->net_stats.tx_dropped++;
3601 }
3602 }
3603 }
3604 return status;
3605}
3606
3607
3608
3609
3610
3611
3612
3613
3614
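/**
 * free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to the struct tcb being recycled
 *
 * Unmaps the descriptors used by the completed packet, frees the skb and
 * returns the TCB to the ready queue.
 */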
3615static inline void free_send_packet(struct et131x_adapter *adapter,
3616 struct tcb *tcb)
3617{
3618 unsigned long flags;
3619 struct tx_desc *desc = NULL;
3620 struct net_device_stats *stats = &adapter->net_stats;
3621
3622 if (tcb->flags & fMP_DEST_BROAD)
3623 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3624 else if (tcb->flags & fMP_DEST_MULTI)
3625 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3626 else
3627 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3628
3629 if (tcb->skb) {
3630 stats->tx_bytes += tcb->skb->len;
3631
3632
3633
3634
3635
3636 do {
3637 desc = (struct tx_desc *)
3638 (adapter->tx_ring.tx_desc_ring +
3639 INDEX10(tcb->index_start));
3640
3641 dma_unmap_single(&adapter->pdev->dev,
3642 desc->addr_lo,
3643 desc->len_vlan, DMA_TO_DEVICE);
3644
3645 add_10bit(&tcb->index_start, 1);
3646 if (INDEX10(tcb->index_start) >=
3647 NUM_DESC_PER_RING_TX) {
3648 tcb->index_start &= ~ET_DMA10_MASK;
3649 tcb->index_start ^= ET_DMA10_WRAP;
3650 }
3651 } while (desc != (adapter->tx_ring.tx_desc_ring +
3652 INDEX10(tcb->index)));
3653
3654 dev_kfree_skb_any(tcb->skb);
3655 }
3656
3657 memset(tcb, 0, sizeof(struct tcb));
3658
3659
3660 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3661
3662 adapter->net_stats.tx_packets++;
3663
3664 if (adapter->tx_ring.tcb_qtail)
3665 adapter->tx_ring.tcb_qtail->next = tcb;
	else
		adapter->tx_ring.tcb_qhead = tcb;
3669
3670 adapter->tx_ring.tcb_qtail = tcb;
3671
3672 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3673 WARN_ON(adapter->tx_ring.used < 0);
3674}
3675
3676
3677
3678
3679
3680
3681
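/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @adapter: pointer to our adapter
 */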
3682static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3683{
3684 struct tcb *tcb;
3685 unsigned long flags;
3686 u32 freed = 0;
3687
3688
3689 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3690
3691 tcb = adapter->tx_ring.send_head;
3692
3693 while (tcb != NULL && freed < NUM_TCB) {
3694 struct tcb *next = tcb->next;
3695
3696 adapter->tx_ring.send_head = next;
3697
3698 if (next == NULL)
3699 adapter->tx_ring.send_tail = NULL;
3700
3701 adapter->tx_ring.used--;
3702
3703 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3704
3705 freed++;
3706 free_send_packet(adapter, tcb);
3707
3708 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3709
3710 tcb = adapter->tx_ring.send_head;
3711 }
3712
3713 WARN_ON(freed == NUM_TCB);
3714
3715 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3716
3717 adapter->tx_ring.used = 0;
3718}
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
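/**
 * et131x_handle_send_interrupt - Interrupt handler for Tx processing
 * @adapter: pointer to our adapter
 *
 * Re-claims the send resources by completing every TCB that the hardware
 * has serviced, then wakes the transmit queue if enough TCBs are free.
 */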
3729static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3730{
3731 unsigned long flags;
3732 u32 serviced;
3733 struct tcb *tcb;
3734 u32 index;
3735
3736 serviced = readl(&adapter->regs->txdma.new_service_complete);
3737 index = INDEX10(serviced);
3738
3739
3740
3741
3742 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3743
3744 tcb = adapter->tx_ring.send_head;
3745
3746 while (tcb &&
3747 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3748 index < INDEX10(tcb->index)) {
3749 adapter->tx_ring.used--;
3750 adapter->tx_ring.send_head = tcb->next;
3751 if (tcb->next == NULL)
3752 adapter->tx_ring.send_tail = NULL;
3753
3754 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3755 free_send_packet(adapter, tcb);
3756 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3757
3758
3759 tcb = adapter->tx_ring.send_head;
3760 }
3761 while (tcb &&
3762 !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3763 && index > (tcb->index & ET_DMA10_MASK)) {
3764 adapter->tx_ring.used--;
3765 adapter->tx_ring.send_head = tcb->next;
3766 if (tcb->next == NULL)
3767 adapter->tx_ring.send_tail = NULL;
3768
3769 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3770 free_send_packet(adapter, tcb);
3771 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3772
3773
3774 tcb = adapter->tx_ring.send_head;
3775 }
3776
3777
3778 if (adapter->tx_ring.used <= NUM_TCB / 3)
3779 netif_wake_queue(adapter->netdev);
3780
3781 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3782}
3783
3784static int et131x_get_settings(struct net_device *netdev,
3785 struct ethtool_cmd *cmd)
3786{
3787 struct et131x_adapter *adapter = netdev_priv(netdev);
3788
3789 return phy_ethtool_gset(adapter->phydev, cmd);
3790}
3791
3792static int et131x_set_settings(struct net_device *netdev,
3793 struct ethtool_cmd *cmd)
3794{
3795 struct et131x_adapter *adapter = netdev_priv(netdev);
3796
3797 return phy_ethtool_sset(adapter->phydev, cmd);
3798}
3799
3800static int et131x_get_regs_len(struct net_device *netdev)
3801{
3802#define ET131X_REGS_LEN 256
3803 return ET131X_REGS_LEN * sizeof(u32);
3804}
3805
3806static void et131x_get_regs(struct net_device *netdev,
3807 struct ethtool_regs *regs, void *regs_data)
3808{
3809 struct et131x_adapter *adapter = netdev_priv(netdev);
3810 struct address_map __iomem *aregs = adapter->regs;
3811 u32 *regs_buff = regs_data;
3812 u32 num = 0;
3813
3814 memset(regs_data, 0, et131x_get_regs_len(netdev));
3815
3816 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3817 adapter->pdev->device;
3818
3819
	et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);

	et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);

	et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3852
3853
3854 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3855 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3856 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3857 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3858 regs_buff[num++] = readl(&aregs->global.pm_csr);
3859 regs_buff[num++] = adapter->stats.interrupt_status;
3860 regs_buff[num++] = readl(&aregs->global.int_mask);
3861 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3862 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3863 regs_buff[num++] = readl(&aregs->global.sw_reset);
3864 regs_buff[num++] = readl(&aregs->global.slv_timer);
3865 regs_buff[num++] = readl(&aregs->global.msi_config);
3866 regs_buff[num++] = readl(&aregs->global.loopback);
3867 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3868
3869
3870 regs_buff[num++] = readl(&aregs->txdma.csr);
3871 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3872 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3873 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3874 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3875 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3876 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3877 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3878 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3879 regs_buff[num++] = readl(&aregs->txdma.service_request);
3880 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3881 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3882 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3883 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3884 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3885 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3886 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3887 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3888 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3889 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3890 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3891 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3892 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3893 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3894 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3895 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3896
3897
3898 regs_buff[num++] = readl(&aregs->rxdma.csr);
3899 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3900 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3901 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3902 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3903 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3904 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3905 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3906 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3907 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3908 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3909 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3910 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3911 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3912 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3913 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3914 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3915 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3916 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3917 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3918 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3919 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3920 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3921 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3922 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3923 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3924 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3925 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3926 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3927}
3928
3929#define ET131X_DRVINFO_LEN 32
3930static void et131x_get_drvinfo(struct net_device *netdev,
3931 struct ethtool_drvinfo *info)
3932{
3933 struct et131x_adapter *adapter = netdev_priv(netdev);
3934
3935 strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3936 strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3937 strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3938}
3939
3940static struct ethtool_ops et131x_ethtool_ops = {
3941 .get_settings = et131x_get_settings,
3942 .set_settings = et131x_set_settings,
3943 .get_drvinfo = et131x_get_drvinfo,
3944 .get_regs_len = et131x_get_regs_len,
3945 .get_regs = et131x_get_regs,
3946 .get_link = ethtool_op_get_link,
3947};
3948
3949
3950
3951
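/**
 * et131x_hwaddr_init - Set up the MAC address on the ET1310
 * @adapter: pointer to our private adapter structure
 *
 * If no address was read from EEPROM, randomise the last octet of the
 * default address; otherwise use the address found in the ROM.
 */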
3952static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3953{
3954
3955
3956
3957
3958 if (adapter->rom_addr[0] == 0x00 &&
3959 adapter->rom_addr[1] == 0x00 &&
3960 adapter->rom_addr[2] == 0x00 &&
3961 adapter->rom_addr[3] == 0x00 &&
3962 adapter->rom_addr[4] == 0x00 &&
3963 adapter->rom_addr[5] == 0x00) {
3964
3965
3966
3967
3968
3969 get_random_bytes(&adapter->addr[5], 1);
3970
3971
3972
3973
3974
3975 memcpy(adapter->rom_addr,
3976 adapter->addr, ETH_ALEN);
3977 } else {
3978
3979
3980
3981
3982 memcpy(adapter->addr,
3983 adapter->rom_addr, ETH_ALEN);
3984 }
3985}
3986
3987
3988
3989
3990
3991
3992
3993
3994
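/**
 * et131x_pci_init - Initial PCI setup
 * @adapter: pointer to our private adapter structure
 * @pdev: our PCI device
 *
 * Performs the initial setup of PCI registers and, if possible, reads the
 * MAC address. At this point the I/O registers have yet to be mapped.
 */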
3995static int et131x_pci_init(struct et131x_adapter *adapter,
3996 struct pci_dev *pdev)
3997{
3998 int cap = pci_pcie_cap(pdev);
3999 u16 max_payload;
4000 u16 ctl;
4001 int i, rc;
4002
4003 rc = et131x_init_eeprom(adapter);
4004 if (rc < 0)
4005 goto out;
4006
4007 if (!cap) {
4008 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
4009 goto err_out;
4010 }
4011
4012
4013
4014
4015 if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) {
4016 dev_err(&pdev->dev,
4017 "Could not read PCI config space for Max Payload Size\n");
4018 goto err_out;
4019 }
4020
4021
4022 max_payload &= 0x07;
4023
4024 if (max_payload < 2) {
4025 static const u16 acknak[2] = { 0x76, 0xD0 };
4026 static const u16 replay[2] = { 0x1E0, 0x2ED };
4027
4028 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
4029 acknak[max_payload])) {
4030 dev_err(&pdev->dev,
4031 "Could not write PCI config space for ACK/NAK\n");
4032 goto err_out;
4033 }
4034 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
4035 replay[max_payload])) {
4036 dev_err(&pdev->dev,
4037 "Could not write PCI config space for Replay Timer\n");
4038 goto err_out;
4039 }
4040 }
4041
4042
4043
4044
4045 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
4046 dev_err(&pdev->dev,
4047 "Could not write PCI config space for Latency Timers\n");
4048 goto err_out;
4049 }
4050
4051
4052 if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) {
4053 dev_err(&pdev->dev,
4054 "Could not read PCI config space for Max read size\n");
4055 goto err_out;
4056 }
4057
4058 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12);
4059
4060 if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
4061 dev_err(&pdev->dev,
4062 "Could not write PCI config space for Max read size\n");
4063 goto err_out;
4064 }
4065
4066
4067
4068
4069 if (!adapter->has_eeprom) {
4070 et131x_hwaddr_init(adapter);
4071 return 0;
4072 }
4073
4074 for (i = 0; i < ETH_ALEN; i++) {
4075 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
4076 adapter->rom_addr + i)) {
4077 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
4078 goto err_out;
4079 }
4080 }
4081 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
4082out:
4083 return rc;
4084err_out:
4085 rc = -EIO;
4086 goto out;
4087}
4088
4089
4090
4091
4092
4093
4094
4095
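/**
 * et131x_error_timer_handler - Periodic error/PHY-coma housekeeping
 * @data: timer-specific variable; here a pointer to our adapter structure
 *
 * Called when the error timer expires; updates the MAC statistics and
 * drives the PHY in and out of coma mode depending on link state.
 */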
4096static void et131x_error_timer_handler(unsigned long data)
4097{
4098 struct et131x_adapter *adapter = (struct et131x_adapter *) data;
4099 struct phy_device *phydev = adapter->phydev;
4100
4101 if (et1310_in_phy_coma(adapter)) {
4102
4103
4104
4105 et1310_disable_phy_coma(adapter);
4106 adapter->boot_coma = 20;
4107 } else {
4108 et1310_update_macstat_host_counters(adapter);
4109 }
4110
4111 if (!phydev->link && adapter->boot_coma < 11)
4112 adapter->boot_coma++;
4113
4114 if (adapter->boot_coma == 10) {
4115 if (!phydev->link) {
4116 if (!et1310_in_phy_coma(adapter)) {
4117
4118
4119
4120 et131x_enable_interrupts(adapter);
4121 et1310_enable_phy_coma(adapter);
4122 }
4123 }
4124 }
4125
4126
4127 mod_timer(&adapter->error_timer, jiffies +
4128 TX_ERROR_PERIOD * HZ / 1000);
4129}
4130
4131
4132
4133
4134
4135
4136
4137
4138
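/**
 * et131x_adapter_memory_alloc - Allocate all DMA memory for the adapter
 * @adapter: pointer to our private adapter structure
 *
 * Allocates Tx and Rx DMA memory and initialises the receive list.
 * Returns 0 on success, errno on failure.
 */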
4139static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
4140{
4141 int status;
4142
4143
4144 status = et131x_tx_dma_memory_alloc(adapter);
4145 if (status != 0) {
4146 dev_err(&adapter->pdev->dev,
4147 "et131x_tx_dma_memory_alloc FAILED\n");
4148 return status;
4149 }
4150
4151 status = et131x_rx_dma_memory_alloc(adapter);
4152 if (status != 0) {
4153 dev_err(&adapter->pdev->dev,
4154 "et131x_rx_dma_memory_alloc FAILED\n");
4155 et131x_tx_dma_memory_free(adapter);
4156 return status;
4157 }
4158
4159
4160 status = et131x_init_recv(adapter);
4161 if (status != 0) {
4162 dev_err(&adapter->pdev->dev,
4163 "et131x_init_recv FAILED\n");
4164 et131x_tx_dma_memory_free(adapter);
4165 et131x_rx_dma_memory_free(adapter);
4166 }
4167 return status;
4168}
4169
4170
4171
4172
4173
4174static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
4175{
4176
4177 et131x_tx_dma_memory_free(adapter);
4178 et131x_rx_dma_memory_free(adapter);
4179}
4180
4181static void et131x_adjust_link(struct net_device *netdev)
4182{
4183 struct et131x_adapter *adapter = netdev_priv(netdev);
4184 struct phy_device *phydev = adapter->phydev;
4185
4186 if (netif_carrier_ok(netdev)) {
4187 adapter->boot_coma = 20;
4188
4189 if (phydev && phydev->speed == SPEED_10) {
4190
4191
4192
4193
4194
4195
4196 u16 register18;
4197
			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
4200 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4201 register18 | 0x4);
4202 et131x_mii_write(adapter, PHY_INDEX_REG,
4203 register18 | 0x8402);
4204 et131x_mii_write(adapter, PHY_DATA_REG,
4205 register18 | 511);
4206 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4207 register18);
4208 }
4209
4210 et1310_config_flow_control(adapter);
4211
4212 if (phydev && phydev->speed == SPEED_1000 &&
4213 adapter->registry_jumbo_packet > 2048) {
4214 u16 reg;
4215
			et131x_mii_read(adapter, PHY_CONFIG, &reg);
4217 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
4218 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
4219 et131x_mii_write(adapter, PHY_CONFIG, reg);
4220 }
4221
4222 et131x_set_rx_dma_timer(adapter);
4223 et1310_config_mac_regs2(adapter);
4224 }
4225
4226 if (phydev && phydev->link != adapter->link) {
4227
4228
4229
4230
4231
4232 if (et1310_in_phy_coma(adapter))
4233 et1310_disable_phy_coma(adapter);
4234
4235 if (phydev->link) {
4236 adapter->boot_coma = 20;
4237 } else {
4238 dev_warn(&adapter->pdev->dev,
				 "Link down - cable problem?\n");
4240 adapter->boot_coma = 0;
4241
4242 if (phydev->speed == SPEED_10) {
4243
4244
4245
4246
4247
4248 u16 register18;
4249
				et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
						&register18);
4252 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4253 register18 | 0x4);
4254 et131x_mii_write(adapter, PHY_INDEX_REG,
4255 register18 | 0x8402);
4256 et131x_mii_write(adapter, PHY_DATA_REG,
4257 register18 | 511);
4258 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4259 register18);
4260 }
4261
4262
4263 et131x_free_busy_send_packets(adapter);
4264
4265
4266 et131x_init_send(adapter);
4267
4268
4269
4270
4271
4272
4273
4274 et131x_soft_reset(adapter);
4275
4276
4277 et131x_adapter_setup(adapter);
4278
4279
4280 et131x_disable_txrx(netdev);
4281 et131x_enable_txrx(netdev);
4282 }
4283
4284 adapter->link = phydev->link;
4285
4286 phy_print_status(phydev);
4287 }
4288}
4289
4290static int et131x_mii_probe(struct net_device *netdev)
4291{
4292 struct et131x_adapter *adapter = netdev_priv(netdev);
4293 struct phy_device *phydev = NULL;
4294
4295 phydev = phy_find_first(adapter->mii_bus);
4296 if (!phydev) {
4297 dev_err(&adapter->pdev->dev, "no PHY found\n");
4298 return -ENODEV;
4299 }
4300
4301 phydev = phy_connect(netdev, dev_name(&phydev->dev),
4302 &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
4303
4304 if (IS_ERR(phydev)) {
4305 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
4306 return PTR_ERR(phydev);
4307 }
4308
4309 phydev->supported &= (SUPPORTED_10baseT_Half
4310 | SUPPORTED_10baseT_Full
4311 | SUPPORTED_100baseT_Half
4312 | SUPPORTED_100baseT_Full
4313 | SUPPORTED_Autoneg
4314 | SUPPORTED_MII
4315 | SUPPORTED_TP);
4316
4317 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
4318 phydev->supported |= SUPPORTED_1000baseT_Full;
4319
4320 phydev->advertising = phydev->supported;
4321 adapter->phydev = phydev;
4322
4323 dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
4324 phydev->drv->name, dev_name(&phydev->dev));
4325
4326 return 0;
4327}
4328
4329
4330
4331
4332
4333
4334
4335
4336
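/**
 * et131x_adapter_init - Initialise the private adapter structure
 * @netdev: pointer to the net device backing this adapter
 * @pdev: pointer to our PCI device
 *
 * Sets up the spinlocks, default jumbo packet size and default MAC address.
 */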
4337static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
4338 struct pci_dev *pdev)
4339{
4340 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
4341
4342 struct et131x_adapter *adapter;
4343
4344
4345 adapter = netdev_priv(netdev);
4346 adapter->pdev = pci_dev_get(pdev);
4347 adapter->netdev = netdev;
4348
4349
4350 spin_lock_init(&adapter->lock);
4351 spin_lock_init(&adapter->tcb_send_qlock);
4352 spin_lock_init(&adapter->tcb_ready_qlock);
4353 spin_lock_init(&adapter->send_hw_lock);
4354 spin_lock_init(&adapter->rcv_lock);
4355 spin_lock_init(&adapter->rcv_pend_lock);
4356 spin_lock_init(&adapter->fbr_lock);
4357 spin_lock_init(&adapter->phy_lock);
4358
4359 adapter->registry_jumbo_packet = 1514;
4360
4361
4362 memcpy(adapter->addr, default_mac, ETH_ALEN);
4363
4364 return adapter;
4365}
4366
4367
4368
4369
4370
4371
4372
4373
4374
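/**
 * et131x_pci_remove - Device removal routine
 * @pdev: a pointer to the device's pci_dev structure
 *
 * Called by the PCI subsystem when the device is removed; unregisters the
 * net device and releases all resources acquired in et131x_pci_setup().
 */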
4375static void __devexit et131x_pci_remove(struct pci_dev *pdev)
4376{
4377 struct net_device *netdev = pci_get_drvdata(pdev);
4378 struct et131x_adapter *adapter = netdev_priv(netdev);
4379
4380 unregister_netdev(netdev);
4381 phy_disconnect(adapter->phydev);
4382 mdiobus_unregister(adapter->mii_bus);
4383 kfree(adapter->mii_bus->irq);
4384 mdiobus_free(adapter->mii_bus);
4385
4386 et131x_adapter_memory_free(adapter);
4387 iounmap(adapter->regs);
4388 pci_dev_put(pdev);
4389
4390 free_netdev(netdev);
4391 pci_release_regions(pdev);
4392 pci_disable_device(pdev);
4393}
4394
4395
4396
4397
4398
4399static void et131x_up(struct net_device *netdev)
4400{
4401 struct et131x_adapter *adapter = netdev_priv(netdev);
4402
4403 et131x_enable_txrx(netdev);
4404 phy_start(adapter->phydev);
4405}
4406
4407
4408
4409
4410
4411static void et131x_down(struct net_device *netdev)
4412{
4413 struct et131x_adapter *adapter = netdev_priv(netdev);
4414
4415
4416 netdev->trans_start = jiffies;
4417
4418 phy_stop(adapter->phydev);
4419 et131x_disable_txrx(netdev);
4420}
4421
4422#ifdef CONFIG_PM_SLEEP
4423static int et131x_suspend(struct device *dev)
4424{
4425 struct pci_dev *pdev = to_pci_dev(dev);
4426 struct net_device *netdev = pci_get_drvdata(pdev);
4427
4428 if (netif_running(netdev)) {
4429 netif_device_detach(netdev);
4430 et131x_down(netdev);
4431 pci_save_state(pdev);
4432 }
4433
4434 return 0;
4435}
4436
4437static int et131x_resume(struct device *dev)
4438{
4439 struct pci_dev *pdev = to_pci_dev(dev);
4440 struct net_device *netdev = pci_get_drvdata(pdev);
4441
4442 if (netif_running(netdev)) {
4443 pci_restore_state(pdev);
4444 et131x_up(netdev);
4445 netif_device_attach(netdev);
4446 }
4447
4448 return 0;
4449}
4450
4451static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4452#define ET131X_PM_OPS (&et131x_pm_ops)
4453#else
4454#define ET131X_PM_OPS NULL
4455#endif
4456
4457
4458
4459
4460
4461
4462
4463
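/**
 * et131x_isr - The Interrupt Service Routine for the driver
 * @irq: the IRQ on which the interrupt was received
 * @dev_id: device-specific info (here a pointer to a net_device struct)
 *
 * Disables further interrupts, latches the interrupt status and schedules
 * the deferred handler. Returns a value indicating whether the interrupt
 * was handled.
 */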
4464irqreturn_t et131x_isr(int irq, void *dev_id)
4465{
4466 bool handled = true;
4467 struct net_device *netdev = (struct net_device *)dev_id;
4468 struct et131x_adapter *adapter = NULL;
4469 u32 status;
4470
4471 if (!netif_device_present(netdev)) {
4472 handled = false;
4473 goto out;
4474 }
4475
4476 adapter = netdev_priv(netdev);
4477
4478
4479
4480
4481
4482
4483 et131x_disable_interrupts(adapter);
4484
4485
4486
4487
4488 status = readl(&adapter->regs->global.int_status);
4489
4490 if (adapter->flowcontrol == FLOW_TXONLY ||
4491 adapter->flowcontrol == FLOW_BOTH) {
4492 status &= ~INT_MASK_ENABLE;
4493 } else {
4494 status &= ~INT_MASK_ENABLE_NO_FLOW;
4495 }
4496
4497
4498 if (!status) {
4499 handled = false;
4500 et131x_enable_interrupts(adapter);
4501 goto out;
4502 }
4503
4504
4505
4506 if (status & ET_INTR_WATCHDOG) {
4507 struct tcb *tcb = adapter->tx_ring.send_head;
4508
4509 if (tcb)
4510 if (++tcb->stale > 1)
4511 status |= ET_INTR_TXDMA_ISR;
4512
4513 if (adapter->rx_ring.unfinished_receives)
4514 status |= ET_INTR_RXDMA_XFR_DONE;
4515 else if (tcb == NULL)
4516 writel(0, &adapter->regs->global.watchdog_timer);
4517
4518 status &= ~ET_INTR_WATCHDOG;
4519 }
4520
4521 if (status == 0) {
4522
4523
4524
4525
4526
4527 et131x_enable_interrupts(adapter);
4528 goto out;
4529 }
4530
4531
4532
4533
4534
4535 adapter->stats.interrupt_status = status;
4536
4537
4538
4539
4540
4541 schedule_work(&adapter->task);
4542out:
4543 return IRQ_RETVAL(handled);
4544}
4545
4546
4547
4548
4549
4550
4551
4552
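/**
 * et131x_isr_handler - Deferred interrupt handler
 * @work: pointer to the work_struct embedded in our adapter structure
 *
 * Scheduled by the ISR to run in process context; this is where the
 * individual interrupt sources are actually serviced.
 */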
4553static void et131x_isr_handler(struct work_struct *work)
4554{
4555 struct et131x_adapter *adapter =
4556 container_of(work, struct et131x_adapter, task);
4557 u32 status = adapter->stats.interrupt_status;
4558 struct address_map __iomem *iomem = adapter->regs;
4559
4560
4561
4562
4563
4564
4565
4566 if (status & ET_INTR_TXDMA_ISR)
4567 et131x_handle_send_interrupt(adapter);
4568
4569
4570 if (status & ET_INTR_RXDMA_XFR_DONE)
4571 et131x_handle_recv_interrupt(adapter);
4572
4573 status &= 0xffffffd7;
4574
4575 if (status) {
4576
4577 if (status & ET_INTR_TXDMA_ERR) {
4578 u32 txdma_err;
4579
4580
4581 txdma_err = readl(&iomem->txdma.tx_dma_error);
4582
4583 dev_warn(&adapter->pdev->dev,
4584 "TXDMA_ERR interrupt, error = %d\n",
4585 txdma_err);
4586 }
4587
4588
4589 if (status &
4590 (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609 if (adapter->flowcontrol == FLOW_TXONLY ||
4610 adapter->flowcontrol == FLOW_BOTH) {
4611 u32 pm_csr;
4612
4613
4614
4615
4616
4617 pm_csr = readl(&iomem->global.pm_csr);
4618 if (!et1310_in_phy_coma(adapter))
4619 writel(3, &iomem->txmac.bp_ctrl);
4620 }
4621 }
4622
4623
4624 if (status & ET_INTR_RXDMA_STAT_LOW) {
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635 }
4636
4637
4638 if (status & ET_INTR_RXDMA_ERR) {
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659 dev_warn(&adapter->pdev->dev,
4660 "RxDMA_ERR interrupt, error %x\n",
4661 readl(&iomem->txmac.tx_test));
4662 }
4663
4664
4665 if (status & ET_INTR_WOL) {
4666
4667
4668
4669
4670
4671
4672
4673 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4674 }
4675
4676
4677 if (status & ET_INTR_TXMAC) {
4678 u32 err = readl(&iomem->txmac.err);
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690 dev_warn(&adapter->pdev->dev,
4691 "TXMAC interrupt, error 0x%08x\n",
4692 err);
4693
4694
4695
4696
4697
4698 }
4699
4700
4701 if (status & ET_INTR_RXMAC) {
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711 dev_warn(&adapter->pdev->dev,
4712 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
4713 readl(&iomem->rxmac.err_reg));
4714
4715 dev_warn(&adapter->pdev->dev,
4716 "Enable 0x%08x, Diag 0x%08x\n",
4717 readl(&iomem->rxmac.ctrl),
4718 readl(&iomem->rxmac.rxq_diag));
4719
4720
4721
4722
4723
4724
4725 }
4726
4727
4728 if (status & ET_INTR_MAC_STAT) {
4729
4730
4731
4732
4733
4734
4735 et1310_handle_macstat_interrupt(adapter);
4736 }
4737
4738
4739 if (status & ET_INTR_SLV_TIMEOUT) {
4740
4741
4742
4743
4744
4745
4746
4747
4748
4749 }
4750 }
4751 et131x_enable_interrupts(adapter);
4752}
4753
4754
4755
4756
4757
4758
4759
4760static struct net_device_stats *et131x_stats(struct net_device *netdev)
4761{
4762 struct et131x_adapter *adapter = netdev_priv(netdev);
4763 struct net_device_stats *stats = &adapter->net_stats;
4764 struct ce_stats *devstat = &adapter->stats;
4765
4766 stats->rx_errors = devstat->rx_length_errs +
4767 devstat->rx_align_errs +
4768 devstat->rx_crc_errs +
4769 devstat->rx_code_violations +
4770 devstat->rx_other_errs;
4771 stats->tx_errors = devstat->tx_max_pkt_errs;
4772 stats->multicast = devstat->multicast_pkts_rcvd;
4773 stats->collisions = devstat->tx_collisions;
4774
4775 stats->rx_length_errors = devstat->rx_length_errs;
4776 stats->rx_over_errors = devstat->rx_overflows;
4777 stats->rx_crc_errors = devstat->rx_crc_errs;
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798 return stats;
4799}
4800
4801
4802
4803
4804
4805
4806
4807static int et131x_open(struct net_device *netdev)
4808{
4809 struct et131x_adapter *adapter = netdev_priv(netdev);
4810 struct pci_dev *pdev = adapter->pdev;
4811 unsigned int irq = pdev->irq;
4812 int result;
4813
4814
4815 init_timer(&adapter->error_timer);
4816 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4817 adapter->error_timer.function = et131x_error_timer_handler;
4818 adapter->error_timer.data = (unsigned long)adapter;
4819 add_timer(&adapter->error_timer);
4820
4821 result = request_irq(irq, et131x_isr,
4822 IRQF_SHARED, netdev->name, netdev);
4823 if (result) {
4824 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
4825 return result;
4826 }
4827
4828 adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
4829
4830 et131x_up(netdev);
4831
4832 return result;
4833}
4834
4835
4836
4837
4838
4839
4840
4841static int et131x_close(struct net_device *netdev)
4842{
4843 struct et131x_adapter *adapter = netdev_priv(netdev);
4844
4845 et131x_down(netdev);
4846
4847 adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
4848 free_irq(adapter->pdev->irq, netdev);
4849
4850
4851 return del_timer_sync(&adapter->error_timer);
4852}
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4863 int cmd)
4864{
4865 struct et131x_adapter *adapter = netdev_priv(netdev);
4866
4867 if (!adapter->phydev)
4868 return -EINVAL;
4869
4870 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4871}
4872
4873
4874
4875
4876
4877
4878
4879
4880
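/**
 * et131x_set_packet_filter - Configure Rx packet filtering on the device
 * @adapter: pointer to our private adapter structure
 *
 * Programs the RXMAC packet filter control according to the promiscuous,
 * multicast, unicast and broadcast bits in adapter->packet_filter.
 */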
4881static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4882{
4883 int filter = adapter->packet_filter;
4884 int status = 0;
4885 u32 ctrl;
4886 u32 pf_ctrl;
4887
4888 ctrl = readl(&adapter->regs->rxmac.ctrl);
4889 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
4890
4891
4892
4893
4894 ctrl |= 0x04;
4895
4896
4897
4898
4899 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4900 pf_ctrl &= ~7;
4901 else {
4902
4903
4904
4905
4906
4907 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4908 pf_ctrl &= ~2;
4909 else {
4910 et1310_setup_device_for_multicast(adapter);
4911 pf_ctrl |= 2;
4912 ctrl &= ~0x04;
4913 }
4914
4915
4916 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4917 et1310_setup_device_for_unicast(adapter);
4918 pf_ctrl |= 4;
4919 ctrl &= ~0x04;
4920 }
4921
4922
4923 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4924 pf_ctrl |= 1;
4925 ctrl &= ~0x04;
4926 } else
4927 pf_ctrl &= ~1;
4928
4929
4930
4931
4932
4933 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4934 writel(ctrl, &adapter->regs->rxmac.ctrl);
4935 }
4936 return status;
4937}
4938
4939
4940
4941
4942
4943static void et131x_multicast(struct net_device *netdev)
4944{
4945 struct et131x_adapter *adapter = netdev_priv(netdev);
4946 int packet_filter;
4947 unsigned long flags;
4948 struct netdev_hw_addr *ha;
4949 int i;
4950
4951 spin_lock_irqsave(&adapter->lock, flags);
4952
4953
4954
4955
4956
4957 packet_filter = adapter->packet_filter;
4958
4959
4960
4961
4962
4963
4964 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4965
4966
4967
4968
4969
4970 if (netdev->flags & IFF_PROMISC)
4971 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4972 else
4973 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4974
4975 if (netdev->flags & IFF_ALLMULTI)
4976 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4977
4978 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
4979 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4980
4981 if (netdev_mc_count(netdev) < 1) {
4982 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4983 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4984 } else
4985 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4986
4987
4988 i = 0;
4989 netdev_for_each_mc_addr(ha, netdev) {
4990 if (i == NIC_MAX_MCAST_LIST)
4991 break;
4992 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4993 }
4994 adapter->multicast_addr_count = i;
4995
4996
4997
4998
4999
5000
5001
5002 if (packet_filter != adapter->packet_filter) {
5003
5004 et131x_set_packet_filter(adapter);
5005 }
5006 spin_unlock_irqrestore(&adapter->lock, flags);
5007}
5008
5009
5010
5011
5012
5013
5014
5015
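/**
 * et131x_tx - The handler to transmit a packet on the device
 * @skb: data to be Tx'd
 * @netdev: device on which the data is to be Tx'd
 *
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when no TCBs are available.
 */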
5016static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
5017{
5018 int status = 0;
5019 struct et131x_adapter *adapter = netdev_priv(netdev);
5020
5021
5022 if (adapter->tx_ring.used >= NUM_TCB - 1 &&
5023 !netif_queue_stopped(netdev))
5024 netif_stop_queue(netdev);
5025
5026
5027 netdev->trans_start = jiffies;
5028
5029
5030 status = et131x_send_packets(skb, netdev);
5031
5032
5033 if (status != 0) {
5034 if (status == -ENOMEM)
5035 status = NETDEV_TX_BUSY;
5036 else
5037 status = NETDEV_TX_OK;
5038 }
5039 return status;
5040}
5041
5042
5043
5044
5045
5046
5047
5048
5049
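/**
 * et131x_tx_timeout - Tx timeout handler
 * @netdev: a pointer to a net_device struct representing the device
 *
 * Called when a Tx request times out; if the oldest pending send appears
 * stuck, the Tx/Rx paths are reset.
 */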
5050static void et131x_tx_timeout(struct net_device *netdev)
5051{
5052 struct et131x_adapter *adapter = netdev_priv(netdev);
5053 struct tcb *tcb;
5054 unsigned long flags;
5055
	/* If the device is closed, ignore the timeout */
	if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
		return;
5059
5060
5061
5062
5063 if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
5064 return;
5065
5066
5067 if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
5068 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
5069 return;
5070 }
5071
5072
5073 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
5074
5075 tcb = adapter->tx_ring.send_head;
5076
5077 if (tcb != NULL) {
5078 tcb->count++;
5079
5080 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
5081 spin_unlock_irqrestore(&adapter->tcb_send_qlock,
5082 flags);
5083
5084 dev_warn(&adapter->pdev->dev,
5085 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
5086 tcb->index,
5087 tcb->flags);
5088
5089 adapter->net_stats.tx_errors++;
5090
5091
5092 et131x_disable_txrx(netdev);
5093 et131x_enable_txrx(netdev);
5094 return;
5095 }
5096 }
5097
5098 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
5099}
5100
5101
5102
5103
5104
5105
5106
5107
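/**
 * et131x_change_mtu - Change the MTU of the device
 * @netdev: device whose MTU is to be changed
 * @new_mtu: the desired MTU
 *
 * Frees and re-allocates the DMA memory with the new jumbo packet size,
 * then resets and reconfigures the adapter.
 * Returns 0 on success, errno on failure.
 */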
5108static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
5109{
5110 int result = 0;
5111 struct et131x_adapter *adapter = netdev_priv(netdev);
5112
5113
5114 if (new_mtu < 64 || new_mtu > 9216)
5115 return -EINVAL;
5116
5117 et131x_disable_txrx(netdev);
5118 et131x_handle_send_interrupt(adapter);
5119 et131x_handle_recv_interrupt(adapter);
5120
5121
5122 netdev->mtu = new_mtu;
5123
5124
5125 et131x_adapter_memory_free(adapter);
5126
5127
5128 adapter->registry_jumbo_packet = new_mtu + 14;
5129 et131x_soft_reset(adapter);
5130
5131
5132 result = et131x_adapter_memory_alloc(adapter);
5133 if (result != 0) {
5134 dev_warn(&adapter->pdev->dev,
5135 "Change MTU failed; couldn't re-alloc DMA memory\n");
5136 return result;
5137 }
5138
5139 et131x_init_send(adapter);
5140
5141 et131x_hwaddr_init(adapter);
5142 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5143
5144
5145 et131x_adapter_setup(adapter);
5146
5147 et131x_enable_txrx(netdev);
5148
5149 return result;
5150}
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160
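/**
 * et131x_set_mac_addr - Change the MAC address of the device
 * @netdev: device whose MAC is to be changed
 * @new_mac: a sockaddr carrying the desired MAC address
 *
 * Returns 0 on success, errno on failure (as defined in errno.h).
 */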
5161static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
5162{
5163 int result = 0;
5164 struct et131x_adapter *adapter = netdev_priv(netdev);
5165 struct sockaddr *address = new_mac;
5166
5167
5168
5169 if (adapter == NULL)
5170 return -ENODEV;
5171
5172
5173 if (!is_valid_ether_addr(address->sa_data))
5174 return -EADDRNOTAVAIL;
5175
5176 et131x_disable_txrx(netdev);
5177 et131x_handle_send_interrupt(adapter);
5178 et131x_handle_recv_interrupt(adapter);
5179
5180
5181
5182
5183 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
5184
5185 netdev_info(netdev, "Setting MAC address to %pM\n",
5186 netdev->dev_addr);
5187
5188
5189 et131x_adapter_memory_free(adapter);
5190
5191 et131x_soft_reset(adapter);
5192
5193
5194 result = et131x_adapter_memory_alloc(adapter);
5195 if (result != 0) {
5196 dev_err(&adapter->pdev->dev,
5197 "Change MAC failed; couldn't re-alloc DMA memory\n");
5198 return result;
5199 }
5200
5201 et131x_init_send(adapter);
5202
5203 et131x_hwaddr_init(adapter);
5204
5205
5206 et131x_adapter_setup(adapter);
5207
5208 et131x_enable_txrx(netdev);
5209
5210 return result;
5211}
5212
5213static const struct net_device_ops et131x_netdev_ops = {
5214 .ndo_open = et131x_open,
5215 .ndo_stop = et131x_close,
5216 .ndo_start_xmit = et131x_tx,
5217 .ndo_set_rx_mode = et131x_multicast,
5218 .ndo_tx_timeout = et131x_tx_timeout,
5219 .ndo_change_mtu = et131x_change_mtu,
5220 .ndo_set_mac_address = et131x_set_mac_addr,
5221 .ndo_validate_addr = eth_validate_addr,
5222 .ndo_get_stats = et131x_stats,
5223 .ndo_do_ioctl = et131x_ioctl,
5224};
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
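/**
 * et131x_pci_setup - Perform device initialization
 * @pdev: a pointer to the device's pci_dev structure
 * @ent: this device's entry in the pci_device_id table
 *
 * Called by the PCI subsystem when a device matching et131x_pci_table is
 * found. Enables the device, maps its registers, allocates DMA memory,
 * registers the MII bus and finally registers the net device.
 *
 * Returns 0 on success, errno on failure (as defined in errno.h).
 */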
5238static int __devinit et131x_pci_setup(struct pci_dev *pdev,
5239 const struct pci_device_id *ent)
5240{
5241 struct net_device *netdev;
5242 struct et131x_adapter *adapter;
5243 int rc;
5244 int ii;
5245
5246 rc = pci_enable_device(pdev);
5247 if (rc < 0) {
5248 dev_err(&pdev->dev, "pci_enable_device() failed\n");
5249 goto out;
5250 }
5251
5252
5253 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5254 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
5255 rc = -ENODEV;
5256 goto err_disable;
5257 }
5258
5259 rc = pci_request_regions(pdev, DRIVER_NAME);
5260 if (rc < 0) {
5261 dev_err(&pdev->dev, "Can't get PCI resources\n");
5262 goto err_disable;
5263 }
5264
5265 pci_set_master(pdev);
5266
5267
5268 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
5269 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5270 if (rc < 0) {
5271 dev_err(&pdev->dev,
5272 "Unable to obtain 64 bit DMA for consistent allocations\n");
5273 goto err_release_res;
5274 }
5275 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
5276 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5277 if (rc < 0) {
5278 dev_err(&pdev->dev,
5279 "Unable to obtain 32 bit DMA for consistent allocations\n");
5280 goto err_release_res;
5281 }
5282 } else {
5283 dev_err(&pdev->dev, "No usable DMA addressing method\n");
5284 rc = -EIO;
5285 goto err_release_res;
5286 }
5287
5288
5289 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
5290 if (!netdev) {
5291 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
5292 rc = -ENOMEM;
5293 goto err_release_res;
5294 }
5295
5296 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
5297 netdev->netdev_ops = &et131x_netdev_ops;
5298
5299 SET_NETDEV_DEV(netdev, &pdev->dev);
5300 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
5301
5302 adapter = et131x_adapter_init(netdev, pdev);
5303
5304 rc = et131x_pci_init(adapter, pdev);
5305 if (rc < 0)
5306 goto err_free_dev;
5307
5308
5309 adapter->regs = pci_ioremap_bar(pdev, 0);
5310 if (!adapter->regs) {
5311 dev_err(&pdev->dev, "Cannot map device registers\n");
5312 rc = -ENOMEM;
5313 goto err_free_dev;
5314 }
5315
5316
5317 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
5318
5319
5320 et131x_soft_reset(adapter);
5321
5322
5323 et131x_disable_interrupts(adapter);
5324
5325
5326 rc = et131x_adapter_memory_alloc(adapter);
5327 if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
5329 goto err_iounmap;
5330 }
5331
5332
5333 et131x_init_send(adapter);
5334
5335
5336 INIT_WORK(&adapter->task, et131x_isr_handler);
5337
5338
5339 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5340
5341
5342 adapter->boot_coma = 0;
5343 et1310_disable_phy_coma(adapter);
5344
5345 rc = -ENOMEM;
5346
5347
5348 adapter->mii_bus = mdiobus_alloc();
5349 if (!adapter->mii_bus) {
5350 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
5351 goto err_mem_free;
5352 }
5353
5354 adapter->mii_bus->name = "et131x_eth_mii";
5355 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
5356 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
5357 adapter->mii_bus->priv = netdev;
5358 adapter->mii_bus->read = et131x_mdio_read;
5359 adapter->mii_bus->write = et131x_mdio_write;
5360 adapter->mii_bus->reset = et131x_mdio_reset;
5361 adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
5362 if (!adapter->mii_bus->irq) {
5363 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
5364 goto err_mdio_free;
5365 }
5366
5367 for (ii = 0; ii < PHY_MAX_ADDR; ii++)
5368 adapter->mii_bus->irq[ii] = PHY_POLL;
5369
5370 rc = mdiobus_register(adapter->mii_bus);
5371 if (rc < 0) {
5372 dev_err(&pdev->dev, "failed to register MII bus\n");
5373 goto err_mdio_free_irq;
5374 }
5375
5376 rc = et131x_mii_probe(netdev);
5377 if (rc < 0) {
5378 dev_err(&pdev->dev, "failed to probe MII bus\n");
5379 goto err_mdio_unregister;
5380 }
5381
5382
5383 et131x_adapter_setup(adapter);
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393 rc = register_netdev(netdev);
5394 if (rc < 0) {
5395 dev_err(&pdev->dev, "register_netdev() failed\n");
5396 goto err_phy_disconnect;
5397 }
5398
5399
5400
5401
5402
5403 pci_set_drvdata(pdev, netdev);
5404out:
5405 return rc;
5406
5407err_phy_disconnect:
5408 phy_disconnect(adapter->phydev);
5409err_mdio_unregister:
5410 mdiobus_unregister(adapter->mii_bus);
5411err_mdio_free_irq:
5412 kfree(adapter->mii_bus->irq);
5413err_mdio_free:
5414 mdiobus_free(adapter->mii_bus);
5415err_mem_free:
5416 et131x_adapter_memory_free(adapter);
5417err_iounmap:
5418 iounmap(adapter->regs);
5419err_free_dev:
5420 pci_dev_put(pdev);
5421 free_netdev(netdev);
5422err_release_res:
5423 pci_release_regions(pdev);
5424err_disable:
5425 pci_disable_device(pdev);
5426 goto out;
5427}
5428
5429static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
5430 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
5431 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
5432 {0,}
5433};
5434MODULE_DEVICE_TABLE(pci, et131x_pci_table);
5435
5436static struct pci_driver et131x_driver = {
5437 .name = DRIVER_NAME,
5438 .id_table = et131x_pci_table,
5439 .probe = et131x_pci_setup,
5440 .remove = __devexit_p(et131x_pci_remove),
5441 .driver.pm = ET131X_PM_OPS,
5442};
5443
5444module_pci_driver(et131x_driver);
5445