/* et131x.c - 10/100/1000 Base-T Ethernet driver for the ET1310 by Agere Systems */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
		   "for the ET1310 by Agere Systems");
90
91
92#define MAX_NUM_REGISTER_POLLS 1000
93#define MAX_NUM_WRITE_RETRIES 2
94
95
96#define COUNTER_WRAP_16_BIT 0x10000
97#define COUNTER_WRAP_12_BIT 0x1000
98
99
100#define INTERNAL_MEM_SIZE 0x400
101#define INTERNAL_MEM_RX_OFFSET 0x1FF
102
103
104
105
106
107
108
109
110
111
112
113#define INT_MASK_DISABLE 0xffffffff
114
115
116
117
118
119#define INT_MASK_ENABLE 0xfffebf17
120#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7
121
122
123
124#define NIC_MIN_PACKET_SIZE 60
125
126
127#define NIC_MAX_MCAST_LIST 128
128
129
130#define ET131X_PACKET_TYPE_DIRECTED 0x0001
131#define ET131X_PACKET_TYPE_MULTICAST 0x0002
132#define ET131X_PACKET_TYPE_BROADCAST 0x0004
133#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
134#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010
135
136
137#define ET131X_TX_TIMEOUT (1 * HZ)
138#define NIC_SEND_HANG_THRESHOLD 0
139
140
141#define fMP_DEST_MULTI 0x00000001
142#define fMP_DEST_BROAD 0x00000002
143
144
145#define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004
146#define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
147
148
149#define fMP_ADAPTER_LOWER_POWER 0x00200000
150
151#define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
152#define fMP_ADAPTER_HARDWARE_ERROR 0x04000000
153
154#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
155
156
157#define ET1310_PCI_MAC_ADDRESS 0xA4
158#define ET1310_PCI_EEPROM_STATUS 0xB2
159#define ET1310_PCI_ACK_NACK 0xC0
160#define ET1310_PCI_REPLAY 0xC2
161#define ET1310_PCI_L0L1LATENCY 0xCF
162
163
164#define ET131X_PCI_DEVICE_ID_GIG 0xED00
165#define ET131X_PCI_DEVICE_ID_FAST 0xED01
166
167
168#define NANO_IN_A_MICRO 1000
169
170#define PARM_RX_NUM_BUFS_DEF 4
171#define PARM_RX_TIME_INT_DEF 10
172#define PARM_RX_MEM_END_DEF 0x2bc
173#define PARM_TX_TIME_INT_DEF 40
174#define PARM_TX_NUM_BUFS_DEF 4
175#define PARM_DMA_CACHE_DEF 0
176
177
178#define USE_FBR0 1
179#define FBR_CHUNKS 32
180#define MAX_DESC_PER_RING_RX 1024
181
182
183#ifdef USE_FBR0
184#define RFD_LOW_WATER_MARK 40
185#define NIC_DEFAULT_NUM_RFD 1024
186#define NUM_FBRS 2
187#else
188#define RFD_LOW_WATER_MARK 20
189#define NIC_DEFAULT_NUM_RFD 256
190#define NUM_FBRS 1
191#endif
192
193#define NIC_MIN_NUM_RFD 64
194#define NUM_PACKETS_HANDLED 256
195
196#define ALCATEL_MULTICAST_PKT 0x01000000
197#define ALCATEL_BROADCAST_PKT 0x02000000
198
199
200struct fbr_desc {
201 u32 addr_lo;
202 u32 addr_hi;
203 u32 word2;
204};
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
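/* struct pkt_stat_desc - Packet Status Ring descriptor.
 * word0 carries the per-packet receive status flags from the RXMAC.
 * word1 packs the frame length (bits 0-15), the free buffer index
 * (bits 16-25) and the free buffer ring index (bits 26-27), as decoded
 * in nic_rx_pkts().
 */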
249struct pkt_stat_desc {
250 u32 word0;
251 u32 word1;
252};
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
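/* struct rx_status_block - RX DMA write-back status block, updated by the
 * hardware. The upper half of word1 holds the current packet status ring
 * offset, which nic_rx_pkts() compares against its local copy to decide
 * whether a new packet has arrived.
 */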
282struct rx_status_block {
283 u32 word0;
284 u32 word1;
285};
286
287
288
289
290
291struct fbr_lookup {
292 void *virt[MAX_DESC_PER_RING_RX];
293 void *buffer1[MAX_DESC_PER_RING_RX];
294 void *buffer2[MAX_DESC_PER_RING_RX];
295 u32 bus_high[MAX_DESC_PER_RING_RX];
296 u32 bus_low[MAX_DESC_PER_RING_RX];
297 void *ring_virtaddr;
298 dma_addr_t ring_physaddr;
299 void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
300 dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
301 u64 real_physaddr;
302 u64 offset;
303 u32 local_full;
304 u32 num_entries;
305 u32 buffsize;
306};
307
308
309
310
311
312
313
314
315
316
317struct rx_ring {
318 struct fbr_lookup *fbr[NUM_FBRS];
319 void *ps_ring_virtaddr;
320 dma_addr_t ps_ring_physaddr;
321 u32 local_psr_full;
322 u32 psr_num_entries;
323
324 struct rx_status_block *rx_status_block;
325 dma_addr_t rx_status_bus;
326
327
328 struct list_head recv_list;
329 u32 num_ready_recv;
330
331 u32 num_rfd;
332
333 bool unfinished_receives;
334
335
336 struct kmem_cache *recv_lookaside;
337};
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
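/* struct tx_desc - TX DMA packet descriptor. Holds the 64-bit buffer
 * address (addr_hi/addr_lo), the length and VLAN fields packed into
 * len_vlan, and the per-descriptor control flags.
 */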
368struct tx_desc {
369 u32 addr_hi;
370 u32 addr_lo;
371 u32 len_vlan;
372 u32 flags;
373};
374
375
376
377
378
379
380
381struct tcb {
382 struct tcb *next;
383 u32 flags;
384 u32 count;
385 u32 stale;
386 struct sk_buff *skb;
387 u32 index;
388 u32 index_start;
389};
390
391
392struct tx_ring {
393
394 struct tcb *tcb_ring;
395
396
397 struct tcb *tcb_qhead;
398 struct tcb *tcb_qtail;
399
400
401
402
403
404
405
406 struct tcb *send_head;
407 struct tcb *send_tail;
408 int used;
409
410
411 struct tx_desc *tx_desc_ring;
412 dma_addr_t tx_desc_ring_pa;
413
414
415 u32 send_idx;
416
417
418 u32 *tx_status;
419 dma_addr_t tx_status_pa;
420
421
422 int since_irq;
423};
424
425
426
427
428
429#define NUM_DESC_PER_RING_TX 512
430#define NUM_TCB 64
431
432
433
434
435
436
437#define TX_ERROR_PERIOD 1000
438
439#define LO_MARK_PERCENT_FOR_PSR 15
440#define LO_MARK_PERCENT_FOR_RX 15
441
442
443struct rfd {
444 struct list_head list_node;
445 struct sk_buff *skb;
446 u32 len;
447 u16 bufferindex;
448 u8 ringindex;
449};
450
451
452#define FLOW_BOTH 0
453#define FLOW_TXONLY 1
454#define FLOW_RXONLY 2
455#define FLOW_NONE 3
456
457
458struct ce_stats {
459
460
461
462
463
464
465 u32 unicast_pkts_rcvd;
466 atomic_t unicast_pkts_xmtd;
467 u32 multicast_pkts_rcvd;
468 atomic_t multicast_pkts_xmtd;
469 u32 broadcast_pkts_rcvd;
470 atomic_t broadcast_pkts_xmtd;
471 u32 rcvd_pkts_dropped;
472
473
474 u32 tx_underflows;
475
476 u32 tx_collisions;
477 u32 tx_excessive_collisions;
478 u32 tx_first_collisions;
479 u32 tx_late_collisions;
480 u32 tx_max_pkt_errs;
481 u32 tx_deferred;
482
483
484 u32 rx_overflows;
485
486 u32 rx_length_errs;
487 u32 rx_align_errs;
488 u32 rx_crc_errs;
489 u32 rx_code_violations;
490 u32 rx_other_errs;
491
492 u32 synchronous_iterations;
493 u32 interrupt_status;
494};
495
496
497struct et131x_adapter {
498 struct net_device *netdev;
499 struct pci_dev *pdev;
500 struct mii_bus *mii_bus;
501 struct phy_device *phydev;
502 struct work_struct task;
503
504
505 u32 flags;
506
507
508 int link;
509
510
511 u8 rom_addr[ETH_ALEN];
512 u8 addr[ETH_ALEN];
513 bool has_eeprom;
514 u8 eeprom_data[2];
515
516
517 spinlock_t lock;
518
519 spinlock_t tcb_send_qlock;
520 spinlock_t tcb_ready_qlock;
521 spinlock_t send_hw_lock;
522
523 spinlock_t rcv_lock;
524 spinlock_t rcv_pend_lock;
525 spinlock_t fbr_lock;
526
527 spinlock_t phy_lock;
528
529
530 u32 packet_filter;
531
532
533 u32 multicast_addr_count;
534 u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];
535
536
537 struct address_map __iomem *regs;
538
539
540 u8 wanted_flow;
541 u32 registry_jumbo_packet;
542
543
544 u8 flowcontrol;
545
546
547 struct timer_list error_timer;
548
549
550
551
552 u8 boot_coma;
553
554
555
556
557
558 u16 pdown_speed;
559 u8 pdown_duplex;
560
561
562 struct tx_ring tx_ring;
563
564
565 struct rx_ring rx_ring;
566
567
568 struct ce_stats stats;
569
570 struct net_device_stats net_stats;
571};
572
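/* eeprom_wait_ready - wait for the EEPROM/I2C interface to become ready
 * @pdev: the PCI device on which the EEPROM resides
 * @status: if non-NULL, receives the raw LBCIF register value on success
 *
 * Polls the LBCIF DWORD1 group until the ready bits are set. Returns the
 * low status byte on success, -EIO on a config-space read failure, or
 * -ETIMEDOUT if the polls are exhausted.
 */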
573static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
574{
575 u32 reg;
576 int i;
577
578
579
580
581
582
583
584
585 for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
586
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
588 return -EIO;
589
590
591 if ((reg & 0x3000) == 0x3000) {
592 if (status)
593 *status = reg;
594 return reg & 0xFF;
595 }
596 }
597 return -ETIMEDOUT;
598}
599
600
601
602
603
604
605
606
607
608
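/* eeprom_write - write one byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the EEPROM address to write to
 * @data: the byte to write
 *
 * Returns 0 on success, a negative errno otherwise.
 */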
609static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
610{
611 struct pci_dev *pdev = adapter->pdev;
612 int index = 0;
613 int retries;
614 int err = 0;
615 int i2c_wack = 0;
616 int writeok = 0;
617 u32 status;
618 u32 val = 0;
619
620
621
622
623
624
625
626
627
628
629 err = eeprom_wait_ready(pdev, NULL);
630 if (err)
631 return err;
632
633
634
635
636
637
638
639 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
640 LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
641 return -EIO;
642
643 i2c_wack = 1;
644
645
646
647 for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
648
649 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
650 break;
651
652
653
654
655 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
656 break;
657
658
659
660
661
662
663
664
665 err = eeprom_wait_ready(pdev, &status);
666 if (err < 0)
667 return 0;
668
669
670
671
672
673
674 if ((status & LBCIF_STATUS_GENERAL_ERROR)
675 && adapter->pdev->revision == 0)
676 break;
677
678
679
680
681
682
683
684
685
686 if (status & LBCIF_STATUS_ACK_ERROR) {
687
688
689
690
691
692
693 udelay(10);
694 continue;
695 }
696
697 writeok = 1;
698 break;
699 }
700
701
702
703
704 udelay(10);
705
706 while (i2c_wack) {
707 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
708 LBCIF_CONTROL_LBCIF_ENABLE))
709 writeok = 0;
710
711
712
713
714 do {
715 pci_write_config_dword(pdev,
716 LBCIF_ADDRESS_REGISTER,
717 addr);
718 do {
719 pci_read_config_dword(pdev,
720 LBCIF_DATA_REGISTER, &val);
721 } while ((val & 0x00010000) == 0);
722 } while (val & 0x00040000);
723
724 if ((val & 0xFF00) != 0xC000 || index == 10000)
725 break;
726 index++;
727 }
728 return writeok ? 0 : -EIO;
729}
730
731
732
733
734
735
736
737
738
739
740
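/* eeprom_read - read one byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the EEPROM address to read
 * @pdata: where to store the byte that was read
 *
 * Returns 0 on success, a negative errno otherwise.
 */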
741static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
742{
743 struct pci_dev *pdev = adapter->pdev;
744 int err;
745 u32 status;
746
747
748
749
750
751
752 err = eeprom_wait_ready(pdev, NULL);
753 if (err)
754 return err;
755
756
757
758
759
760
761 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
762 LBCIF_CONTROL_LBCIF_ENABLE))
763 return -EIO;
764
765
766
767
768 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
769 return -EIO;
770
771
772
773
774
775 err = eeprom_wait_ready(pdev, &status);
776 if (err < 0)
777 return err;
778
779
780
781
782 *pdata = err;
783
784
785
786
787 return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
788}
789
790static int et131x_init_eeprom(struct et131x_adapter *adapter)
791{
792 struct pci_dev *pdev = adapter->pdev;
793 u8 eestatus;
794
795
796
797
798 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
799 &eestatus);
800
801
802
803
804
805
806
807 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
808 dev_err(&pdev->dev,
809 "Could not read PCI config space for EEPROM Status\n");
810 return -EIO;
811 }
812
813
814
815
816 if (eestatus & 0x4C) {
817 int write_failed = 0;
818 if (pdev->revision == 0x01) {
819 int i;
820 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
821
822
823
824
825
826 for (i = 0; i < 3; i++)
827 if (eeprom_write(adapter, i, eedata[i]) < 0)
828 write_failed = 1;
829 }
830 if (pdev->revision != 0x01 || write_failed) {
831 dev_err(&pdev->dev,
832 "Fatal EEPROM Status Error - 0x%04x\n", eestatus);
833
834
835
836
837
838
839
840 adapter->has_eeprom = 0;
841 return -EIO;
842 }
843 }
844 adapter->has_eeprom = 1;
845
846
847
848
849 eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
850 eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
851
852 if (adapter->eeprom_data[0] != 0xcd)
853
854 adapter->eeprom_data[1] = 0x00;
855
856 return 0;
857}
858
859
860
861
862
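/* et131x_rx_dma_enable - set up the RX DMA engine for normal operation
 * @adapter: pointer to our adapter structure
 *
 * Encodes the free buffer ring buffer sizes into the RX DMA CSR, writes
 * it, and then verifies that the engine has actually left the halt state.
 */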
863static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
864{
865
866 u32 csr = 0x2000;
867
868 if (adapter->rx_ring.fbr[0]->buffsize == 4096)
869 csr |= 0x0800;
870 else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
871 csr |= 0x1000;
872 else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
873 csr |= 0x1800;
874#ifdef USE_FBR0
875 csr |= 0x0400;
876 if (adapter->rx_ring.fbr[1]->buffsize == 256)
877 csr |= 0x0100;
878 else if (adapter->rx_ring.fbr[1]->buffsize == 512)
879 csr |= 0x0200;
880 else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
881 csr |= 0x0300;
882#endif
883 writel(csr, &adapter->regs->rxdma.csr);
884
885 csr = readl(&adapter->regs->rxdma.csr);
886 if ((csr & 0x00020000) != 0) {
887 udelay(5);
888 csr = readl(&adapter->regs->rxdma.csr);
889 if ((csr & 0x00020000) != 0) {
890 dev_err(&adapter->pdev->dev,
891 "RX Dma failed to exit halt state. CSR 0x%08x\n",
892 csr);
893 }
894 }
895}
896
897
898
899
900
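/* et131x_rx_dma_disable - halt the RX DMA engine
 * @adapter: pointer to our adapter structure
 */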
901static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
902{
903 u32 csr;
904
905 writel(0x00002001, &adapter->regs->rxdma.csr);
906 csr = readl(&adapter->regs->rxdma.csr);
907 if ((csr & 0x00020000) == 0) {
908 udelay(5);
909 csr = readl(&adapter->regs->rxdma.csr);
910 if ((csr & 0x00020000) == 0)
911 dev_err(&adapter->pdev->dev,
912 "RX Dma failed to enter halt state. CSR 0x%08x\n",
913 csr);
914 }
915}
916
917
918
919
920
921
922
923static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
924{
925
926
927
928 writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
929 &adapter->regs->txdma.csr);
930}
931
932static inline void add_10bit(u32 *v, int n)
933{
934 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
935}
936
937static inline void add_12bit(u32 *v, int n)
938{
939 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
940}
941
942
943
944
945
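/* et1310_config_mac_regs1 - initial MAC register configuration
 * @adapter: pointer to our adapter structure
 *
 * Resets the MAC into a known state, then programs the inter-packet gap,
 * half-duplex parameters, MII management configuration, station address
 * and maximum frame length.
 */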
946static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
947{
948 struct mac_regs __iomem *macregs = &adapter->regs->mac;
949 u32 station1;
950 u32 station2;
951 u32 ipg;
952
953
954
955
	writel(0xC00F0000, &macregs->cfg1);
957
958
959 ipg = 0x38005860;
960 ipg |= 0x50 << 8;
	writel(ipg, &macregs->ipg);
962
963
964
	writel(0x00A1F037, &macregs->hfdp);
966
967
	writel(0, &macregs->if_ctrl);
969
970
	writel(0x07, &macregs->mii_mgmt_cfg);
972
973
974
975
976
977
978
979
980 station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
981 (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
982 station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
983 (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
984 (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
985 adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);
988
989
990
991
992
993
994
995
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);
997
998
	writel(0, &macregs->cfg1);
1000}
1001
1002
1003
1004
1005
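/* et1310_config_mac_regs2 - speed/duplex dependent MAC configuration
 * @adapter: pointer to our adapter structure
 *
 * Programs cfg1/cfg2 and the interface control register according to the
 * PHY's negotiated speed, duplex and the driver's flow control settings,
 * then waits for the enable bits to sync.
 */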
1006static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
1007{
1008 int32_t delay = 0;
1009 struct mac_regs __iomem *mac = &adapter->regs->mac;
1010 struct phy_device *phydev = adapter->phydev;
1011 u32 cfg1;
1012 u32 cfg2;
1013 u32 ifctrl;
1014 u32 ctl;
1015
1016 ctl = readl(&adapter->regs->txmac.ctl);
1017 cfg1 = readl(&mac->cfg1);
1018 cfg2 = readl(&mac->cfg2);
1019 ifctrl = readl(&mac->if_ctrl);
1020
1021
1022 cfg2 &= ~0x300;
1023 if (phydev && phydev->speed == SPEED_1000) {
1024 cfg2 |= 0x200;
1025
1026 ifctrl &= ~(1 << 24);
1027 } else {
1028 cfg2 |= 0x100;
1029 ifctrl |= (1 << 24);
1030 }
1031
1032
1033 cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
1034
1035 cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
1036 if (adapter->flowcontrol == FLOW_RXONLY ||
1037 adapter->flowcontrol == FLOW_BOTH)
1038 cfg1 |= CFG1_RX_FLOW;
1039 writel(cfg1, &mac->cfg1);
1040
1041
1042
1043
1044 cfg2 |= 0x7016;
1045 cfg2 &= ~0x0021;
1046
1047
1048 if (phydev && phydev->duplex == DUPLEX_FULL)
1049 cfg2 |= 0x01;
1050
1051 ifctrl &= ~(1 << 26);
1052 if (phydev && phydev->duplex == DUPLEX_HALF)
1053 ifctrl |= (1<<26);
1054
1055 writel(ifctrl, &mac->if_ctrl);
1056 writel(cfg2, &mac->cfg2);
1057
1058 do {
1059 udelay(10);
1060 delay++;
1061 cfg1 = readl(&mac->cfg1);
1062 } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);
1063
1064 if (delay == 100) {
1065 dev_warn(&adapter->pdev->dev,
1066 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
1067 cfg1);
1068 }
1069
1070
1071 ctl |= 0x09;
1072 writel(ctl, &adapter->regs->txmac.ctl);
1073
1074
1075 if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
1076 et131x_rx_dma_enable(adapter);
1077 et131x_tx_dma_enable(adapter);
1078 }
1079}
1080
1081
1082
1083
1084
1085
1086
1087static int et1310_in_phy_coma(struct et131x_adapter *adapter)
1088{
1089 u32 pmcsr;
1090
1091 pmcsr = readl(&adapter->regs->global.pm_csr);
1092
1093 return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
1094}
1095
1096static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
1097{
1098 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1099 u32 hash1 = 0;
1100 u32 hash2 = 0;
1101 u32 hash3 = 0;
1102 u32 hash4 = 0;
1103 u32 pm_csr;
1104
1105
1106
1107
1108
1109
1110 if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
1111 int i;
1112
1113
1114 for (i = 0; i < adapter->multicast_addr_count; i++) {
1115 u32 result;
1116
1117 result = ether_crc(6, adapter->multicast_list[i]);
1118
1119 result = (result & 0x3F800000) >> 23;
1120
1121 if (result < 32) {
1122 hash1 |= (1 << result);
1123 } else if ((31 < result) && (result < 64)) {
1124 result -= 32;
1125 hash2 |= (1 << result);
1126 } else if ((63 < result) && (result < 96)) {
1127 result -= 64;
1128 hash3 |= (1 << result);
1129 } else {
1130 result -= 96;
1131 hash4 |= (1 << result);
1132 }
1133 }
1134 }
1135
1136
1137 pm_csr = readl(&adapter->regs->global.pm_csr);
1138 if (!et1310_in_phy_coma(adapter)) {
1139 writel(hash1, &rxmac->multi_hash1);
1140 writel(hash2, &rxmac->multi_hash2);
1141 writel(hash3, &rxmac->multi_hash3);
1142 writel(hash4, &rxmac->multi_hash4);
1143 }
1144}
1145
1146static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
1147{
1148 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1149 u32 uni_pf1;
1150 u32 uni_pf2;
1151 u32 uni_pf3;
1152 u32 pm_csr;
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163 uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
1164 (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
1165 (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
1166 adapter->addr[1];
1167
1168 uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
1169 (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
1170 (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
1171 adapter->addr[5];
1172
1173 uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
1174 (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
1175 (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
1176 adapter->addr[5];
1177
1178 pm_csr = readl(&adapter->regs->global.pm_csr);
1179 if (!et1310_in_phy_coma(adapter)) {
1180 writel(uni_pf1, &rxmac->uni_pf_addr1);
1181 writel(uni_pf2, &rxmac->uni_pf_addr2);
1182 writel(uni_pf3, &rxmac->uni_pf_addr3);
1183 }
1184}
1185
1186static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
1187{
1188 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1189 struct phy_device *phydev = adapter->phydev;
1190 u32 sa_lo;
1191 u32 sa_hi = 0;
1192 u32 pf_ctrl = 0;
1193
1194
1195 writel(0x8, &rxmac->ctrl);
1196
1197
1198 writel(0, &rxmac->crc0);
1199 writel(0, &rxmac->crc12);
1200 writel(0, &rxmac->crc34);
1201
1202
1203
1204
1205
1206 writel(0, &rxmac->mask0_word0);
1207 writel(0, &rxmac->mask0_word1);
1208 writel(0, &rxmac->mask0_word2);
1209 writel(0, &rxmac->mask0_word3);
1210
1211 writel(0, &rxmac->mask1_word0);
1212 writel(0, &rxmac->mask1_word1);
1213 writel(0, &rxmac->mask1_word2);
1214 writel(0, &rxmac->mask1_word3);
1215
1216 writel(0, &rxmac->mask2_word0);
1217 writel(0, &rxmac->mask2_word1);
1218 writel(0, &rxmac->mask2_word2);
1219 writel(0, &rxmac->mask2_word3);
1220
1221 writel(0, &rxmac->mask3_word0);
1222 writel(0, &rxmac->mask3_word1);
1223 writel(0, &rxmac->mask3_word2);
1224 writel(0, &rxmac->mask3_word3);
1225
1226 writel(0, &rxmac->mask4_word0);
1227 writel(0, &rxmac->mask4_word1);
1228 writel(0, &rxmac->mask4_word2);
1229 writel(0, &rxmac->mask4_word3);
1230
1231
1232 sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
1233 (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
1234 (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
1235 adapter->addr[5];
1236 writel(sa_lo, &rxmac->sa_lo);
1237
1238 sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
1239 adapter->addr[1];
1240 writel(sa_hi, &rxmac->sa_hi);
1241
1242
1243 writel(0, &rxmac->pf_ctrl);
1244
1245
1246 if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1247 et1310_setup_device_for_unicast(adapter);
1248 pf_ctrl |= 4;
1249 } else {
1250 writel(0, &rxmac->uni_pf_addr1);
1251 writel(0, &rxmac->uni_pf_addr2);
1252 writel(0, &rxmac->uni_pf_addr3);
1253 }
1254
1255
1256 if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1257 pf_ctrl |= 2;
1258 et1310_setup_device_for_multicast(adapter);
1259 }
1260
1261
1262 pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
1263 pf_ctrl |= 8;
1264
1265 if (adapter->registry_jumbo_packet > 8192)
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276 writel(0x41, &rxmac->mcif_ctrl_max_seg);
1277 else
1278 writel(0, &rxmac->mcif_ctrl_max_seg);
1279
1280
1281 writel(0, &rxmac->mcif_water_mark);
1282
1283
1284 writel(0, &rxmac->mif_ctrl);
1285
1286
1287 writel(0, &rxmac->space_avail);
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302 if (phydev && phydev->speed == SPEED_100)
1303 writel(0x30038, &rxmac->mif_ctrl);
1304 else
1305 writel(0x30030, &rxmac->mif_ctrl);
1306
1307
1308
1309
1310
1311
1312
1313 writel(pf_ctrl, &rxmac->pf_ctrl);
1314 writel(0x9, &rxmac->ctrl);
1315}
1316
1317static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1318{
1319 struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1320
1321
1322
1323
1324
1325 if (adapter->flowcontrol == FLOW_NONE)
1326 writel(0, &txmac->cf_param);
1327 else
1328 writel(0x40, &txmac->cf_param);
1329}
1330
1331static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1332{
1333 struct macstat_regs __iomem *macstat =
1334 &adapter->regs->macstat;
1335
1336
1337
1338
1339 writel(0, &macstat->txrx_0_64_byte_frames);
1340 writel(0, &macstat->txrx_65_127_byte_frames);
1341 writel(0, &macstat->txrx_128_255_byte_frames);
1342 writel(0, &macstat->txrx_256_511_byte_frames);
1343 writel(0, &macstat->txrx_512_1023_byte_frames);
1344 writel(0, &macstat->txrx_1024_1518_byte_frames);
1345 writel(0, &macstat->txrx_1519_1522_gvln_frames);
1346
1347 writel(0, &macstat->rx_bytes);
1348 writel(0, &macstat->rx_packets);
1349 writel(0, &macstat->rx_fcs_errs);
1350 writel(0, &macstat->rx_multicast_packets);
1351 writel(0, &macstat->rx_broadcast_packets);
1352 writel(0, &macstat->rx_control_frames);
1353 writel(0, &macstat->rx_pause_frames);
1354 writel(0, &macstat->rx_unknown_opcodes);
1355 writel(0, &macstat->rx_align_errs);
1356 writel(0, &macstat->rx_frame_len_errs);
1357 writel(0, &macstat->rx_code_errs);
1358 writel(0, &macstat->rx_carrier_sense_errs);
1359 writel(0, &macstat->rx_undersize_packets);
1360 writel(0, &macstat->rx_oversize_packets);
1361 writel(0, &macstat->rx_fragment_packets);
1362 writel(0, &macstat->rx_jabbers);
1363 writel(0, &macstat->rx_drops);
1364
1365 writel(0, &macstat->tx_bytes);
1366 writel(0, &macstat->tx_packets);
1367 writel(0, &macstat->tx_multicast_packets);
1368 writel(0, &macstat->tx_broadcast_packets);
1369 writel(0, &macstat->tx_pause_frames);
1370 writel(0, &macstat->tx_deferred);
1371 writel(0, &macstat->tx_excessive_deferred);
1372 writel(0, &macstat->tx_single_collisions);
1373 writel(0, &macstat->tx_multiple_collisions);
1374 writel(0, &macstat->tx_late_collisions);
1375 writel(0, &macstat->tx_excessive_collisions);
1376 writel(0, &macstat->tx_total_collisions);
1377 writel(0, &macstat->tx_pause_honored_frames);
1378 writel(0, &macstat->tx_drops);
1379 writel(0, &macstat->tx_jabbers);
1380 writel(0, &macstat->tx_fcs_errs);
1381 writel(0, &macstat->tx_control_frames);
1382 writel(0, &macstat->tx_oversize_frames);
1383 writel(0, &macstat->tx_undersize_frames);
1384 writel(0, &macstat->tx_fragments);
1385 writel(0, &macstat->carry_reg1);
1386 writel(0, &macstat->carry_reg2);
1387
1388
1389
1390
1391
1392 writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1393 writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1394}
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
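/* et131x_phy_mii_read - read a PHY register over the MII management bus
 * @adapter: pointer to our adapter structure
 * @addr: the address of the PHY on the MII bus
 * @reg: the register to read
 * @value: where to store the 16-bit value read
 *
 * Returns 0 on success, -EIO if the read did not complete in time.
 */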
1405static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1406 u8 reg, u16 *value)
1407{
1408 struct mac_regs __iomem *mac = &adapter->regs->mac;
1409 int status = 0;
1410 u32 delay = 0;
1411 u32 mii_addr;
1412 u32 mii_cmd;
1413 u32 mii_indicator;
1414
1415
1416
1417
1418 mii_addr = readl(&mac->mii_mgmt_addr);
1419 mii_cmd = readl(&mac->mii_mgmt_cmd);
1420
1421
1422 writel(0, &mac->mii_mgmt_cmd);
1423
1424
1425 writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1426
1427 writel(0x1, &mac->mii_mgmt_cmd);
1428
1429 do {
1430 udelay(50);
1431 delay++;
1432 mii_indicator = readl(&mac->mii_mgmt_indicator);
1433 } while ((mii_indicator & MGMT_WAIT) && delay < 50);
1434
1435
1436 if (delay == 50) {
1437 dev_warn(&adapter->pdev->dev,
1438 "reg 0x%08x could not be read\n", reg);
1439 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1440 mii_indicator);
1441
1442 status = -EIO;
1443 }
1444
1445
1446
1447 *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
1448
1449
1450 writel(0, &mac->mii_mgmt_cmd);
1451
1452
1453
1454
1455 writel(mii_addr, &mac->mii_mgmt_addr);
1456 writel(mii_cmd, &mac->mii_mgmt_cmd);
1457
1458 return status;
1459}
1460
1461static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1462{
1463 struct phy_device *phydev = adapter->phydev;
1464
1465 if (!phydev)
1466 return -EIO;
1467
1468 return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
1469}
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
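/* et131x_mii_write - write a PHY register over the MII management bus
 * @adapter: pointer to our adapter structure
 * @reg: the register to write
 * @value: the 16-bit value to write
 *
 * Returns 0 on success, -EIO if the write did not complete in time.
 */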
1481static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
1482{
1483 struct mac_regs __iomem *mac = &adapter->regs->mac;
1484 struct phy_device *phydev = adapter->phydev;
1485 int status = 0;
1486 u8 addr;
1487 u32 delay = 0;
1488 u32 mii_addr;
1489 u32 mii_cmd;
1490 u32 mii_indicator;
1491
1492 if (!phydev)
1493 return -EIO;
1494
1495 addr = phydev->addr;
1496
1497
1498
1499
1500 mii_addr = readl(&mac->mii_mgmt_addr);
1501 mii_cmd = readl(&mac->mii_mgmt_cmd);
1502
1503
1504 writel(0, &mac->mii_mgmt_cmd);
1505
1506
1507 writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1508
1509
1510 writel(value, &mac->mii_mgmt_ctrl);
1511
1512 do {
1513 udelay(50);
1514 delay++;
1515 mii_indicator = readl(&mac->mii_mgmt_indicator);
1516 } while ((mii_indicator & MGMT_BUSY) && delay < 100);
1517
1518
1519 if (delay == 100) {
1520 u16 tmp;
1521
1522 dev_warn(&adapter->pdev->dev,
1523 "reg 0x%08x could not be written", reg);
1524 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1525 mii_indicator);
1526 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
1527 readl(&mac->mii_mgmt_cmd));
1528
1529 et131x_mii_read(adapter, reg, &tmp);
1530
1531 status = -EIO;
1532 }
1533
1534 writel(0, &mac->mii_mgmt_cmd);
1535
1536
1537
1538
1539
1540 writel(mii_addr, &mac->mii_mgmt_addr);
1541 writel(mii_cmd, &mac->mii_mgmt_cmd);
1542
1543 return status;
1544}
1545
1546
1547static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
1548 u16 action, u16 regnum, u16 bitnum,
1549 u8 *value)
1550{
1551 u16 reg;
1552 u16 mask = 0x0001 << bitnum;
1553
1554
	et131x_mii_read(adapter, regnum, &reg);
1556
1557 switch (action) {
1558 case TRUEPHY_BIT_READ:
1559 *value = (reg & mask) >> bitnum;
1560 break;
1561
1562 case TRUEPHY_BIT_SET:
1563 et131x_mii_write(adapter, regnum, reg | mask);
1564 break;
1565
1566 case TRUEPHY_BIT_CLEAR:
1567 et131x_mii_write(adapter, regnum, reg & ~mask);
1568 break;
1569
1570 default:
1571 break;
1572 }
1573}
1574
1575static void et1310_config_flow_control(struct et131x_adapter *adapter)
1576{
1577 struct phy_device *phydev = adapter->phydev;
1578
1579 if (phydev->duplex == DUPLEX_HALF) {
1580 adapter->flowcontrol = FLOW_NONE;
1581 } else {
1582 char remote_pause, remote_async_pause;
1583
1584 et1310_phy_access_mii_bit(adapter,
1585 TRUEPHY_BIT_READ, 5, 10, &remote_pause);
1586 et1310_phy_access_mii_bit(adapter,
1587 TRUEPHY_BIT_READ, 5, 11,
1588 &remote_async_pause);
1589
1590 if ((remote_pause == TRUEPHY_BIT_SET) &&
1591 (remote_async_pause == TRUEPHY_BIT_SET)) {
1592 adapter->flowcontrol = adapter->wanted_flow;
1593 } else if ((remote_pause == TRUEPHY_BIT_SET) &&
1594 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1595 if (adapter->wanted_flow == FLOW_BOTH)
1596 adapter->flowcontrol = FLOW_BOTH;
1597 else
1598 adapter->flowcontrol = FLOW_NONE;
1599 } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
1600 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1601 adapter->flowcontrol = FLOW_NONE;
1602 } else {
1603
1604 if (adapter->wanted_flow == FLOW_BOTH)
1605 adapter->flowcontrol = FLOW_RXONLY;
1606 else
1607 adapter->flowcontrol = FLOW_NONE;
1608 }
1609 }
1610}
1611
1612
1613
1614
1615
1616static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
1617{
1618 struct ce_stats *stats = &adapter->stats;
1619 struct macstat_regs __iomem *macstat =
1620 &adapter->regs->macstat;
1621
1622 stats->tx_collisions += readl(&macstat->tx_total_collisions);
1623 stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
1624 stats->tx_deferred += readl(&macstat->tx_deferred);
1625 stats->tx_excessive_collisions +=
1626 readl(&macstat->tx_multiple_collisions);
1627 stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
1628 stats->tx_underflows += readl(&macstat->tx_undersize_frames);
1629 stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);
1630
1631 stats->rx_align_errs += readl(&macstat->rx_align_errs);
1632 stats->rx_crc_errs += readl(&macstat->rx_code_errs);
1633 stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
1634 stats->rx_overflows += readl(&macstat->rx_oversize_packets);
1635 stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
1636 stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
1637 stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
1638}
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1649{
1650 u32 carry_reg1;
1651 u32 carry_reg2;
1652
1653
1654
1655
1656 carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1657 carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1658
1659 writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1660 writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1661
1662
1663
1664
1665
1666
1667
1668 if (carry_reg1 & (1 << 14))
1669 adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
1670 if (carry_reg1 & (1 << 8))
1671 adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
1672 if (carry_reg1 & (1 << 7))
1673 adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
1674 if (carry_reg1 & (1 << 2))
1675 adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
1676 if (carry_reg1 & (1 << 6))
1677 adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
1678 if (carry_reg1 & (1 << 3))
1679 adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
1680 if (carry_reg1 & (1 << 0))
1681 adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
1682 if (carry_reg2 & (1 << 16))
1683 adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
1684 if (carry_reg2 & (1 << 15))
1685 adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
1686 if (carry_reg2 & (1 << 6))
1687 adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1688 if (carry_reg2 & (1 << 8))
1689 adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
1690 if (carry_reg2 & (1 << 5))
1691 adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1692 if (carry_reg2 & (1 << 4))
1693 adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
1694 if (carry_reg2 & (1 << 2))
1695 adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
1696}
1697
1698static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1699{
1700 struct net_device *netdev = bus->priv;
1701 struct et131x_adapter *adapter = netdev_priv(netdev);
1702 u16 value;
1703 int ret;
1704
1705 ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1706
1707 if (ret < 0)
1708 return ret;
1709 else
1710 return value;
1711}
1712
1713static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
1714 int reg, u16 value)
1715{
1716 struct net_device *netdev = bus->priv;
1717 struct et131x_adapter *adapter = netdev_priv(netdev);
1718
1719 return et131x_mii_write(adapter, reg, value);
1720}
1721
1722static int et131x_mdio_reset(struct mii_bus *bus)
1723{
1724 struct net_device *netdev = bus->priv;
1725 struct et131x_adapter *adapter = netdev_priv(netdev);
1726
1727 et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
1728
1729 return 0;
1730}
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
1743{
1744 u16 data;
1745
1746 et131x_mii_read(adapter, MII_BMCR, &data);
1747 data &= ~BMCR_PDOWN;
1748 if (down)
1749 data |= BMCR_PDOWN;
1750 et131x_mii_write(adapter, MII_BMCR, data);
1751}
1752
1753
1754
1755
1756
1757
1758static void et131x_xcvr_init(struct et131x_adapter *adapter)
1759{
1760 u16 imr;
1761 u16 isr;
1762 u16 lcr2;
1763
1764 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
1765 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
1766
1767
1768
1769
1770 imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
1771 ET_PHY_INT_MASK_LINKSTAT &
1772 ET_PHY_INT_MASK_ENABLE);
1773
1774 et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784 if ((adapter->eeprom_data[1] & 0x4) == 0) {
1785 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1786
1787 lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T);
1788 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1789
1790 if ((adapter->eeprom_data[1] & 0x8) == 0)
1791 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1792 else
1793 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1794
1795 et131x_mii_write(adapter, PHY_LED_2, lcr2);
1796 }
1797}
1798
1799
1800
1801
1802
1803
1804
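/* et131x_configure_global_regs - configure the JAGCore global registers
 * @adapter: pointer to our adapter structure
 *
 * Splits the internal packet memory between the RX and TX queues based on
 * the configured jumbo packet size, and disables loopback, MSI and the
 * watchdog timer.
 */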
1805static void et131x_configure_global_regs(struct et131x_adapter *adapter)
1806{
1807 struct global_regs __iomem *regs = &adapter->regs->global;
1808
	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
1811
1812 if (adapter->registry_jumbo_packet < 2048) {
1813
1814
1815
1816
1817
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
1820 } else if (adapter->registry_jumbo_packet < 8192) {
1821
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
1824 } else {
1825
1826
1827
1828
1829
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
1832 }
1833
1834
	writel(0, &regs->loopback);
1836
1837
	writel(0, &regs->msi_config);
1839
1840
1841
1842
	writel(0, &regs->watchdog_timer);
1844}
1845
1846
1847
1848
1849
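/* et131x_config_rx_dma_regs - program the RX DMA engine
 * @adapter: pointer to our adapter structure
 *
 * Halts the engine, then loads the status write-back address, the packet
 * status ring and the free buffer ring(s), along with the interrupt
 * coalescing parameters (num_pkt_done and max_pkt_time).
 */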
1850static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1851{
1852 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1853 struct rx_ring *rx_local = &adapter->rx_ring;
1854 struct fbr_desc *fbr_entry;
1855 u32 entry;
1856 u32 psr_num_des;
1857 unsigned long flags;
1858
1859
1860 et131x_rx_dma_disable(adapter);
1861
1862
1863
1864
1865
1866
1867
1868
1869 writel((u32) ((u64)rx_local->rx_status_bus >> 32),
1870 &rx_dma->dma_wb_base_hi);
1871 writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
1872
1873 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1874
1875
1876
1877
1878 writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
1879 &rx_dma->psr_base_hi);
1880 writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
1881 writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1882 writel(0, &rx_dma->psr_full_offset);
1883
1884 psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
1885 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1886 &rx_dma->psr_min_des);
1887
1888 spin_lock_irqsave(&adapter->rcv_lock, flags);
1889
1890
1891 rx_local->local_psr_full = 0;
1892
1893
1894 fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
1895 for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
1896 fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
1897 fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
1898 fbr_entry->word2 = entry;
1899 fbr_entry++;
1900 }
1901
1902
1903
1904
1905 writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
1906 &rx_dma->fbr1_base_hi);
1907 writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
1908 writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
1909 writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
1910
1911
1912
1913
1914 rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
1915 writel(
1916 ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1917 &rx_dma->fbr1_min_des);
1918
1919#ifdef USE_FBR0
1920
1921 fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
1922 for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
1923 fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
1924 fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
1925 fbr_entry->word2 = entry;
1926 fbr_entry++;
1927 }
1928
1929 writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
1930 &rx_dma->fbr0_base_hi);
1931 writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
1932 writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
1933 writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
1934
1935
1936
1937
1938 rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
1939 writel(
1940 ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1941 &rx_dma->fbr0_min_des);
1942#endif
1943
1944
1945
1946
1947
1948
1949 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1950
1951
1952
1953
1954
1955
1956 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1957
1958 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1959}
1960
1961
1962
1963
1964
1965
1966
1967
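/* et131x_config_tx_dma_regs - program the TX DMA engine
 * @adapter: pointer to our adapter structure
 *
 * Loads the packet descriptor ring and status write-back addresses and
 * resets the service request and send index to the start of the ring.
 */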
1968static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1969{
1970 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1971
1972
1973 writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
1974 &txdma->pr_base_hi);
1975 writel((u32) adapter->tx_ring.tx_desc_ring_pa,
1976 &txdma->pr_base_lo);
1977
1978
1979 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1980
1981
1982 writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
1983 &txdma->dma_wb_base_hi);
1984 writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
1985
1986 *adapter->tx_ring.tx_status = 0;
1987
1988 writel(0, &txdma->service_request);
1989 adapter->tx_ring.send_idx = 0;
1990}
1991
1992
1993
1994
1995
1996
1997
1998static void et131x_adapter_setup(struct et131x_adapter *adapter)
1999{
2000
2001 et131x_configure_global_regs(adapter);
2002
2003 et1310_config_mac_regs1(adapter);
2004
2005
2006
2007 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
2008
2009 et1310_config_rxmac_regs(adapter);
2010 et1310_config_txmac_regs(adapter);
2011
2012 et131x_config_rx_dma_regs(adapter);
2013 et131x_config_tx_dma_regs(adapter);
2014
2015 et1310_config_macstat_regs(adapter);
2016
2017 et1310_phy_power_down(adapter, 0);
2018 et131x_xcvr_init(adapter);
2019}
2020
2021
2022
2023
2024
2025static void et131x_soft_reset(struct et131x_adapter *adapter)
2026{
2027
2028 writel(0xc00f0000, &adapter->regs->mac.cfg1);
2029
2030
2031 writel(0x7F, &adapter->regs->global.sw_reset);
2032 writel(0x000f0000, &adapter->regs->mac.cfg1);
2033 writel(0x00000000, &adapter->regs->mac.cfg1);
2034}
2035
2036
2037
2038
2039
2040
2041
2042
2043static void et131x_enable_interrupts(struct et131x_adapter *adapter)
2044{
2045 u32 mask;
2046
2047
2048 if (adapter->flowcontrol == FLOW_TXONLY ||
2049 adapter->flowcontrol == FLOW_BOTH)
2050 mask = INT_MASK_ENABLE;
2051 else
2052 mask = INT_MASK_ENABLE_NO_FLOW;
2053
2054 writel(mask, &adapter->regs->global.int_mask);
2055}
2056
2057
2058
2059
2060
2061
2062
2063static void et131x_disable_interrupts(struct et131x_adapter *adapter)
2064{
2065
2066 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
2067}
2068
2069
2070
2071
2072
2073static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2074{
2075
2076 writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
2077 &adapter->regs->txdma.csr);
2078}
2079
2080
2081
2082
2083
2084static void et131x_enable_txrx(struct net_device *netdev)
2085{
2086 struct et131x_adapter *adapter = netdev_priv(netdev);
2087
2088
2089 et131x_rx_dma_enable(adapter);
2090 et131x_tx_dma_enable(adapter);
2091
2092
2093 if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
2094 et131x_enable_interrupts(adapter);
2095
2096
2097 netif_start_queue(netdev);
2098}
2099
2100
2101
2102
2103
2104static void et131x_disable_txrx(struct net_device *netdev)
2105{
2106 struct et131x_adapter *adapter = netdev_priv(netdev);
2107
2108
2109 netif_stop_queue(netdev);
2110
2111
2112 et131x_rx_dma_disable(adapter);
2113 et131x_tx_dma_disable(adapter);
2114
2115
2116 et131x_disable_interrupts(adapter);
2117}
2118
2119
2120
2121
2122
2123static void et131x_init_send(struct et131x_adapter *adapter)
2124{
2125 struct tcb *tcb;
2126 u32 ct;
2127 struct tx_ring *tx_ring;
2128
2129
2130 tx_ring = &adapter->tx_ring;
2131 tcb = adapter->tx_ring.tcb_ring;
2132
2133 tx_ring->tcb_qhead = tcb;
2134
2135 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2136
2137
	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in each TCB to point to the
		 * next TCB in the chain
		 */
		tcb->next = tcb + 1;
2143
2144
2145 tcb--;
2146 tx_ring->tcb_qtail = tcb;
2147 tcb->next = NULL;
2148
2149 tx_ring->send_head = NULL;
2150 tx_ring->send_tail = NULL;
2151}
2152
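/* et1310_enable_phy_coma - drive the device into coma (low power) mode
 * @adapter: pointer to our adapter structure
 *
 * Flags the adapter as being in lower power, disables TX/RX, then clears
 * the init bit and sets the PHY software coma bit in the PM CSR so that
 * the JAGCore is gated off and the gigE PHY enters its coma state.
 */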
2173static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2174{
2175 unsigned long flags;
2176 u32 pmcsr;
2177
2178 pmcsr = readl(&adapter->regs->global.pm_csr);
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191 spin_lock_irqsave(&adapter->send_hw_lock, flags);
2192 adapter->flags |= fMP_ADAPTER_LOWER_POWER;
2193 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2194
2195
2196
2197 et131x_disable_txrx(adapter->netdev);
2198
2199
2200 pmcsr &= ~ET_PMCSR_INIT;
2201 writel(pmcsr, &adapter->regs->global.pm_csr);
2202
2203
2204 pmcsr |= ET_PM_PHY_SW_COMA;
2205 writel(pmcsr, &adapter->regs->global.pm_csr);
2206}
2207
2208
2209
2210
2211
2212static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2213{
2214 u32 pmcsr;
2215
2216 pmcsr = readl(&adapter->regs->global.pm_csr);
2217
2218
2219 pmcsr |= ET_PMCSR_INIT;
2220 pmcsr &= ~ET_PM_PHY_SW_COMA;
2221 writel(pmcsr, &adapter->regs->global.pm_csr);
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233 et131x_init_send(adapter);
2234
2235
2236
2237
2238
2239 et131x_soft_reset(adapter);
2240
2241
2242 et131x_adapter_setup(adapter);
2243
2244
2245 adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
2246
2247 et131x_enable_txrx(adapter->netdev);
2248}
2249
2250static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2251{
2252 u32 tmp_free_buff_ring = *free_buff_ring;
2253 tmp_free_buff_ring++;
2254
2255
2256
2257
2258 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2259 tmp_free_buff_ring &= ~ET_DMA10_MASK;
2260 tmp_free_buff_ring ^= ET_DMA10_WRAP;
2261 }
2262
2263 tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2264 *free_buff_ring = tmp_free_buff_ring;
2265 return tmp_free_buff_ring;
2266}
2267
2268
2269
2270
2271
2272
2273
2274
2275static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
2276 u64 *phys_addr, u64 *offset,
2277 u64 mask)
2278{
2279 u64 new_addr = *phys_addr & ~mask;
2280
2281 *offset = 0;
2282
2283 if (new_addr != *phys_addr) {
2284
2285 new_addr += mask + 1;
2286
2287 *offset = new_addr - *phys_addr;
2288
2289 *phys_addr = new_addr;
2290 }
2291}
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
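/* et131x_rx_dma_memory_alloc - allocate the RX DMA memory
 * @adapter: pointer to our adapter structure
 *
 * Sizes the free buffer rings from the configured jumbo packet size, then
 * allocates the ring descriptors, the buffer chunks behind them, the
 * packet status ring, the status block and the RFD lookaside cache.
 *
 * Returns 0 on success, -ENOMEM if an allocation fails.
 */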
2302static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2303{
2304 u32 i, j;
2305 u32 bufsize;
2306 u32 pktstat_ringsize, fbr_chunksize;
2307 struct rx_ring *rx_ring;
2308
2309
2310 rx_ring = &adapter->rx_ring;
2311
2312
2313#ifdef USE_FBR0
2314 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2315#endif
2316 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336 if (adapter->registry_jumbo_packet < 2048) {
2337#ifdef USE_FBR0
2338 rx_ring->fbr[1]->buffsize = 256;
2339 rx_ring->fbr[1]->num_entries = 512;
2340#endif
2341 rx_ring->fbr[0]->buffsize = 2048;
2342 rx_ring->fbr[0]->num_entries = 512;
2343 } else if (adapter->registry_jumbo_packet < 4096) {
2344#ifdef USE_FBR0
2345 rx_ring->fbr[1]->buffsize = 512;
2346 rx_ring->fbr[1]->num_entries = 1024;
2347#endif
2348 rx_ring->fbr[0]->buffsize = 4096;
2349 rx_ring->fbr[0]->num_entries = 512;
2350 } else {
2351#ifdef USE_FBR0
2352 rx_ring->fbr[1]->buffsize = 1024;
2353 rx_ring->fbr[1]->num_entries = 768;
2354#endif
2355 rx_ring->fbr[0]->buffsize = 16384;
2356 rx_ring->fbr[0]->num_entries = 128;
2357 }
2358
2359#ifdef USE_FBR0
2360 adapter->rx_ring.psr_num_entries =
2361 adapter->rx_ring.fbr[1]->num_entries +
2362 adapter->rx_ring.fbr[0]->num_entries;
2363#else
2364 adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
2365#endif
2366
2367
2368 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2369 0xfff;
2370 rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2371 bufsize,
2372 &rx_ring->fbr[0]->ring_physaddr,
2373 GFP_KERNEL);
2374 if (!rx_ring->fbr[0]->ring_virtaddr) {
2375 dev_err(&adapter->pdev->dev,
2376 "Cannot alloc memory for Free Buffer Ring 1\n");
2377 return -ENOMEM;
2378 }
2379
2380
2381
2382
2383
2384
2385
2386
2387 rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
2388
2389
2390 et131x_align_allocated_memory(adapter,
2391 &rx_ring->fbr[0]->real_physaddr,
2392 &rx_ring->fbr[0]->offset, 0x0FFF);
2393
2394 rx_ring->fbr[0]->ring_virtaddr =
2395 (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
2396 rx_ring->fbr[0]->offset);
2397
2398#ifdef USE_FBR0
2399
2400 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2401 0xfff;
2402 rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2403 bufsize,
2404 &rx_ring->fbr[1]->ring_physaddr,
2405 GFP_KERNEL);
2406 if (!rx_ring->fbr[1]->ring_virtaddr) {
2407 dev_err(&adapter->pdev->dev,
2408 "Cannot alloc memory for Free Buffer Ring 0\n");
2409 return -ENOMEM;
2410 }
2411
2412
2413
2414
2415
2416
2417
2418
2419 rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
2420
2421
2422 et131x_align_allocated_memory(adapter,
2423 &rx_ring->fbr[1]->real_physaddr,
2424 &rx_ring->fbr[1]->offset, 0x0FFF);
2425
2426 rx_ring->fbr[1]->ring_virtaddr =
2427 (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
2428 rx_ring->fbr[1]->offset);
2429#endif
2430 for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2431 u64 fbr1_tmp_physaddr;
2432 u64 fbr1_offset;
2433 u32 fbr1_align;
2434
2435
2436
2437
2438
2439
2440
2441
2442 if (rx_ring->fbr[0]->buffsize > 4096)
2443 fbr1_align = 4096;
2444 else
2445 fbr1_align = rx_ring->fbr[0]->buffsize;
2446
2447 fbr_chunksize =
2448 (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2449 rx_ring->fbr[0]->mem_virtaddrs[i] =
2450 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2451 &rx_ring->fbr[0]->mem_physaddrs[i],
2452 GFP_KERNEL);
2453
2454 if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2455 dev_err(&adapter->pdev->dev,
2456 "Could not alloc memory\n");
2457 return -ENOMEM;
2458 }
2459
2460
2461 fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2462
2463 et131x_align_allocated_memory(adapter,
2464 &fbr1_tmp_physaddr,
2465 &fbr1_offset, (fbr1_align - 1));
2466
2467 for (j = 0; j < FBR_CHUNKS; j++) {
2468 u32 index = (i * FBR_CHUNKS) + j;
2469
2470
2471
2472
2473 rx_ring->fbr[0]->virt[index] =
2474 (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2475 (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2476
2477
2478
2479
2480 rx_ring->fbr[0]->bus_high[index] =
2481 (u32) (fbr1_tmp_physaddr >> 32);
2482 rx_ring->fbr[0]->bus_low[index] =
2483 (u32) fbr1_tmp_physaddr;
2484
2485 fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2486
2487 rx_ring->fbr[0]->buffer1[index] =
2488 rx_ring->fbr[0]->virt[index];
2489 rx_ring->fbr[0]->buffer2[index] =
2490 rx_ring->fbr[0]->virt[index] - 4;
2491 }
2492 }
2493
2494#ifdef USE_FBR0
2495
2496 for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2497 u64 fbr0_tmp_physaddr;
2498 u64 fbr0_offset;
2499
2500 fbr_chunksize =
2501 ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2502 rx_ring->fbr[1]->mem_virtaddrs[i] =
2503 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2504 &rx_ring->fbr[1]->mem_physaddrs[i],
2505 GFP_KERNEL);
2506
2507 if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2508 dev_err(&adapter->pdev->dev,
2509 "Could not alloc memory\n");
2510 return -ENOMEM;
2511 }
2512
2513
2514 fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2515
2516 et131x_align_allocated_memory(adapter,
2517 &fbr0_tmp_physaddr,
2518 &fbr0_offset,
2519 rx_ring->fbr[1]->buffsize - 1);
2520
2521 for (j = 0; j < FBR_CHUNKS; j++) {
2522 u32 index = (i * FBR_CHUNKS) + j;
2523
2524 rx_ring->fbr[1]->virt[index] =
2525 (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2526 (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2527
2528 rx_ring->fbr[1]->bus_high[index] =
2529 (u32) (fbr0_tmp_physaddr >> 32);
2530 rx_ring->fbr[1]->bus_low[index] =
2531 (u32) fbr0_tmp_physaddr;
2532
2533 fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2534
2535 rx_ring->fbr[1]->buffer1[index] =
2536 rx_ring->fbr[1]->virt[index];
2537 rx_ring->fbr[1]->buffer2[index] =
2538 rx_ring->fbr[1]->virt[index] - 4;
2539 }
2540 }
2541#endif
2542
2543
2544 pktstat_ringsize =
2545 sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2546
2547 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2548 pktstat_ringsize,
2549 &rx_ring->ps_ring_physaddr,
2550 GFP_KERNEL);
2551
2552 if (!rx_ring->ps_ring_virtaddr) {
2553 dev_err(&adapter->pdev->dev,
2554 "Cannot alloc memory for Packet Status Ring\n");
2555 return -ENOMEM;
2556 }
	dev_info(&adapter->pdev->dev, "Packet Status Ring %lx\n",
		 (unsigned long) rx_ring->ps_ring_physaddr);
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2569 sizeof(struct rx_status_block),
2570 &rx_ring->rx_status_bus,
2571 GFP_KERNEL);
2572 if (!rx_ring->rx_status_block) {
2573 dev_err(&adapter->pdev->dev,
2574 "Cannot alloc memory for Status Block\n");
2575 return -ENOMEM;
2576 }
2577 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
	dev_info(&adapter->pdev->dev, "PRS %lx\n",
		 (unsigned long)rx_ring->rx_status_bus);
2579
2580
2581
2582
2583
2584
2585
2586 rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
2587 sizeof(struct rfd),
2588 0,
2589 SLAB_CACHE_DMA |
2590 SLAB_HWCACHE_ALIGN,
2591 NULL);
2592
2593 adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2594
2595
2596
2597
2598 INIT_LIST_HEAD(&rx_ring->recv_list);
2599 return 0;
2600}
2601
2602
2603
2604
2605
2606static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2607{
2608 u32 index;
2609 u32 bufsize;
2610 u32 pktstat_ringsize;
2611 struct rfd *rfd;
2612 struct rx_ring *rx_ring;
2613
2614
2615 rx_ring = &adapter->rx_ring;
2616
2617
2618 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2619
2620 while (!list_empty(&rx_ring->recv_list)) {
2621 rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2622 struct rfd, list_node);
2623
2624 list_del(&rfd->list_node);
2625 rfd->skb = NULL;
2626 kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2627 }
2628
2629
2630 if (rx_ring->fbr[0]->ring_virtaddr) {
2631
2632 for (index = 0; index <
2633 (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2634 if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2635 u32 fbr1_align;
2636
2637 if (rx_ring->fbr[0]->buffsize > 4096)
2638 fbr1_align = 4096;
2639 else
2640 fbr1_align = rx_ring->fbr[0]->buffsize;
2641
2642 bufsize =
2643 (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2644 fbr1_align - 1;
2645
2646 dma_free_coherent(&adapter->pdev->dev,
2647 bufsize,
2648 rx_ring->fbr[0]->mem_virtaddrs[index],
2649 rx_ring->fbr[0]->mem_physaddrs[index]);
2650
2651 rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2652 }
2653 }
2654
2655
2656 rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2657 rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2658
2659 bufsize =
2660 (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2661 0xfff;
2662
2663 dma_free_coherent(&adapter->pdev->dev, bufsize,
2664 rx_ring->fbr[0]->ring_virtaddr,
2665 rx_ring->fbr[0]->ring_physaddr);
2666
2667 rx_ring->fbr[0]->ring_virtaddr = NULL;
2668 }
2669
2670#ifdef USE_FBR0
2671
2672 if (rx_ring->fbr[1]->ring_virtaddr) {
2673
2674 for (index = 0; index <
2675 (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2676 if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2677 bufsize =
2678 (rx_ring->fbr[1]->buffsize *
2679 (FBR_CHUNKS + 1)) - 1;
2680
2681 dma_free_coherent(&adapter->pdev->dev,
2682 bufsize,
2683 rx_ring->fbr[1]->mem_virtaddrs[index],
2684 rx_ring->fbr[1]->mem_physaddrs[index]);
2685
2686 rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2687 }
2688 }
2689
2690
2691 rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2692 rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2693
2694 bufsize =
2695 (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2696 0xfff;
2697
2698 dma_free_coherent(&adapter->pdev->dev,
2699 bufsize,
2700 rx_ring->fbr[1]->ring_virtaddr,
2701 rx_ring->fbr[1]->ring_physaddr);
2702
2703 rx_ring->fbr[1]->ring_virtaddr = NULL;
2704 }
2705#endif
2706
2707
2708 if (rx_ring->ps_ring_virtaddr) {
2709 pktstat_ringsize =
2710 sizeof(struct pkt_stat_desc) *
2711 adapter->rx_ring.psr_num_entries;
2712
2713 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2714 rx_ring->ps_ring_virtaddr,
2715 rx_ring->ps_ring_physaddr);
2716
2717 rx_ring->ps_ring_virtaddr = NULL;
2718 }
2719
2720
2721 if (rx_ring->rx_status_block) {
2722 dma_free_coherent(&adapter->pdev->dev,
2723 sizeof(struct rx_status_block),
2724 rx_ring->rx_status_block, rx_ring->rx_status_bus);
2725 rx_ring->rx_status_block = NULL;
2726 }
2727
2728
2729 if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2730 kmem_cache_destroy(rx_ring->recv_lookaside);
2731 adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2732 }
2733
2734
2735#ifdef USE_FBR0
2736 kfree(rx_ring->fbr[1]);
2737#endif
2738
2739 kfree(rx_ring->fbr[0]);
2740
2741
2742 rx_ring->num_ready_recv = 0;
2743}
2744
2745
2746
2747
2748
2749
2750
2751static int et131x_init_recv(struct et131x_adapter *adapter)
2752{
2753 int status = -ENOMEM;
2754 struct rfd *rfd = NULL;
2755 u32 rfdct;
2756 u32 numrfd = 0;
2757 struct rx_ring *rx_ring;
2758
2759
2760 rx_ring = &adapter->rx_ring;
2761
2762
2763 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2764 rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2765 GFP_ATOMIC | GFP_DMA);
2766
2767 if (!rfd) {
2768 dev_err(&adapter->pdev->dev,
2769 "Couldn't alloc RFD out of kmem_cache\n");
2770 status = -ENOMEM;
2771 continue;
2772 }
2773
2774 rfd->skb = NULL;
2775
2776
2777 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2778
2779
2780 rx_ring->num_ready_recv++;
2781 numrfd++;
2782 }
2783
2784 if (numrfd > NIC_MIN_NUM_RFD)
2785 status = 0;
2786
2787 rx_ring->num_rfd = numrfd;
2788
2789 if (status != 0) {
2790 kmem_cache_free(rx_ring->recv_lookaside, rfd);
2791 dev_err(&adapter->pdev->dev,
2792 "Allocation problems in et131x_init_recv\n");
2793 }
2794 return status;
2795}
2796
2797
2798
2799
2800
2801static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2802{
2803 struct phy_device *phydev = adapter->phydev;
2804
2805 if (!phydev)
2806 return;
2807
2808
2809
2810
2811 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2812 writel(0, &adapter->regs->rxdma.max_pkt_time);
2813 writel(1, &adapter->regs->rxdma.num_pkt_done);
2814 }
2815}
2816
2817
2818
2819
2820
2821
2822static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2823{
2824 struct rx_ring *rx_local = &adapter->rx_ring;
2825 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2826 u16 buff_index = rfd->bufferindex;
2827 u8 ring_index = rfd->ringindex;
2828 unsigned long flags;
2829
2830	/* Return the data buffer to its free buffer ring, provided the ring
2831	 * index and buffer index reported by the hardware are sane.
2832	 */
2833 if (
2834#ifdef USE_FBR0
2835 (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2836#endif
2837 (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2838 spin_lock_irqsave(&adapter->fbr_lock, flags);
2839
2840 if (ring_index == 1) {
2841 struct fbr_desc *next = (struct fbr_desc *)
2842 (rx_local->fbr[0]->ring_virtaddr) +
2843 INDEX10(rx_local->fbr[0]->local_full);
2844
2845			/* Handle the Free Buffer Ring advancement here.
2846			 * Write the PA / buffer index of the returned buffer
2847			 * back into the oldest (next to be freed) FBR entry.
2848			 */
2849 next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2850 next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2851 next->word2 = buff_index;
2852
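			/* Advance the ring's "full" offset so the hardware
			 * knows this buffer is available again.
			 */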
2853 writel(bump_free_buff_ring(
2854 &rx_local->fbr[0]->local_full,
2855 rx_local->fbr[0]->num_entries - 1),
2856 &rx_dma->fbr1_full_offset);
2857 }
2858#ifdef USE_FBR0
2859 else {
2860 struct fbr_desc *next = (struct fbr_desc *)
2861 rx_local->fbr[1]->ring_virtaddr +
2862 INDEX10(rx_local->fbr[1]->local_full);
2863
2864
2865
2866
2867
2868 next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2869 next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2870 next->word2 = buff_index;
2871
2872 writel(bump_free_buff_ring(
2873 &rx_local->fbr[1]->local_full,
2874 rx_local->fbr[1]->num_entries - 1),
2875 &rx_dma->fbr0_full_offset);
2876 }
2877#endif
2878 spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2879 } else {
2880 dev_err(&adapter->pdev->dev,
2881 "%s illegal Buffer Index returned\n", __func__);
2882 }
2883
2884	/* The processing on this RFD is done, so put it back on the tail of
2885	 * our list.
2886	 */
2887 spin_lock_irqsave(&adapter->rcv_lock, flags);
2888 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2889 rx_local->num_ready_recv++;
2890 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2891
2892 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2893}
2894
2895/**
2896 * nic_rx_pkts - Checks the hardware for available packets
2897 * @adapter: pointer to our adapter
2898 *
2899 * Returns a pointer to the RFD describing the received frame, or NULL if
2900 * there is nothing to process.
2901 *
2902 * Checks the packet status ring for completed receives.  If one is found,
2903 * an RFD is taken from the recv_list, the data is copied into a fresh skb
2904 * and passed up the stack, and the buffer is recycled via nic_return_rfd().
2905 */
2906static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2907{
2908 struct rx_ring *rx_local = &adapter->rx_ring;
2909 struct rx_status_block *status;
2910 struct pkt_stat_desc *psr;
2911 struct rfd *rfd;
2912 u32 i;
2913 u8 *buf;
2914 unsigned long flags;
2915 struct list_head *element;
2916 u8 ring_index;
2917 u16 buff_index;
2918 u32 len;
2919 u32 word0;
2920 u32 word1;
2921
2922	/* The RX status block is written by the DMA engine prior to every
2923	 * interrupt.  It contains the next to-be-used entry in the Packet
2924	 * Status Ring, and also the two Free Buffer rings.
2925	 */
2926 status = rx_local->rx_status_block;
2927 word1 = status->word1 >> 16;
2928
2929	/* Check the PSR and wrap bits do not match */
2930	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2931		/* Looks like this ring is not updated yet */
2932		return NULL;
2933
2934	/* The packet status ring indicates that data is available. */
2935 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2936 (rx_local->local_psr_full & 0xFFF);
2937
2938	/* Pull the length of the packet, plus the free buffer ring and
2939	 * buffer index that the hardware placed the data in, out of this
2940	 * Packet Status Ring entry.
2941	 */
2942 len = psr->word1 & 0xFFFF;
2943 ring_index = (psr->word1 >> 26) & 0x03;
2944 buff_index = (psr->word1 >> 16) & 0x3FF;
2945 word0 = psr->word0;
2946
2947
2948	/* Indicate that we have used this PSR entry. */
2949	add_12bit(&rx_local->local_psr_full, 1);
2950	if ((rx_local->local_psr_full & 0xFFF) >
2951	    rx_local->psr_num_entries - 1) {
2952		/* Clear psr full and toggle the wrap bit */
2953 rx_local->local_psr_full &= ~0xFFF;
2954 rx_local->local_psr_full ^= 0x1000;
2955 }
2956
2957 writel(rx_local->local_psr_full,
2958 &adapter->regs->rxdma.psr_full_offset);
2959
2960#ifndef USE_FBR0
2961 if (ring_index != 1)
2962 return NULL;
2963#endif
2964
2965#ifdef USE_FBR0
2966 if (ring_index > 1 ||
2967 (ring_index == 0 &&
2968 buff_index > rx_local->fbr[1]->num_entries - 1) ||
2969 (ring_index == 1 &&
2970 buff_index > rx_local->fbr[0]->num_entries - 1))
2971#else
2972 if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1)
2973#endif
2974 {
2975
2976 dev_err(&adapter->pdev->dev,
2977 "NICRxPkts PSR Entry %d indicates "
2978 "length of %d and/or bad bi(%d)\n",
2979 rx_local->local_psr_full & 0xFFF,
2980 len, buff_index);
2981 return NULL;
2982 }
2983
2984
2985 spin_lock_irqsave(&adapter->rcv_lock, flags);
2986
2987 rfd = NULL;
2988 element = rx_local->recv_list.next;
2989 rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2990
2991 if (rfd == NULL) {
2992 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2993 return NULL;
2994 }
2995
2996 list_del(&rfd->list_node);
2997 rx_local->num_ready_recv--;
2998
2999 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
3000
3001 rfd->bufferindex = buff_index;
3002 rfd->ringindex = ring_index;
3003
3004	/* The hardware length includes the 4-byte CRC, so anything shorter
3005	 * than the minimum Ethernet frame plus CRC is a runt: count it as an
3006	 * error and zero the length so the frame is dropped below.
3007	 */
3008
3009 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
3010 adapter->stats.rx_other_errs++;
3011 len = 0;
3012 }
3013
3014 if (len) {
3015
3016 if ((word0 & ALCATEL_MULTICAST_PKT) &&
3017 !(word0 & ALCATEL_BROADCAST_PKT)) {
3018			/* Multicast frame: promiscuous and multicast modes
3019			 * are not mutually exclusive, so only match against
3020			 * the configured multicast list when the multicast
3021			 * filter is enabled and neither promiscuous nor
3022			 * "all multicast" mode is in effect.
3023			 */
3024
3025 if ((adapter->packet_filter &
3026 ET131X_PACKET_TYPE_MULTICAST)
3027 && !(adapter->packet_filter &
3028 ET131X_PACKET_TYPE_PROMISCUOUS)
3029 && !(adapter->packet_filter &
3030 ET131X_PACKET_TYPE_ALL_MULTICAST)) {
3031
3032
3033
3034
3035 buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
3036 virt[buff_index];
3037
3038
3039
3040
3041
3042 for (i = 0; i < adapter->multicast_addr_count;
3043 i++) {
3044 if (buf[0] ==
3045 adapter->multicast_list[i][0]
3046 && buf[1] ==
3047 adapter->multicast_list[i][1]
3048 && buf[2] ==
3049 adapter->multicast_list[i][2]
3050 && buf[3] ==
3051 adapter->multicast_list[i][3]
3052 && buf[4] ==
3053 adapter->multicast_list[i][4]
3054 && buf[5] ==
3055 adapter->multicast_list[i][5]) {
3056 break;
3057 }
3058 }
3059
3060				/* If our index is equal to the number of
3061				 * multicast addresses we have, then we did
3062				 * not find this packet's destination in our
3063				 * list.  Set the length to zero, so the RFD
3064				 * is freed when we return from this
3065				 * function.
3066				 */
3067
3068 if (i == adapter->multicast_addr_count)
3069 len = 0;
3070 }
3071
3072 if (len > 0)
3073 adapter->stats.multicast_pkts_rcvd++;
3074 } else if (word0 & ALCATEL_BROADCAST_PKT)
3075 adapter->stats.broadcast_pkts_rcvd++;
3076 else
3077
3078
3079
3080
3081
3082 adapter->stats.unicast_pkts_rcvd++;
3083 }
3084
3085 if (len > 0) {
3086 struct sk_buff *skb = NULL;
3087
3088
3089 rfd->len = len;
3090
3091 skb = dev_alloc_skb(rfd->len + 2);
3092 if (!skb) {
3093 dev_err(&adapter->pdev->dev,
3094 "Couldn't alloc an SKB for Rx\n");
3095 return NULL;
3096 }
3097
3098 adapter->net_stats.rx_bytes += rfd->len;
3099
3100
3101
3102
3103
3104 memcpy(skb_put(skb, rfd->len),
3105 rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
3106 rfd->len);
3107
3108 skb->dev = adapter->netdev;
3109 skb->protocol = eth_type_trans(skb, adapter->netdev);
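		/* No hardware checksum offload is claimed for this frame, so
		 * the stack will verify the checksums itself.
		 */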
3110 skb->ip_summed = CHECKSUM_NONE;
3111
3112 netif_rx_ni(skb);
3113 } else {
3114 rfd->len = 0;
3115 }
3116
3117 nic_return_rfd(adapter, rfd);
3118 return rfd;
3119}
3120
3121/**
3122 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
3123 * @adapter: pointer to our adapter
3124 *
3125 * Processes received frames, at most NUM_PACKETS_HANDLED per invocation.
3126 */
3127static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
3128{
3129 struct rfd *rfd = NULL;
3130 u32 count = 0;
3131 bool done = true;
3132
3133
3134 while (count < NUM_PACKETS_HANDLED) {
3135 if (list_empty(&adapter->rx_ring.recv_list)) {
3136 WARN_ON(adapter->rx_ring.num_ready_recv != 0);
3137 done = false;
3138 break;
3139 }
3140
3141 rfd = nic_rx_pkts(adapter);
3142
3143 if (rfd == NULL)
3144 break;
3145
3146		/* Do not count the frame if no packet filter is configured,
3147		 * there is no link, or the frame length is zero (it was
3148		 * dropped); in all of these cases the RFD has already been
3149		 * recycled by nic_rx_pkts().
3150		 */
3151 if (!adapter->packet_filter ||
3152 !netif_carrier_ok(adapter->netdev) ||
3153 rfd->len == 0)
3154 continue;
3155
3156
3157 adapter->net_stats.rx_packets++;
3158
3159
3160 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
3161 dev_warn(&adapter->pdev->dev,
3162 "RFD's are running out\n");
3163 }
3164 count++;
3165 }
3166
3167 if (count == NUM_PACKETS_HANDLED || !done) {
3168 adapter->rx_ring.unfinished_receives = true;
3169 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3170 &adapter->regs->global.watchdog_timer);
3171 } else
3172
3173 adapter->rx_ring.unfinished_receives = false;
3174}
3175
3176/**
3177 * et131x_tx_dma_memory_alloc - Allocate DMA memory for transmit
3178 * @adapter: pointer to our private adapter structure
3179 *
3180 * Returns 0 on success and errno on failure (as defined in errno.h).
3181 *
3182 * Allocates memory that will be visible both to the device and to the
3183 * CPU: the transmit control blocks (TCBs), the transmit descriptor ring,
3184 * and the status block that the hardware writes its completion position
3185 * back to.
3186 */
3187
3188static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
3189{
3190 int desc_size = 0;
3191 struct tx_ring *tx_ring = &adapter->tx_ring;
3192
3193
3194 adapter->tx_ring.tcb_ring =
3195 kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
3196 if (!adapter->tx_ring.tcb_ring) {
3197 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
3198 return -ENOMEM;
3199 }
3200
3201	/* Allocate enough memory for the Tx descriptor ring, and allocate
3202	 * some extra so that the ring can be aligned on a 4k boundary.
3203	 */
3204 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
3205 tx_ring->tx_desc_ring =
3206 (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
3207 desc_size,
3208 &tx_ring->tx_desc_ring_pa,
3209 GFP_KERNEL);
3210 if (!adapter->tx_ring.tx_desc_ring) {
3211 dev_err(&adapter->pdev->dev,
3212 "Cannot alloc memory for Tx Ring\n");
3213 return -ENOMEM;
3214 }
3215
3216	/* Allocate memory for the Tx status block.
3217	 *
3218	 * The hardware writes transmit status information into this single
3219	 * word of coherent memory.  dma_alloc_coherent() hands back both the
3220	 * CPU virtual address (tx_status) and the bus address (tx_status_pa)
3221	 * that is programmed into the device.
3222	 */
3223
3224 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3225 sizeof(u32),
3226 &tx_ring->tx_status_pa,
3227 GFP_KERNEL);
3228	if (!adapter->tx_ring.tx_status) {
3229 dev_err(&adapter->pdev->dev,
3230 "Cannot alloc memory for Tx status block\n");
3231 return -ENOMEM;
3232 }
3233 return 0;
3234}
3235
3236/**
3237 * et131x_tx_dma_memory_free - Free all DMA memory allocated for transmit
3238 * @adapter: pointer to our private adapter structure
3239 *
3240 * Frees the descriptor ring, the Tx status block and the TCB array.
3241 */
3242static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3243{
3244 int desc_size = 0;
3245
3246 if (adapter->tx_ring.tx_desc_ring) {
3247
3248 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3249 + 4096 - 1;
3250 dma_free_coherent(&adapter->pdev->dev,
3251 desc_size,
3252 adapter->tx_ring.tx_desc_ring,
3253 adapter->tx_ring.tx_desc_ring_pa);
3254 adapter->tx_ring.tx_desc_ring = NULL;
3255 }
3256
3257
3258 if (adapter->tx_ring.tx_status) {
3259 dma_free_coherent(&adapter->pdev->dev,
3260 sizeof(u32),
3261 adapter->tx_ring.tx_status,
3262 adapter->tx_ring.tx_status_pa);
3263
3264 adapter->tx_ring.tx_status = NULL;
3265 }
3266
3267 kfree(adapter->tx_ring.tcb_ring);
3268}
3269
3270/**
3271 * nic_send_packet - Map an skb into Tx descriptors and start the DMA
3272 * @adapter: pointer to our adapter
3273 * @tcb: pointer to the TCB describing the packet to be sent
3274 *
3275 * Returns 0 on success and errno on failure (as defined in errno.h).
3276 */
3277static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3278{
3279 u32 i;
3280 struct tx_desc desc[24];
3281 u32 frag = 0;
3282 u32 thiscopy, remainder;
3283 struct sk_buff *skb = tcb->skb;
3284 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3285 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3286 unsigned long flags;
3287 struct phy_device *phydev = adapter->phydev;
3288
3289	/* Part of the optimizations of this send routine restrict us to
3290	 * sending 24 fragments at a pass.  In practice we should never see
3291	 * more than 5 fragments.
3292	 *
3293	 * NOTE: If this limit is raised, the size of the local descriptor
3294	 * array (desc[24]) above must be raised to match.
3295	 */
3296
3297 if (nr_frags > 23)
3298 return -EIO;
3299
3300 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
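	/* The descriptors are staged in this local array first and copied
	 * into the ring later, possibly in two pieces if the ring wraps
	 * (see the memcpy calls below).
	 */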
3301
3302 for (i = 0; i < nr_frags; i++) {
3303
3304
3305
3306 if (i == 0) {
3307			/* If the fragments are smaller than a standard MTU,
3308			 * then map them to a single descriptor in the Tx
3309			 * Desc ring.  However, if they're larger, as is
3310			 * possible with support for jumbo packets, then
3311			 * split them each across 2 descriptors.
3312			 *
3313			 * This will work until we determine why the hardware
3314			 * doesn't seem to like large fragments.
3315			 */
3316 if ((skb->len - skb->data_len) <= 1514) {
3317 desc[frag].addr_hi = 0;
3318
3319
3320 desc[frag].len_vlan =
3321 skb->len - skb->data_len;
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331 desc[frag++].addr_lo =
3332 dma_map_single(&adapter->pdev->dev,
3333 skb->data,
3334 skb->len -
3335 skb->data_len,
3336 DMA_TO_DEVICE);
3337 } else {
3338 desc[frag].addr_hi = 0;
3339 desc[frag].len_vlan =
3340 (skb->len - skb->data_len) / 2;
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350 desc[frag++].addr_lo =
3351 dma_map_single(&adapter->pdev->dev,
3352 skb->data,
3353 ((skb->len -
3354 skb->data_len) / 2),
3355 DMA_TO_DEVICE);
3356 desc[frag].addr_hi = 0;
3357
3358 desc[frag].len_vlan =
3359 (skb->len - skb->data_len) / 2;
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369 desc[frag++].addr_lo =
3370 dma_map_single(&adapter->pdev->dev,
3371 skb->data +
3372 ((skb->len -
3373 skb->data_len) / 2),
3374 ((skb->len -
3375 skb->data_len) / 2),
3376 DMA_TO_DEVICE);
3377 }
3378 } else {
3379 desc[frag].addr_hi = 0;
3380 desc[frag].len_vlan =
3381 frags[i - 1].size;
3382
3383
3384
3385
3386
3387
3388
3389 desc[frag++].addr_lo = skb_frag_dma_map(
3390 &adapter->pdev->dev,
3391 &frags[i - 1],
3392 0,
3393 frags[i - 1].size,
3394 DMA_TO_DEVICE);
3395 }
3396 }
3397
3398 if (phydev && phydev->speed == SPEED_1000) {
3399 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3400
3401 desc[frag - 1].flags = 0x5;
3402 adapter->tx_ring.since_irq = 0;
3403 } else {
3404 desc[frag - 1].flags = 0x1;
3405 }
3406 } else
3407 desc[frag - 1].flags = 0x5;
3408
3409 desc[0].flags |= 2;
3410
3411 tcb->index_start = adapter->tx_ring.send_idx;
3412 tcb->stale = 0;
3413
3414 spin_lock_irqsave(&adapter->send_hw_lock, flags);
3415
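	/* thiscopy is how many staged descriptors fit before the end of the
	 * ring; any remainder is copied to the start of the ring below.
	 */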
3416 thiscopy = NUM_DESC_PER_RING_TX -
3417 INDEX10(adapter->tx_ring.send_idx);
3418
3419 if (thiscopy >= frag) {
3420 remainder = 0;
3421 thiscopy = frag;
3422 } else {
3423 remainder = frag - thiscopy;
3424 }
3425
3426 memcpy(adapter->tx_ring.tx_desc_ring +
3427 INDEX10(adapter->tx_ring.send_idx), desc,
3428 sizeof(struct tx_desc) * thiscopy);
3429
3430 add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3431
3432 if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3433 INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3434 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3435 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3436 }
3437
3438 if (remainder) {
3439 memcpy(adapter->tx_ring.tx_desc_ring,
3440 desc + thiscopy,
3441 sizeof(struct tx_desc) * remainder);
3442
3443 add_10bit(&adapter->tx_ring.send_idx, remainder);
3444 }
3445
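	/* tcb->index records the last descriptor used for this packet,
	 * including the 10-bit offset and the ET_DMA10_WRAP bit, so the
	 * completion handler knows where the packet ends.
	 */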
3446 if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3447 if (adapter->tx_ring.send_idx)
3448 tcb->index = NUM_DESC_PER_RING_TX - 1;
3449 else
3450 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
3451 } else
3452 tcb->index = adapter->tx_ring.send_idx - 1;
3453
3454 spin_lock(&adapter->tcb_send_qlock);
3455
3456 if (adapter->tx_ring.send_tail)
3457 adapter->tx_ring.send_tail->next = tcb;
3458 else
3459 adapter->tx_ring.send_head = tcb;
3460
3461 adapter->tx_ring.send_tail = tcb;
3462
3463 WARN_ON(tcb->next != NULL);
3464
3465 adapter->tx_ring.used++;
3466
3467 spin_unlock(&adapter->tcb_send_qlock);
3468
3469
3470 writel(adapter->tx_ring.send_idx,
3471 &adapter->regs->txdma.service_request);
3472
3473
3474
3475
3476 if (phydev && phydev->speed == SPEED_1000) {
3477 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3478 &adapter->regs->global.watchdog_timer);
3479 }
3480 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3481
3482 return 0;
3483}
3484
3485/**
3486 * send_packet - Do the work to send a packet
3487 * @skb: the packet(s) to send
3488 * @adapter: a pointer to the device's private adapter structure
3489 *
3490 * Returns 0 on success and errno on failure (as defined in errno.h).
3491 *
3492 * Grabs a free TCB, fills it in, and hands it to nic_send_packet().
3493 */
3494static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3495{
3496 int status;
3497 struct tcb *tcb = NULL;
3498 u16 *shbufva;
3499 unsigned long flags;
3500
3501
3502 if (skb->len < ETH_HLEN)
3503 return -EIO;
3504
3505
3506 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3507
3508 tcb = adapter->tx_ring.tcb_qhead;
3509
3510 if (tcb == NULL) {
3511 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3512 return -ENOMEM;
3513 }
3514
3515 adapter->tx_ring.tcb_qhead = tcb->next;
3516
3517 if (adapter->tx_ring.tcb_qhead == NULL)
3518 adapter->tx_ring.tcb_qtail = NULL;
3519
3520 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3521
3522 tcb->skb = skb;
3523
3524 if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3525 shbufva = (u16 *) skb->data;
3526
3527 if ((shbufva[0] == 0xffff) &&
3528 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3529 tcb->flags |= fMP_DEST_BROAD;
3530 } else if ((shbufva[0] & 0x3) == 0x0001) {
3531 tcb->flags |= fMP_DEST_MULTI;
3532 }
3533 }
3534
3535 tcb->next = NULL;
3536
3537
3538 status = nic_send_packet(adapter, tcb);
3539
3540 if (status != 0) {
3541 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3542
3543 if (adapter->tx_ring.tcb_qtail)
3544 adapter->tx_ring.tcb_qtail->next = tcb;
3545 else
3546
3547 adapter->tx_ring.tcb_qhead = tcb;
3548
3549 adapter->tx_ring.tcb_qtail = tcb;
3550 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3551 return status;
3552 }
3553 WARN_ON(adapter->tx_ring.used > NUM_TCB);
3554 return 0;
3555}
3556
3557/**
3558 * et131x_send_packets - Send packets handed down by the network stack
3559 * @skb: the packet(s) to send
3560 * @netdev: the net_device on which to send
3561 *
3562 * Returns 0 on success and errno on failure (as defined in errno.h).
3563 */
3564static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3565{
3566 int status = 0;
3567 struct et131x_adapter *adapter = netdev_priv(netdev);
3568
3569
3570
3571
3572
3573
3574
3575
3576 if (adapter->tx_ring.used >= NUM_TCB) {
3577
3578
3579
3580
3581 status = -ENOMEM;
3582 } else {
3583
3584
3585
3586 if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3587 !netif_carrier_ok(netdev)) {
3588 dev_kfree_skb_any(skb);
3589 skb = NULL;
3590
3591 adapter->net_stats.tx_dropped++;
3592 } else {
3593 status = send_packet(skb, adapter);
3594 if (status != 0 && status != -ENOMEM) {
3595
3596
3597
3598 dev_kfree_skb_any(skb);
3599 skb = NULL;
3600 adapter->net_stats.tx_dropped++;
3601 }
3602 }
3603 }
3604 return status;
3605}
3606
3607/**
3608 * free_send_packet - Recycle a completed struct tcb
3609 * @adapter: pointer to our adapter
3610 * @tcb: pointer to the TCB to recycle
3611 *
3612 * Unmaps the packet's descriptors, frees its skb, updates the transmit
3613 * statistics and puts the TCB back on the ready queue.
3614 */
3615static inline void free_send_packet(struct et131x_adapter *adapter,
3616 struct tcb *tcb)
3617{
3618 unsigned long flags;
3619 struct tx_desc *desc = NULL;
3620 struct net_device_stats *stats = &adapter->net_stats;
3621
3622 if (tcb->flags & fMP_DEST_BROAD)
3623 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3624 else if (tcb->flags & fMP_DEST_MULTI)
3625 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3626 else
3627 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3628
3629 if (tcb->skb) {
3630 stats->tx_bytes += tcb->skb->len;
3631
3632
3633
3634
3635
3636 do {
3637 desc = (struct tx_desc *)
3638 (adapter->tx_ring.tx_desc_ring +
3639 INDEX10(tcb->index_start));
3640
3641 dma_unmap_single(&adapter->pdev->dev,
3642 desc->addr_lo,
3643 desc->len_vlan, DMA_TO_DEVICE);
3644
3645 add_10bit(&tcb->index_start, 1);
3646 if (INDEX10(tcb->index_start) >=
3647 NUM_DESC_PER_RING_TX) {
3648 tcb->index_start &= ~ET_DMA10_MASK;
3649 tcb->index_start ^= ET_DMA10_WRAP;
3650 }
3651 } while (desc != (adapter->tx_ring.tx_desc_ring +
3652 INDEX10(tcb->index)));
3653
3654 dev_kfree_skb_any(tcb->skb);
3655 }
3656
3657 memset(tcb, 0, sizeof(struct tcb));
3658
3659
3660 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3661
3662 adapter->net_stats.tx_packets++;
3663
3664 if (adapter->tx_ring.tcb_qtail)
3665 adapter->tx_ring.tcb_qtail->next = tcb;
3666 else
3667
3668 adapter->tx_ring.tcb_qhead = tcb;
3669
3670 adapter->tx_ring.tcb_qtail = tcb;
3671
3672 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3673 WARN_ON(adapter->tx_ring.used < 0);
3674}
3675
3676/**
3677 * et131x_free_busy_send_packets - Free and complete the stopped active sends
3678 * @adapter: pointer to our adapter
3679 *
3680 * Assumption: the send spinlock is not held when this routine is called.
3681 */
3682static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3683{
3684 struct tcb *tcb;
3685 unsigned long flags;
3686 u32 freed = 0;
3687
3688
3689 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3690
3691 tcb = adapter->tx_ring.send_head;
3692
3693 while (tcb != NULL && freed < NUM_TCB) {
3694 struct tcb *next = tcb->next;
3695
3696 adapter->tx_ring.send_head = next;
3697
3698 if (next == NULL)
3699 adapter->tx_ring.send_tail = NULL;
3700
3701 adapter->tx_ring.used--;
3702
3703 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3704
3705 freed++;
3706 free_send_packet(adapter, tcb);
3707
3708 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3709
3710 tcb = adapter->tx_ring.send_head;
3711 }
3712
3713 WARN_ON(freed == NUM_TCB);
3714
3715 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3716
3717 adapter->tx_ring.used = 0;
3718}
3719
3720/**
3721 * et131x_handle_send_interrupt - Interrupt handler for transmit processing
3722 * @adapter: pointer to our adapter
3723 *
3724 * Re-claims the send resources and completes sends the hardware has
3725 * finished with, then wakes the transmit queue once enough TCBs are free.
3726 *
3727 * Assumption: the send spinlock is not held when this routine is called.
3728 */
3729static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3730{
3731 unsigned long flags;
3732 u32 serviced;
3733 struct tcb *tcb;
3734 u32 index;
3735
3736 serviced = readl(&adapter->regs->txdma.new_service_complete);
3737 index = INDEX10(serviced);
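	/* 'serviced' is the hardware's completion position: a 10-bit ring
	 * offset plus the ET_DMA10_WRAP bit.  Every TCB whose last descriptor
	 * lies logically before that position has been transmitted and can
	 * be freed.
	 */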
3738
3739	/* Has the ring wrapped?  Process any descriptors that do not have
3740	 * the same "wrap" indicator as the current completion indicator.
3741	 */
3742 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3743
3744 tcb = adapter->tx_ring.send_head;
3745
3746 while (tcb &&
3747 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3748 index < INDEX10(tcb->index)) {
3749 adapter->tx_ring.used--;
3750 adapter->tx_ring.send_head = tcb->next;
3751 if (tcb->next == NULL)
3752 adapter->tx_ring.send_tail = NULL;
3753
3754 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3755 free_send_packet(adapter, tcb);
3756 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3757
3758
3759 tcb = adapter->tx_ring.send_head;
3760 }
3761 while (tcb &&
3762 !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3763 && index > (tcb->index & ET_DMA10_MASK)) {
3764 adapter->tx_ring.used--;
3765 adapter->tx_ring.send_head = tcb->next;
3766 if (tcb->next == NULL)
3767 adapter->tx_ring.send_tail = NULL;
3768
3769 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3770 free_send_packet(adapter, tcb);
3771 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3772
3773
3774 tcb = adapter->tx_ring.send_head;
3775 }
3776
3777
3778 if (adapter->tx_ring.used <= NUM_TCB / 3)
3779 netif_wake_queue(adapter->netdev);
3780
3781 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3782}
3783
3784static int et131x_get_settings(struct net_device *netdev,
3785 struct ethtool_cmd *cmd)
3786{
3787 struct et131x_adapter *adapter = netdev_priv(netdev);
3788
3789 return phy_ethtool_gset(adapter->phydev, cmd);
3790}
3791
3792static int et131x_set_settings(struct net_device *netdev,
3793 struct ethtool_cmd *cmd)
3794{
3795 struct et131x_adapter *adapter = netdev_priv(netdev);
3796
3797 return phy_ethtool_sset(adapter->phydev, cmd);
3798}
3799
3800static int et131x_get_regs_len(struct net_device *netdev)
3801{
3802#define ET131X_REGS_LEN 256
3803 return ET131X_REGS_LEN * sizeof(u32);
3804}
3805
3806static void et131x_get_regs(struct net_device *netdev,
3807 struct ethtool_regs *regs, void *regs_data)
3808{
3809 struct et131x_adapter *adapter = netdev_priv(netdev);
3810 struct address_map __iomem *aregs = adapter->regs;
3811 u32 *regs_buff = regs_data;
3812 u32 num = 0;
3813
3814 memset(regs_data, 0, et131x_get_regs_len(netdev));
3815
3816 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3817 adapter->pdev->device;
3818
3819	/* PHY regs */
3820	et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
3821	et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
3822	et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
3823	et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
3824	et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
3825	et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
3826	et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
3827	/* Autoneg next page transmit reg */
3828	et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
3829	/* Link partner next page reg */
3830	et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
3831	et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
3832	et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
3833	et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
3834	et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
3835	et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
3836	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3837			(u16 *)&regs_buff[num++]);
3838	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
3839			(u16 *)&regs_buff[num++]);
3840	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
3841			(u16 *)&regs_buff[num++]);
3842	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
3843			(u16 *)&regs_buff[num++]);
3844	et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
3845	et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
3846	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
3847	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
3848			(u16 *)&regs_buff[num++]);
3849	et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
3850	et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
3851	et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3852
3853
3854 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3855 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3856 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3857 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3858 regs_buff[num++] = readl(&aregs->global.pm_csr);
3859 regs_buff[num++] = adapter->stats.interrupt_status;
3860 regs_buff[num++] = readl(&aregs->global.int_mask);
3861 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3862 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3863 regs_buff[num++] = readl(&aregs->global.sw_reset);
3864 regs_buff[num++] = readl(&aregs->global.slv_timer);
3865 regs_buff[num++] = readl(&aregs->global.msi_config);
3866 regs_buff[num++] = readl(&aregs->global.loopback);
3867 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3868
3869
3870 regs_buff[num++] = readl(&aregs->txdma.csr);
3871 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3872 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3873 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3874 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3875 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3876 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3877 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3878 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3879 regs_buff[num++] = readl(&aregs->txdma.service_request);
3880 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3881 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3882 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3883 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3884 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3885 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3886 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3887 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3888 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3889 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3890 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3891 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3892 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3893 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3894 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3895 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3896
3897
3898 regs_buff[num++] = readl(&aregs->rxdma.csr);
3899 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3900 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3901 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3902 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3903 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3904 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3905 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3906 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3907 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3908 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3909 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3910 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3911 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3912 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3913 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3914 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3915 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3916 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3917 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3918 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3919 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3920 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3921 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3922 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3923 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3924 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3925 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3926 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3927}
3928
3929#define ET131X_DRVINFO_LEN 32
3930static void et131x_get_drvinfo(struct net_device *netdev,
3931 struct ethtool_drvinfo *info)
3932{
3933 struct et131x_adapter *adapter = netdev_priv(netdev);
3934
3935 strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3936 strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3937 strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3938}
3939
3940static struct ethtool_ops et131x_ethtool_ops = {
3941 .get_settings = et131x_get_settings,
3942 .set_settings = et131x_set_settings,
3943 .get_drvinfo = et131x_get_drvinfo,
3944 .get_regs_len = et131x_get_regs_len,
3945 .get_regs = et131x_get_regs,
3946 .get_link = ethtool_op_get_link,
3947};
3948
3949static void et131x_set_ethtool_ops(struct net_device *netdev)
3950{
3951 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
3952}
3953
3954/**
3955 * et131x_hwaddr_init - set up the MAC address on the ET1310
3956 * @adapter: pointer to our private adapter structure
3957 */
3958static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3959{
3960	/* If we still have the default MAC from init and no MAC address was
3961	 * read from the EEPROM, generate a random last octet and use that;
3962	 * otherwise adopt the EEPROM-supplied address verbatim.
3963	 */
3964 if (adapter->rom_addr[0] == 0x00 &&
3965 adapter->rom_addr[1] == 0x00 &&
3966 adapter->rom_addr[2] == 0x00 &&
3967 adapter->rom_addr[3] == 0x00 &&
3968 adapter->rom_addr[4] == 0x00 &&
3969 adapter->rom_addr[5] == 0x00) {
3970
3971
3972
3973
3974
3975 get_random_bytes(&adapter->addr[5], 1);
3976
3977
3978
3979
3980
3981 memcpy(adapter->rom_addr,
3982 adapter->addr, ETH_ALEN);
3983 } else {
3984
3985
3986
3987
3988 memcpy(adapter->addr,
3989 adapter->rom_addr, ETH_ALEN);
3990 }
3991}
3992
3993/**
3994 * et131x_pci_init - initial PCI setup
3995 * @adapter: pointer to our private adapter structure
3996 * @pdev: our PCI device
3997 *
3998 * Performs the initial setup of the PCI configuration registers and, where
3999 * possible, determines the device's MAC address.  Returns 0 or an errno.
4000 */
4001static int et131x_pci_init(struct et131x_adapter *adapter,
4002 struct pci_dev *pdev)
4003{
4004 int cap = pci_pcie_cap(pdev);
4005 u16 max_payload;
4006 u16 ctl;
4007 int i, rc;
4008
4009 rc = et131x_init_eeprom(adapter);
4010 if (rc < 0)
4011 goto out;
4012
4013 if (!cap) {
4014 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
4015 goto err_out;
4016 }
4017
4018
4019
4020
4021 if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) {
4022 dev_err(&pdev->dev,
4023 "Could not read PCI config space for Max Payload Size\n");
4024 goto err_out;
4025 }
4026
4027
4028 max_payload &= 0x07;
4029
4030 if (max_payload < 2) {
4031 static const u16 acknak[2] = { 0x76, 0xD0 };
4032 static const u16 replay[2] = { 0x1E0, 0x2ED };
4033
4034 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
4035 acknak[max_payload])) {
4036 dev_err(&pdev->dev,
4037 "Could not write PCI config space for ACK/NAK\n");
4038 goto err_out;
4039 }
4040 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
4041 replay[max_payload])) {
4042 dev_err(&pdev->dev,
4043 "Could not write PCI config space for Replay Timer\n");
4044 goto err_out;
4045 }
4046 }
4047
4048
4049
4050
4051 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
4052 dev_err(&pdev->dev,
4053 "Could not write PCI config space for Latency Timers\n");
4054 goto err_out;
4055 }
4056
4057
4058 if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) {
4059 dev_err(&pdev->dev,
4060 "Could not read PCI config space for Max read size\n");
4061 goto err_out;
4062 }
4063
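	/* Set the Max Read Request Size field (bits 14:12 of the PCIe Device
	 * Control register) to the 0x4 encoding, i.e. 2048 bytes.
	 */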
4064 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12);
4065
4066 if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
4067 dev_err(&pdev->dev,
4068 "Could not write PCI config space for Max read size\n");
4069 goto err_out;
4070 }
4071
4072
4073
4074
4075 if (!adapter->has_eeprom) {
4076 et131x_hwaddr_init(adapter);
4077 return 0;
4078 }
4079
4080 for (i = 0; i < ETH_ALEN; i++) {
4081 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
4082 adapter->rom_addr + i)) {
4083 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
4084 goto err_out;
4085 }
4086 }
4087 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
4088out:
4089 return rc;
4090err_out:
4091 rc = -EIO;
4092 goto out;
4093}
4094
4095/**
4096 * et131x_error_timer_handler - The periodic error-check timer handler
4097 * @data: timer-specific variable; here a pointer to our adapter structure
4098 *
4099 * Updates the wrap-prone MAC statistics counters and manages entering and
4100 * leaving PHY coma mode while the link is down.
4101 */
4102static void et131x_error_timer_handler(unsigned long data)
4103{
4104 struct et131x_adapter *adapter = (struct et131x_adapter *) data;
4105 struct phy_device *phydev = adapter->phydev;
4106
4107 if (et1310_in_phy_coma(adapter)) {
4108
4109
4110
4111 et1310_disable_phy_coma(adapter);
4112 adapter->boot_coma = 20;
4113 } else {
4114 et1310_update_macstat_host_counters(adapter);
4115 }
4116
4117 if (!phydev->link && adapter->boot_coma < 11)
4118 adapter->boot_coma++;
4119
4120 if (adapter->boot_coma == 10) {
4121 if (!phydev->link) {
4122 if (!et1310_in_phy_coma(adapter)) {
4123
4124
4125
4126 et131x_enable_interrupts(adapter);
4127 et1310_enable_phy_coma(adapter);
4128 }
4129 }
4130 }
4131
4132
4133 mod_timer(&adapter->error_timer, jiffies +
4134 TX_ERROR_PERIOD * HZ / 1000);
4135}
4136
4137/**
4138 * et131x_adapter_memory_alloc - Allocate all memory blocks used by the driver
4139 * @adapter: pointer to our private adapter structure
4140 *
4141 * Returns 0 on success, or a negative errno on failure.
4142 *
4143 * Allocates the Tx and Rx DMA areas and initializes the receive free list.
4144 */
4145static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
4146{
4147 int status;
4148
4149
4150 status = et131x_tx_dma_memory_alloc(adapter);
4151 if (status != 0) {
4152 dev_err(&adapter->pdev->dev,
4153 "et131x_tx_dma_memory_alloc FAILED\n");
4154 return status;
4155 }
4156
4157 status = et131x_rx_dma_memory_alloc(adapter);
4158 if (status != 0) {
4159 dev_err(&adapter->pdev->dev,
4160 "et131x_rx_dma_memory_alloc FAILED\n");
4161 et131x_tx_dma_memory_free(adapter);
4162 return status;
4163 }
4164
4165
4166 status = et131x_init_recv(adapter);
4167 if (status != 0) {
4168 dev_err(&adapter->pdev->dev,
4169 "et131x_init_recv FAILED\n");
4170 et131x_tx_dma_memory_free(adapter);
4171 et131x_rx_dma_memory_free(adapter);
4172 }
4173 return status;
4174}
4175
4176
4177
4178
4179
4180static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
4181{
4182
4183 et131x_tx_dma_memory_free(adapter);
4184 et131x_rx_dma_memory_free(adapter);
4185}
4186
4187static void et131x_adjust_link(struct net_device *netdev)
4188{
4189 struct et131x_adapter *adapter = netdev_priv(netdev);
4190 struct phy_device *phydev = adapter->phydev;
4191
4192 if (netif_carrier_ok(netdev)) {
4193 adapter->boot_coma = 20;
4194
4195 if (phydev && phydev->speed == SPEED_10) {
4196
4197
4198
4199
4200
4201
4202 u16 register18;
4203
4204 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4205					&register18);
4206 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4207 register18 | 0x4);
4208 et131x_mii_write(adapter, PHY_INDEX_REG,
4209 register18 | 0x8402);
4210 et131x_mii_write(adapter, PHY_DATA_REG,
4211 register18 | 511);
4212 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4213 register18);
4214 }
4215
4216 et1310_config_flow_control(adapter);
4217
4218 if (phydev && phydev->speed == SPEED_1000 &&
4219 adapter->registry_jumbo_packet > 2048) {
4220 u16 reg;
4221
4222			et131x_mii_read(adapter, PHY_CONFIG, &reg);
4223 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
4224 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
4225 et131x_mii_write(adapter, PHY_CONFIG, reg);
4226 }
4227
4228 et131x_set_rx_dma_timer(adapter);
4229 et1310_config_mac_regs2(adapter);
4230 }
4231
4232 if (phydev && phydev->link != adapter->link) {
4233
4234
4235
4236
4237
4238 if (et1310_in_phy_coma(adapter))
4239 et1310_disable_phy_coma(adapter);
4240
4241 if (phydev->link) {
4242 adapter->boot_coma = 20;
4243 } else {
4244 dev_warn(&adapter->pdev->dev,
4245 "Link down - cable problem ?\n");
4246 adapter->boot_coma = 0;
4247
4248 if (phydev->speed == SPEED_10) {
4249
4250
4251
4252
4253
4254 u16 register18;
4255
4256 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4257						&register18);
4258 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4259 register18 | 0x4);
4260 et131x_mii_write(adapter, PHY_INDEX_REG,
4261 register18 | 0x8402);
4262 et131x_mii_write(adapter, PHY_DATA_REG,
4263 register18 | 511);
4264 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4265 register18);
4266 }
4267
4268
4269 et131x_free_busy_send_packets(adapter);
4270
4271
4272 et131x_init_send(adapter);
4273
4274
4275
4276
4277
4278
4279
4280 et131x_soft_reset(adapter);
4281
4282
4283 et131x_adapter_setup(adapter);
4284
4285
4286 et131x_disable_txrx(netdev);
4287 et131x_enable_txrx(netdev);
4288 }
4289
4290 adapter->link = phydev->link;
4291
4292 phy_print_status(phydev);
4293 }
4294}
4295
4296static int et131x_mii_probe(struct net_device *netdev)
4297{
4298 struct et131x_adapter *adapter = netdev_priv(netdev);
4299 struct phy_device *phydev = NULL;
4300
4301 phydev = phy_find_first(adapter->mii_bus);
4302 if (!phydev) {
4303 dev_err(&adapter->pdev->dev, "no PHY found\n");
4304 return -ENODEV;
4305 }
4306
4307 phydev = phy_connect(netdev, dev_name(&phydev->dev),
4308 &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
4309
4310 if (IS_ERR(phydev)) {
4311 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
4312 return PTR_ERR(phydev);
4313 }
4314
4315 phydev->supported &= (SUPPORTED_10baseT_Half
4316 | SUPPORTED_10baseT_Full
4317 | SUPPORTED_100baseT_Half
4318 | SUPPORTED_100baseT_Full
4319 | SUPPORTED_Autoneg
4320 | SUPPORTED_MII
4321 | SUPPORTED_TP);
4322
4323 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
4324 phydev->supported |= SUPPORTED_1000baseT_Full;
4325
4326 phydev->advertising = phydev->supported;
4327 adapter->phydev = phydev;
4328
4329 dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
4330 "(mii_bus:phy_addr=%s)\n",
4331 phydev->drv->name, dev_name(&phydev->dev));
4332
4333 return 0;
4334}
4335
4336/**
4337 * et131x_adapter_init - Initialize the private adapter structure
4338 * @netdev: pointer to the associated net_device
4339 * @pdev: pointer to our PCI device
4340 *
4341 * Returns a pointer to the adapter structure embedded in the net_device,
4342 * with its spinlocks initialized and its defaults (MTU, MAC address) set.
4343 */
4344static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
4345 struct pci_dev *pdev)
4346{
4347 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
4348
4349 struct et131x_adapter *adapter;
4350
4351
4352 adapter = netdev_priv(netdev);
4353 adapter->pdev = pci_dev_get(pdev);
4354 adapter->netdev = netdev;
4355
4356
4357 spin_lock_init(&adapter->lock);
4358 spin_lock_init(&adapter->tcb_send_qlock);
4359 spin_lock_init(&adapter->tcb_ready_qlock);
4360 spin_lock_init(&adapter->send_hw_lock);
4361 spin_lock_init(&adapter->rcv_lock);
4362 spin_lock_init(&adapter->rcv_pend_lock);
4363 spin_lock_init(&adapter->fbr_lock);
4364 spin_lock_init(&adapter->phy_lock);
4365
4366 adapter->registry_jumbo_packet = 1514;
4367
4368
4369 memcpy(adapter->addr, default_mac, ETH_ALEN);
4370
4371 return adapter;
4372}
4373
4374/**
4375 * et131x_pci_remove - Deregister the device from the PCI subsystem
4376 * @pdev: a pointer to the device's pci_dev structure
4377 *
4378 * Called by the PCI subsystem when the device is removed.  Unregisters
4379 * the netdev, tears down the MII bus and frees all driver resources.
4380 */
4381
4382static void __devexit et131x_pci_remove(struct pci_dev *pdev)
4383{
4384 struct net_device *netdev = pci_get_drvdata(pdev);
4385 struct et131x_adapter *adapter = netdev_priv(netdev);
4386
4387 unregister_netdev(netdev);
4388 phy_disconnect(adapter->phydev);
4389 mdiobus_unregister(adapter->mii_bus);
4390 kfree(adapter->mii_bus->irq);
4391 mdiobus_free(adapter->mii_bus);
4392
4393 et131x_adapter_memory_free(adapter);
4394 iounmap(adapter->regs);
4395 pci_dev_put(pdev);
4396
4397 free_netdev(netdev);
4398 pci_release_regions(pdev);
4399 pci_disable_device(pdev);
4400}
4401
4402
4403
4404
4405
4406static void et131x_up(struct net_device *netdev)
4407{
4408 struct et131x_adapter *adapter = netdev_priv(netdev);
4409
4410 et131x_enable_txrx(netdev);
4411 phy_start(adapter->phydev);
4412}
4413
4414
4415
4416
4417
4418static void et131x_down(struct net_device *netdev)
4419{
4420 struct et131x_adapter *adapter = netdev_priv(netdev);
4421
4422
4423 netdev->trans_start = jiffies;
4424
4425 phy_stop(adapter->phydev);
4426 et131x_disable_txrx(netdev);
4427}
4428
4429#ifdef CONFIG_PM_SLEEP
4430static int et131x_suspend(struct device *dev)
4431{
4432 struct pci_dev *pdev = to_pci_dev(dev);
4433 struct net_device *netdev = pci_get_drvdata(pdev);
4434
4435 if (netif_running(netdev)) {
4436 netif_device_detach(netdev);
4437 et131x_down(netdev);
4438 pci_save_state(pdev);
4439 }
4440
4441 return 0;
4442}
4443
4444static int et131x_resume(struct device *dev)
4445{
4446 struct pci_dev *pdev = to_pci_dev(dev);
4447 struct net_device *netdev = pci_get_drvdata(pdev);
4448
4449 if (netif_running(netdev)) {
4450 pci_restore_state(pdev);
4451 et131x_up(netdev);
4452 netif_device_attach(netdev);
4453 }
4454
4455 return 0;
4456}
4457
4458static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4459#define ET131X_PM_OPS (&et131x_pm_ops)
4460#else
4461#define ET131X_PM_OPS NULL
4462#endif
4463
4464/**
4465 * et131x_isr - The Interrupt Service Routine for the driver
4466 * @irq: the IRQ on which the interrupt was received
4467 * @dev_id: device-specific info (here a pointer to a net_device struct)
4468 *
4469 * Returns a value indicating whether the interrupt was handled.
4470 */
4471irqreturn_t et131x_isr(int irq, void *dev_id)
4472{
4473 bool handled = true;
4474 struct net_device *netdev = (struct net_device *)dev_id;
4475 struct et131x_adapter *adapter = NULL;
4476 u32 status;
4477
4478 if (!netif_device_present(netdev)) {
4479 handled = false;
4480 goto out;
4481 }
4482
4483 adapter = netdev_priv(netdev);
4484
4485
4486	/* If the adapter is in a low power state, it should not recognize
4487	 * any interrupt; disable device interrupts while the source is
4488	 * worked out.
4489	 */
4490	et131x_disable_interrupts(adapter);
4491
4492	/* Get a copy of the value in the interrupt status register so we
4493	 * can process the interrupting section(s).
4494	 */
4495 status = readl(&adapter->regs->global.int_status);
4496
4497 if (adapter->flowcontrol == FLOW_TXONLY ||
4498 adapter->flowcontrol == FLOW_BOTH) {
4499 status &= ~INT_MASK_ENABLE;
4500 } else {
4501 status &= ~INT_MASK_ENABLE_NO_FLOW;
4502 }
4503
4504
4505 if (!status) {
4506 handled = false;
4507 et131x_enable_interrupts(adapter);
4508 goto out;
4509 }
4510
4511
4512
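	/* A watchdog tick means either a transmit has been outstanding for
	 * more than one tick (force Tx completion processing), receives were
	 * left unfinished last time (force Rx processing), or, with nothing
	 * pending, the watchdog can simply be switched off.
	 */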
4513 if (status & ET_INTR_WATCHDOG) {
4514 struct tcb *tcb = adapter->tx_ring.send_head;
4515
4516 if (tcb)
4517 if (++tcb->stale > 1)
4518 status |= ET_INTR_TXDMA_ISR;
4519
4520 if (adapter->rx_ring.unfinished_receives)
4521 status |= ET_INTR_RXDMA_XFR_DONE;
4522 else if (tcb == NULL)
4523 writel(0, &adapter->regs->global.watchdog_timer);
4524
4525 status &= ~ET_INTR_WATCHDOG;
4526 }
4527
4528 if (status == 0) {
4529
4530
4531
4532
4533
4534 et131x_enable_interrupts(adapter);
4535 goto out;
4536 }
4537
4538
4539
4540
4541
4542 adapter->stats.interrupt_status = status;
4543
4544	/* Defer the remaining work to the bottom-half handler
4545	 * (et131x_isr_handler), which re-enables device interrupts when it
4546	 * finishes.
4547	 */
4548 schedule_work(&adapter->task);
4549out:
4550 return IRQ_RETVAL(handled);
4551}
4552
4553/**
4554 * et131x_isr_handler - The ISR handler, scheduled to run in a deferred context
4555 * @work: pointer to the work_struct embedded in our adapter structure
4556 *
4557 * Services the interrupt sources recorded by et131x_isr() and re-enables
4558 * device interrupts when done.
4559 */
4560static void et131x_isr_handler(struct work_struct *work)
4561{
4562 struct et131x_adapter *adapter =
4563 container_of(work, struct et131x_adapter, task);
4564 u32 status = adapter->stats.interrupt_status;
4565 struct address_map __iomem *iomem = adapter->regs;
4566
4567
4568
4569
4570
4571
4572
4573 if (status & ET_INTR_TXDMA_ISR)
4574 et131x_handle_send_interrupt(adapter);
4575
4576
4577 if (status & ET_INTR_RXDMA_XFR_DONE)
4578 et131x_handle_recv_interrupt(adapter);
4579
4580 status &= 0xffffffd7;
4581
4582 if (status) {
4583
4584 if (status & ET_INTR_TXDMA_ERR) {
4585 u32 txdma_err;
4586
4587
4588 txdma_err = readl(&iomem->txdma.tx_dma_error);
4589
4590 dev_warn(&adapter->pdev->dev,
4591 "TXDMA_ERR interrupt, error = %d\n",
4592 txdma_err);
4593 }
4594
4595
4596 if (status &
4597 (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616 if (adapter->flowcontrol == FLOW_TXONLY ||
4617 adapter->flowcontrol == FLOW_BOTH) {
4618 u32 pm_csr;
4619
4620
4621
4622
4623
4624 pm_csr = readl(&iomem->global.pm_csr);
4625 if (!et1310_in_phy_coma(adapter))
4626 writel(3, &iomem->txmac.bp_ctrl);
4627 }
4628 }
4629
4630
4631 if (status & ET_INTR_RXDMA_STAT_LOW) {
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642 }
4643
4644
4645 if (status & ET_INTR_RXDMA_ERR) {
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666 dev_warn(&adapter->pdev->dev,
4667 "RxDMA_ERR interrupt, error %x\n",
4668 readl(&iomem->txmac.tx_test));
4669 }
4670
4671
4672 if (status & ET_INTR_WOL) {
4673
4674
4675
4676
4677
4678
4679
4680 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4681 }
4682
4683
4684 if (status & ET_INTR_TXMAC) {
4685 u32 err = readl(&iomem->txmac.err);
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697 dev_warn(&adapter->pdev->dev,
4698 "TXMAC interrupt, error 0x%08x\n",
4699 err);
4700
4701
4702
4703
4704
4705 }
4706
4707
4708 if (status & ET_INTR_RXMAC) {
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718 dev_warn(&adapter->pdev->dev,
4719 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
4720 readl(&iomem->rxmac.err_reg));
4721
4722 dev_warn(&adapter->pdev->dev,
4723 "Enable 0x%08x, Diag 0x%08x\n",
4724 readl(&iomem->rxmac.ctrl),
4725 readl(&iomem->rxmac.rxq_diag));
4726
4727
4728
4729
4730
4731
4732 }
4733
4734
4735 if (status & ET_INTR_MAC_STAT) {
4736
4737
4738
4739
4740
4741
4742 et1310_handle_macstat_interrupt(adapter);
4743 }
4744
4745
4746 if (status & ET_INTR_SLV_TIMEOUT) {
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756 }
4757 }
4758 et131x_enable_interrupts(adapter);
4759}
4760
4761/**
4762 * et131x_stats - Return the current device statistics
4763 * @netdev: device whose stats are being queried
4764 *
4765 * Returns a pointer to the net_device_stats structure for this device.
4766 */
4767static struct net_device_stats *et131x_stats(struct net_device *netdev)
4768{
4769 struct et131x_adapter *adapter = netdev_priv(netdev);
4770 struct net_device_stats *stats = &adapter->net_stats;
4771 struct ce_stats *devstat = &adapter->stats;
4772
4773 stats->rx_errors = devstat->rx_length_errs +
4774 devstat->rx_align_errs +
4775 devstat->rx_crc_errs +
4776 devstat->rx_code_violations +
4777 devstat->rx_other_errs;
4778 stats->tx_errors = devstat->tx_max_pkt_errs;
4779 stats->multicast = devstat->multicast_pkts_rcvd;
4780 stats->collisions = devstat->tx_collisions;
4781
4782 stats->rx_length_errors = devstat->rx_length_errs;
4783 stats->rx_over_errors = devstat->rx_overflows;
4784 stats->rx_crc_errors = devstat->rx_crc_errs;
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805 return stats;
4806}
4807
4808
4809
4810
4811
4812
4813
4814static int et131x_open(struct net_device *netdev)
4815{
4816 struct et131x_adapter *adapter = netdev_priv(netdev);
4817 struct pci_dev *pdev = adapter->pdev;
4818 unsigned int irq = pdev->irq;
4819 int result;
4820
4821
4822 init_timer(&adapter->error_timer);
4823 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4824 adapter->error_timer.function = et131x_error_timer_handler;
4825 adapter->error_timer.data = (unsigned long)adapter;
4826 add_timer(&adapter->error_timer);
4827
4828 result = request_irq(irq, et131x_isr,
4829 IRQF_SHARED, netdev->name, netdev);
4830 if (result) {
4831 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
4832 return result;
4833 }
4834
4835 adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
4836
4837 et131x_up(netdev);
4838
4839 return result;
4840}
4841
4842
4843
4844
4845
4846
4847
4848static int et131x_close(struct net_device *netdev)
4849{
4850 struct et131x_adapter *adapter = netdev_priv(netdev);
4851
4852 et131x_down(netdev);
4853
4854 adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
4855 free_irq(adapter->pdev->irq, netdev);
4856
4857
4858 return del_timer_sync(&adapter->error_timer);
4859}
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4870 int cmd)
4871{
4872 struct et131x_adapter *adapter = netdev_priv(netdev);
4873
4874 if (!adapter->phydev)
4875 return -EINVAL;
4876
4877 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4878}
4879
4880/**
4881 * et131x_set_packet_filter - Configure the receive packet filtering
4882 * @adapter: pointer to our adapter structure
4883 *
4884 * Programs the RXMAC control and packet filter control registers from
4885 * adapter->packet_filter.  Returns 0 on success, or a negative errno on
4886 * failure.
4887 */
4888static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4889{
4890 int filter = adapter->packet_filter;
4891 int status = 0;
4892 u32 ctrl;
4893 u32 pf_ctrl;
4894
4895 ctrl = readl(&adapter->regs->rxmac.ctrl);
4896 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
4897
4898
4899
4900
4901 ctrl |= 0x04;
4902
4903
4904
4905
4906 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4907 pf_ctrl &= ~7;
4908 else {
4909
4910
4911
4912
4913
4914 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4915 pf_ctrl &= ~2;
4916 else {
4917 et1310_setup_device_for_multicast(adapter);
4918 pf_ctrl |= 2;
4919 ctrl &= ~0x04;
4920 }
4921
4922
4923 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4924 et1310_setup_device_for_unicast(adapter);
4925 pf_ctrl |= 4;
4926 ctrl &= ~0x04;
4927 }
4928
4929
4930 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4931 pf_ctrl |= 1;
4932 ctrl &= ~0x04;
4933 } else
4934 pf_ctrl &= ~1;
4935
4936
4937
4938
4939
4940 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4941 writel(ctrl, &adapter->regs->rxmac.ctrl);
4942 }
4943 return status;
4944}
4945
4946
4947
4948
4949
4950static void et131x_multicast(struct net_device *netdev)
4951{
4952 struct et131x_adapter *adapter = netdev_priv(netdev);
4953 int packet_filter;
4954 unsigned long flags;
4955 struct netdev_hw_addr *ha;
4956 int i;
4957
4958 spin_lock_irqsave(&adapter->lock, flags);
4959
4960
4961
4962
4963
4964 packet_filter = adapter->packet_filter;
4965
4966
4967
4968
4969
4970
4971 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4972
4973
4974
4975
4976
4977 if (netdev->flags & IFF_PROMISC)
4978 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4979 else
4980 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4981
4982 if (netdev->flags & IFF_ALLMULTI)
4983 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4984
4985 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
4986 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4987
4988 if (netdev_mc_count(netdev) < 1) {
4989 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4990 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4991 } else
4992 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4993
4994
4995 i = 0;
4996 netdev_for_each_mc_addr(ha, netdev) {
4997 if (i == NIC_MAX_MCAST_LIST)
4998 break;
4999 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
5000 }
5001 adapter->multicast_addr_count = i;
5002
5003
5004
5005
5006
5007
5008
5009 if (packet_filter != adapter->packet_filter) {
5010
5011 et131x_set_packet_filter(adapter);
5012 }
5013 spin_unlock_irqrestore(&adapter->lock, flags);
5014}
5015
5016/**
5017 * et131x_tx - The handler to transmit a packet (called by the kernel)
5018 * @skb: the packet to transmit
5019 * @netdev: the device on which the packet is to be sent
5020 *
5021 * Returns NETDEV_TX_OK on success, or NETDEV_TX_BUSY when no TCB is free.
5022 */
5023static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
5024{
5025 int status = 0;
5026 struct et131x_adapter *adapter = netdev_priv(netdev);
5027
5028
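	/* Stop the queue while the TCB pool is nearly exhausted; it is woken
	 * again by et131x_handle_send_interrupt() once enough TCBs are free.
	 */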
5029 if (adapter->tx_ring.used >= NUM_TCB - 1 &&
5030 !netif_queue_stopped(netdev))
5031 netif_stop_queue(netdev);
5032
5033
5034 netdev->trans_start = jiffies;
5035
5036
5037 status = et131x_send_packets(skb, netdev);
5038
5039
5040 if (status != 0) {
5041 if (status == -ENOMEM)
5042 status = NETDEV_TX_BUSY;
5043 else
5044 status = NETDEV_TX_OK;
5045 }
5046 return status;
5047}
5048
5049/**
5050 * et131x_tx_timeout - Transmit timeout handler
5051 * @netdev: the net_device struct whose transmit has timed out
5052 *
5053 * If a send has been stuck for longer than NIC_SEND_HANG_THRESHOLD
5054 * watchdog periods, the transmit path is reset by disabling and then
5055 * re-enabling Tx/Rx on the device.
5056 */
5057static void et131x_tx_timeout(struct net_device *netdev)
5058{
5059 struct et131x_adapter *adapter = netdev_priv(netdev);
5060 struct tcb *tcb;
5061 unsigned long flags;
5062
5063	/* If the device is not up (no IRQ in use), there is nothing to do */
5064	if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
5065		return;
5066
5067
5068
5069
5070 if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
5071 return;
5072
5073
5074 if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
5075 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
5076 return;
5077 }
5078
5079
5080 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
5081
5082 tcb = adapter->tx_ring.send_head;
5083
5084 if (tcb != NULL) {
5085 tcb->count++;
5086
5087 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
5088 spin_unlock_irqrestore(&adapter->tcb_send_qlock,
5089 flags);
5090
5091 dev_warn(&adapter->pdev->dev,
5092 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
5093 tcb->index,
5094 tcb->flags);
5095
5096 adapter->net_stats.tx_errors++;
5097
5098
5099 et131x_disable_txrx(netdev);
5100 et131x_enable_txrx(netdev);
5101 return;
5102 }
5103 }
5104
5105 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
5106}
5107
5108/**
5109 * et131x_change_mtu - Handler called to change the MTU for the device
5110 * @netdev: device whose MTU is to be changed
5111 * @new_mtu: the desired MTU
5112 *
5113 * Returns 0 on success, or a negative errno on failure.
5114 */
5115static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
5116{
5117 int result = 0;
5118 struct et131x_adapter *adapter = netdev_priv(netdev);
5119
5120
5121 if (new_mtu < 64 || new_mtu > 9216)
5122 return -EINVAL;
5123
5124 et131x_disable_txrx(netdev);
5125 et131x_handle_send_interrupt(adapter);
5126 et131x_handle_recv_interrupt(adapter);
5127
5128
5129 netdev->mtu = new_mtu;
5130
5131
5132 et131x_adapter_memory_free(adapter);
5133
5134
5135 adapter->registry_jumbo_packet = new_mtu + 14;
5136 et131x_soft_reset(adapter);
5137
5138
5139 result = et131x_adapter_memory_alloc(adapter);
5140 if (result != 0) {
5141 dev_warn(&adapter->pdev->dev,
5142 "Change MTU failed; couldn't re-alloc DMA memory\n");
5143 return result;
5144 }
5145
5146 et131x_init_send(adapter);
5147
5148 et131x_hwaddr_init(adapter);
5149 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5150
5151
5152 et131x_adapter_setup(adapter);
5153
5154 et131x_enable_txrx(netdev);
5155
5156 return result;
5157}
5158
5159/**
5160 * et131x_set_mac_addr - handler to change the MAC address for the device
5161 * @netdev: device whose MAC address is to be changed
5162 * @new_mac: the desired MAC address, passed as a struct sockaddr
5163 *
5164 * Returns 0 on success, or a negative errno on failure.
5165 *
5166 * Implemented by re-initializing the device with the new address.
5167 */
5168static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
5169{
5170 int result = 0;
5171 struct et131x_adapter *adapter = netdev_priv(netdev);
5172 struct sockaddr *address = new_mac;
5173
5174
5175
5176 if (adapter == NULL)
5177 return -ENODEV;
5178
5179
5180 if (!is_valid_ether_addr(address->sa_data))
5181 return -EADDRNOTAVAIL;
5182
5183 et131x_disable_txrx(netdev);
5184 et131x_handle_send_interrupt(adapter);
5185 et131x_handle_recv_interrupt(adapter);
5186
5187
5188
5189
5190 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
5191
5192 printk(KERN_INFO "%s: Setting MAC address to %pM\n",
5193 netdev->name, netdev->dev_addr);
5194
5195
5196 et131x_adapter_memory_free(adapter);
5197
5198 et131x_soft_reset(adapter);
5199
5200
5201 result = et131x_adapter_memory_alloc(adapter);
5202 if (result != 0) {
5203 dev_err(&adapter->pdev->dev,
5204 "Change MAC failed; couldn't re-alloc DMA memory\n");
5205 return result;
5206 }
5207
5208 et131x_init_send(adapter);
5209
5210 et131x_hwaddr_init(adapter);
5211
5212
5213 et131x_adapter_setup(adapter);
5214
5215 et131x_enable_txrx(netdev);
5216
5217 return result;
5218}
5219
5220static const struct net_device_ops et131x_netdev_ops = {
5221 .ndo_open = et131x_open,
5222 .ndo_stop = et131x_close,
5223 .ndo_start_xmit = et131x_tx,
5224 .ndo_set_rx_mode = et131x_multicast,
5225 .ndo_tx_timeout = et131x_tx_timeout,
5226 .ndo_change_mtu = et131x_change_mtu,
5227 .ndo_set_mac_address = et131x_set_mac_addr,
5228 .ndo_validate_addr = eth_validate_addr,
5229 .ndo_get_stats = et131x_stats,
5230 .ndo_do_ioctl = et131x_ioctl,
5231};
5232
5233/**
5234 * et131x_pci_setup - Perform device initialization (PCI probe)
5235 * @pdev: a pointer to the device's pci_dev structure
5236 * @ent: this device's entry in the pci_device_id table
5237 *
5238 * Returns 0 on success, or a negative errno on failure.
5239 *
5240 * Registered in the pci_driver structure and called when a PCI device
5241 * with a matching ID is found.  Sets up PCI resources, DMA masks, the
5242 * MII bus and the net_device, then registers the device with the
5243 * network stack.
5244 */
5245static int __devinit et131x_pci_setup(struct pci_dev *pdev,
5246 const struct pci_device_id *ent)
5247{
5248 struct net_device *netdev;
5249 struct et131x_adapter *adapter;
5250 int rc;
5251 int ii;
5252
5253 rc = pci_enable_device(pdev);
5254 if (rc < 0) {
5255 dev_err(&pdev->dev, "pci_enable_device() failed\n");
5256 goto out;
5257 }
5258
5259
5260 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5261 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
5262 rc = -ENODEV;
5263 goto err_disable;
5264 }
5265
5266 rc = pci_request_regions(pdev, DRIVER_NAME);
5267 if (rc < 0) {
5268 dev_err(&pdev->dev, "Can't get PCI resources\n");
5269 goto err_disable;
5270 }
5271
5272 pci_set_master(pdev);
5273
5274
5275 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
5276 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5277 if (rc < 0) {
5278 dev_err(&pdev->dev,
5279 "Unable to obtain 64 bit DMA for consistent allocations\n");
5280 goto err_release_res;
5281 }
5282 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
5283 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5284 if (rc < 0) {
5285 dev_err(&pdev->dev,
5286 "Unable to obtain 32 bit DMA for consistent allocations\n");
5287 goto err_release_res;
5288 }
5289 } else {
5290 dev_err(&pdev->dev, "No usable DMA addressing method\n");
5291 rc = -EIO;
5292 goto err_release_res;
5293 }
5294
5295
5296 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
5297 if (!netdev) {
5298 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
5299 rc = -ENOMEM;
5300 goto err_release_res;
5301 }
5302
5303 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
5304 netdev->netdev_ops = &et131x_netdev_ops;
5305
5306 SET_NETDEV_DEV(netdev, &pdev->dev);
5307 et131x_set_ethtool_ops(netdev);
5308
5309 adapter = et131x_adapter_init(netdev, pdev);
5310
5311 rc = et131x_pci_init(adapter, pdev);
5312 if (rc < 0)
5313 goto err_free_dev;
5314
5315
5316 adapter->regs = pci_ioremap_bar(pdev, 0);
5317 if (!adapter->regs) {
5318 dev_err(&pdev->dev, "Cannot map device registers\n");
5319 rc = -ENOMEM;
5320 goto err_free_dev;
5321 }
5322
5323
5324 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
5325
5326
5327 et131x_soft_reset(adapter);
5328
5329
5330 et131x_disable_interrupts(adapter);
5331
5332
5333 rc = et131x_adapter_memory_alloc(adapter);
5334 if (rc < 0) {
5335 dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
5336 goto err_iounmap;
5337 }
5338
5339
5340 et131x_init_send(adapter);
5341
5342
5343 INIT_WORK(&adapter->task, et131x_isr_handler);
5344
5345
5346 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5347
5348
5349 adapter->boot_coma = 0;
5350 et1310_disable_phy_coma(adapter);
5351
5352 rc = -ENOMEM;
5353
5354
5355 adapter->mii_bus = mdiobus_alloc();
5356 if (!adapter->mii_bus) {
5357 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
5358 goto err_mem_free;
5359 }
5360
5361 adapter->mii_bus->name = "et131x_eth_mii";
5362 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
5363 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
5364 adapter->mii_bus->priv = netdev;
5365 adapter->mii_bus->read = et131x_mdio_read;
5366 adapter->mii_bus->write = et131x_mdio_write;
5367 adapter->mii_bus->reset = et131x_mdio_reset;
5368 adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
5369 if (!adapter->mii_bus->irq) {
5370 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
5371 goto err_mdio_free;
5372 }
5373
5374 for (ii = 0; ii < PHY_MAX_ADDR; ii++)
5375 adapter->mii_bus->irq[ii] = PHY_POLL;
5376
5377 rc = mdiobus_register(adapter->mii_bus);
5378 if (rc < 0) {
5379 dev_err(&pdev->dev, "failed to register MII bus\n");
5380 goto err_mdio_free_irq;
5381 }
5382
5383 rc = et131x_mii_probe(netdev);
5384 if (rc < 0) {
5385 dev_err(&pdev->dev, "failed to probe MII bus\n");
5386 goto err_mdio_unregister;
5387 }
5388
5389
5390 et131x_adapter_setup(adapter);
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400 rc = register_netdev(netdev);
5401 if (rc < 0) {
5402 dev_err(&pdev->dev, "register_netdev() failed\n");
5403 goto err_phy_disconnect;
5404 }
5405
5406
5407
5408
5409
5410 pci_set_drvdata(pdev, netdev);
5411out:
5412 return rc;
5413
5414err_phy_disconnect:
5415 phy_disconnect(adapter->phydev);
5416err_mdio_unregister:
5417 mdiobus_unregister(adapter->mii_bus);
5418err_mdio_free_irq:
5419 kfree(adapter->mii_bus->irq);
5420err_mdio_free:
5421 mdiobus_free(adapter->mii_bus);
5422err_mem_free:
5423 et131x_adapter_memory_free(adapter);
5424err_iounmap:
5425 iounmap(adapter->regs);
5426err_free_dev:
5427 pci_dev_put(pdev);
5428 free_netdev(netdev);
5429err_release_res:
5430 pci_release_regions(pdev);
5431err_disable:
5432 pci_disable_device(pdev);
5433 goto out;
5434}
5435
5436static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
5437 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
5438 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
5439 {0,}
5440};
5441MODULE_DEVICE_TABLE(pci, et131x_pci_table);
5442
5443static struct pci_driver et131x_driver = {
5444 .name = DRIVER_NAME,
5445 .id_table = et131x_pci_table,
5446 .probe = et131x_pci_setup,
5447 .remove = __devexit_p(et131x_pci_remove),
5448 .driver.pm = ET131X_PM_OPS,
5449};
5450
5451
5452
5453
5454
5455
5456static int __init et131x_init_module(void)
5457{
5458 return pci_register_driver(&et131x_driver);
5459}
5460
5461
5462
5463
5464static void __exit et131x_cleanup_module(void)
5465{
5466 pci_unregister_driver(&et131x_driver);
5467}
5468
5469module_init(et131x_init_module);
5470module_exit(et131x_cleanup_module);
5471
5472