#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

#define MAX_NUM_REGISTER_POLLS 1000
#define MAX_NUM_WRITE_RETRIES 2

#define COUNTER_WRAP_16_BIT 0x10000
#define COUNTER_WRAP_12_BIT 0x1000

#define INTERNAL_MEM_SIZE 0x400
#define INTERNAL_MEM_RX_OFFSET 0x1FF

#define INT_MASK_DISABLE 0xffffffff

#define INT_MASK_ENABLE 0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7

#define NIC_MIN_PACKET_SIZE 60

#define NIC_MAX_MCAST_LIST 128

#define ET131X_PACKET_TYPE_DIRECTED 0x0001
#define ET131X_PACKET_TYPE_MULTICAST 0x0002
#define ET131X_PACKET_TYPE_BROADCAST 0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010

#define ET131X_TX_TIMEOUT (1 * HZ)
#define NIC_SEND_HANG_THRESHOLD 0

#define FMP_DEST_MULTI 0x00000001
#define FMP_DEST_BROAD 0x00000002

#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008

#define FMP_ADAPTER_LOWER_POWER 0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000

#define ET1310_PCI_MAC_ADDRESS 0xA4
#define ET1310_PCI_EEPROM_STATUS 0xB2
#define ET1310_PCI_ACK_NACK 0xC0
#define ET1310_PCI_REPLAY 0xC2
#define ET1310_PCI_L0L1LATENCY 0xCF

#define ET131X_PCI_DEVICE_ID_GIG 0xED00
#define ET131X_PCI_DEVICE_ID_FAST 0xED01

#define NANO_IN_A_MICRO 1000

#define PARM_RX_NUM_BUFS_DEF 4
#define PARM_RX_TIME_INT_DEF 10
#define PARM_RX_MEM_END_DEF 0x2bc
#define PARM_TX_TIME_INT_DEF 40
#define PARM_TX_NUM_BUFS_DEF 4
#define PARM_DMA_CACHE_DEF 0

#define FBR_CHUNKS 32
#define MAX_DESC_PER_RING_RX 1024

#define RFD_LOW_WATER_MARK 40
#define NIC_DEFAULT_NUM_RFD 1024
#define NUM_FBRS 2

#define NUM_PACKETS_HANDLED 256

#define ALCATEL_MULTICAST_PKT 0x01000000
#define ALCATEL_BROADCAST_PKT 0x02000000

struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;
};

struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};

struct rx_status_block {
	u32 word0;
	u32 word1;
};

struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];
	u32 bus_high[MAX_DESC_PER_RING_RX];
	u32 bus_low[MAX_DESC_PER_RING_RX];
	void *ring_virtaddr;
	dma_addr_t ring_physaddr;
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32 local_full;
	u32 num_entries;
	dma_addr_t buffsize;
};

struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_num_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};

#define TXDESC_FLAG_LASTPKT 0x0001
#define TXDESC_FLAG_FIRSTPKT 0x0002
#define TXDESC_FLAG_INTPROC 0x0004

struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;
	u32 flags;
};

struct tcb {
	struct tcb *next;
	u32 flags;
	u32 count;
	u32 stale;
	struct sk_buff *skb;
	u32 index;
	u32 index_start;
};

struct tx_ring {
	struct tcb *tcb_ring;

	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	u32 send_idx;

	u32 *tx_status;
	dma_addr_t tx_status_pa;

	int since_irq;
};

#define NUM_DESC_PER_RING_TX 512
#define NUM_TCB 64

#define TX_ERROR_PERIOD 1000

#define LO_MARK_PERCENT_FOR_PSR 15
#define LO_MARK_PERCENT_FOR_RX 15

struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;
	u16 bufferindex;
	u8 ringindex;
};

#define FLOW_BOTH 0
#define FLOW_TXONLY 1
#define FLOW_RXONLY 2
#define FLOW_NONE 3

struct ce_stats {
	u32 unicast_pkts_rcvd;
	atomic_t unicast_pkts_xmtd;
	u32 multicast_pkts_rcvd;
	atomic_t multicast_pkts_xmtd;
	u32 broadcast_pkts_rcvd;
	atomic_t broadcast_pkts_xmtd;
	u32 rcvd_pkts_dropped;

	u32 tx_underflows;

	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	u32 rx_overflows;

	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 synchronous_iterations;
	u32 interrupt_status;
};

struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct work_struct task;

	u32 flags;

	int link;

	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t send_hw_lock;

	spinlock_t rcv_lock;
	spinlock_t fbr_lock;

	u32 packet_filter;

	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	struct address_map __iomem *regs;

	u8 wanted_flow;
	u32 registry_jumbo_packet;

	u8 flowcontrol;

	struct timer_list error_timer;

	u8 boot_coma;

	u16 pdown_speed;
	u8 pdown_duplex;

	struct tx_ring tx_ring;

	struct rx_ring rx_ring;

	struct ce_stats stats;

	struct net_device_stats net_stats;
};

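/* eeprom_wait_ready - Poll the LBCIF status in PCI config space until the
 * EEPROM interface reports idle. Returns the low status byte (and optionally
 * stores the full status dword in @status) on success, -EIO on a config read
 * failure, or -ETIMEDOUT if the interface never becomes ready.
 */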
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

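/* eeprom_write - Write one byte of @data to the EEPROM at @addr through the
 * LBCIF, retrying on ACK errors, then poll the device until the internal
 * write cycle completes. Returns 0 on success or -EIO on failure.
 */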
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE |
				  LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;

		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;

		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		if ((status & LBCIF_STATUS_GENERAL_ERROR)
		    && adapter->pdev->revision == 0)
			break;

		if (status & LBCIF_STATUS_ACK_ERROR) {
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

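/* eeprom_read - Read one byte from the EEPROM at @addr through the LBCIF and
 * store it in @pdata. Returns 0 on success or a negative errno.
 */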
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;

	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;

	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;

	*pdata = err;

	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

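/* et131x_init_eeprom - Validate the EEPROM status byte in PCI config space
 * and cache the two EEPROM bytes holding the LED configuration. On revision
 * 0x01 silicon, an error status triggers a rewrite of the first three EEPROM
 * bytes; on any other revision it is treated as fatal.
 */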
static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	if (eestatus & 0x4C) {
		int write_failed = 0;

		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n",
				eestatus);

			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

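/* et131x_rx_dma_enable - Take the receive DMA engine out of halt, programming
 * the free-buffer-ring buffer sizes into the CSR, and warn if the engine
 * fails to leave the halt state.
 */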
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (rx_ring->fbr[1]->buffsize == 4096)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
	else if (rx_ring->fbr[1]->buffsize == 8192)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
	else if (rx_ring->fbr[1]->buffsize == 16384)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;

	csr |= ET_RXDMA_CSR_FBR0_ENABLE;
	if (rx_ring->fbr[0]->buffsize == 256)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
	else if (rx_ring->fbr[0]->buffsize == 512)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
	else if (rx_ring->fbr[0]->buffsize == 1024)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if (csr & ET_RXDMA_CSR_HALT_STATUS) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (csr & ET_RXDMA_CSR_HALT_STATUS) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

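/* et131x_rx_dma_disable - Halt the receive DMA engine and verify it actually
 * reached the halt state.
 */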
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;

	writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
	       &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

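/* et131x_tx_dma_enable - Set up the transmit DMA CSR for normal operation
 * (single end-of-packet mode plus the default cache-line threshold).
 */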
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	writel(ET_TXDMA_SNGL_EPKT |
	       (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}

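/* et1310_config_mac_regs1 - Perform the first half of MAC initialisation:
 * reset the MAC core, program the inter-packet gap and half-duplex
 * parameters, load the station (MAC) address registers and the maximum
 * frame length.
 */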
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);

	ipg = 0x38005860;
	ipg |= 0x50 << 8;
	writel(ipg, &macregs->ipg);

	writel(0x00A1F037, &macregs->hfdp);

	writel(0, &macregs->if_ctrl);

	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		   adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	writel(0, &macregs->cfg1);
}

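/* et1310_config_mac_regs2 - Finish MAC initialisation once the link is up,
 * setting the interface mode, duplex and flow-control bits to match the
 * PHY's negotiated speed and duplex.
 */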
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
	if (phydev->speed == SPEED_1000) {
		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
	} else {
		cfg2 |= ET_MAC_CFG2_IFMODE_100;
		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
	}

	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
		ET_MAC_CFG1_TX_FLOW;

	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= ET_MAC_CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

	if (phydev->duplex == DUPLEX_FULL)
		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
	if (phydev->duplex == DUPLEX_HALF)
		ifctrl |= ET_MAC_IFCTRL_GHDMODE;

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
	writel(ctl, &adapter->regs->txmac.ctl);

	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}

static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
		  adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
		  adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		  adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;

	writel(0x8, &rxmac->ctrl);

	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	writel(0, &rxmac->mask0_word0);
	writel(0, &rxmac->mask0_word1);
	writel(0, &rxmac->mask0_word2);
	writel(0, &rxmac->mask0_word3);

	writel(0, &rxmac->mask1_word0);
	writel(0, &rxmac->mask1_word1);
	writel(0, &rxmac->mask1_word2);
	writel(0, &rxmac->mask1_word3);

	writel(0, &rxmac->mask2_word0);
	writel(0, &rxmac->mask2_word1);
	writel(0, &rxmac->mask2_word2);
	writel(0, &rxmac->mask2_word3);

	writel(0, &rxmac->mask3_word0);
	writel(0, &rxmac->mask3_word1);
	writel(0, &rxmac->mask3_word2);
	writel(0, &rxmac->mask3_word3);

	writel(0, &rxmac->mask4_word0);
	writel(0, &rxmac->mask4_word1);
	writel(0, &rxmac->mask4_word2);
	writel(0, &rxmac->mask4_word3);

	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32) (adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	writel(0, &rxmac->pf_ctrl);

	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	writel(0, &rxmac->mcif_water_mark);

	writel(0, &rxmac->mif_ctrl);

	writel(0, &rxmac->space_avail);

	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}

static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	if (adapter->flowcontrol == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;

	writel(0, &macstat->txrx_0_64_byte_frames);
	writel(0, &macstat->txrx_65_127_byte_frames);
	writel(0, &macstat->txrx_128_255_byte_frames);
	writel(0, &macstat->txrx_256_511_byte_frames);
	writel(0, &macstat->txrx_512_1023_byte_frames);
	writel(0, &macstat->txrx_1024_1518_byte_frames);
	writel(0, &macstat->txrx_1519_1522_gvln_frames);

	writel(0, &macstat->rx_bytes);
	writel(0, &macstat->rx_packets);
	writel(0, &macstat->rx_fcs_errs);
	writel(0, &macstat->rx_multicast_packets);
	writel(0, &macstat->rx_broadcast_packets);
	writel(0, &macstat->rx_control_frames);
	writel(0, &macstat->rx_pause_frames);
	writel(0, &macstat->rx_unknown_opcodes);
	writel(0, &macstat->rx_align_errs);
	writel(0, &macstat->rx_frame_len_errs);
	writel(0, &macstat->rx_code_errs);
	writel(0, &macstat->rx_carrier_sense_errs);
	writel(0, &macstat->rx_undersize_packets);
	writel(0, &macstat->rx_oversize_packets);
	writel(0, &macstat->rx_fragment_packets);
	writel(0, &macstat->rx_jabbers);
	writel(0, &macstat->rx_drops);

	writel(0, &macstat->tx_bytes);
	writel(0, &macstat->tx_packets);
	writel(0, &macstat->tx_multicast_packets);
	writel(0, &macstat->tx_broadcast_packets);
	writel(0, &macstat->tx_pause_frames);
	writel(0, &macstat->tx_deferred);
	writel(0, &macstat->tx_excessive_deferred);
	writel(0, &macstat->tx_single_collisions);
	writel(0, &macstat->tx_multiple_collisions);
	writel(0, &macstat->tx_late_collisions);
	writel(0, &macstat->tx_excessive_collisions);
	writel(0, &macstat->tx_total_collisions);
	writel(0, &macstat->tx_pause_honored_frames);
	writel(0, &macstat->tx_drops);
	writel(0, &macstat->tx_jabbers);
	writel(0, &macstat->tx_fcs_errs);
	writel(0, &macstat->tx_control_frames);
	writel(0, &macstat->tx_oversize_frames);
	writel(0, &macstat->tx_undersize_frames);
	writel(0, &macstat->tx_fragments);
	writel(0, &macstat->carry_reg1);
	writel(0, &macstat->carry_reg2);

	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

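/* et131x_phy_mii_read - Read PHY register @reg at PHY address @addr over the
 * MII management interface, preserving and restoring the mii_mgmt
 * address/command registers around the access.
 */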
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	writel(0, &mac->mii_mgmt_cmd);

	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
		goto out;
	}

	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

out:
	writel(0, &mac->mii_mgmt_cmd);

	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

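/* et131x_mii_write - Write @value to PHY register @reg over the MII
 * management interface, again saving and restoring the mii_mgmt
 * address/command registers around the access.
 */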
static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	int status = 0;
	u8 addr;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	if (!phydev)
		return -EIO;

	addr = phydev->addr;

	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	writel(0, &mac->mii_mgmt_cmd);

	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);

	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}

	writel(0, &mac->mii_mgmt_cmd);

	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
				    u16 regnum,
				    u16 bitnum,
				    u8 *value)
{
	u16 reg;
	u16 mask = 1 << bitnum;

	et131x_mii_read(adapter, regnum, &reg);

	*value = (reg & mask) >> bitnum;
}

static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flowcontrol = FLOW_NONE;
	} else {
		u8 remote_pause, remote_async_pause;

		et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
		et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);

		if (remote_pause && remote_async_pause) {
			adapter->flowcontrol = adapter->wanted_flow;
		} else if (remote_pause && !remote_async_pause) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_BOTH;
			else
				adapter->flowcontrol = FLOW_NONE;
		} else if (!remote_pause && !remote_async_pause) {
			adapter->flowcontrol = FLOW_NONE;
		} else {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_RXONLY;
			else
				adapter->flowcontrol = FLOW_NONE;
		}
	}
}

static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;

	stats->tx_collisions += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
	stats->tx_deferred += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
		readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}

static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;
	else
		return value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, reg, value);
}

static int et131x_mdio_reset(struct mii_bus *bus)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);

	return 0;
}

static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
	u16 data;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}

static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 lcr2;

	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, PHY_LED_2, lcr2);
	}
}

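/* et131x_configure_global_regs - Split the on-chip packet memory between the
 * receive and transmit queues according to the configured jumbo packet size,
 * and quiesce loopback, MSI and the watchdog timer.
 */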
static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	writel(0, &regs->loopback);

	writel(0, &regs->msi_config);

	writel(0, &regs->watchdog_timer);
}

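/* et131x_config_rx_dma_regs - Program the receive DMA engine: the status
 * block write-back address, the packet status ring and both free buffer
 * rings, plus the receive interrupt coalescing parameters.
 */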
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;
	u8 id;

	et131x_rx_dma_disable(adapter);

	writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
	writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
	writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
	writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	rx_local->local_psr_full = 0;

	for (id = 0; id < NUM_FBRS; id++) {
		u32 __iomem *num_des;
		u32 __iomem *full_offset;
		u32 __iomem *min_des;
		u32 __iomem *base_hi;
		u32 __iomem *base_lo;
		struct fbr_lookup *fbr = rx_local->fbr[id];

		if (id == 0) {
			num_des = &rx_dma->fbr0_num_des;
			full_offset = &rx_dma->fbr0_full_offset;
			min_des = &rx_dma->fbr0_min_des;
			base_hi = &rx_dma->fbr0_base_hi;
			base_lo = &rx_dma->fbr0_base_lo;
		} else {
			num_des = &rx_dma->fbr1_num_des;
			full_offset = &rx_dma->fbr1_full_offset;
			min_des = &rx_dma->fbr1_min_des;
			base_hi = &rx_dma->fbr1_base_hi;
			base_lo = &rx_dma->fbr1_base_lo;
		}

		fbr_entry = fbr->ring_virtaddr;
		for (entry = 0; entry < fbr->num_entries; entry++) {
			fbr_entry->addr_hi = fbr->bus_high[entry];
			fbr_entry->addr_lo = fbr->bus_low[entry];
			fbr_entry->word2 = entry;
			fbr_entry++;
		}

		writel(upper_32_bits(fbr->ring_physaddr), base_hi);
		writel(lower_32_bits(fbr->ring_physaddr), base_lo);
		writel(fbr->num_entries - 1, num_des);
		writel(ET_DMA10_WRAP, full_offset);

		fbr->local_full = ET_DMA10_WRAP;
		writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
		       min_des);
	}

	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}

static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
	writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);

	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
	writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);

	*tx_ring->tx_status = 0;

	writel(0, &txdma->service_request);
	tx_ring->send_idx = 0;
}

static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	et131x_configure_global_regs(adapter);

	et1310_config_mac_regs1(adapter);

	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	et1310_phy_power_switch(adapter, 0);
	et131x_xcvr_init(adapter);
}

static void et131x_soft_reset(struct et131x_adapter *adapter)
{
	u32 reg;

	reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	      ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);

	reg = ET_RESET_ALL;
	writel(reg, &adapter->regs->global.sw_reset);

	reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);
	writel(0, &adapter->regs->mac.cfg1);
}

static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}

static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}

static void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	netif_start_queue(netdev);
}

static void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);

	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	et131x_disable_interrupts(adapter);
}

static void et131x_init_send(struct et131x_adapter *adapter)
{
	u32 ct;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb = tx_ring->tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	for (ct = 0; ct++ < NUM_TCB; tcb++)
		tcb->next = tcb + 1;

	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;

	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}

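/* et1310_enable_phy_coma - Drive the device into PHY coma mode to save
 * power: mark the adapter as low-power, stop traffic, then set the coma bit
 * in the PM CSR.
 */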
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	spin_lock_irqsave(&adapter->send_hw_lock, flags);
	adapter->flags |= FMP_ADAPTER_LOWER_POWER;
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	et131x_disable_txrx(adapter->netdev);

	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}

static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	et131x_init_send(adapter);

	et131x_soft_reset(adapter);

	et131x_adapter_setup(adapter);

	adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}

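/* bump_free_buff_ring - Advance a 10-bit free buffer ring index, toggling
 * the wrap bit when the index passes @limit, and return the new value.
 */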
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
	u32 tmp_free_buff_ring = *free_buff_ring;

	tmp_free_buff_ring++;

	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
		tmp_free_buff_ring &= ~ET_DMA10_MASK;
		tmp_free_buff_ring ^= ET_DMA10_WRAP;
	}

	tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
	*free_buff_ring = tmp_free_buff_ring;
	return tmp_free_buff_ring;
}

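/* et131x_rx_dma_memory_alloc - Allocate all receive-side DMA memory: the two
 * free buffer rings and their buffer chunks, the packet status ring and the
 * receive status block, then initialise the RFD receive list.
 */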
static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u8 id;
	u32 i, j;
	u32 bufsize;
	u32 pktstat_ringsize;
	u32 fbr_chunksize;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
	if (rx_ring->fbr[0] == NULL)
		return -ENOMEM;
	rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
	if (rx_ring->fbr[1] == NULL)
		return -ENOMEM;

	if (adapter->registry_jumbo_packet < 2048) {
		rx_ring->fbr[0]->buffsize = 256;
		rx_ring->fbr[0]->num_entries = 512;
		rx_ring->fbr[1]->buffsize = 2048;
		rx_ring->fbr[1]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
		rx_ring->fbr[0]->buffsize = 512;
		rx_ring->fbr[0]->num_entries = 1024;
		rx_ring->fbr[1]->buffsize = 4096;
		rx_ring->fbr[1]->num_entries = 512;
	} else {
		rx_ring->fbr[0]->buffsize = 1024;
		rx_ring->fbr[0]->num_entries = 768;
		rx_ring->fbr[1]->buffsize = 16384;
		rx_ring->fbr[1]->num_entries = 128;
	}

	rx_ring->psr_num_entries = rx_ring->fbr[0]->num_entries +
				   rx_ring->fbr[1]->num_entries;

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];

		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
		fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
							bufsize,
							&fbr->ring_physaddr,
							GFP_KERNEL);
		if (!fbr->ring_virtaddr) {
			dev_err(&adapter->pdev->dev,
				"Cannot alloc memory for Free Buffer Ring %d\n",
				id);
			return -ENOMEM;
		}
	}

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);

		for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
			dma_addr_t fbr_tmp_physaddr;

			fbr->mem_virtaddrs[i] = dma_alloc_coherent(
					&adapter->pdev->dev, fbr_chunksize,
					&fbr->mem_physaddrs[i],
					GFP_KERNEL);

			if (!fbr->mem_virtaddrs[i]) {
				dev_err(&adapter->pdev->dev,
					"Could not alloc memory\n");
				return -ENOMEM;
			}

			fbr_tmp_physaddr = fbr->mem_physaddrs[i];

			for (j = 0; j < FBR_CHUNKS; j++) {
				u32 index = (i * FBR_CHUNKS) + j;

				fbr->virt[index] = (u8 *)fbr->mem_virtaddrs[i] +
						   (j * fbr->buffsize);

				fbr->bus_high[index] =
						upper_32_bits(fbr_tmp_physaddr);
				fbr->bus_low[index] =
						lower_32_bits(fbr_tmp_physaddr);

				fbr_tmp_physaddr += fbr->buffsize;
			}
		}
	}

	pktstat_ringsize =
		sizeof(struct pkt_stat_desc) * rx_ring->psr_num_entries;

	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
						       pktstat_ringsize,
						       &rx_ring->ps_ring_physaddr,
						       GFP_KERNEL);

	if (!rx_ring->ps_ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}

	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
						      sizeof(struct rx_status_block),
						      &rx_ring->rx_status_bus,
						      GFP_KERNEL);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;

	INIT_LIST_HEAD(&rx_ring->recv_list);
	return 0;
}

static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u8 id;
	u32 index;
	u32 bufsize;
	u32 pktstat_ringsize;
	struct rfd *rfd;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);

	while (!list_empty(&rx_ring->recv_list)) {
		rfd = list_entry(rx_ring->recv_list.next,
				 struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kfree(rfd);
	}

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];

		if (!fbr || !fbr->ring_virtaddr)
			continue;

		for (index = 0;
		     index < fbr->num_entries / FBR_CHUNKS;
		     index++) {
			if (fbr->mem_virtaddrs[index]) {
				bufsize = fbr->buffsize * FBR_CHUNKS;

				dma_free_coherent(&adapter->pdev->dev,
						  bufsize,
						  fbr->mem_virtaddrs[index],
						  fbr->mem_physaddrs[index]);

				fbr->mem_virtaddrs[index] = NULL;
			}
		}

		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;

		dma_free_coherent(&adapter->pdev->dev,
				  bufsize,
				  fbr->ring_virtaddr,
				  fbr->ring_physaddr);

		fbr->ring_virtaddr = NULL;
	}

	if (rx_ring->ps_ring_virtaddr) {
		pktstat_ringsize = sizeof(struct pkt_stat_desc) *
				   rx_ring->psr_num_entries;

		dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
				  rx_ring->ps_ring_virtaddr,
				  rx_ring->ps_ring_physaddr);

		rx_ring->ps_ring_virtaddr = NULL;
	}

	if (rx_ring->rx_status_block) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(struct rx_status_block),
				  rx_ring->rx_status_block,
				  rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	kfree(rx_ring->fbr[0]);
	kfree(rx_ring->fbr[1]);

	rx_ring->num_ready_recv = 0;
}

static int et131x_init_recv(struct et131x_adapter *adapter)
{
	struct rfd *rfd;
	u32 rfdct;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
		rfd = kzalloc(sizeof(struct rfd), GFP_ATOMIC | GFP_DMA);
		if (!rfd)
			return -ENOMEM;

		rfd->skb = NULL;

		list_add_tail(&rfd->list_node, &rx_ring->recv_list);

		rx_ring->num_ready_recv++;
	}

	return 0;
}

static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
		writel(0, &adapter->regs->rxdma.max_pkt_time);
		writel(1, &adapter->regs->rxdma.num_pkt_done);
	}
}

static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;
	struct fbr_lookup *fbr = rx_local->fbr[ring_index];

	if (buff_index < fbr->num_entries) {
		u32 free_buff_ring;
		u32 __iomem *offset;
		struct fbr_desc *next;

		spin_lock_irqsave(&adapter->fbr_lock, flags);

		if (ring_index == 0)
			offset = &rx_dma->fbr0_full_offset;
		else
			offset = &rx_dma->fbr1_full_offset;

		next = (struct fbr_desc *)(fbr->ring_virtaddr) +
		       INDEX10(fbr->local_full);

		next->addr_hi = fbr->bus_high[buff_index];
		next->addr_lo = fbr->bus_low[buff_index];
		next->word2 = buff_index;

		free_buff_ring = bump_free_buff_ring(&fbr->local_full,
						     fbr->num_entries - 1);
		writel(free_buff_ring, offset);

		spin_unlock_irqrestore(&adapter->fbr_lock, flags);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}

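/* nic_rx_pkts - Handle one packet from the packet status ring: advance the
 * ring, validate the descriptor, apply multicast filtering, copy the data
 * into a fresh skb and hand it to the stack, then recycle the RFD.
 */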
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rx_status_block *status;
	struct pkt_stat_desc *psr;
	struct rfd *rfd;
	u32 i;
	u8 *buf;
	unsigned long flags;
	struct list_head *element;
	u8 ring_index;
	u16 buff_index;
	u32 len;
	u32 word0;
	u32 word1;
	struct sk_buff *skb;
	struct fbr_lookup *fbr;

	status = rx_local->rx_status_block;
	word1 = status->word1 >> 16;

	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		return NULL;

	psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
	      (rx_local->local_psr_full & 0xFFF);

	len = psr->word1 & 0xFFFF;
	ring_index = (psr->word1 >> 26) & 0x03;
	fbr = rx_local->fbr[ring_index];
	buff_index = (psr->word1 >> 16) & 0x3FF;
	word0 = psr->word0;

	add_12bit(&rx_local->local_psr_full, 1);
	if ((rx_local->local_psr_full & 0xFFF) >
	    rx_local->psr_num_entries - 1) {
		rx_local->local_psr_full &= ~0xFFF;
		rx_local->local_psr_full ^= 0x1000;
	}

	writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);

	if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
		dev_err(&adapter->pdev->dev,
			"NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF, len, buff_index);
		return NULL;
	}

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	element = rx_local->recv_list.next;
	rfd = list_entry(element, struct rfd, list_node);

	if (!rfd) {
		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
		return NULL;
	}

	list_del(&rfd->list_node);
	rx_local->num_ready_recv--;

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	rfd->bufferindex = buff_index;
	rfd->ringindex = ring_index;

	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		adapter->stats.rx_other_errs++;
		len = 0;
	}

	if (len == 0) {
		rfd->len = 0;
		goto out;
	}

	if ((word0 & ALCATEL_MULTICAST_PKT) &&
	    !(word0 & ALCATEL_BROADCAST_PKT)) {
		if ((adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST)
		    && !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS)
		    && !(adapter->packet_filter &
			 ET131X_PACKET_TYPE_ALL_MULTICAST)) {
			buf = fbr->virt[buff_index];

			for (i = 0; i < adapter->multicast_addr_count; i++) {
				if (buf[0] == adapter->multicast_list[i][0]
				    && buf[1] == adapter->multicast_list[i][1]
				    && buf[2] == adapter->multicast_list[i][2]
				    && buf[3] == adapter->multicast_list[i][3]
				    && buf[4] == adapter->multicast_list[i][4]
				    && buf[5] == adapter->multicast_list[i][5]) {
					break;
				}
			}

			if (i == adapter->multicast_addr_count)
				len = 0;
		}

		if (len > 0)
			adapter->stats.multicast_pkts_rcvd++;
	} else if (word0 & ALCATEL_BROADCAST_PKT) {
		adapter->stats.broadcast_pkts_rcvd++;
	} else {
		adapter->stats.unicast_pkts_rcvd++;
	}

	if (!len) {
		rfd->len = 0;
		goto out;
	}

	rfd->len = len;

	skb = dev_alloc_skb(rfd->len + 2);
	if (!skb) {
		dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n");
		return NULL;
	}

	adapter->net_stats.rx_bytes += rfd->len;

	memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);

	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_rx_ni(skb);

out:
	nic_return_rfd(adapter, rfd);
	return rfd;
}

static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
{
	struct rfd *rfd = NULL;
	u32 count = 0;
	bool done = true;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	while (count < NUM_PACKETS_HANDLED) {
		if (list_empty(&rx_ring->recv_list)) {
			WARN_ON(rx_ring->num_ready_recv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(adapter);

		if (rfd == NULL)
			break;

		if (!adapter->packet_filter ||
		    !netif_carrier_ok(adapter->netdev) ||
		    rfd->len == 0)
			continue;

		adapter->net_stats.rx_packets++;

		if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
			dev_warn(&adapter->pdev->dev, "RFD's are running out\n");

		count++;
	}

	if (count == NUM_PACKETS_HANDLED || !done) {
		rx_ring->unfinished_receives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	} else
		rx_ring->unfinished_receives = false;
}

static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
				    GFP_ATOMIC | GFP_DMA);
	if (!tx_ring->tcb_ring)
		return -ENOMEM;

	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
	tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
						   desc_size,
						   &tx_ring->tx_desc_ring_pa,
						   GFP_KERNEL);
	if (!tx_ring->tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
						sizeof(u32),
						&tx_ring->tx_status_pa,
						GFP_KERNEL);
	if (!tx_ring->tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}
2736
2737
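/* et131x_tx_dma_memory_free - Free all memory allocated within this module */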
2738static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
2739{
2740 int desc_size = 0;
2741 struct tx_ring *tx_ring = &adapter->tx_ring;
2742
2743 if (tx_ring->tx_desc_ring) {
		/* Free memory relating to Tx rings here */
2745 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2746 dma_free_coherent(&adapter->pdev->dev,
2747 desc_size,
2748 tx_ring->tx_desc_ring,
2749 tx_ring->tx_desc_ring_pa);
2750 tx_ring->tx_desc_ring = NULL;
2751 }
2752
	/* Free memory for the Tx status block */
2754 if (tx_ring->tx_status) {
2755 dma_free_coherent(&adapter->pdev->dev,
2756 sizeof(u32),
2757 tx_ring->tx_status,
2758 tx_ring->tx_status_pa);
2759
2760 tx_ring->tx_status = NULL;
2761 }
	/* Free the memory for the tcb structures */
2763 kfree(tx_ring->tcb_ring);
2764}
2765
2766
2767
2768
2769
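/* nic_send_packet - NIC specific send handler for version B silicon */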
2770static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
2771{
2772 u32 i;
2773 struct tx_desc desc[24];
2774 u32 frag = 0;
2775 u32 thiscopy, remainder;
2776 struct sk_buff *skb = tcb->skb;
2777 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
2778 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
2779 unsigned long flags;
2780 struct phy_device *phydev = adapter->phydev;
2781 dma_addr_t dma_addr;
2782 struct tx_ring *tx_ring = &adapter->tx_ring;
2783
	/* Part of the optimization of this send routine is that we send
	 * up to 24 fragments at a pass; in practice we should never see
	 * more than 5 fragments.
	 */

	/* nr_frags should be no more than 18 */
	BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
2795
2796 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
2797
2798 for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
2812 if (skb_headlen(skb) <= 1514) {
2813
				/* Low 16 bits are length, high is vlan and
				 * unused currently, so zero
				 */
2816 desc[frag].len_vlan = skb_headlen(skb);
2817 dma_addr = dma_map_single(&adapter->pdev->dev,
2818 skb->data,
2819 skb_headlen(skb),
2820 DMA_TO_DEVICE);
2821 desc[frag].addr_lo = lower_32_bits(dma_addr);
2822 desc[frag].addr_hi = upper_32_bits(dma_addr);
2823 frag++;
2824 } else {
2825 desc[frag].len_vlan = skb_headlen(skb) / 2;
2826 dma_addr = dma_map_single(&adapter->pdev->dev,
2827 skb->data,
2828 (skb_headlen(skb) / 2),
2829 DMA_TO_DEVICE);
2830 desc[frag].addr_lo = lower_32_bits(dma_addr);
2831 desc[frag].addr_hi = upper_32_bits(dma_addr);
2832 frag++;
2833
2834 desc[frag].len_vlan = skb_headlen(skb) / 2;
2835 dma_addr = dma_map_single(&adapter->pdev->dev,
2836 skb->data +
2837 (skb_headlen(skb) / 2),
2838 (skb_headlen(skb) / 2),
2839 DMA_TO_DEVICE);
2840 desc[frag].addr_lo = lower_32_bits(dma_addr);
2841 desc[frag].addr_hi = upper_32_bits(dma_addr);
2842 frag++;
2843 }
2844 } else {
2845 desc[frag].len_vlan = frags[i - 1].size;
2846 dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
2847 &frags[i - 1],
2848 0,
2849 frags[i - 1].size,
2850 DMA_TO_DEVICE);
2851 desc[frag].addr_lo = lower_32_bits(dma_addr);
2852 desc[frag].addr_hi = upper_32_bits(dma_addr);
2853 frag++;
2854 }
2855 }
2856
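	/* desc[frag - 1] is the last descriptor of this packet. At gigabit
	 * the completion interrupt rate is moderated: request an interrupt
	 * only on every PARM_TX_NUM_BUFS_DEF-th packet and let the watchdog
	 * timer (armed below) pick up the stragglers. At lower speeds an
	 * interrupt is requested for every packet.
	 */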
2857 if (phydev && phydev->speed == SPEED_1000) {
		if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
2860 desc[frag - 1].flags =
2861 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2862 tx_ring->since_irq = 0;
2863 } else {
2864 desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
2865 }
2866 } else
2867 desc[frag - 1].flags =
2868 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2869
2870 desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
2871
2872 tcb->index_start = tx_ring->send_idx;
2873 tcb->stale = 0;
2874
2875 spin_lock_irqsave(&adapter->send_hw_lock, flags);
2876
2877 thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);
2878
2879 if (thiscopy >= frag) {
2880 remainder = 0;
2881 thiscopy = frag;
2882 } else {
2883 remainder = frag - thiscopy;
2884 }
2885
2886 memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
2887 desc,
2888 sizeof(struct tx_desc) * thiscopy);
2889
2890 add_10bit(&tx_ring->send_idx, thiscopy);
2891
2892 if (INDEX10(tx_ring->send_idx) == 0 ||
2893 INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
2894 tx_ring->send_idx &= ~ET_DMA10_MASK;
2895 tx_ring->send_idx ^= ET_DMA10_WRAP;
2896 }
2897
2898 if (remainder) {
2899 memcpy(tx_ring->tx_desc_ring,
2900 desc + thiscopy,
2901 sizeof(struct tx_desc) * remainder);
2902
2903 add_10bit(&tx_ring->send_idx, remainder);
2904 }
2905
2906 if (INDEX10(tx_ring->send_idx) == 0) {
2907 if (tx_ring->send_idx)
2908 tcb->index = NUM_DESC_PER_RING_TX - 1;
2909 else
2910 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
2911 } else
2912 tcb->index = tx_ring->send_idx - 1;
2913
2914 spin_lock(&adapter->tcb_send_qlock);
2915
2916 if (tx_ring->send_tail)
2917 tx_ring->send_tail->next = tcb;
2918 else
2919 tx_ring->send_head = tcb;
2920
2921 tx_ring->send_tail = tcb;
2922
2923 WARN_ON(tcb->next != NULL);
2924
2925 tx_ring->used++;
2926
2927 spin_unlock(&adapter->tcb_send_qlock);
2928
	/* Write the new write pointer back to the device */
	writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
2935 if (phydev && phydev->speed == SPEED_1000) {
2936 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2937 &adapter->regs->global.watchdog_timer);
2938 }
2939 spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2940
2941 return 0;
2942}
2943
2944
2945
2946
2947
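/* send_packet - Do the work to send a packet */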
2948static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
2949{
2950 int status;
2951 struct tcb *tcb;
2952 u16 *shbufva;
2953 unsigned long flags;
2954 struct tx_ring *tx_ring = &adapter->tx_ring;
2955
	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
2961 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2962
2963 tcb = tx_ring->tcb_qhead;
2964
2965 if (tcb == NULL) {
2966 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2967 return -ENOMEM;
2968 }
2969
2970 tx_ring->tcb_qhead = tcb->next;
2971
2972 if (tx_ring->tcb_qhead == NULL)
2973 tx_ring->tcb_qtail = NULL;
2974
2975 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2976
2977 tcb->skb = skb;
2978
2979 if (skb->data != NULL && skb_headlen(skb) >= 6) {
2980 shbufva = (u16 *) skb->data;
2981
2982 if ((shbufva[0] == 0xffff) &&
2983 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff))
2984 tcb->flags |= FMP_DEST_BROAD;
2985 else if ((shbufva[0] & 0x3) == 0x0001)
2986 tcb->flags |= FMP_DEST_MULTI;
2987 }
2988
2989 tcb->next = NULL;
2990
	/* Call the NIC-specific send handler */
2992 status = nic_send_packet(adapter, tcb);
2993
2994 if (status != 0) {
2995 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2996
2997 if (tx_ring->tcb_qtail)
2998 tx_ring->tcb_qtail->next = tcb;
2999 else
			/* Apparently ready Q is empty */
3001 tx_ring->tcb_qhead = tcb;
3002
3003 tx_ring->tcb_qtail = tcb;
3004 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3005 return status;
3006 }
3007 WARN_ON(tx_ring->used > NUM_TCB);
3008 return 0;
3009}
3010
3011
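/* et131x_send_packets - This function is called by the OS to send packets */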
3012static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3013{
3014 int status = 0;
3015 struct et131x_adapter *adapter = netdev_priv(netdev);
3016 struct tx_ring *tx_ring = &adapter->tx_ring;
3017
	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its array used makes no sense here
	 */

	/* TCB is not available */
	if (tx_ring->used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
3031 } else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
3035 if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
3036 !netif_carrier_ok(netdev)) {
3037 dev_kfree_skb_any(skb);
3038 skb = NULL;
3039
3040 adapter->net_stats.tx_dropped++;
3041 } else {
3042 status = send_packet(skb, adapter);
3043 if (status != 0 && status != -ENOMEM) {
			/* On any other error, make netif think we're
			 * OK and drop the packet
			 */
3047 dev_kfree_skb_any(skb);
3048 skb = NULL;
3049 adapter->net_stats.tx_dropped++;
3050 }
3051 }
3052 }
3053 return status;
3054}
3055
3056
3057
3058
3059
3060
3061
3062
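/* free_send_packet - Recycle a struct tcb and complete the packet if
 * necessary.
 *
 * Callers must drop the TCB send-queue lock first; this function takes the
 * ready-queue lock itself.
 */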
3063static inline void free_send_packet(struct et131x_adapter *adapter,
3064 struct tcb *tcb)
3065{
3066 unsigned long flags;
3067 struct tx_desc *desc = NULL;
3068 struct net_device_stats *stats = &adapter->net_stats;
3069 struct tx_ring *tx_ring = &adapter->tx_ring;
3070 u64 dma_addr;
3071
3072 if (tcb->flags & FMP_DEST_BROAD)
3073 atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3074 else if (tcb->flags & FMP_DEST_MULTI)
3075 atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3076 else
3077 atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3078
3079 if (tcb->skb) {
3080 stats->tx_bytes += tcb->skb->len;
3081
		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
3086 do {
3087 desc = tx_ring->tx_desc_ring +
3088 INDEX10(tcb->index_start);
3089
3090 dma_addr = desc->addr_lo;
3091 dma_addr |= (u64)desc->addr_hi << 32;
3092
3093 dma_unmap_single(&adapter->pdev->dev,
3094 dma_addr,
3095 desc->len_vlan, DMA_TO_DEVICE);
3096
3097 add_10bit(&tcb->index_start, 1);
3098 if (INDEX10(tcb->index_start) >=
3099 NUM_DESC_PER_RING_TX) {
3100 tcb->index_start &= ~ET_DMA10_MASK;
3101 tcb->index_start ^= ET_DMA10_WRAP;
3102 }
3103 } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));
3104
3105 dev_kfree_skb_any(tcb->skb);
3106 }
3107
3108 memset(tcb, 0, sizeof(struct tcb));
3109
	/* Add the TCB to the Ready Q */
3111 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3112
3113 adapter->net_stats.tx_packets++;
3114
3115 if (tx_ring->tcb_qtail)
3116 tx_ring->tcb_qtail->next = tcb;
3117 else
		/* Apparently ready Q is empty */
3119 tx_ring->tcb_qhead = tcb;
3120
3121 tx_ring->tcb_qtail = tcb;
3122
3123 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3124 WARN_ON(tx_ring->used < 0);
3125}
3126
3127
3128
3129
3130
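/* et131x_free_busy_send_packets - Free and complete the stopped active sends */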
3131static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3132{
3133 struct tcb *tcb;
3134 unsigned long flags;
3135 u32 freed = 0;
3136 struct tx_ring *tx_ring = &adapter->tx_ring;
3137
	/* Any packets being sent? Check the first TCB on the send list */
3139 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3140
3141 tcb = tx_ring->send_head;
3142
3143 while (tcb != NULL && freed < NUM_TCB) {
3144 struct tcb *next = tcb->next;
3145
3146 tx_ring->send_head = next;
3147
3148 if (next == NULL)
3149 tx_ring->send_tail = NULL;
3150
3151 tx_ring->used--;
3152
3153 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3154
3155 freed++;
3156 free_send_packet(adapter, tcb);
3157
3158 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3159
3160 tcb = tx_ring->send_head;
3161 }
3162
3163 WARN_ON(freed == NUM_TCB);
3164
3165 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3166
3167 tx_ring->used = 0;
3168}
3169
3170
3171
3172
3173
3174
3175
3176
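/* et131x_handle_send_interrupt - Interrupt handler for sending processing
 *
 * Re-claim the send resources and complete sends. The hardware tells us
 * which descriptors it has finished with via the new_service_complete
 * register.
 */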
3177static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3178{
3179 unsigned long flags;
3180 u32 serviced;
3181 struct tcb *tcb;
3182 u32 index;
3183 struct tx_ring *tx_ring = &adapter->tx_ring;
3184
3185 serviced = readl(&adapter->regs->txdma.new_service_complete);
3186 index = INDEX10(serviced);
3187
	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
3191 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3192
3193 tcb = tx_ring->send_head;
3194
3195 while (tcb &&
3196 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3197 index < INDEX10(tcb->index)) {
3198 tx_ring->used--;
3199 tx_ring->send_head = tcb->next;
3200 if (tcb->next == NULL)
3201 tx_ring->send_tail = NULL;
3202
3203 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3204 free_send_packet(adapter, tcb);
3205 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3206
3207
3208 tcb = tx_ring->send_head;
3209 }
3210 while (tcb &&
3211 !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3212 && index > (tcb->index & ET_DMA10_MASK)) {
3213 tx_ring->used--;
3214 tx_ring->send_head = tcb->next;
3215 if (tcb->next == NULL)
3216 tx_ring->send_tail = NULL;
3217
3218 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3219 free_send_packet(adapter, tcb);
3220 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3221
3222
3223 tcb = tx_ring->send_head;
3224 }
3225
	/* Wake up the queue when we hit a low-water mark */
3227 if (tx_ring->used <= NUM_TCB / 3)
3228 netif_wake_queue(adapter->netdev);
3229
3230 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3231}
3232
3233static int et131x_get_settings(struct net_device *netdev,
3234 struct ethtool_cmd *cmd)
3235{
3236 struct et131x_adapter *adapter = netdev_priv(netdev);
3237
3238 return phy_ethtool_gset(adapter->phydev, cmd);
3239}
3240
3241static int et131x_set_settings(struct net_device *netdev,
3242 struct ethtool_cmd *cmd)
3243{
3244 struct et131x_adapter *adapter = netdev_priv(netdev);
3245
3246 return phy_ethtool_sset(adapter->phydev, cmd);
3247}
3248
3249static int et131x_get_regs_len(struct net_device *netdev)
3250{
3251#define ET131X_REGS_LEN 256
3252 return ET131X_REGS_LEN * sizeof(u32);
3253}
3254
3255static void et131x_get_regs(struct net_device *netdev,
3256 struct ethtool_regs *regs, void *regs_data)
3257{
3258 struct et131x_adapter *adapter = netdev_priv(netdev);
3259 struct address_map __iomem *aregs = adapter->regs;
3260 u32 *regs_buff = regs_data;
3261 u32 num = 0;
3262 u16 tmp;
3263
3264 memset(regs_data, 0, et131x_get_regs_len(netdev));
3265
3266 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3267 adapter->pdev->device;
3268
	/* PHY regs */
3270 et131x_mii_read(adapter, MII_BMCR, &tmp);
3271 regs_buff[num++] = tmp;
3272 et131x_mii_read(adapter, MII_BMSR, &tmp);
3273 regs_buff[num++] = tmp;
3274 et131x_mii_read(adapter, MII_PHYSID1, &tmp);
3275 regs_buff[num++] = tmp;
3276 et131x_mii_read(adapter, MII_PHYSID2, &tmp);
3277 regs_buff[num++] = tmp;
3278 et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
3279 regs_buff[num++] = tmp;
3280 et131x_mii_read(adapter, MII_LPA, &tmp);
3281 regs_buff[num++] = tmp;
3282 et131x_mii_read(adapter, MII_EXPANSION, &tmp);
3283 regs_buff[num++] = tmp;
	/* Autoneg next page transmit reg */
3285 et131x_mii_read(adapter, 0x07, &tmp);
3286 regs_buff[num++] = tmp;
	/* Link partner next page reg */
3288 et131x_mii_read(adapter, 0x08, &tmp);
3289 regs_buff[num++] = tmp;
3290 et131x_mii_read(adapter, MII_CTRL1000, &tmp);
3291 regs_buff[num++] = tmp;
3292 et131x_mii_read(adapter, MII_STAT1000, &tmp);
3293 regs_buff[num++] = tmp;
3294 et131x_mii_read(adapter, 0x0b, &tmp);
3295 regs_buff[num++] = tmp;
3296 et131x_mii_read(adapter, 0x0c, &tmp);
3297 regs_buff[num++] = tmp;
3298 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
3299 regs_buff[num++] = tmp;
3300 et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
3301 regs_buff[num++] = tmp;
3302 et131x_mii_read(adapter, MII_ESTATUS, &tmp);
3303 regs_buff[num++] = tmp;
3304
3305 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
3306 regs_buff[num++] = tmp;
3307 et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
3308 regs_buff[num++] = tmp;
3309 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
3310 regs_buff[num++] = tmp;
3311 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
3312 regs_buff[num++] = tmp;
3313 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
3314 regs_buff[num++] = tmp;
3315
3316 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
3317 regs_buff[num++] = tmp;
3318 et131x_mii_read(adapter, PHY_CONFIG, &tmp);
3319 regs_buff[num++] = tmp;
3320 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
3321 regs_buff[num++] = tmp;
3322 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
3323 regs_buff[num++] = tmp;
3324 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
3325 regs_buff[num++] = tmp;
3326 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
3327 regs_buff[num++] = tmp;
3328 et131x_mii_read(adapter, PHY_LED_1, &tmp);
3329 regs_buff[num++] = tmp;
3330 et131x_mii_read(adapter, PHY_LED_2, &tmp);
3331 regs_buff[num++] = tmp;
3332
	/* Global regs */
3334 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3335 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3336 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3337 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3338 regs_buff[num++] = readl(&aregs->global.pm_csr);
3339 regs_buff[num++] = adapter->stats.interrupt_status;
3340 regs_buff[num++] = readl(&aregs->global.int_mask);
3341 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3342 regs_buff[num++] = readl(&aregs->global.int_status_alias);
3343 regs_buff[num++] = readl(&aregs->global.sw_reset);
3344 regs_buff[num++] = readl(&aregs->global.slv_timer);
3345 regs_buff[num++] = readl(&aregs->global.msi_config);
3346 regs_buff[num++] = readl(&aregs->global.loopback);
3347 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3348
	/* TXDMA regs */
3350 regs_buff[num++] = readl(&aregs->txdma.csr);
3351 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3352 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3353 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3354 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3355 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3356 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3357 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3358 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3359 regs_buff[num++] = readl(&aregs->txdma.service_request);
3360 regs_buff[num++] = readl(&aregs->txdma.service_complete);
3361 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3362 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3363 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3364 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3365 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3366 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3367 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3368 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3369 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3370 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3371 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3372 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3373 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3374 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3375 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3376
	/* RXDMA regs */
3378 regs_buff[num++] = readl(&aregs->rxdma.csr);
3379 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3380 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3381 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3382 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3383 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3384 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3385 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3386 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3387 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3388 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3389 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3390 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3391 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3392 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3393 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3394 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3395 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3396 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3397 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3398 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3399 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3400 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3401 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3402 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3403 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3404 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3405 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3406 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3407}
3408
3409static void et131x_get_drvinfo(struct net_device *netdev,
3410 struct ethtool_drvinfo *info)
3411{
3412 struct et131x_adapter *adapter = netdev_priv(netdev);
3413
3414 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
3415 strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
3416 strlcpy(info->bus_info, pci_name(adapter->pdev),
3417 sizeof(info->bus_info));
3418}
3419
static const struct ethtool_ops et131x_ethtool_ops = {
3421 .get_settings = et131x_get_settings,
3422 .set_settings = et131x_set_settings,
3423 .get_drvinfo = et131x_get_drvinfo,
3424 .get_regs_len = et131x_get_regs_len,
3425 .get_regs = et131x_get_regs,
3426 .get_link = ethtool_op_get_link,
3427};
3428
3429
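/* et131x_hwaddr_init - set up the MAC Address on the ET1310 */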
3430static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3431{
	/* If we have our default MAC from init and no MAC address from
	 * EEPROM, then we need to generate the last octet and set it on
	 * the device
	 */
3436 if (is_zero_ether_addr(adapter->rom_addr)) {
		/* We need to randomly generate the last octet so we
		 * decrease our chances of setting the MAC address to the
		 * same as another one of our cards in the system
		 */
3441 get_random_bytes(&adapter->addr[5], 1);
3442
		/* We have the default value in the register we are
		 * working with, so we need to copy the current
		 * address into the permanent address
		 */
3446 memcpy(adapter->rom_addr,
3447 adapter->addr, ETH_ALEN);
3448 } else {
		/* We do not have an override address, so set the
		 * current address to the permanent address and add
		 * it to the device
		 */
3453 memcpy(adapter->addr,
3454 adapter->rom_addr, ETH_ALEN);
3455 }
3456}
3457
3458
3459
3460
3461
3462
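/* et131x_pci_init - initial PCI setup
 *
 * Perform the initial setup of PCI registers and, if possible, initialise
 * the MAC address. At this point the I/O registers have yet to be mapped.
 */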
3463static int et131x_pci_init(struct et131x_adapter *adapter,
3464 struct pci_dev *pdev)
3465{
3466 u16 max_payload;
3467 int i, rc;
3468
3469 rc = et131x_init_eeprom(adapter);
3470 if (rc < 0)
3471 goto out;
3472
3473 if (!pci_is_pcie(pdev)) {
3474 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
3475 goto err_out;
3476 }
3477
	/* Let's set up the PORT LOGIC Register. */

	/* Program the Ack/Nak latency and replay timers */
	max_payload = pdev->pcie_mpss;
3482
3483 if (max_payload < 2) {
3484 static const u16 acknak[2] = { 0x76, 0xD0 };
3485 static const u16 replay[2] = { 0x1E0, 0x2ED };
3486
3487 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3488 acknak[max_payload])) {
3489 dev_err(&pdev->dev,
3490 "Could not write PCI config space for ACK/NAK\n");
3491 goto err_out;
3492 }
3493 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3494 replay[max_payload])) {
3495 dev_err(&pdev->dev,
3496 "Could not write PCI config space for Replay Timer\n");
3497 goto err_out;
3498 }
3499 }
3500
	/* l0s and l1 latency timers. We are using default values.
	 * Representing 001 for L0s and 010 for L1
	 */
3504 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3505 dev_err(&pdev->dev,
3506 "Could not write PCI config space for Latency Timers\n");
3507 goto err_out;
3508 }
3509
	/* Change the max read size to 2k */
3511 if (pcie_set_readrq(pdev, 2048)) {
3512 dev_err(&pdev->dev,
3513 "Couldn't change PCI config space for Max read size\n");
3514 goto err_out;
3515 }
3516
	/* Get MAC address from config space if an eeprom exists, otherwise
	 * the MAC address there is not valid
	 */
3520 if (!adapter->has_eeprom) {
3521 et131x_hwaddr_init(adapter);
3522 return 0;
3523 }
3524
3525 for (i = 0; i < ETH_ALEN; i++) {
3526 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3527 adapter->rom_addr + i)) {
3528 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3529 goto err_out;
3530 }
3531 }
3532 ether_addr_copy(adapter->addr, adapter->rom_addr);
3533out:
3534 return rc;
3535err_out:
3536 rc = -EIO;
3537 goto out;
3538}
3539
3540
3541
3542
3543
3544
3545
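/* et131x_error_timer_handler - The routine called when the error timer
 * expires, to track the number of recurring errors.
 * @data: a pointer to our adapter structure, cast to an unsigned long
 */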
3546static void et131x_error_timer_handler(unsigned long data)
3547{
3548 struct et131x_adapter *adapter = (struct et131x_adapter *) data;
3549 struct phy_device *phydev = adapter->phydev;
3550
3551 if (et1310_in_phy_coma(adapter)) {
		/* Bring the device immediately out of coma, to
		 * prevent it from sleeping indefinitely; this
		 * just changes the state machine
		 */
3556 et1310_disable_phy_coma(adapter);
3557 adapter->boot_coma = 20;
3558 } else {
3559 et1310_update_macstat_host_counters(adapter);
3560 }
3561
3562 if (!phydev->link && adapter->boot_coma < 11)
3563 adapter->boot_coma++;
3564
3565 if (adapter->boot_coma == 10) {
3566 if (!phydev->link) {
3567 if (!et1310_in_phy_coma(adapter)) {
				/* Link has stayed down for the whole grace
				 * period, so put the PHY into coma mode to
				 * save power
				 */
3571 et131x_enable_interrupts(adapter);
3572 et1310_enable_phy_coma(adapter);
3573 }
3574 }
3575 }
3576
	/* This is a periodic timer, so reschedule */
3578 mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
3579}
3580
3581
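/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx */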
3582static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
3583{
3584 et131x_tx_dma_memory_free(adapter);
3585 et131x_rx_dma_memory_free(adapter);
3586}
3587
3588
3589
3590
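/* et131x_adapter_memory_alloc - Allocate all the memory blocks for send,
 * receive and others.
 */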
3591static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3592{
3593 int status;
3594
	/* Allocate memory for the Tx Ring */
3596 status = et131x_tx_dma_memory_alloc(adapter);
3597 if (status) {
3598 dev_err(&adapter->pdev->dev,
3599 "et131x_tx_dma_memory_alloc FAILED\n");
3600 et131x_tx_dma_memory_free(adapter);
3601 return status;
3602 }
3603
3604 status = et131x_rx_dma_memory_alloc(adapter);
3605 if (status) {
3606 dev_err(&adapter->pdev->dev,
3607 "et131x_rx_dma_memory_alloc FAILED\n");
3608 et131x_adapter_memory_free(adapter);
3609 return status;
3610 }
3611
	/* Init receive data structures */
3613 status = et131x_init_recv(adapter);
3614 if (status) {
3615 dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
3616 et131x_adapter_memory_free(adapter);
3617 }
3618 return status;
3619}
3620
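/* et131x_adjust_link - The state-change callback registered with the PHY */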
3621static void et131x_adjust_link(struct net_device *netdev)
3622{
3623 struct et131x_adapter *adapter = netdev_priv(netdev);
3624 struct phy_device *phydev = adapter->phydev;
3625
3626 if (!phydev)
3627 return;
3628 if (phydev->link == adapter->link)
3629 return;
3630
	/* Check to see if we are in coma mode and if so,
	 * disable it because we will not be able to read PHY
	 * values until we are out.
	 */
3635 if (et1310_in_phy_coma(adapter))
3636 et1310_disable_phy_coma(adapter);
3637
3638 adapter->link = phydev->link;
3639 phy_print_status(phydev);
3640
3641 if (phydev->link) {
3642 adapter->boot_coma = 20;
3643 if (phydev->speed == SPEED_10) {
3644 u16 register18;
3645
3646 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3647 ®ister18);
3648 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3649 register18 | 0x4);
3650 et131x_mii_write(adapter, PHY_INDEX_REG,
3651 register18 | 0x8402);
3652 et131x_mii_write(adapter, PHY_DATA_REG,
3653 register18 | 511);
3654 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3655 register18);
3656 }
3657
3658 et1310_config_flow_control(adapter);
3659
3660 if (phydev->speed == SPEED_1000 &&
3661 adapter->registry_jumbo_packet > 2048) {
3662 u16 reg;
3663
3664 et131x_mii_read(adapter, PHY_CONFIG, ®);
3665 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
3666 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
3667 et131x_mii_write(adapter, PHY_CONFIG, reg);
3668 }
3669
3670 et131x_set_rx_dma_timer(adapter);
3671 et1310_config_mac_regs2(adapter);
3672 } else {
3673 adapter->boot_coma = 0;
3674
3675 if (phydev->speed == SPEED_10) {
3676 u16 register18;
3677
3678 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3679 ®ister18);
3680 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3681 register18 | 0x4);
3682 et131x_mii_write(adapter, PHY_INDEX_REG,
3683 register18 | 0x8402);
3684 et131x_mii_write(adapter, PHY_DATA_REG,
3685 register18 | 511);
3686 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
3687 register18);
3688 }
3689
		/* Free the packets being actively sent & stopped */
		et131x_free_busy_send_packets(adapter);

		/* Re-initialize the send structures */
		et131x_init_send(adapter);

		/* Bring the device back to the state it was during
		 * init prior to autonegotiation being complete. This
		 * way, when we get the auto-neg complete interrupt,
		 * we can complete init by calling config_mac_regs2.
		 */
		et131x_soft_reset(adapter);

		/* Setup ET1310 as per the documentation */
		et131x_adapter_setup(adapter);

		/* Perform reset of tx/rx */
		et131x_disable_txrx(netdev);
		et131x_enable_txrx(netdev);
3709 }
3710}
3711
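/* et131x_mii_probe - Find the first PHY on our MDIO bus and attach to it */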
3712static int et131x_mii_probe(struct net_device *netdev)
3713{
3714 struct et131x_adapter *adapter = netdev_priv(netdev);
3715 struct phy_device *phydev = NULL;
3716
3717 phydev = phy_find_first(adapter->mii_bus);
3718 if (!phydev) {
3719 dev_err(&adapter->pdev->dev, "no PHY found\n");
3720 return -ENODEV;
3721 }
3722
3723 phydev = phy_connect(netdev, dev_name(&phydev->dev),
3724 &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
3725
3726 if (IS_ERR(phydev)) {
3727 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3728 return PTR_ERR(phydev);
3729 }
3730
3731 phydev->supported &= (SUPPORTED_10baseT_Half
3732 | SUPPORTED_10baseT_Full
3733 | SUPPORTED_100baseT_Half
3734 | SUPPORTED_100baseT_Full
3735 | SUPPORTED_Autoneg
3736 | SUPPORTED_MII
3737 | SUPPORTED_TP);
3738
3739 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3740 phydev->supported |= SUPPORTED_1000baseT_Full;
3741
3742 phydev->advertising = phydev->supported;
3743 adapter->phydev = phydev;
3744
3745 dev_info(&adapter->pdev->dev,
3746 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
3747 phydev->drv->name, dev_name(&phydev->dev));
3748
3749 return 0;
3750}
3751
3752
3753
3754
3755
3756
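/* et131x_adapter_init - Initialize the private adapter structure embedded in
 * the net_device, and link it with the PCI and net device structures.
 */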
3757static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3758 struct pci_dev *pdev)
3759{
3760 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3761
3762 struct et131x_adapter *adapter;
3763
3764
3765 adapter = netdev_priv(netdev);
3766 adapter->pdev = pci_dev_get(pdev);
3767 adapter->netdev = netdev;
3768
	/* Initialize spinlocks here */
3770 spin_lock_init(&adapter->tcb_send_qlock);
3771 spin_lock_init(&adapter->tcb_ready_qlock);
3772 spin_lock_init(&adapter->send_hw_lock);
3773 spin_lock_init(&adapter->rcv_lock);
3774 spin_lock_init(&adapter->fbr_lock);
3775
3776 adapter->registry_jumbo_packet = 1514;
3777
	/* Set the MAC address to a default */
3779 ether_addr_copy(adapter->addr, default_mac);
3780
3781 return adapter;
3782}
3783
3784
3785
3786
3787
3788
3789
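/* et131x_pci_remove - Deallocate all resources used by the driver
 *
 * Called by the PCI subsystem when the device is removed or the driver is
 * unloaded; undoes everything done in et131x_pci_setup().
 */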
3790static void et131x_pci_remove(struct pci_dev *pdev)
3791{
3792 struct net_device *netdev = pci_get_drvdata(pdev);
3793 struct et131x_adapter *adapter = netdev_priv(netdev);
3794
3795 unregister_netdev(netdev);
3796 phy_disconnect(adapter->phydev);
3797 mdiobus_unregister(adapter->mii_bus);
3798 cancel_work_sync(&adapter->task);
3799 kfree(adapter->mii_bus->irq);
3800 mdiobus_free(adapter->mii_bus);
3801
3802 et131x_adapter_memory_free(adapter);
3803 iounmap(adapter->regs);
3804 pci_dev_put(pdev);
3805
3806 free_netdev(netdev);
3807 pci_release_regions(pdev);
3808 pci_disable_device(pdev);
3809}
3810
3811
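/* et131x_up - Bring up a device for use */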
3812static void et131x_up(struct net_device *netdev)
3813{
3814 struct et131x_adapter *adapter = netdev_priv(netdev);
3815
3816 et131x_enable_txrx(netdev);
3817 phy_start(adapter->phydev);
3818}
3819
3820
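/* et131x_down - Bring down the device */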
3821static void et131x_down(struct net_device *netdev)
3822{
3823 struct et131x_adapter *adapter = netdev_priv(netdev);
3824
	/* Save the timestamp for the TX watchdog, prevent a timeout */
3826 netdev->trans_start = jiffies;
3827
3828 phy_stop(adapter->phydev);
3829 et131x_disable_txrx(netdev);
3830}
3831
3832#ifdef CONFIG_PM_SLEEP
3833static int et131x_suspend(struct device *dev)
3834{
3835 struct pci_dev *pdev = to_pci_dev(dev);
3836 struct net_device *netdev = pci_get_drvdata(pdev);
3837
3838 if (netif_running(netdev)) {
3839 netif_device_detach(netdev);
3840 et131x_down(netdev);
3841 pci_save_state(pdev);
3842 }
3843
3844 return 0;
3845}
3846
3847static int et131x_resume(struct device *dev)
3848{
3849 struct pci_dev *pdev = to_pci_dev(dev);
3850 struct net_device *netdev = pci_get_drvdata(pdev);
3851
3852 if (netif_running(netdev)) {
3853 pci_restore_state(pdev);
3854 et131x_up(netdev);
3855 netif_device_attach(netdev);
3856 }
3857
3858 return 0;
3859}
3860
3861static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
3862#define ET131X_PM_OPS (&et131x_pm_ops)
3863#else
3864#define ET131X_PM_OPS NULL
3865#endif
3866
3867
3868
3869
3870
3871
3872
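/* et131x_isr - The Interrupt Service Routine for the driver
 *
 * All heavy lifting is deferred to a work queue: here we only quiesce the
 * device, filter and stash the interrupt status, and schedule the handler.
 */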
3873static irqreturn_t et131x_isr(int irq, void *dev_id)
3874{
3875 bool handled = true;
3876 struct net_device *netdev = (struct net_device *)dev_id;
3877 struct et131x_adapter *adapter = netdev_priv(netdev);
3878 struct rx_ring *rx_ring = &adapter->rx_ring;
3879 struct tx_ring *tx_ring = &adapter->tx_ring;
3880 u32 status;
3881
3882 if (!netif_device_present(netdev)) {
3883 handled = false;
3884 goto out;
3885 }
3886
	/* If the adapter is in low power state, then it should not
	 * recognize any interrupt
	 */

	/* Disable Device Interrupts */
	et131x_disable_interrupts(adapter);

	/* Get a copy of the value in the interrupt status register
	 * so we can process the interrupting section
	 */
	status = readl(&adapter->regs->global.int_status);
3898
3899 if (adapter->flowcontrol == FLOW_TXONLY ||
3900 adapter->flowcontrol == FLOW_BOTH) {
3901 status &= ~INT_MASK_ENABLE;
3902 } else {
3903 status &= ~INT_MASK_ENABLE_NO_FLOW;
3904 }
3905
	/* Make sure this is our interrupt */
3907 if (!status) {
3908 handled = false;
3909 et131x_enable_interrupts(adapter);
3910 goto out;
3911 }
3912
3913
	/* This is our interrupt, so process accordingly */
3915 if (status & ET_INTR_WATCHDOG) {
3916 struct tcb *tcb = tx_ring->send_head;
3917
3918 if (tcb)
3919 if (++tcb->stale > 1)
3920 status |= ET_INTR_TXDMA_ISR;
3921
3922 if (rx_ring->unfinished_receives)
3923 status |= ET_INTR_RXDMA_XFR_DONE;
3924 else if (tcb == NULL)
3925 writel(0, &adapter->regs->global.watchdog_timer);
3926
3927 status &= ~ET_INTR_WATCHDOG;
3928 }
3929
3930 if (!status) {
		/* This interrupt has in some way been "handled" by
		 * the ISR. Either it was a spurious Rx interrupt, or
		 * it was a Tx interrupt that has been filtered by
		 * the ISR.
		 */
3936 et131x_enable_interrupts(adapter);
3937 goto out;
3938 }
3939
3940
	/* We need to save the interrupt status value for use in our
	 * handler below.
	 */
	adapter->stats.interrupt_status = status;

	/* Schedule the ISR handler as a bottom-half task in the
	 * kernel's work queue, and mark the queue for execution
	 */
	schedule_work(&adapter->task);
3951out:
3952 return IRQ_RETVAL(handled);
3953}
3954
3955
3956
3957
3958
3959
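/* et131x_isr_handler - The ISR handler, scheduled to run in a deferred
 * context by the ISR when a "slow" interrupt needs handling.
 */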
3960static void et131x_isr_handler(struct work_struct *work)
3961{
3962 struct et131x_adapter *adapter =
3963 container_of(work, struct et131x_adapter, task);
3964 u32 status = adapter->stats.interrupt_status;
3965 struct address_map __iomem *iomem = adapter->regs;
3966
	/* These first two are by far the most common. Once handled, we
	 * clear their two bits in the status word. If the word is now
	 * zero, we exit.
	 */

	/* Handle all the completed Transmit interrupts */
	if (status & ET_INTR_TXDMA_ISR)
		et131x_handle_send_interrupt(adapter);

	/* Handle all the completed Receives interrupts */
	if (status & ET_INTR_RXDMA_XFR_DONE)
		et131x_handle_recv_interrupt(adapter);

	status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
3980
3981 if (!status)
3982 goto out;
3983

	/* Handle the TXDMA Error interrupt */
	if (status & ET_INTR_TXDMA_ERR) {
		/* Following read also clears the register (COR) */
		u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
3988
3989 dev_warn(&adapter->pdev->dev,
3990 "TXDMA_ERR interrupt, error = %d\n",
3991 txdma_err);
3992 }
3993
3994
	/* Handle Free Buffer Ring 0 and 1 Low interrupt */
	if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
		/* This indicates the number of unused buffers in RXDMA free
		 * buffer ring 0 is <= the limit you programmed. Free buffer
		 * resources need to be returned. Free buffers are consumed
		 * as packets are passed from the network to the host. The
		 * host becomes aware of the packets from the contents of
		 * the packet status ring. This ring is queried when the
		 * packet done interrupt occurs. Packets are then passed to
		 * the OS. When the OS is done with the packets the
		 * resources can be returned to the ET1310 for re-use. This
		 * interrupt is one method of returning resources.
		 */

		/* If the user has flow control on, then we will
		 * send a pause packet, otherwise just exit
		 */
4011 if (adapter->flowcontrol == FLOW_TXONLY ||
4012 adapter->flowcontrol == FLOW_BOTH) {
4013 u32 pm_csr;

			/* Tell the device to send a pause packet via the
			 * back pressure register (bp req and bp xon/xoff)
			 */
4018 pm_csr = readl(&iomem->global.pm_csr);
4019 if (!et1310_in_phy_coma(adapter))
4020 writel(3, &iomem->txmac.bp_ctrl);
4021 }
4022 }
4023
4024
	/* Handle Packet Status Ring Low Interrupt */
	if (status & ET_INTR_RXDMA_STAT_LOW) {
		/* Same idea as with the two Free Buffer Rings. Packets going
		 * from the network to the host each consume a packet status
		 * ring entry. These entries are a limited resource too; once
		 * they are actually depleted no further packets are passed
		 * to the host, so there is nothing more to do here.
		 */
	}
4034
4035
	/* Handle RXDMA Error Interrupt */
	if (status & ET_INTR_RXDMA_ERR) {
		/* The rxdma_error interrupt is sent when a time-out on a
		 * request issued by the JAGCore has occurred, or when a
		 * completion is returned with an un-successful status. In
		 * either case the request is considered complete and the
		 * JAGCore will automatically re-try it. Normally you should
		 * never see this interrupt; if it starts occurring
		 * frequently then something bad has happened and a reset
		 * might be the thing to do.
		 */
4053 dev_warn(&adapter->pdev->dev,
4054 "RxDMA_ERR interrupt, error %x\n",
4055 readl(&iomem->txmac.tx_test));
4056 }
4057
4058
	/* Handle the Wake on LAN Event */
	if (status & ET_INTR_WOL) {
		/* This is a secondary interrupt for wake on LAN. The driver
		 * should never see this, and if it does, something serious
		 * is wrong.
		 */
4065 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4066 }
4067
4068
	/* Let's move on to the TxMac */
	if (status & ET_INTR_TXMAC) {
		u32 err = readl(&iomem->txmac.err);

		/* When any of these errors occur and TXMAC generates an
		 * interrupt to report the event, it usually means that
		 * TXMAC has detected an error in the data stream retrieved
		 * from the on-chip Tx Q. All of these errors are
		 * catastrophic and TXMAC won't be able to recover data when
		 * they occur; the whole Tx path will have to be reset and
		 * re-configured afterwards.
		 */
4080 dev_warn(&adapter->pdev->dev,
4081 "TXMAC interrupt, error 0x%08x\n",
4082 err);
4083
		/* If we are debugging, we want to see this error, otherwise
		 * we just want the device to be reset and continue
		 */
4087 }
4088
4089
	/* Handle RXMAC Interrupt */
	if (status & ET_INTR_RXMAC) {
		/* These interrupts are catastrophic to the device; what we
		 * need to do is disable the interrupts and set the flag to
		 * cause us to reset so we can solve this issue.
		 */
4097 dev_warn(&adapter->pdev->dev,
4098 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
4099 readl(&iomem->rxmac.err_reg));
4100
4101 dev_warn(&adapter->pdev->dev,
4102 "Enable 0x%08x, Diag 0x%08x\n",
4103 readl(&iomem->rxmac.ctrl),
4104 readl(&iomem->rxmac.rxq_diag));
4105
		/* If we are debugging, we want to see this error, otherwise
		 * we just want the device to be reset and continue
		 */
4109 }
4110
4111
	/* Handle MAC_STAT Interrupt */
	if (status & ET_INTR_MAC_STAT) {
		/* This means at least one of the un-masked counters in the
		 * MAC_STAT block has rolled over. Use this to maintain the
		 * top, software managed bits of the counter(s).
		 */
4117 et1310_handle_macstat_interrupt(adapter);
4118 }
4119
4120
	/* Handle SLV Timeout Interrupt */
	if (status & ET_INTR_SLV_TIMEOUT) {
		/* This means a timeout has occurred on a read or write
		 * request to one of the JAGCore registers. The Global
		 * Resources block has terminated the request and, on a read
		 * request, returned a "fake" value. The most likely reason
		 * is a bad address, or the addressed module being in a
		 * power-down state and unable to respond.
		 */
	}
4129out:
4130 et131x_enable_interrupts(adapter);
4131}
4132
4133
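/* et131x_stats - Return the current device statistics */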
4134static struct net_device_stats *et131x_stats(struct net_device *netdev)
4135{
4136 struct et131x_adapter *adapter = netdev_priv(netdev);
4137 struct net_device_stats *stats = &adapter->net_stats;
4138 struct ce_stats *devstat = &adapter->stats;
4139
4140 stats->rx_errors = devstat->rx_length_errs +
4141 devstat->rx_align_errs +
4142 devstat->rx_crc_errs +
4143 devstat->rx_code_violations +
4144 devstat->rx_other_errs;
4145 stats->tx_errors = devstat->tx_max_pkt_errs;
4146 stats->multicast = devstat->multicast_pkts_rcvd;
4147 stats->collisions = devstat->tx_collisions;
4148
4149 stats->rx_length_errors = devstat->rx_length_errs;
4150 stats->rx_over_errors = devstat->rx_overflows;
4151 stats->rx_crc_errors = devstat->rx_crc_errs;
4152
	/* NOTE: The remaining net_device_stats fields (rx_frame_errors,
	 * rx_fifo_errors, the detailed tx error breakdown, etc.) have no
	 * corresponding values in ce_stats, so they would have to be
	 * updated directly from within the Tx/Rx code.
	 */
4172 return stats;
4173}
4174
4175
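/* et131x_open - Open the device for use */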
4176static int et131x_open(struct net_device *netdev)
4177{
4178 struct et131x_adapter *adapter = netdev_priv(netdev);
4179 struct pci_dev *pdev = adapter->pdev;
4180 unsigned int irq = pdev->irq;
4181 int result;
4182
	/* Start the timer to track NIC errors */
4184 init_timer(&adapter->error_timer);
4185 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4186 adapter->error_timer.function = et131x_error_timer_handler;
4187 adapter->error_timer.data = (unsigned long)adapter;
4188 add_timer(&adapter->error_timer);
4189
4190 result = request_irq(irq, et131x_isr,
4191 IRQF_SHARED, netdev->name, netdev);
4192 if (result) {
4193 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
4194 return result;
4195 }
4196
4197 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
4198
4199 et131x_up(netdev);
4200
4201 return result;
4202}
4203
4204
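/* et131x_close - Close the device */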
4205static int et131x_close(struct net_device *netdev)
4206{
4207 struct et131x_adapter *adapter = netdev_priv(netdev);
4208
4209 et131x_down(netdev);
4210
4211 adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
4212 free_irq(adapter->pdev->irq, netdev);
4213
	/* Stop the error timer */
4215 return del_timer_sync(&adapter->error_timer);
4216}
4217
4218
4219
4220
4221
4222
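/* et131x_ioctl - The I/O Control handler for the driver
 * @netdev: device on which the control request is being made
 * @reqbuf: a pointer to the IOCTL request buffer
 * @cmd: the IOCTL command code
 */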
4223static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4224 int cmd)
4225{
4226 struct et131x_adapter *adapter = netdev_priv(netdev);
4227
4228 if (!adapter->phydev)
4229 return -EINVAL;
4230
4231 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4232}
4233
4234
4235
4236
4237
4238
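/* et131x_set_packet_filter - Configures the Rx Packet filtering on the device */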
4239static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4240{
4241 int filter = adapter->packet_filter;
4242 u32 ctrl;
4243 u32 pf_ctrl;
4244
4245 ctrl = readl(&adapter->regs->rxmac.ctrl);
4246 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
4247
	/* Default to disabled packet filtering. Enable it in the individual
	 * case statements that require the device to filter something
	 */
	ctrl |= 0x04;
4252
	/* Set us to be in promiscuous mode so we receive everything; this
	 * is also true when we get a packet filter of 0
	 */
4256 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4257 pf_ctrl &= ~7;
4258 else {
		/* Set us up with Multicast packet filtering. Three cases
		 * are possible: (1) we have a multicast list, (2) we
		 * receive ALL multicast packets, (3) we receive no
		 * multicast packets.
		 */
4263 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4264 pf_ctrl &= ~2;
4265 else {
4266 et1310_setup_device_for_multicast(adapter);
4267 pf_ctrl |= 2;
4268 ctrl &= ~0x04;
4269 }
4270
		/* Set us up with Unicast packet filtering */
4272 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4273 et1310_setup_device_for_unicast(adapter);
4274 pf_ctrl |= 4;
4275 ctrl &= ~0x04;
4276 }
4277
		/* Set us up with Broadcast packet filtering */
4279 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4280 pf_ctrl |= 1;
4281 ctrl &= ~0x04;
4282 } else
4283 pf_ctrl &= ~1;
4284
		/* Set up the receive MAC configuration registers: packet
		 * filter control plus the enable / disable for packet
		 * filtering in the control reg.
		 */
4289 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4290 writel(ctrl, &adapter->regs->rxmac.ctrl);
4291 }
4292 return 0;
4293}
4294
4295
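/* et131x_multicast - The handler to configure multicasting on the interface */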
4296static void et131x_multicast(struct net_device *netdev)
4297{
4298 struct et131x_adapter *adapter = netdev_priv(netdev);
4299 int packet_filter;
4300 struct netdev_hw_addr *ha;
4301 int i;
4302
	/* Before we modify the platform-independent filter flags, store
	 * them locally. This allows us to determine if anything's changed
	 * and if we even need to bother the hardware
	 */
	packet_filter = adapter->packet_filter;

	/* Clear the 'multicast' flag locally; because we only have a
	 * single flag to check multicast, and multiple multicast addresses
	 * can be set, this is the easiest way to determine if more than
	 * one multicast address is being set.
	 */
	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4315
4316
	/* Check the net_device flags and set the device independent flags
	 * accordingly
	 */
4320 if (netdev->flags & IFF_PROMISC)
4321 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4322 else
4323 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4324
4325 if (netdev->flags & IFF_ALLMULTI)
4326 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4327
4328 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
4329 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4330
4331 if (netdev_mc_count(netdev) < 1) {
4332 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4333 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4334 } else
4335 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4336
4337
	/* Set values in the private adapter struct */
	i = 0;
4339 netdev_for_each_mc_addr(ha, netdev) {
4340 if (i == NIC_MAX_MCAST_LIST)
4341 break;
4342 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4343 }
4344 adapter->multicast_addr_count = i;
4345
	/* Are the new flags different from the previous ones? If not, then
	 * no action is required
	 *
	 * NOTE: This block will always update the multicast_list with the
	 * hardware, even if the addresses aren't the same.
	 */
4352 if (packet_filter != adapter->packet_filter)
4353 et131x_set_packet_filter(adapter);
4354}
4355
4356
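/* et131x_tx - The handler to transmit a packet on the device */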
4357static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
4358{
4359 int status = 0;
4360 struct et131x_adapter *adapter = netdev_priv(netdev);
4361 struct tx_ring *tx_ring = &adapter->tx_ring;
4362
	/* Stop the queue if it's getting full */
	if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
		netif_stop_queue(netdev);

	/* Save the timestamp for the TX timeout watchdog */
	netdev->trans_start = jiffies;

	/* Call the device-specific data Tx routine */
	status = et131x_send_packets(skb, netdev);

	/* Check status and manage the netif queue if necessary */
4374 if (status != 0) {
4375 if (status == -ENOMEM)
4376 status = NETDEV_TX_BUSY;
4377 else
4378 status = NETDEV_TX_OK;
4379 }
4380 return status;
4381}
4382
4383
4384
4385
4386
4387
4388
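/* et131x_tx_timeout - Timeout handler
 *
 * The handler called when a Tx request times out. The timeout period is
 * specified by the 'watchdog_timeo' element of the net_device structure
 * (set in et131x_pci_setup()).
 */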
4389static void et131x_tx_timeout(struct net_device *netdev)
4390{
4391 struct et131x_adapter *adapter = netdev_priv(netdev);
4392 struct tx_ring *tx_ring = &adapter->tx_ring;
4393 struct tcb *tcb;
4394 unsigned long flags;
4395
	/* If the device is closed, ignore the timeout */
	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
		return;
4399
	/* Any nonrecoverable hardware error?
	 * Checks adapter->flags for any failure in phy reading
	 */
4403 if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
4404 return;
4405
	/* Hardware failure? */
4407 if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
4408 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
4409 return;
4410 }
4411
	/* Is send stuck? */
4413 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
4414
4415 tcb = tx_ring->send_head;
4416
4417 if (tcb != NULL) {
4418 tcb->count++;
4419
4420 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
4421 spin_unlock_irqrestore(&adapter->tcb_send_qlock,
4422 flags);
4423
4424 dev_warn(&adapter->pdev->dev,
4425 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
4426 tcb->index,
4427 tcb->flags);
4428
4429 adapter->net_stats.tx_errors++;
4430
4431
4432 et131x_disable_txrx(netdev);
4433 et131x_enable_txrx(netdev);
4434 return;
4435 }
4436 }
4437
4438 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
4439}
4440
4441
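/* et131x_change_mtu - The handler called to change the MTU for the device */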
4442static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
4443{
4444 int result = 0;
4445 struct et131x_adapter *adapter = netdev_priv(netdev);
4446
	/* Make sure the requested MTU is valid */
4448 if (new_mtu < 64 || new_mtu > 9216)
4449 return -EINVAL;
4450
4451 et131x_disable_txrx(netdev);
4452 et131x_handle_send_interrupt(adapter);
4453 et131x_handle_recv_interrupt(adapter);
4454
	/* Set the new MTU */
	netdev->mtu = new_mtu;

	/* Free Rx DMA memory */
	et131x_adapter_memory_free(adapter);

	/* Set the config parameter for Jumbo Packet support */
	adapter->registry_jumbo_packet = new_mtu + 14;
	et131x_soft_reset(adapter);

	/* Alloc and init Rx DMA memory */
4466 result = et131x_adapter_memory_alloc(adapter);
4467 if (result != 0) {
4468 dev_warn(&adapter->pdev->dev,
4469 "Change MTU failed; couldn't re-alloc DMA memory\n");
4470 return result;
4471 }
4472
4473 et131x_init_send(adapter);
4474
4475 et131x_hwaddr_init(adapter);
4476 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
4477
4478
4479 et131x_adapter_setup(adapter);
4480
4481 et131x_enable_txrx(netdev);
4482
4483 return result;
4484}
4485
4486
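/* et131x_set_mac_addr - handler to change the MAC address for the device */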
4487static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
4488{
4489 int result = 0;
4490 struct et131x_adapter *adapter = netdev_priv(netdev);
4491 struct sockaddr *address = new_mac;
4492
4493 if (adapter == NULL)
4494 return -ENODEV;
4495
	/* Make sure the requested MAC is valid */
4497 if (!is_valid_ether_addr(address->sa_data))
4498 return -EADDRNOTAVAIL;
4499
4500 et131x_disable_txrx(netdev);
4501 et131x_handle_send_interrupt(adapter);
4502 et131x_handle_recv_interrupt(adapter);
4503
	/* Save the new MAC address on the net_device struct */
4507 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
4508
4509 netdev_info(netdev, "Setting MAC address to %pM\n",
4510 netdev->dev_addr);
4511
4512
4513 et131x_adapter_memory_free(adapter);
4514
4515 et131x_soft_reset(adapter);
4516
4517
4518 result = et131x_adapter_memory_alloc(adapter);
4519 if (result != 0) {
4520 dev_err(&adapter->pdev->dev,
4521 "Change MAC failed; couldn't re-alloc DMA memory\n");
4522 return result;
4523 }
4524
4525 et131x_init_send(adapter);
4526
4527 et131x_hwaddr_init(adapter);
4528
4529
4530 et131x_adapter_setup(adapter);
4531
4532 et131x_enable_txrx(netdev);
4533
4534 return result;
4535}
4536
4537static const struct net_device_ops et131x_netdev_ops = {
4538 .ndo_open = et131x_open,
4539 .ndo_stop = et131x_close,
4540 .ndo_start_xmit = et131x_tx,
4541 .ndo_set_rx_mode = et131x_multicast,
4542 .ndo_tx_timeout = et131x_tx_timeout,
4543 .ndo_change_mtu = et131x_change_mtu,
4544 .ndo_set_mac_address = et131x_set_mac_addr,
4545 .ndo_validate_addr = eth_validate_addr,
4546 .ndo_get_stats = et131x_stats,
4547 .ndo_do_ioctl = et131x_ioctl,
4548};
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
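/* et131x_pci_setup - Perform device initialization
 * @pdev: a pointer to the device's pci_dev structure
 * @ent: this device's entry in the pci_device_id table
 *
 * Registered in the pci_driver structure, called when the PCI subsystem
 * finds a new PCI device which matches the information contained in the
 * pci_device_id table.
 */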
4559static int et131x_pci_setup(struct pci_dev *pdev,
4560 const struct pci_device_id *ent)
4561{
4562 struct net_device *netdev;
4563 struct et131x_adapter *adapter;
4564 int rc;
4565 int ii;
4566
4567 rc = pci_enable_device(pdev);
4568 if (rc < 0) {
4569 dev_err(&pdev->dev, "pci_enable_device() failed\n");
4570 goto out;
4571 }
4572
	/* Perform some basic PCI checks */
4574 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4575 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
4576 rc = -ENODEV;
4577 goto err_disable;
4578 }
4579
4580 rc = pci_request_regions(pdev, DRIVER_NAME);
4581 if (rc < 0) {
4582 dev_err(&pdev->dev, "Can't get PCI resources\n");
4583 goto err_disable;
4584 }
4585
4586 pci_set_master(pdev);
4587
	/* Check the DMA addressing support of this device */
4589 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
4590 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
4591 dev_err(&pdev->dev, "No usable DMA addressing method\n");
4592 rc = -EIO;
4593 goto err_release_res;
4594 }
4595
	/* Allocate netdev and private adapter structs */
4597 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
4598 if (!netdev) {
4599 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
4600 rc = -ENOMEM;
4601 goto err_release_res;
4602 }
4603
4604 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
4605 netdev->netdev_ops = &et131x_netdev_ops;
4606
4607 SET_NETDEV_DEV(netdev, &pdev->dev);
4608 netdev->ethtool_ops = &et131x_ethtool_ops;
4609
4610 adapter = et131x_adapter_init(netdev, pdev);
4611
4612 rc = et131x_pci_init(adapter, pdev);
4613 if (rc < 0)
4614 goto err_free_dev;
4615
	/* Map the bus-relative registers to system virtual memory */
4617 adapter->regs = pci_ioremap_bar(pdev, 0);
4618 if (!adapter->regs) {
4619 dev_err(&pdev->dev, "Cannot map device registers\n");
4620 rc = -ENOMEM;
4621 goto err_free_dev;
4622 }
4623
	/* If Phy COMA mode was enabled when we went down, disable it here */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	/* Issue a global reset to the et1310 */
	et131x_soft_reset(adapter);

	/* Disable all interrupts (paranoid) */
	et131x_disable_interrupts(adapter);

	/* Allocate DMA memory */
4634 rc = et131x_adapter_memory_alloc(adapter);
4635 if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
4637 goto err_iounmap;
4638 }
4639
	/* Init send data structures */
	et131x_init_send(adapter);

	/* Set up the task structure for the ISR's deferred handler */
	INIT_WORK(&adapter->task, et131x_isr_handler);

	/* Copy address into the net_device struct */
	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);

	/* Init variable for counting how long we do not have link status */
4650 adapter->boot_coma = 0;
4651 et1310_disable_phy_coma(adapter);
4652
4653 rc = -ENOMEM;

	/* Setup the mii_bus struct */
4656 adapter->mii_bus = mdiobus_alloc();
4657 if (!adapter->mii_bus) {
4658 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
4659 goto err_mem_free;
4660 }
4661
4662 adapter->mii_bus->name = "et131x_eth_mii";
4663 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
4664 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
4665 adapter->mii_bus->priv = netdev;
4666 adapter->mii_bus->read = et131x_mdio_read;
4667 adapter->mii_bus->write = et131x_mdio_write;
4668 adapter->mii_bus->reset = et131x_mdio_reset;
4669 adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
4670 GFP_KERNEL);
4671 if (!adapter->mii_bus->irq)
4672 goto err_mdio_free;
4673
4674 for (ii = 0; ii < PHY_MAX_ADDR; ii++)
4675 adapter->mii_bus->irq[ii] = PHY_POLL;
4676
4677 rc = mdiobus_register(adapter->mii_bus);
4678 if (rc < 0) {
4679 dev_err(&pdev->dev, "failed to register MII bus\n");
4680 goto err_mdio_free_irq;
4681 }
4682
4683 rc = et131x_mii_probe(netdev);
4684 if (rc < 0) {
4685 dev_err(&pdev->dev, "failed to probe MII bus\n");
4686 goto err_mdio_unregister;
4687 }
4688
	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);

	/* Because registration of the interrupt handler is done in the
	 * device's open(), defer enabling device interrupts to that point
	 */

	/* Register the net_device struct with the Linux network layer */
4700 rc = register_netdev(netdev);
4701 if (rc < 0) {
4702 dev_err(&pdev->dev, "register_netdev() failed\n");
4703 goto err_phy_disconnect;
4704 }
4705
	/* Stash our net_device pointer in the pci_dev's driver data so the
	 * remove / suspend / resume paths can find it again.
	 */
4710 pci_set_drvdata(pdev, netdev);
4711out:
4712 return rc;
4713
4714err_phy_disconnect:
4715 phy_disconnect(adapter->phydev);
4716err_mdio_unregister:
4717 mdiobus_unregister(adapter->mii_bus);
4718err_mdio_free_irq:
4719 kfree(adapter->mii_bus->irq);
4720err_mdio_free:
4721 mdiobus_free(adapter->mii_bus);
4722err_mem_free:
4723 et131x_adapter_memory_free(adapter);
4724err_iounmap:
4725 iounmap(adapter->regs);
4726err_free_dev:
4727 pci_dev_put(pdev);
4728 free_netdev(netdev);
4729err_release_res:
4730 pci_release_regions(pdev);
4731err_disable:
4732 pci_disable_device(pdev);
4733 goto out;
4734}
4735
4736static const struct pci_device_id et131x_pci_table[] = {
4737 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
4738 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
4739 {0,}
4740};
4741MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4742
4743static struct pci_driver et131x_driver = {
4744 .name = DRIVER_NAME,
4745 .id_table = et131x_pci_table,
4746 .probe = et131x_pci_setup,
4747 .remove = et131x_pci_remove,
4748 .driver.pm = ET131X_PM_OPS,
4749};
4750
4751module_pci_driver(et131x_driver);
4752