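/* et131x.c - Driver for the Agere Systems ET1310/ET131x series
 * 10/100/1000 Base-T Ethernet controllers.
 */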
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

#define MAX_NUM_REGISTER_POLLS 1000
#define MAX_NUM_WRITE_RETRIES 2

#define COUNTER_WRAP_16_BIT 0x10000
#define COUNTER_WRAP_12_BIT 0x1000

#define INTERNAL_MEM_SIZE 0x400
#define INTERNAL_MEM_RX_OFFSET 0x1FF

#define INT_MASK_DISABLE 0xffffffff

#define INT_MASK_ENABLE 0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7

#define NIC_MIN_PACKET_SIZE 60

#define NIC_MAX_MCAST_LIST 128

#define ET131X_PACKET_TYPE_DIRECTED 0x0001
#define ET131X_PACKET_TYPE_MULTICAST 0x0002
#define ET131X_PACKET_TYPE_BROADCAST 0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010

#define ET131X_TX_TIMEOUT (1 * HZ)
#define NIC_SEND_HANG_THRESHOLD 0

#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008

#define FMP_ADAPTER_LOWER_POWER 0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000

#define ET1310_PCI_MAC_ADDRESS 0xA4
#define ET1310_PCI_EEPROM_STATUS 0xB2
#define ET1310_PCI_ACK_NACK 0xC0
#define ET1310_PCI_REPLAY 0xC2
#define ET1310_PCI_L0L1LATENCY 0xCF

#define ET131X_PCI_DEVICE_ID_GIG 0xED00
#define ET131X_PCI_DEVICE_ID_FAST 0xED01

#define NANO_IN_A_MICRO 1000

#define PARM_RX_NUM_BUFS_DEF 4
#define PARM_RX_TIME_INT_DEF 10
#define PARM_RX_MEM_END_DEF 0x2bc
#define PARM_TX_TIME_INT_DEF 40
#define PARM_TX_NUM_BUFS_DEF 4
#define PARM_DMA_CACHE_DEF 0

#define FBR_CHUNKS 32
#define MAX_DESC_PER_RING_RX 1024

#define RFD_LOW_WATER_MARK 40
#define NIC_DEFAULT_NUM_RFD 1024
#define NUM_FBRS 2

#define MAX_PACKETS_HANDLED 256

#define ALCATEL_MULTICAST_PKT 0x01000000
#define ALCATEL_BROADCAST_PKT 0x02000000

struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;
};

struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};

struct rx_status_block {
	u32 word0;
	u32 word1;
};

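/* fbr_lookup - per-free-buffer-ring bookkeeping: the virtual and split
 * 32-bit bus address of every buffer, the coherent descriptor ring
 * itself, and the FBR_CHUNKS-sized coherent allocations behind the
 * buffers.
 */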
struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];
	u32 bus_high[MAX_DESC_PER_RING_RX];
	u32 bus_low[MAX_DESC_PER_RING_RX];
	void *ring_virtaddr;
	dma_addr_t ring_physaddr;
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32 local_full;
	u32 num_entries;
	dma_addr_t buffsize;
};

struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};

#define TXDESC_FLAG_LASTPKT 0x0001
#define TXDESC_FLAG_FIRSTPKT 0x0002
#define TXDESC_FLAG_INTPROC 0x0004

struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;
	u32 flags;
};

struct tcb {
	struct tcb *next;
	u32 count;
	u32 stale;
	struct sk_buff *skb;
	u32 index;
	u32 index_start;
};

struct tx_ring {
	struct tcb *tcb_ring;

	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	u32 send_idx;

	u32 *tx_status;
	dma_addr_t tx_status_pa;

	int since_irq;
};

#define NUM_DESC_PER_RING_TX 512
#define NUM_TCB 64

#define TX_ERROR_PERIOD 1000

#define LO_MARK_PERCENT_FOR_PSR 15
#define LO_MARK_PERCENT_FOR_RX 15

struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;
	u16 bufferindex;
	u8 ringindex;
};

#define FLOW_BOTH 0
#define FLOW_TXONLY 1
#define FLOW_RXONLY 2
#define FLOW_NONE 3

struct ce_stats {
	u32 multicast_pkts_rcvd;
	u32 rcvd_pkts_dropped;

	u32 tx_underflows;
	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	u32 rx_overflows;
	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 interrupt_status;
};

struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct napi_struct napi;

	u32 flags;

	int link;

	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t rcv_lock;

	u32 packet_filter;

	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	struct address_map __iomem *regs;

	u8 wanted_flow;
	u32 registry_jumbo_packet;

	u8 flow;

	struct timer_list error_timer;

	u8 boot_coma;

	struct tx_ring tx_ring;

	struct rx_ring rx_ring;

	struct ce_stats stats;
};

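/* eeprom_wait_ready - poll the LBCIF status word in PCI config space
 * until the EEPROM interface reports ready (both handshake bits in
 * bits 12-13 set). Returns the low status byte on success, -EIO or
 * -ETIMEDOUT on failure.
 */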
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

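/* eeprom_write - write one byte to the EEPROM through the LBCIF window
 * in PCI config space, retrying on ACK errors, then poll the data
 * register until the device stops returning a busy/error pattern.
 */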
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE |
				  LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;

		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;

		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
		    adapter->pdev->revision == 0)
			break;

		if (status & LBCIF_STATUS_ACK_ERROR) {
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	udelay(10);

	while (1) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

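/* eeprom_read - read one byte from the EEPROM through the LBCIF
 * registers; the byte comes back as the return value of
 * eeprom_wait_ready().
 */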
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;

	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;

	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;

	*pdata = err;

	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

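/* et131x_init_eeprom - sanity-check the EEPROM and cache the two
 * config bytes at 0x70/0x71 that select LED behaviour. The status
 * register is read twice on purpose; the first read appears to clear a
 * stale value before the checked read (a quirk kept from the original
 * driver). Revision 0x01 parts get a one-time rewrite of the first
 * three EEPROM bytes before a bad status is treated as fatal.
 */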
static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	if (eestatus & 0x4C) {
		int write_failed = 0;

		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n",
				eestatus);

			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

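/* et131x_rx_dma_enable - start the RX DMA engine, encoding each free
 * buffer ring's buffer size into the CSR, then verify the engine has
 * left the halt state.
 */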
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (rx_ring->fbr[1]->buffsize == 4096)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
	else if (rx_ring->fbr[1]->buffsize == 8192)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
	else if (rx_ring->fbr[1]->buffsize == 16384)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;

	csr |= ET_RXDMA_CSR_FBR0_ENABLE;
	if (rx_ring->fbr[0]->buffsize == 256)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
	else if (rx_ring->fbr[0]->buffsize == 512)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
	else if (rx_ring->fbr[0]->buffsize == 1024)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if (csr & ET_RXDMA_CSR_HALT_STATUS) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (csr & ET_RXDMA_CSR_HALT_STATUS) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;

	writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
	       &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}

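/* et1310_config_mac_regs1 - first-stage MAC setup: soft-reset the MAC
 * blocks, program inter-packet gap and half-duplex parameters, load the
 * station address and maximum frame length, then clear cfg1 so the MAC
 * stays disabled until et1310_config_mac_regs2() enables it.
 */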
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);

	ipg = 0x38005860;
	ipg |= 0x50 << 8;
	writel(ipg, &macregs->ipg);

	writel(0x00A1F037, &macregs->hfdp);

	writel(0, &macregs->if_ctrl);

	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		   adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	writel(0, &macregs->cfg1);
}

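/* et1310_config_mac_regs2 - second-stage MAC setup once the PHY has
 * negotiated: select the interface mode for the link speed, apply
 * duplex and flow-control settings, then wait for the MAC's syncd bits
 * before enabling the TX MAC.
 */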
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->netdev->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
	if (phydev->speed == SPEED_1000) {
		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
	} else {
		cfg2 |= ET_MAC_CFG2_IFMODE_100;
		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
	}

	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
		ET_MAC_CFG1_TX_FLOW;

	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
	if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
		cfg1 |= ET_MAC_CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

	if (phydev->duplex == DUPLEX_FULL)
		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
	if (phydev->duplex == DUPLEX_HALF)
		ifctrl |= ET_MAC_IFCTRL_GHDMODE;

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
	writel(ctl, &adapter->regs->txmac.ctl);

	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

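/* et1310_setup_device_for_multicast - load the four 32-bit multicast
 * hash registers. Each list entry is hashed with ether_crc(); bits
 * 23-29 of the CRC (7 bits, 0-127) pick one bit across the four hash
 * words.
 */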
static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}

static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
		  adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
		  adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		  adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->netdev->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;
	u32 __iomem *wolw;

	writel(0x8, &rxmac->ctrl);

	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
		writel(0, wolw);

	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	writel(0, &rxmac->pf_ctrl);

	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	writel(0, &rxmac->mcif_water_mark);
	writel(0, &rxmac->mif_ctrl);
	writel(0, &rxmac->space_avail);

	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}

static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	if (adapter->flow == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
	u32 __iomem *reg;

	for (reg = &macstat->txrx_0_64_byte_frames;
	     reg <= &macstat->carry_reg2; reg++)
		writel(0, reg);

	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

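/* et131x_phy_mii_read - raw MII read through the MAC's MII management
 * registers; saves and restores mii_mgmt_addr/cmd so a concurrent user
 * of the management interface is not disturbed.
 */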
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	writel(0, &mac->mii_mgmt_cmd);

	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
		goto out;
	}

	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

out:
	writel(0, &mac->mii_mgmt_cmd);

	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->netdev->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value);
}

static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
			    u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	writel(0, &mac->mii_mgmt_cmd);

	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);

	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}

	writel(0, &mac->mii_mgmt_cmd);

	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
				    u16 regnum,
				    u16 bitnum,
				    u8 *value)
{
	u16 reg;
	u16 mask = 1 << bitnum;

	et131x_mii_read(adapter, regnum, &reg);

	*value = (reg & mask) >> bitnum;
}

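/* et1310_config_flow_control - resolve the flow-control mode from the
 * link partner's PAUSE and asymmetric-PAUSE advertisement bits
 * (register 5, bits 10 and 11) against what we asked for in
 * wanted_flow. Half duplex always means no flow control.
 */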
static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->netdev->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flow = FLOW_NONE;
	} else {
		u8 remote_pause, remote_async_pause;

		et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
		et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);

		if (remote_pause && remote_async_pause) {
			adapter->flow = adapter->wanted_flow;
		} else if (remote_pause && !remote_async_pause) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flow = FLOW_BOTH;
			else
				adapter->flow = FLOW_NONE;
		} else if (!remote_pause && !remote_async_pause) {
			adapter->flow = FLOW_NONE;
		} else {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flow = FLOW_RXONLY;
			else
				adapter->flow = FLOW_NONE;
		}
	}
}

static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;

	stats->tx_collisions += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
	stats->tx_deferred += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
		readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}

static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;

	return value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, phy_addr, reg, value);
}

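/* et1310_phy_power_switch - set or clear BMCR_PDOWN in the PHY's basic
 * control register; used to power the PHY down on the way into
 * low-power states and back up on resume.
 */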
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
	u16 data;
	struct phy_device *phydev = adapter->netdev->phydev;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data);
}

static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 lcr2;
	struct phy_device *phydev = adapter->netdev->phydev;

	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2);
	}
}

static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	writel(0, &regs->loopback);

	writel(0, &regs->msi_config);

	writel(0, &regs->watchdog_timer);
}

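/* et131x_config_rx_dma_regs - program the RX DMA engine: the write-back
 * location of the status block, the packet status ring geometry, and
 * both free buffer rings, then set the interrupt coalescing thresholds
 * (packets done and maximum packet time).
 */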
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;
	u8 id;

	et131x_rx_dma_disable(adapter);

	writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
	writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
	writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
	writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	rx_local->local_psr_full = 0;

	for (id = 0; id < NUM_FBRS; id++) {
		u32 __iomem *num_des;
		u32 __iomem *full_offset;
		u32 __iomem *min_des;
		u32 __iomem *base_hi;
		u32 __iomem *base_lo;
		struct fbr_lookup *fbr = rx_local->fbr[id];

		if (id == 0) {
			num_des = &rx_dma->fbr0_num_des;
			full_offset = &rx_dma->fbr0_full_offset;
			min_des = &rx_dma->fbr0_min_des;
			base_hi = &rx_dma->fbr0_base_hi;
			base_lo = &rx_dma->fbr0_base_lo;
		} else {
			num_des = &rx_dma->fbr1_num_des;
			full_offset = &rx_dma->fbr1_full_offset;
			min_des = &rx_dma->fbr1_min_des;
			base_hi = &rx_dma->fbr1_base_hi;
			base_lo = &rx_dma->fbr1_base_lo;
		}

		fbr_entry = fbr->ring_virtaddr;
		for (entry = 0; entry < fbr->num_entries; entry++) {
			fbr_entry->addr_hi = fbr->bus_high[entry];
			fbr_entry->addr_lo = fbr->bus_low[entry];
			fbr_entry->word2 = entry;
			fbr_entry++;
		}

		writel(upper_32_bits(fbr->ring_physaddr), base_hi);
		writel(lower_32_bits(fbr->ring_physaddr), base_lo);
		writel(fbr->num_entries - 1, num_des);
		writel(ET_DMA10_WRAP, full_offset);

		fbr->local_full = ET_DMA10_WRAP;
		writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
		       min_des);
	}

	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}

static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
	writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);

	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
	writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);

	*tx_ring->tx_status = 0;

	writel(0, &txdma->service_request);
	tx_ring->send_idx = 0;
}

static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	et131x_configure_global_regs(adapter);
	et1310_config_mac_regs1(adapter);

	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	et1310_phy_power_switch(adapter, 0);
	et131x_xcvr_init(adapter);
}

static void et131x_soft_reset(struct et131x_adapter *adapter)
{
	u32 reg;

	reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	      ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);

	reg = ET_RESET_ALL;
	writel(reg, &adapter->regs->global.sw_reset);

	reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);
	writel(0, &adapter->regs->mac.cfg1);
}

static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}

static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}

static void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	netif_start_queue(netdev);
}

static void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);

	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	et131x_disable_interrupts(adapter);
}

static void et131x_init_send(struct et131x_adapter *adapter)
{
	int i;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb = tx_ring->tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	for (i = 0; i < NUM_TCB; i++) {
		tcb->next = tcb + 1;
		tcb++;
	}

	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;

	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}

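/* et1310_enable_phy_coma - drop the device into the PHY's coma
 * (low-power) state; traffic is stopped first and is restarted by
 * et1310_disable_phy_coma() via a full soft reset and re-setup.
 */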
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	adapter->flags |= FMP_ADAPTER_LOWER_POWER;

	et131x_disable_txrx(adapter->netdev);

	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}

static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	et131x_init_send(adapter);

	et131x_soft_reset(adapter);

	et131x_adapter_setup(adapter);

	adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}

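/* bump_free_buff_ring - advance a 10-bit free-buffer-ring index that
 * carries a wrap flag above the index bits (the hardware compares the
 * wrap bit to tell a full ring from an empty one). For example, with a
 * 512-entry ring (limit 511), index 511 + 1 overflows to index 0 with
 * the wrap bit toggled.
 */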
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
	u32 tmp_free_buff_ring = *free_buff_ring;

	tmp_free_buff_ring++;

	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
		tmp_free_buff_ring &= ~ET_DMA10_MASK;
		tmp_free_buff_ring ^= ET_DMA10_WRAP;
	}

	tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
	*free_buff_ring = tmp_free_buff_ring;
	return tmp_free_buff_ring;
}

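/* et131x_rx_dma_memory_alloc - allocate everything RX needs: the two
 * free-buffer-ring descriptor arrays, the packet buffers themselves in
 * FBR_CHUNKS-sized coherent blocks, the packet status ring and the RX
 * status block. Ring sizes scale with the configured jumbo packet
 * size. On failure the caller cleans up via
 * et131x_rx_dma_memory_free().
 */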
static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u8 id;
	u32 i, j;
	u32 bufsize;
	u32 psr_size;
	u32 fbr_chunksize;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[0] == NULL)
		return -ENOMEM;
	rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[1] == NULL)
		return -ENOMEM;

	if (adapter->registry_jumbo_packet < 2048) {
		rx_ring->fbr[0]->buffsize = 256;
		rx_ring->fbr[0]->num_entries = 512;
		rx_ring->fbr[1]->buffsize = 2048;
		rx_ring->fbr[1]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
		rx_ring->fbr[0]->buffsize = 512;
		rx_ring->fbr[0]->num_entries = 1024;
		rx_ring->fbr[1]->buffsize = 4096;
		rx_ring->fbr[1]->num_entries = 512;
	} else {
		rx_ring->fbr[0]->buffsize = 1024;
		rx_ring->fbr[0]->num_entries = 768;
		rx_ring->fbr[1]->buffsize = 16384;
		rx_ring->fbr[1]->num_entries = 128;
	}

	rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
			       rx_ring->fbr[1]->num_entries;

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];

		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
		fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
							bufsize,
							&fbr->ring_physaddr,
							GFP_KERNEL);
		if (!fbr->ring_virtaddr) {
			dev_err(&adapter->pdev->dev,
				"Cannot alloc memory for Free Buffer Ring %d\n",
				id);
			return -ENOMEM;
		}
	}

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);

		for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
			dma_addr_t fbr_physaddr;

			fbr->mem_virtaddrs[i] = dma_alloc_coherent(
					&adapter->pdev->dev, fbr_chunksize,
					&fbr->mem_physaddrs[i],
					GFP_KERNEL);

			if (!fbr->mem_virtaddrs[i]) {
				dev_err(&adapter->pdev->dev,
					"Could not alloc memory\n");
				return -ENOMEM;
			}

			fbr_physaddr = fbr->mem_physaddrs[i];

			for (j = 0; j < FBR_CHUNKS; j++) {
				u32 k = (i * FBR_CHUNKS) + j;

				fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
					       (j * fbr->buffsize);

				fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
				fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
				fbr_physaddr += fbr->buffsize;
			}
		}
	}

	psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
						       psr_size,
						       &rx_ring->ps_ring_physaddr,
						       GFP_KERNEL);

	if (!rx_ring->ps_ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}

	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
						      sizeof(struct rx_status_block),
						      &rx_ring->rx_status_bus,
						      GFP_KERNEL);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;

	INIT_LIST_HEAD(&rx_ring->recv_list);
	return 0;
}

static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u8 id;
	u32 ii;
	u32 bufsize;
	u32 psr_size;
	struct rfd *rfd;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);

	while (!list_empty(&rx_ring->recv_list)) {
		rfd = list_entry(rx_ring->recv_list.next,
				 struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kfree(rfd);
	}

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];

		if (!fbr || !fbr->ring_virtaddr)
			continue;

		for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) {
			if (fbr->mem_virtaddrs[ii]) {
				bufsize = fbr->buffsize * FBR_CHUNKS;

				dma_free_coherent(&adapter->pdev->dev,
						  bufsize,
						  fbr->mem_virtaddrs[ii],
						  fbr->mem_physaddrs[ii]);

				fbr->mem_virtaddrs[ii] = NULL;
			}
		}

		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;

		dma_free_coherent(&adapter->pdev->dev,
				  bufsize,
				  fbr->ring_virtaddr,
				  fbr->ring_physaddr);

		fbr->ring_virtaddr = NULL;
	}

	if (rx_ring->ps_ring_virtaddr) {
		psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

		dma_free_coherent(&adapter->pdev->dev, psr_size,
				  rx_ring->ps_ring_virtaddr,
				  rx_ring->ps_ring_physaddr);

		rx_ring->ps_ring_virtaddr = NULL;
	}

	if (rx_ring->rx_status_block) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(struct rx_status_block),
				  rx_ring->rx_status_block,
				  rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	kfree(rx_ring->fbr[0]);
	kfree(rx_ring->fbr[1]);

	rx_ring->num_ready_recv = 0;
}

static int et131x_init_recv(struct et131x_adapter *adapter)
{
	struct rfd *rfd;
	u32 rfdct;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
		rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
		if (!rfd)
			return -ENOMEM;

		rfd->skb = NULL;

		list_add_tail(&rfd->list_node, &rx_ring->recv_list);

		rx_ring->num_ready_recv++;
	}

	return 0;
}

static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->netdev->phydev;

	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
		writel(0, &adapter->regs->rxdma.max_pkt_time);
		writel(1, &adapter->regs->rxdma.num_pkt_done);
	}
}

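/* nic_return_rfd - hand a receive buffer back to the hardware by
 * rewriting its free-buffer-ring descriptor at the local full offset,
 * then return the RFD bookkeeping entry to recv_list.
 */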
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;
	struct fbr_lookup *fbr = rx_local->fbr[ring_index];

	if (buff_index < fbr->num_entries) {
		u32 free_buff_ring;
		u32 __iomem *offset;
		struct fbr_desc *next;

		if (ring_index == 0)
			offset = &rx_dma->fbr0_full_offset;
		else
			offset = &rx_dma->fbr1_full_offset;

		next = (struct fbr_desc *)(fbr->ring_virtaddr) +
		       INDEX10(fbr->local_full);

		next->addr_hi = fbr->bus_high[buff_index];
		next->addr_lo = fbr->bus_low[buff_index];
		next->word2 = buff_index;

		free_buff_ring = bump_free_buff_ring(&fbr->local_full,
						     fbr->num_entries - 1);
		writel(free_buff_ring, offset);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}

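/* nic_rx_pkts - pull one completed packet off the packet status ring,
 * copy it out of the free buffer into a fresh skb and push it up the
 * stack; undersized frames are counted and dropped. Returns the RFD
 * used, or NULL when the ring is empty or the PSR entry looks
 * inconsistent.
 */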
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rx_status_block *status;
	struct pkt_stat_desc *psr;
	struct rfd *rfd;
	unsigned long flags;
	struct list_head *element;
	u8 ring_index;
	u16 buff_index;
	u32 len;
	u32 word0;
	u32 word1;
	struct sk_buff *skb;
	struct fbr_lookup *fbr;

	status = rx_local->rx_status_block;
	word1 = status->word1 >> 16;

	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		return NULL;

	psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
	      (rx_local->local_psr_full & 0xFFF);

	len = psr->word1 & 0xFFFF;
	ring_index = (psr->word1 >> 26) & 0x03;
	fbr = rx_local->fbr[ring_index];
	buff_index = (psr->word1 >> 16) & 0x3FF;
	word0 = psr->word0;

	add_12bit(&rx_local->local_psr_full, 1);
	if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) {
		rx_local->local_psr_full &= ~0xFFF;
		rx_local->local_psr_full ^= 0x1000;
	}

	writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);

	if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
		dev_err(&adapter->pdev->dev,
			"NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF, len, buff_index);
		return NULL;
	}

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	element = rx_local->recv_list.next;
	rfd = list_entry(element, struct rfd, list_node);

	if (!rfd) {
		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
		return NULL;
	}

	list_del(&rfd->list_node);
	rx_local->num_ready_recv--;

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	rfd->bufferindex = buff_index;
	rfd->ringindex = ring_index;

	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		adapter->stats.rx_other_errs++;
		rfd->len = 0;
		goto out;
	}

	if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT))
		adapter->stats.multicast_pkts_rcvd++;

	rfd->len = len;

	skb = dev_alloc_skb(rfd->len + 2);
	if (!skb)
		return NULL;

	adapter->netdev->stats.rx_bytes += rfd->len;

	memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);

	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_receive_skb(skb);

out:
	nic_return_rfd(adapter, rfd);
	return rfd;
}

static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
{
	struct rfd *rfd = NULL;
	int count = 0;
	int limit = budget;
	bool done = true;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (budget > MAX_PACKETS_HANDLED)
		limit = MAX_PACKETS_HANDLED;

	while (count < limit) {
		if (list_empty(&rx_ring->recv_list)) {
			WARN_ON(rx_ring->num_ready_recv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(adapter);

		if (rfd == NULL)
			break;

		if (!adapter->packet_filter ||
		    !netif_carrier_ok(adapter->netdev) ||
		    rfd->len == 0)
			continue;

		adapter->netdev->stats.rx_packets++;

		if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
			dev_warn(&adapter->pdev->dev, "RFDs are running out\n");

		count++;
	}

	if (count == limit || !done) {
		rx_ring->unfinished_receives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	} else {
		rx_ring->unfinished_receives = false;
	}

	return count;
}

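/* et131x_tx_dma_memory_alloc - allocate the TCB array plus the coherent
 * TX descriptor ring and the single-word TX status block that the
 * hardware writes back.
 */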
static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
				    GFP_ATOMIC | GFP_DMA);
	if (!tx_ring->tcb_ring)
		return -ENOMEM;

	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
	tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
						   desc_size,
						   &tx_ring->tx_desc_ring_pa,
						   GFP_KERNEL);
	if (!tx_ring->tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
						sizeof(u32),
						&tx_ring->tx_status_pa,
						GFP_KERNEL);
	if (!tx_ring->tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}

static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	if (tx_ring->tx_desc_ring) {
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
		dma_free_coherent(&adapter->pdev->dev,
				  desc_size,
				  tx_ring->tx_desc_ring,
				  tx_ring->tx_desc_ring_pa);
		tx_ring->tx_desc_ring = NULL;
	}

	if (tx_ring->tx_status) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(u32),
				  tx_ring->tx_status,
				  tx_ring->tx_status_pa);

		tx_ring->tx_status = NULL;
	}

	kfree(tx_ring->tcb_ring);
}

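/* nic_send_packet - build up to 24 TX descriptors for an skb (the
 * linear header is split into two descriptors when it exceeds 1514
 * bytes, plus one descriptor per page fragment), copy them into the
 * ring honouring the 10-bit wrap index, queue the TCB on the send list
 * and kick the TX DMA service request.
 */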
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	struct phy_device *phydev = adapter->netdev->phydev;
	dma_addr_t dma_addr;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		if (i == 0) {
			if (skb_headlen(skb) <= 1514) {
				desc[frag].len_vlan = skb_headlen(skb);
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			} else {
				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb) / 2,
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;

				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data +
							  skb_headlen(skb) / 2,
							  skb_headlen(skb) / 2,
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			}
		} else {
			desc[frag].len_vlan = frags[i - 1].size;
			dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
						    &frags[i - 1],
						    0,
						    frags[i - 1].size,
						    DMA_TO_DEVICE);
			desc[frag].addr_lo = lower_32_bits(dma_addr);
			desc[frag].addr_hi = upper_32_bits(dma_addr);
			frag++;
		}
	}

	if (phydev && phydev->speed == SPEED_1000) {
		if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
			desc[frag - 1].flags =
				TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
			tx_ring->since_irq = 0;
		} else {
			desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
		}
	} else {
		desc[frag - 1].flags =
			TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
	}

	desc[0].flags |= TXDESC_FLAG_FIRSTPKT;

	tcb->index_start = tx_ring->send_idx;
	tcb->stale = 0;

	thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
	       desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&tx_ring->send_idx, thiscopy);

	if (INDEX10(tx_ring->send_idx) == 0 ||
	    INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
		tx_ring->send_idx &= ~ET_DMA10_MASK;
		tx_ring->send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(tx_ring->tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&tx_ring->send_idx, remainder);
	}

	if (INDEX10(tx_ring->send_idx) == 0) {
		if (tx_ring->send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
	} else {
		tcb->index = tx_ring->send_idx - 1;
	}

	spin_lock(&adapter->tcb_send_qlock);

	if (tx_ring->send_tail)
		tx_ring->send_tail->next = tcb;
	else
		tx_ring->send_head = tcb;

	tx_ring->send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	tx_ring->used++;

	spin_unlock(&adapter->tcb_send_qlock);

	writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);

	if (phydev && phydev->speed == SPEED_1000) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	return 0;
}

static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb;
	unsigned long flags;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = tx_ring->tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	tx_ring->tcb_qhead = tcb->next;

	if (tx_ring->tcb_qhead == NULL)
		tx_ring->tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

	tcb->skb = skb;
	tcb->next = NULL;

	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (tx_ring->tcb_qtail)
			tx_ring->tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty */
			tx_ring->tcb_qhead = tcb;

		tx_ring->tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(tx_ring->used > NUM_TCB);
	return 0;
}
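
/* free_send_packet - Recycle a struct tcb.
 *
 * Unmaps all of the packet's descriptors from the DMA engine, frees the
 * skb and returns the TCB to the ready queue.
 */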
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u64 dma_addr;

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = tx_ring->tx_desc_ring +
			       INDEX10(tcb->index_start);

			dma_addr = desc->addr_lo;
			dma_addr |= (u64)desc->addr_hi << 32;

			dma_unmap_single(&adapter->pdev->dev,
					 dma_addr,
					 desc->len_vlan, DMA_TO_DEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the ready queue */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	stats->tx_packets++;

	if (tx_ring->tcb_qtail)
		tx_ring->tcb_qtail->next = tcb;
	else /* Apparently ready Q is empty */
		tx_ring->tcb_qhead = tcb;

	tx_ring->tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	WARN_ON(tx_ring->used < 0);
}
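
/* et131x_free_busy_send_packets - Free and complete the stopped active
 * sends.
 */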
static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		tx_ring->send_head = next;

		if (next == NULL)
			tx_ring->send_tail = NULL;

		tx_ring->used--;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = tx_ring->send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	tx_ring->used = 0;
}
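
/* et131x_handle_send_pkts - Interrupt handler for Tx processing.
 *
 * Re-claims the send resources and completes sends up to the index the
 * hardware reports as serviced, then wakes the transmit queue once usage
 * drops below a low-water mark.
 */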
static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped?  Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion index
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index > (tcb->index & ET_DMA10_MASK)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (tx_ring->used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}

static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
	return ET131X_REGS_LEN * sizeof(u32);
}
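
/* et131x_get_regs - ethtool register dump.
 *
 * Snapshots the PHY (via MII), global, TXDMA and RXDMA register blocks
 * into the buffer supplied by ethtool.
 */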
static void et131x_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *regs_data)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *aregs = adapter->regs;
	u32 *regs_buff = regs_data;
	u32 num = 0;
	u16 tmp;

	memset(regs_data, 0, et131x_get_regs_len(netdev));

	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;

	/* PHY regs */
	et131x_mii_read(adapter, MII_BMCR, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_BMSR, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_PHYSID1, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_PHYSID2, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_LPA, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_EXPANSION, &tmp);
	regs_buff[num++] = tmp;
	/* Autoneg next page transmit reg */
	et131x_mii_read(adapter, 0x07, &tmp);
	regs_buff[num++] = tmp;
	/* Link partner next page reg */
	et131x_mii_read(adapter, 0x08, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_CTRL1000, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_STAT1000, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, 0x0b, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, 0x0c, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_ESTATUS, &tmp);
	regs_buff[num++] = tmp;

	et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
	regs_buff[num++] = tmp;

	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_CONFIG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_1, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_2, &tmp);
	regs_buff[num++] = tmp;

	/* Global regs */
	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
	regs_buff[num++] = readl(&aregs->global.pm_csr);
	regs_buff[num++] = adapter->stats.interrupt_status;
	regs_buff[num++] = readl(&aregs->global.int_mask);
	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
	regs_buff[num++] = readl(&aregs->global.int_status_alias);
	regs_buff[num++] = readl(&aregs->global.sw_reset);
	regs_buff[num++] = readl(&aregs->global.slv_timer);
	regs_buff[num++] = readl(&aregs->global.msi_config);
	regs_buff[num++] = readl(&aregs->global.loopback);
	regs_buff[num++] = readl(&aregs->global.watchdog_timer);

	/* TXDMA regs */
	regs_buff[num++] = readl(&aregs->txdma.csr);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.service_request);
	regs_buff[num++] = readl(&aregs->txdma.service_complete);
	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);

	/* RXDMA regs */
	regs_buff[num++] = readl(&aregs->rxdma.csr);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
	regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
}

static void et131x_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops et131x_ethtool_ops = {
	.get_drvinfo		= et131x_get_drvinfo,
	.get_regs_len		= et131x_get_regs_len,
	.get_regs		= et131x_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
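
/* et131x_hwaddr_init - Set up the MAC address. */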
static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
	/* If we have our default MAC address from init and no MAC address
	 * from the EEPROM, generate the last octet and set it on the device
	 */
	if (is_zero_ether_addr(adapter->rom_addr)) {
		/* Randomly generate the last octet to decrease the chance
		 * of colliding with another one of our cards in the system
		 */
		get_random_bytes(&adapter->addr[5], 1);
		/* The current address is the default, so copy it into the
		 * permanent address
		 */
		ether_addr_copy(adapter->rom_addr, adapter->addr);
	} else {
		/* We do not have an override address, so set the current
		 * address to the permanent address
		 */
		ether_addr_copy(adapter->addr, adapter->rom_addr);
	}
}
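
/* et131x_pci_init - Interpret and set up PCI configuration space.
 *
 * Programs the ACK/NAK, replay and L0s/L1 latency timers, raises the
 * maximum read request size and fetches the MAC address from config space
 * when an EEPROM is present.
 */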
static int et131x_pci_init(struct et131x_adapter *adapter,
			   struct pci_dev *pdev)
{
	u16 max_payload;
	int i, rc;

	rc = et131x_init_eeprom(adapter);
	if (rc < 0)
		goto out;

	if (!pci_is_pcie(pdev)) {
		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
		goto err_out;
	}

	/* Program the Ack/Nak latency and replay timers */
	max_payload = pdev->pcie_mpss;

	if (max_payload < 2) {
		static const u16 acknak[2] = { 0x76, 0xD0 };
		static const u16 replay[2] = { 0x1E0, 0x2ED };

		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
					  acknak[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for ACK/NAK\n");
			goto err_out;
		}
		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
					  replay[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for Replay Timer\n");
			goto err_out;
		}
	}

	/* l0s and l1 latency timers.  We are using default values.
	 * Representing 001 for L0s and 010 for L1
	 */
	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
		dev_err(&pdev->dev,
			"Could not write PCI config space for Latency Timers\n");
		goto err_out;
	}

	/* Change the max read size to 2k */
	if (pcie_set_readrq(pdev, 2048)) {
		dev_err(&pdev->dev,
			"Couldn't change PCI config space for Max read size\n");
		goto err_out;
	}

	/* Get MAC address from config space if an EEPROM exists, otherwise
	 * the MAC address there will not be valid
	 */
	if (!adapter->has_eeprom) {
		et131x_hwaddr_init(adapter);
		return 0;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
					 adapter->rom_addr + i)) {
			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
			goto err_out;
		}
	}
	ether_addr_copy(adapter->addr, adapter->rom_addr);
out:
	return rc;
err_out:
	rc = -EIO;
	goto out;
}
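
/* et131x_error_timer_handler - The routine called when the error timer
 * expires, to track the number of recurring errors.
 */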
static void et131x_error_timer_handler(unsigned long data)
{
	struct et131x_adapter *adapter = (struct et131x_adapter *)data;
	struct phy_device *phydev = adapter->netdev->phydev;

	if (et1310_in_phy_coma(adapter)) {
		/* Bring the device immediately out of coma, to
		 * prevent it from sleeping indefinitely
		 */
		et1310_disable_phy_coma(adapter);
		adapter->boot_coma = 20;
	} else {
		et1310_update_macstat_host_counters(adapter);
	}

	if (!phydev->link && adapter->boot_coma < 11)
		adapter->boot_coma++;

	if (adapter->boot_coma == 10) {
		if (!phydev->link) {
			if (!et1310_in_phy_coma(adapter)) {
				/* No link for 10 ticks; put the PHY into
				 * coma mode to save power
				 */
				et131x_enable_interrupts(adapter);
				et1310_enable_phy_coma(adapter);
			}
		}
	}

	/* This is a periodic timer, so reschedule */
	mod_timer(&adapter->error_timer, jiffies +
		  msecs_to_jiffies(TX_ERROR_PERIOD));
}

static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
	et131x_tx_dma_memory_free(adapter);
	et131x_rx_dma_memory_free(adapter);
}

static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
	int status;

	status = et131x_tx_dma_memory_alloc(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"et131x_tx_dma_memory_alloc FAILED\n");
		et131x_tx_dma_memory_free(adapter);
		return status;
	}

	status = et131x_rx_dma_memory_alloc(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"et131x_rx_dma_memory_alloc FAILED\n");
		et131x_adapter_memory_free(adapter);
		return status;
	}

	status = et131x_init_recv(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
		et131x_adapter_memory_free(adapter);
	}
	return status;
}
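
/* et131x_adjust_link - PHY state-change callback.
 *
 * Invoked by the PHY layer whenever the link state changes; applies the
 * speed-dependent MAC/PHY workarounds and reinitializes the data path on
 * link loss.
 */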
static void et131x_adjust_link(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (!phydev)
		return;
	if (phydev->link == adapter->link)
		return;

	/* Check to see if we are in coma mode and if so, disable it,
	 * because we will not be able to read PHY values until we are out
	 */
	if (et1310_in_phy_coma(adapter))
		et1310_disable_phy_coma(adapter);

	adapter->link = phydev->link;
	phy_print_status(phydev);

	if (phydev->link) {
		adapter->boot_coma = 20;
		if (phydev->speed == SPEED_10) {
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_INDEX_REG, register18 | 0x8402);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_DATA_REG, register18 | 511);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et1310_config_flow_control(adapter);

		if (phydev->speed == SPEED_1000 &&
		    adapter->registry_jumbo_packet > 2048) {
			u16 reg;

			et131x_mii_read(adapter, PHY_CONFIG, &reg);
			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_CONFIG, reg);
		}

		et131x_set_rx_dma_timer(adapter);
		et1310_config_mac_regs2(adapter);
	} else {
		adapter->boot_coma = 0;

		if (phydev->speed == SPEED_10) {
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_INDEX_REG, register18 | 0x8402);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_DATA_REG, register18 | 511);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et131x_free_busy_send_packets(adapter);
		et131x_init_send(adapter);

		/* Bring the device back to the state it was during init
		 * prior to autonegotiation being complete.  This way, when
		 * we get the auto-neg complete interrupt, we can complete
		 * init by calling config_mac_regs2.
		 */
		et131x_soft_reset(adapter);

		et131x_adapter_setup(adapter);

		et131x_disable_txrx(netdev);
		et131x_enable_txrx(netdev);
	}
}
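
/* et131x_mii_probe - Find and attach to the first PHY on the MII bus. */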
static int et131x_mii_probe(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(adapter->mii_bus);
	if (!phydev) {
		dev_err(&adapter->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(netdev, phydev_name(phydev),
			     &et131x_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_Autoneg |
			      SUPPORTED_MII |
			      SUPPORTED_TP);

	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
		phydev->supported |= SUPPORTED_1000baseT_Half |
				     SUPPORTED_1000baseT_Full;

	phydev->advertising = phydev->supported;
	phydev->autoneg = AUTONEG_ENABLE;

	phy_attached_info(phydev);

	return 0;
}
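
/* et131x_adapter_init - Initialize the private adapter structure. */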
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
						  struct pci_dev *pdev)
{
	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };

	struct et131x_adapter *adapter;

	adapter = netdev_priv(netdev);
	adapter->pdev = pci_dev_get(pdev);
	adapter->netdev = netdev;

	/* Initialize spinlocks here */
	spin_lock_init(&adapter->tcb_send_qlock);
	spin_lock_init(&adapter->tcb_ready_qlock);
	spin_lock_init(&adapter->rcv_lock);

	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */

	/* Set the MAC address to a default */
	ether_addr_copy(adapter->addr, default_mac);

	return adapter;
}
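
/* et131x_pci_remove - Undo everything et131x_pci_setup() did, in reverse. */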
static void et131x_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);
	netif_napi_del(&adapter->napi);
	phy_disconnect(netdev->phydev);
	mdiobus_unregister(adapter->mii_bus);
	mdiobus_free(adapter->mii_bus);

	et131x_adapter_memory_free(adapter);
	iounmap(adapter->regs);
	pci_dev_put(pdev);

	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void et131x_up(struct net_device *netdev)
{
	et131x_enable_txrx(netdev);
	phy_start(netdev->phydev);
}

static void et131x_down(struct net_device *netdev)
{
	/* Save the timestamp for the TX watchdog, prevent a timeout */
	netif_trans_update(netdev);

	phy_stop(netdev->phydev);
	et131x_disable_txrx(netdev);
}

#ifdef CONFIG_PM_SLEEP
static int et131x_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		et131x_down(netdev);
		pci_save_state(pdev);
	}

	return 0;
}

static int et131x_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		pci_restore_state(pdev);
		et131x_up(netdev);
		netif_device_attach(netdev);
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
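
/* et131x_isr - The Interrupt Service Routine for the driver.
 *
 * Reads and acknowledges the interrupt status, defers Tx/Rx completion to
 * NAPI, and reports (or works around) the various error interrupts inline.
 */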
static irqreturn_t et131x_isr(int irq, void *dev_id)
{
	bool handled = true;
	bool enable_interrupts = true;
	struct net_device *netdev = dev_id;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *iomem = adapter->regs;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u32 status;

	if (!netif_device_present(netdev)) {
		handled = false;
		enable_interrupts = false;
		goto out;
	}

	et131x_disable_interrupts(adapter);

	status = readl(&adapter->regs->global.int_status);

	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
		status &= ~INT_MASK_ENABLE;
	else
		status &= ~INT_MASK_ENABLE_NO_FLOW;

	/* Make sure this is our interrupt */
	if (!status) {
		handled = false;
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* This is our interrupt, so process accordingly */
	if (status & ET_INTR_WATCHDOG) {
		struct tcb *tcb = tx_ring->send_head;

		if (tcb)
			if (++tcb->stale > 1)
				status |= ET_INTR_TXDMA_ISR;

		if (rx_ring->unfinished_receives)
			status |= ET_INTR_RXDMA_XFR_DONE;
		else if (tcb == NULL)
			writel(0, &adapter->regs->global.watchdog_timer);

		status &= ~ET_INTR_WATCHDOG;
	}

	if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
		enable_interrupts = false;
		napi_schedule(&adapter->napi);
	}

	status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);

	if (!status)
		goto out;

	if (status & ET_INTR_TXDMA_ERR) {
		/* Following read also clears the register (COR) */
		u32 txdma_err = readl(&iomem->txdma.tx_dma_error);

		dev_warn(&adapter->pdev->dev,
			 "TXDMA_ERR interrupt, error = %d\n",
			 txdma_err);
	}

	/* Handle Free Buffer Ring 0 and 1 Low interrupt */
	if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
		/* The number of unused buffers in a free buffer ring has
		 * dropped below the programmed limit.  Free buffers are
		 * consumed as packets are passed from the network to the
		 * host and returned in the normal receive path, so normally
		 * nothing needs doing here.
		 *
		 * If the user has flow control on, then we will
		 * send a pause packet, otherwise just exit
		 */
		if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
			u32 pm_csr;

			/* Tell the device to send a pause packet via the
			 * back pressure register (bp req and bp xon/xoff)
			 */
			pm_csr = readl(&iomem->global.pm_csr);
			if (!et1310_in_phy_coma(adapter))
				writel(3, &iomem->txmac.bp_ctrl);
		}
	}

	/* Handle Packet Status Ring Low interrupt */
	if (status & ET_INTR_RXDMA_STAT_LOW) {
		/* Same idea as with the two free buffer rings: packet
		 * status ring entries are recycled in the normal receive
		 * path, so no action is needed here.
		 */
	}

	if (status & ET_INTR_RXDMA_ERR) {
		/* A time-out or failed completion was reported by the
		 * receive DMA engine; log it.  If this occurs frequently,
		 * a reset may be required.
		 */
		dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n",
			 readl(&iomem->txmac.tx_test));
	}

	/* Handle the Wake on LAN event */
	if (status & ET_INTR_WOL) {
		/* This is a secondary interrupt for wake on LAN.  The
		 * driver should never see this, if it does, something
		 * serious is wrong.
		 */
		dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
	}

	if (status & ET_INTR_TXMAC) {
		u32 err = readl(&iomem->txmac.err);

		/* TXMAC has detected an error in the data stream retrieved
		 * from the on-chip Tx queue.  These errors are catastrophic
		 * and the whole Tx path will have to be reset and
		 * re-configured afterwards.
		 */
		dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n",
			 err);

		/* If we are debugging, we want to see this error, otherwise
		 * we just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_RXMAC) {
		/* A catastrophic receive MAC error; log the error and
		 * diagnostic registers and request a reset
		 */
		dev_warn(&adapter->pdev->dev,
			 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
			 readl(&iomem->rxmac.err_reg));

		dev_warn(&adapter->pdev->dev,
			 "Enable 0x%08x, Diag 0x%08x\n",
			 readl(&iomem->rxmac.ctrl),
			 readl(&iomem->rxmac.rxq_diag));

		/* If we are debugging, we want to see this error, otherwise
		 * we just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_MAC_STAT) {
		/* At least one of the un-masked counters in the MAC_STAT
		 * block has rolled over; fold it into the software-managed
		 * upper bits of the counter(s)
		 */
		et1310_handle_macstat_interrupt(adapter);
	}

	if (status & ET_INTR_SLV_TIMEOUT) {
		/* A timeout occurred on a slave register read or write
		 * request; nothing is done beyond acknowledging the status
		 */
	}

out:
	if (enable_interrupts)
		et131x_enable_interrupts(adapter);

	return IRQ_RETVAL(handled);
}

static int et131x_poll(struct napi_struct *napi, int budget)
{
	struct et131x_adapter *adapter =
		container_of(napi, struct et131x_adapter, napi);
	int work_done = et131x_handle_recv_pkts(adapter, budget);

	et131x_handle_send_pkts(adapter);

	if (work_done < budget) {
		napi_complete(&adapter->napi);
		et131x_enable_interrupts(adapter);
	}

	return work_done;
}
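
/* et131x_stats - Return the current device statistics. */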
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct ce_stats *devstat = &adapter->stats;

	stats->rx_errors = devstat->rx_length_errs +
			   devstat->rx_align_errs +
			   devstat->rx_crc_errs +
			   devstat->rx_code_violations +
			   devstat->rx_other_errs;
	stats->tx_errors = devstat->tx_max_pkt_errs;
	stats->multicast = devstat->multicast_pkts_rcvd;
	stats->collisions = devstat->tx_collisions;

	stats->rx_length_errors = devstat->rx_length_errs;
	stats->rx_over_errors = devstat->rx_overflows;
	stats->rx_crc_errors = devstat->rx_crc_errs;
	stats->rx_dropped = devstat->rcvd_pkts_dropped;

	/* NOTE: Not used, can't find analogous statistics:
	 * rx_frame_errors, rx_fifo_errors, rx_missed_errors,
	 * tx_aborted_errors, tx_carrier_errors, tx_fifo_errors,
	 * tx_heartbeat_errors, tx_window_errors
	 */
	return stats;
}

static int et131x_open(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	unsigned int irq = pdev->irq;
	int result;

	/* Start the timer to track NIC errors */
	init_timer(&adapter->error_timer);
	adapter->error_timer.expires = jiffies +
		msecs_to_jiffies(TX_ERROR_PERIOD);
	adapter->error_timer.function = et131x_error_timer_handler;
	adapter->error_timer.data = (unsigned long)adapter;
	add_timer(&adapter->error_timer);

	result = request_irq(irq, et131x_isr,
			     IRQF_SHARED, netdev->name, netdev);
	if (result) {
		dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
		return result;
	}

	adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;

	napi_enable(&adapter->napi);

	et131x_up(netdev);

	return result;
}

static int et131x_close(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_down(netdev);
	napi_disable(&adapter->napi);

	adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
	free_irq(adapter->pdev->irq, netdev);

	/* Stop the error timer */
	return del_timer_sync(&adapter->error_timer);
}

static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
			int cmd)
{
	if (!netdev->phydev)
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
}
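
/* et131x_set_packet_filter - Configure the RXMAC packet filter.
 *
 * Translates the driver's packet-filter flags into the rxmac pf_ctrl and
 * ctrl registers (promiscuous, multicast, unicast and broadcast filtering).
 */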
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
	int filter = adapter->packet_filter;
	u32 ctrl;
	u32 pf_ctrl;

	ctrl = readl(&adapter->regs->rxmac.ctrl);
	pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);

	/* Default to disabled packet filtering */
	ctrl |= 0x04;

	/* Set us to be in promiscuous mode so we receive everything, this
	 * is also true when we get a packet filter of 0
	 */
	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
		pf_ctrl &= ~7;	/* Clear filter bits */
	else {
		/* Set us up with multicast packet filtering.  Three cases
		 * are possible - (1) we have a multicast list, (2) we
		 * receive ALL multicast entries or (3) we receive none.
		 */
		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
			pf_ctrl &= ~2;	/* Multicast filter bit */
		else {
			et1310_setup_device_for_multicast(adapter);
			pf_ctrl |= 2;
			ctrl &= ~0x04;
		}

		/* Set us up with unicast packet filtering */
		if (filter & ET131X_PACKET_TYPE_DIRECTED) {
			et1310_setup_device_for_unicast(adapter);
			pf_ctrl |= 4;
			ctrl &= ~0x04;
		}

		/* Set us up with broadcast packet filtering */
		if (filter & ET131X_PACKET_TYPE_BROADCAST) {
			pf_ctrl |= 1;	/* Broadcast filter bit */
			ctrl &= ~0x04;
		} else {
			pf_ctrl &= ~1;
		}

		/* Setup the receive mac configuration registers - packet
		 * filter control + the enable / disable for packet filter
		 * in the control reg
		 */
		writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
		writel(ctrl, &adapter->regs->rxmac.ctrl);
	}
	return 0;
}
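
/* et131x_multicast - The handler to configure multicast mode. */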
static void et131x_multicast(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	int packet_filter;
	struct netdev_hw_addr *ha;
	int i;

	/* Before we modify the platform-independent filter flags, store
	 * them locally.  This allows us to determine if anything's changed
	 * and if we even need to bother the hardware
	 */
	packet_filter = adapter->packet_filter;

	/* Clear the 'multicast' flag locally; because we only have a single
	 * flag to check multicast, and multiple multicast addresses can be
	 * set, this is the easiest way to determine if more than one
	 * multicast address is being set
	 */
	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;

	/* Check the net_device flags and set the device independent flags
	 * accordingly
	 */
	if (netdev->flags & IFF_PROMISC)
		adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
	else
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	if (netdev_mc_count(netdev) < 1) {
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
	} else {
		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
	}

	/* Set values in the private adapter struct */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == NIC_MAX_MCAST_LIST)
			break;
		ether_addr_copy(adapter->multicast_list[i++], ha->addr);
	}
	adapter->multicast_addr_count = i;

	/* Are the new flags different from the previous ones?  If not, then
	 * no action is required
	 *
	 * NOTE - This block will always update the multicast_list with the
	 * hardware, even if the addresses aren't the same.
	 */
	if (packet_filter != adapter->packet_filter)
		et131x_set_packet_filter(adapter);
}
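
/* et131x_tx - The handler to transmit a packet (called by the kernel). */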
static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Stop the queue if it's getting full */
	if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
		netif_stop_queue(netdev);

	/* Save the timestamp for the TX timeout watchdog */
	netif_trans_update(netdev);

	/* TCB is not available */
	if (tx_ring->used >= NUM_TCB)
		goto drop_err;

	if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
	    !netif_carrier_ok(netdev))
		goto drop_err;

	if (send_packet(skb, adapter))
		goto drop_err;

	return NETDEV_TX_OK;

drop_err:
	dev_kfree_skb_any(skb);
	adapter->netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
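
/* et131x_tx_timeout - Timeout handler.
 *
 * Called when a Tx request times out; the timeout period is specified by
 * the 'watchdog_timeo' element of the net_device structure.
 */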
static void et131x_tx_timeout(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb;
	unsigned long flags;

	/* If the device is closed, ignore the timeout */
	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
		return;

	/* Any nonrecoverable hardware error?  Checks adapter->flags for any
	 * failure in PHY reading
	 */
	if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
		return;

	/* Hardware failure? */
	if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
		dev_err(&adapter->pdev->dev, "hardware error - reset\n");
		return;
	}

	/* Is send stuck? */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
	tcb = tx_ring->send_head;
	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	if (tcb) {
		tcb->count++;

		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
			dev_warn(&adapter->pdev->dev,
				 "Send stuck - reset. tcb->WrIndex %x\n",
				 tcb->index);

			adapter->netdev->stats.tx_errors++;

			/* Perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
		}
	}
}
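
/* et131x_change_mtu - The handler called to change the MTU for the device. */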
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < 64 || new_mtu > 9216)
		return -EINVAL;

	et131x_disable_txrx(netdev);

	netdev->mtu = new_mtu;

	/* Free Rx DMA memory */
	et131x_adapter_memory_free(adapter);

	/* Set the config parameter for Jumbo Packet support */
	adapter->registry_jumbo_packet = new_mtu + 14;
	et131x_soft_reset(adapter);

	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_warn(&adapter->pdev->dev,
			 "Change MTU failed; couldn't re-alloc DMA memory\n");
		return result;
	}

	et131x_init_send(adapter);
	et131x_hwaddr_init(adapter);
	ether_addr_copy(netdev->dev_addr, adapter->addr);

	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);
	et131x_enable_txrx(netdev);

	return result;
}

static const struct net_device_ops et131x_netdev_ops = {
	.ndo_open		= et131x_open,
	.ndo_stop		= et131x_close,
	.ndo_start_xmit		= et131x_tx,
	.ndo_set_rx_mode	= et131x_multicast,
	.ndo_tx_timeout		= et131x_tx_timeout,
	.ndo_change_mtu		= et131x_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats		= et131x_stats,
	.ndo_do_ioctl		= et131x_ioctl,
};
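
/* et131x_pci_setup - Perform device initialization.
 * @pdev: a pointer to the device's pci_dev structure
 * @ent: this device's entry in the pci_device_id table
 *
 * Called when a PCI device is found that matches the et131x device table;
 * allocates the netdev, maps the registers, sets up DMA memory and the MII
 * bus, and registers the network device.
 */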
static int et131x_pci_setup(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct et131x_adapter *adapter;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		goto out;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		rc = -ENODEV;
		goto err_disable;
	}

	rc = pci_request_regions(pdev, DRIVER_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	/* Check the DMA addressing support of this device */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_release_res;
	}

	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		rc = -ENOMEM;
		goto err_release_res;
	}

	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
	netdev->netdev_ops = &et131x_netdev_ops;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->ethtool_ops = &et131x_ethtool_ops;

	adapter = et131x_adapter_init(netdev, pdev);

	rc = et131x_pci_init(adapter, pdev);
	if (rc < 0)
		goto err_free_dev;

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		rc = -ENOMEM;
		goto err_free_dev;
	}

	/* If PHY COMA mode was enabled when we went down, disable it here */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	et131x_soft_reset(adapter);
	et131x_disable_interrupts(adapter);

	rc = et131x_adapter_memory_alloc(adapter);
	if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	et131x_init_send(adapter);

	netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);

	ether_addr_copy(netdev->dev_addr, adapter->addr);

	rc = -ENOMEM;

	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;

	rc = mdiobus_register(adapter->mii_bus);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free;
	}

	rc = et131x_mii_probe(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}

	et131x_adapter_setup(adapter);

	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	/* NOTE - Because registration of the interrupt handler is done in
	 * the device's open(), defer enabling device interrupts to that
	 * point
	 */
	rc = register_netdev(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_phy_disconnect;
	}

	/* Save a pointer to the net_device in the PCI device's driver data
	 * now that the device has been initialized
	 */
	pci_set_drvdata(pdev, netdev);
out:
	return rc;

err_phy_disconnect:
	phy_disconnect(netdev->phydev);
err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	goto out;
}

static const struct pci_device_id et131x_pci_table[] = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL },
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, et131x_pci_table);

static struct pci_driver et131x_driver = {
	.name		= DRIVER_NAME,
	.id_table	= et131x_pci_table,
	.probe		= et131x_pci_setup,
	.remove		= et131x_pci_remove,
	.driver.pm	= &et131x_pm_ops,
};

module_pci_driver(et131x_driver);