#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

#define MAX_NUM_REGISTER_POLLS 1000
#define MAX_NUM_WRITE_RETRIES 2

#define COUNTER_WRAP_16_BIT 0x10000
#define COUNTER_WRAP_12_BIT 0x1000

#define INTERNAL_MEM_SIZE 0x400
#define INTERNAL_MEM_RX_OFFSET 0x1FF

#define INT_MASK_DISABLE 0xffffffff

#define INT_MASK_ENABLE 0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7

#define NIC_MIN_PACKET_SIZE 60

#define NIC_MAX_MCAST_LIST 128

#define ET131X_PACKET_TYPE_DIRECTED 0x0001
#define ET131X_PACKET_TYPE_MULTICAST 0x0002
#define ET131X_PACKET_TYPE_BROADCAST 0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010

#define ET131X_TX_TIMEOUT (1 * HZ)
#define NIC_SEND_HANG_THRESHOLD 0

#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008

#define FMP_ADAPTER_LOWER_POWER 0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000

#define ET1310_PCI_MAC_ADDRESS 0xA4
#define ET1310_PCI_EEPROM_STATUS 0xB2
#define ET1310_PCI_ACK_NACK 0xC0
#define ET1310_PCI_REPLAY 0xC2
#define ET1310_PCI_L0L1LATENCY 0xCF

#define ET131X_PCI_DEVICE_ID_GIG 0xED00
#define ET131X_PCI_DEVICE_ID_FAST 0xED01

#define NANO_IN_A_MICRO 1000

#define PARM_RX_NUM_BUFS_DEF 4
#define PARM_RX_TIME_INT_DEF 10
#define PARM_RX_MEM_END_DEF 0x2bc
#define PARM_TX_TIME_INT_DEF 40
#define PARM_TX_NUM_BUFS_DEF 4
#define PARM_DMA_CACHE_DEF 0

#define FBR_CHUNKS 32
#define MAX_DESC_PER_RING_RX 1024

#define RFD_LOW_WATER_MARK 40
#define NIC_DEFAULT_NUM_RFD 1024
#define NUM_FBRS 2

#define MAX_PACKETS_HANDLED 256
#define ET131X_MIN_MTU 64
#define ET131X_MAX_MTU 9216

#define ALCATEL_MULTICAST_PKT 0x01000000
#define ALCATEL_BROADCAST_PKT 0x02000000

struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;
};

struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};

struct rx_status_block {
	u32 word0;
	u32 word1;
};

struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];
	u32 bus_high[MAX_DESC_PER_RING_RX];
	u32 bus_low[MAX_DESC_PER_RING_RX];
	void *ring_virtaddr;
	dma_addr_t ring_physaddr;
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32 local_full;
	u32 num_entries;
	dma_addr_t buffsize;
};

struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};

#define TXDESC_FLAG_LASTPKT 0x0001
#define TXDESC_FLAG_FIRSTPKT 0x0002
#define TXDESC_FLAG_INTPROC 0x0004

struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;
	u32 flags;
};

struct tcb {
	struct tcb *next;
	u32 count;
	u32 stale;
	struct sk_buff *skb;
	u32 index;
	u32 index_start;
};

struct tx_ring {
	struct tcb *tcb_ring;

	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	u32 send_idx;

	u32 *tx_status;
	dma_addr_t tx_status_pa;

	int since_irq;
};

#define NUM_DESC_PER_RING_TX 512
#define NUM_TCB 64

#define TX_ERROR_PERIOD 1000

#define LO_MARK_PERCENT_FOR_PSR 15
#define LO_MARK_PERCENT_FOR_RX 15

struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;
	u16 bufferindex;
	u8 ringindex;
};

#define FLOW_BOTH 0
#define FLOW_TXONLY 1
#define FLOW_RXONLY 2
#define FLOW_NONE 3

struct ce_stats {
	u32 multicast_pkts_rcvd;
	u32 rcvd_pkts_dropped;

	u32 tx_underflows;
	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	u32 rx_overflows;
	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 interrupt_status;
};

struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct napi_struct napi;

	u32 flags;

	int link;

	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t rcv_lock;

	u32 packet_filter;

	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	struct address_map __iomem *regs;

	u8 wanted_flow;
	u32 registry_jumbo_packet;

	u8 flow;

	struct timer_list error_timer;

	u8 boot_coma;

	struct tx_ring tx_ring;

	struct rx_ring rx_ring;

	struct ce_stats stats;
};
496
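/* eeprom_wait_ready - poll the LBCIF status dword in PCI config space until
 * the ready bits (0x3000) are set, giving up after MAX_NUM_REGISTER_POLLS
 * reads.  Returns the low status byte on success, -EIO if the config read
 * fails, or -ETIMEDOUT if the interface never becomes ready.
 */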
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}
520
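/* eeprom_write - write one byte of EEPROM data via the LBCIF interface.
 * Waits for the interface to become ready, enables it for I2C writes, then
 * retries the address/data write up to MAX_NUM_WRITE_RETRIES times, treating
 * an ACK error as a cue to delay and try again.  Returns 0 on success or
 * -EIO on failure.
 */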
521static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
522{
523 struct pci_dev *pdev = adapter->pdev;
524 int index = 0;
525 int retries;
526 int err = 0;
527 int writeok = 0;
528 u32 status;
529 u32 val = 0;
530
531
532
533
534
535
536
537
538 err = eeprom_wait_ready(pdev, NULL);
539 if (err < 0)
540 return err;
541
542
543
544
545
546
547 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
548 LBCIF_CONTROL_LBCIF_ENABLE |
549 LBCIF_CONTROL_I2C_WRITE))
550 return -EIO;
551
552
553 for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
554 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
555 break;
556
557
558
559 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
560 break;
561
562
563
564
565
566
567
568 err = eeprom_wait_ready(pdev, &status);
569 if (err < 0)
570 return 0;
571
572
573
574
575
576 if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
577 adapter->pdev->revision == 0)
578 break;
579
580
581
582
583
584
585
586
587 if (status & LBCIF_STATUS_ACK_ERROR) {
588
589
590
591
592
593 udelay(10);
594 continue;
595 }
596
597 writeok = 1;
598 break;
599 }
600
601 udelay(10);
602
603 while (1) {
604 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
605 LBCIF_CONTROL_LBCIF_ENABLE))
606 writeok = 0;
607
608
609
610
611 do {
612 pci_write_config_dword(pdev,
613 LBCIF_ADDRESS_REGISTER,
614 addr);
615 do {
616 pci_read_config_dword(pdev,
617 LBCIF_DATA_REGISTER,
618 &val);
619 } while ((val & 0x00010000) == 0);
620 } while (val & 0x00040000);
621
622 if ((val & 0xFF00) != 0xC000 || index == 10000)
623 break;
624 index++;
625 }
626 return writeok ? 0 : -EIO;
627}
628
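/* eeprom_read - read one byte of EEPROM data at @addr into @pdata.
 * eeprom_wait_ready() returns the LBCIF data byte in its return value, so
 * the result is taken from err after the read completes.  Returns 0 on
 * success, -EIO on a config access failure or an ACK error.
 */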
629static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
630{
631 struct pci_dev *pdev = adapter->pdev;
632 int err;
633 u32 status;
634
635
636
637
638 err = eeprom_wait_ready(pdev, NULL);
639 if (err < 0)
640 return err;
641
642
643
644
645
646 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
647 LBCIF_CONTROL_LBCIF_ENABLE))
648 return -EIO;
649
650
651
652 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
653 return -EIO;
654
655
656
657
658 err = eeprom_wait_ready(pdev, &status);
659 if (err < 0)
660 return err;
661
662
663
664 *pdata = err;
665
666 return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
667}
668
669static int et131x_init_eeprom(struct et131x_adapter *adapter)
670{
671 struct pci_dev *pdev = adapter->pdev;
672 u8 eestatus;
673
674 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);
675
676
677
678
679
680
681
682 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
683 dev_err(&pdev->dev,
684 "Could not read PCI config space for EEPROM Status\n");
685 return -EIO;
686 }
687
688
689
690
691 if (eestatus & 0x4C) {
692 int write_failed = 0;
693
694 if (pdev->revision == 0x01) {
695 int i;
696 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
697
698
699
700
701
702 for (i = 0; i < 3; i++)
703 if (eeprom_write(adapter, i, eedata[i]) < 0)
704 write_failed = 1;
705 }
706 if (pdev->revision != 0x01 || write_failed) {
707 dev_err(&pdev->dev,
708 "Fatal EEPROM Status Error - 0x%04x\n",
709 eestatus);
710
711
712
713
714
715
716
717 adapter->has_eeprom = false;
718 return -EIO;
719 }
720 }
721 adapter->has_eeprom = true;
722
723
724
725
726 eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
727 eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
728
729 if (adapter->eeprom_data[0] != 0xcd)
730
731 adapter->eeprom_data[1] = 0x00;
732
733 return 0;
734}
735
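/* et131x_rx_dma_enable - start the receive DMA engine.  The CSR is loaded
 * with the free buffer ring enables plus size bits derived from each ring's
 * buffer size, and then read back to confirm the engine has left the halt
 * state.
 */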
736static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
737{
738
739 u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
740 struct rx_ring *rx_ring = &adapter->rx_ring;
741
742 if (rx_ring->fbr[1]->buffsize == 4096)
743 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
744 else if (rx_ring->fbr[1]->buffsize == 8192)
745 csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
746 else if (rx_ring->fbr[1]->buffsize == 16384)
747 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;
748
749 csr |= ET_RXDMA_CSR_FBR0_ENABLE;
750 if (rx_ring->fbr[0]->buffsize == 256)
751 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
752 else if (rx_ring->fbr[0]->buffsize == 512)
753 csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
754 else if (rx_ring->fbr[0]->buffsize == 1024)
755 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
756 writel(csr, &adapter->regs->rxdma.csr);
757
758 csr = readl(&adapter->regs->rxdma.csr);
759 if (csr & ET_RXDMA_CSR_HALT_STATUS) {
760 udelay(5);
761 csr = readl(&adapter->regs->rxdma.csr);
762 if (csr & ET_RXDMA_CSR_HALT_STATUS) {
763 dev_err(&adapter->pdev->dev,
764 "RX Dma failed to exit halt state. CSR 0x%08x\n",
765 csr);
766 }
767 }
768}
769
770static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
771{
772 u32 csr;
773
774 writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
775 &adapter->regs->rxdma.csr);
776 csr = readl(&adapter->regs->rxdma.csr);
777 if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
778 udelay(5);
779 csr = readl(&adapter->regs->rxdma.csr);
780 if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
781 dev_err(&adapter->pdev->dev,
782 "RX Dma failed to enter halt state. CSR 0x%08x\n",
783 csr);
784 }
785}
786
787static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
788{
789
790
791
792 writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
793 &adapter->regs->txdma.csr);
794}
795
796static inline void add_10bit(u32 *v, int n)
797{
798 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
799}
800
801static inline void add_12bit(u32 *v, int n)
802{
803 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
804}
805
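/* et1310_config_mac_regs1 - first stage of MAC initialisation: reset the
 * MAC, program the inter-packet gap and half-duplex parameters, load the
 * station address from adapter->addr, and set the maximum frame length to
 * the configured jumbo packet size plus 4 bytes of CRC.
 */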
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything.  Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);

	/* Next, configure the MAC inter-packet gap register */
	ipg = 0x38005860;
	ipg |= 0x50 << 8;
	writel(ipg, &macregs->ipg);

	/* Next, configure the MAC half-duplex register */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next, configure the MAC interface control register */
	writel(0, &macregs->if_ctrl);

	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

	/* Load the station address stored in the adapter structure into the
	 * MAC station address registers, packed high/low as the hardware
	 * expects.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Maximum ethernet frame the MAC will pass without truncating:
	 * the configured jumbo packet size plus 4 bytes for the CRC.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* Clear out MAC config reset */
	writel(0, &macregs->cfg1);
}
863
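/* et1310_config_mac_regs2 - second stage of MAC initialisation, run once
 * the PHY has a negotiated link.  Speed and duplex from the phydev select
 * the interface mode, pad/CRC and flow-control bits; the routine then polls
 * cfg1 for the sync bits before enabling the TX MAC.
 */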
864static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
865{
866 int32_t delay = 0;
867 struct mac_regs __iomem *mac = &adapter->regs->mac;
868 struct phy_device *phydev = adapter->netdev->phydev;
869 u32 cfg1;
870 u32 cfg2;
871 u32 ifctrl;
872 u32 ctl;
873
874 ctl = readl(&adapter->regs->txmac.ctl);
875 cfg1 = readl(&mac->cfg1);
876 cfg2 = readl(&mac->cfg2);
877 ifctrl = readl(&mac->if_ctrl);
878
879
880 cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
881 if (phydev->speed == SPEED_1000) {
882 cfg2 |= ET_MAC_CFG2_IFMODE_1000;
883 ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
884 } else {
885 cfg2 |= ET_MAC_CFG2_IFMODE_100;
886 ifctrl |= ET_MAC_IFCTRL_PHYMODE;
887 }
888
889 cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
890 ET_MAC_CFG1_TX_FLOW;
891
892 cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
893 if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
894 cfg1 |= ET_MAC_CFG1_RX_FLOW;
895 writel(cfg1, &mac->cfg1);
896
897
898
899
900
901 cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
902 cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
903 cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
904 cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
905 cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
906 cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;
907
908 if (phydev->duplex == DUPLEX_FULL)
909 cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;
910
911 ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
912 if (phydev->duplex == DUPLEX_HALF)
913 ifctrl |= ET_MAC_IFCTRL_GHDMODE;
914
915 writel(ifctrl, &mac->if_ctrl);
916 writel(cfg2, &mac->cfg2);
917
918 do {
919 udelay(10);
920 delay++;
921 cfg1 = readl(&mac->cfg1);
922 } while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);
923
924 if (delay == 100) {
925 dev_warn(&adapter->pdev->dev,
926 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
927 cfg1);
928 }
929
930 ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
931 writel(ctl, &adapter->regs->txmac.ctl);
932
933 if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
934 et131x_rx_dma_enable(adapter);
935 et131x_tx_dma_enable(adapter);
936 }
937}
938
939static int et1310_in_phy_coma(struct et131x_adapter *adapter)
940{
941 u32 pmcsr = readl(&adapter->regs->global.pm_csr);
942
943 return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
944}
945
946static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
947{
948 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
949 u32 hash1 = 0;
950 u32 hash2 = 0;
951 u32 hash3 = 0;
952 u32 hash4 = 0;
953
954
955
956
957
958
959 if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
960 int i;
961
962
963 for (i = 0; i < adapter->multicast_addr_count; i++) {
964 u32 result;
965
966 result = ether_crc(6, adapter->multicast_list[i]);
967
968 result = (result & 0x3F800000) >> 23;
969
970 if (result < 32) {
971 hash1 |= (1 << result);
972 } else if ((31 < result) && (result < 64)) {
973 result -= 32;
974 hash2 |= (1 << result);
975 } else if ((63 < result) && (result < 96)) {
976 result -= 64;
977 hash3 |= (1 << result);
978 } else {
979 result -= 96;
980 hash4 |= (1 << result);
981 }
982 }
983 }
984
985
986 if (!et1310_in_phy_coma(adapter)) {
987 writel(hash1, &rxmac->multi_hash1);
988 writel(hash2, &rxmac->multi_hash2);
989 writel(hash3, &rxmac->multi_hash3);
990 writel(hash4, &rxmac->multi_hash4);
991 }
992}
993
994static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
995{
996 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
997 u32 uni_pf1;
998 u32 uni_pf2;
999 u32 uni_pf3;
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010 uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
1011 (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
1012 (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
1013 adapter->addr[1];
1014
1015 uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
1016 (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
1017 (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
1018 adapter->addr[5];
1019
1020 uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
1021 (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
1022 (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
1023 adapter->addr[5];
1024
1025 if (!et1310_in_phy_coma(adapter)) {
1026 writel(uni_pf1, &rxmac->uni_pf_addr1);
1027 writel(uni_pf2, &rxmac->uni_pf_addr2);
1028 writel(uni_pf3, &rxmac->uni_pf_addr3);
1029 }
1030}
1031
1032static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
1033{
1034 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1035 struct phy_device *phydev = adapter->netdev->phydev;
1036 u32 sa_lo;
1037 u32 sa_hi = 0;
1038 u32 pf_ctrl = 0;
1039 u32 __iomem *wolw;
1040
1041
1042 writel(0x8, &rxmac->ctrl);
1043
1044
1045 writel(0, &rxmac->crc0);
1046 writel(0, &rxmac->crc12);
1047 writel(0, &rxmac->crc34);
1048
1049
1050
1051
1052
1053 for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
1054 writel(0, wolw);
1055
1056
1057 sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
1058 (adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
1059 (adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
1060 adapter->addr[5];
1061 writel(sa_lo, &rxmac->sa_lo);
1062
1063 sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
1064 adapter->addr[1];
1065 writel(sa_hi, &rxmac->sa_hi);
1066
1067
1068 writel(0, &rxmac->pf_ctrl);
1069
1070
1071 if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1072 et1310_setup_device_for_unicast(adapter);
1073 pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
1074 } else {
1075 writel(0, &rxmac->uni_pf_addr1);
1076 writel(0, &rxmac->uni_pf_addr2);
1077 writel(0, &rxmac->uni_pf_addr3);
1078 }
1079
1080
1081 if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1082 pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
1083 et1310_setup_device_for_multicast(adapter);
1084 }
1085
1086
1087 pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
1088 pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;
1089
1090 if (adapter->registry_jumbo_packet > 8192)
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101 writel(0x41, &rxmac->mcif_ctrl_max_seg);
1102 else
1103 writel(0, &rxmac->mcif_ctrl_max_seg);
1104
1105 writel(0, &rxmac->mcif_water_mark);
1106 writel(0, &rxmac->mif_ctrl);
1107 writel(0, &rxmac->space_avail);
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122 if (phydev && phydev->speed == SPEED_100)
1123 writel(0x30038, &rxmac->mif_ctrl);
1124 else
1125 writel(0x30030, &rxmac->mif_ctrl);
1126
1127
1128
1129
1130
1131
1132
1133 writel(pf_ctrl, &rxmac->pf_ctrl);
1134 writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
1135}
1136
1137static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1138{
1139 struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1140
1141
1142
1143
1144
1145 if (adapter->flow == FLOW_NONE)
1146 writel(0, &txmac->cf_param);
1147 else
1148 writel(0x40, &txmac->cf_param);
1149}
1150
1151static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1152{
1153 struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
1154 u32 __iomem *reg;
1155
1156
1157 for (reg = &macstat->txrx_0_64_byte_frames;
1158 reg <= &macstat->carry_reg2; reg++)
1159 writel(0, reg);
1160
1161
1162
1163
1164
1165 writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1166 writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1167}
1168
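/* et131x_phy_mii_read - read a PHY register over the MII management
 * interface.  The current mgmt address/command registers are saved and
 * restored around the transaction so an access already in progress is not
 * disturbed.  Returns 0 with *value filled in, or -EIO on timeout.
 */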
1169static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1170 u8 reg, u16 *value)
1171{
1172 struct mac_regs __iomem *mac = &adapter->regs->mac;
1173 int status = 0;
1174 u32 delay = 0;
1175 u32 mii_addr;
1176 u32 mii_cmd;
1177 u32 mii_indicator;
1178
1179
1180
1181
1182 mii_addr = readl(&mac->mii_mgmt_addr);
1183 mii_cmd = readl(&mac->mii_mgmt_cmd);
1184
1185
1186 writel(0, &mac->mii_mgmt_cmd);
1187
1188
1189 writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1190
1191 writel(0x1, &mac->mii_mgmt_cmd);
1192
1193 do {
1194 udelay(50);
1195 delay++;
1196 mii_indicator = readl(&mac->mii_mgmt_indicator);
1197 } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);
1198
1199
1200 if (delay == 50) {
1201 dev_warn(&adapter->pdev->dev,
1202 "reg 0x%08x could not be read\n", reg);
1203 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1204 mii_indicator);
1205
1206 status = -EIO;
1207 goto out;
1208 }
1209
1210
1211
1212
1213 *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;
1214
1215out:
1216
1217 writel(0, &mac->mii_mgmt_cmd);
1218
1219
1220
1221
1222 writel(mii_addr, &mac->mii_mgmt_addr);
1223 writel(mii_cmd, &mac->mii_mgmt_cmd);
1224
1225 return status;
1226}
1227
1228static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1229{
1230 struct phy_device *phydev = adapter->netdev->phydev;
1231
1232 if (!phydev)
1233 return -EIO;
1234
1235 return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value);
1236}
1237
1238static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
1239 u16 value)
1240{
1241 struct mac_regs __iomem *mac = &adapter->regs->mac;
1242 int status = 0;
1243 u32 delay = 0;
1244 u32 mii_addr;
1245 u32 mii_cmd;
1246 u32 mii_indicator;
1247
1248
1249
1250
1251 mii_addr = readl(&mac->mii_mgmt_addr);
1252 mii_cmd = readl(&mac->mii_mgmt_cmd);
1253
1254
1255 writel(0, &mac->mii_mgmt_cmd);
1256
1257
1258 writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1259
1260
1261 writel(value, &mac->mii_mgmt_ctrl);
1262
1263 do {
1264 udelay(50);
1265 delay++;
1266 mii_indicator = readl(&mac->mii_mgmt_indicator);
1267 } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);
1268
1269
1270 if (delay == 100) {
1271 u16 tmp;
1272
1273 dev_warn(&adapter->pdev->dev,
1274 "reg 0x%08x could not be written", reg);
1275 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
1276 mii_indicator);
1277 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
1278 readl(&mac->mii_mgmt_cmd));
1279
1280 et131x_mii_read(adapter, reg, &tmp);
1281
1282 status = -EIO;
1283 }
1284
1285 writel(0, &mac->mii_mgmt_cmd);
1286
1287
1288
1289
1290 writel(mii_addr, &mac->mii_mgmt_addr);
1291 writel(mii_cmd, &mac->mii_mgmt_cmd);
1292
1293 return status;
1294}
1295
static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
				    u16 regnum,
				    u16 bitnum,
				    u8 *value)
{
	u16 reg;
	u16 mask = 1 << bitnum;

	et131x_mii_read(adapter, regnum, &reg);

	*value = (reg & mask) >> bitnum;
}
1308
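/* et1310_config_flow_control - decide the flow control mode from the link
 * partner's pause and asymmetric-pause bits (MII register 5, bits 10 and
 * 11) combined with the locally requested mode in adapter->wanted_flow.
 * Half duplex always results in FLOW_NONE.
 */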
1309static void et1310_config_flow_control(struct et131x_adapter *adapter)
1310{
1311 struct phy_device *phydev = adapter->netdev->phydev;
1312
1313 if (phydev->duplex == DUPLEX_HALF) {
1314 adapter->flow = FLOW_NONE;
1315 } else {
1316 char remote_pause, remote_async_pause;
1317
1318 et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
1319 et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);
1320
1321 if (remote_pause && remote_async_pause) {
1322 adapter->flow = adapter->wanted_flow;
1323 } else if (remote_pause && !remote_async_pause) {
1324 if (adapter->wanted_flow == FLOW_BOTH)
1325 adapter->flow = FLOW_BOTH;
1326 else
1327 adapter->flow = FLOW_NONE;
1328 } else if (!remote_pause && !remote_async_pause) {
1329 adapter->flow = FLOW_NONE;
1330 } else {
1331 if (adapter->wanted_flow == FLOW_BOTH)
1332 adapter->flow = FLOW_RXONLY;
1333 else
1334 adapter->flow = FLOW_NONE;
1335 }
1336 }
1337}
1338
1339
1340static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
1341{
1342 struct ce_stats *stats = &adapter->stats;
1343 struct macstat_regs __iomem *macstat =
1344 &adapter->regs->macstat;
1345
1346 stats->tx_collisions += readl(&macstat->tx_total_collisions);
1347 stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
1348 stats->tx_deferred += readl(&macstat->tx_deferred);
1349 stats->tx_excessive_collisions +=
1350 readl(&macstat->tx_multiple_collisions);
1351 stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
1352 stats->tx_underflows += readl(&macstat->tx_undersize_frames);
1353 stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);
1354
1355 stats->rx_align_errs += readl(&macstat->rx_align_errs);
1356 stats->rx_crc_errs += readl(&macstat->rx_code_errs);
1357 stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
1358 stats->rx_overflows += readl(&macstat->rx_oversize_packets);
1359 stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
1360 stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
1361 stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
1362}
1363
1364
1365
1366
1367
1368
1369
1370static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1371{
1372 u32 carry_reg1;
1373 u32 carry_reg2;
1374
1375
1376
1377
1378 carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1379 carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1380
1381 writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1382 writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1383
1384
1385
1386
1387
1388
1389
1390 if (carry_reg1 & (1 << 14))
1391 adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
1392 if (carry_reg1 & (1 << 8))
1393 adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
1394 if (carry_reg1 & (1 << 7))
1395 adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
1396 if (carry_reg1 & (1 << 2))
1397 adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
1398 if (carry_reg1 & (1 << 6))
1399 adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
1400 if (carry_reg1 & (1 << 3))
1401 adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
1402 if (carry_reg1 & (1 << 0))
1403 adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
1404 if (carry_reg2 & (1 << 16))
1405 adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
1406 if (carry_reg2 & (1 << 15))
1407 adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
1408 if (carry_reg2 & (1 << 6))
1409 adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1410 if (carry_reg2 & (1 << 8))
1411 adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
1412 if (carry_reg2 & (1 << 5))
1413 adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1414 if (carry_reg2 & (1 << 4))
1415 adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
1416 if (carry_reg2 & (1 << 2))
1417 adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
1418}
1419
1420static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1421{
1422 struct net_device *netdev = bus->priv;
1423 struct et131x_adapter *adapter = netdev_priv(netdev);
1424 u16 value;
1425 int ret;
1426
1427 ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1428
1429 if (ret < 0)
1430 return ret;
1431
1432 return value;
1433}
1434
1435static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
1436 int reg, u16 value)
1437{
1438 struct net_device *netdev = bus->priv;
1439 struct et131x_adapter *adapter = netdev_priv(netdev);
1440
1441 return et131x_mii_write(adapter, phy_addr, reg, value);
1442}
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
1454{
1455 u16 data;
1456 struct phy_device *phydev = adapter->netdev->phydev;
1457
1458 et131x_mii_read(adapter, MII_BMCR, &data);
1459 data &= ~BMCR_PDOWN;
1460 if (down)
1461 data |= BMCR_PDOWN;
1462 et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data);
1463}
1464
1465
1466static void et131x_xcvr_init(struct et131x_adapter *adapter)
1467{
1468 u16 lcr2;
1469 struct phy_device *phydev = adapter->netdev->phydev;
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479 if ((adapter->eeprom_data[1] & 0x4) == 0) {
1480 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1481
1482 lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
1483 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1484
1485 if ((adapter->eeprom_data[1] & 0x8) == 0)
1486 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1487 else
1488 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1489
1490 et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2);
1491 }
1492}
1493
1494
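/* et131x_configure_global_regs - partition the ET1310's internal packet
 * memory between the RX and TX queues according to the configured jumbo
 * packet size, and clear the loopback, MSI and watchdog registers.
 */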
static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Standard frames: the RX queue ends at PARM_RX_MEM_END_DEF
		 * and the TX queue gets the remainder.
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* Jumbo frames below 8k: split the memory roughly in half */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* Largest jumbo frames: give the TX queue the larger share */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	/* Initialize the loopback register. Disable all loopbacks. */
	writel(0, &regs->loopback);

	writel(0, &regs->msi_config);

	/* The watchdog timer is disabled here; the transmit path re-arms it
	 * when needed.
	 */
	writel(0, &regs->watchdog_timer);
}
1534
1535
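/* et131x_config_rx_dma_regs - point the RX DMA engine at the status block,
 * the packet status ring and both free buffer rings, initialise every free
 * buffer descriptor from the lookup tables, and set the interrupt
 * coalescing thresholds (packets done / max packet time).
 */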
1536static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1537{
1538 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1539 struct rx_ring *rx_local = &adapter->rx_ring;
1540 struct fbr_desc *fbr_entry;
1541 u32 entry;
1542 u32 psr_num_des;
1543 unsigned long flags;
1544 u8 id;
1545
1546 et131x_rx_dma_disable(adapter);
1547
1548
1549 writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
1550 writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);
1551
1552 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1553
1554
1555 writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
1556 writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
1557 writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des);
1558 writel(0, &rx_dma->psr_full_offset);
1559
1560 psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
1561 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1562 &rx_dma->psr_min_des);
1563
1564 spin_lock_irqsave(&adapter->rcv_lock, flags);
1565
1566
1567 rx_local->local_psr_full = 0;
1568
1569 for (id = 0; id < NUM_FBRS; id++) {
1570 u32 __iomem *num_des;
1571 u32 __iomem *full_offset;
1572 u32 __iomem *min_des;
1573 u32 __iomem *base_hi;
1574 u32 __iomem *base_lo;
1575 struct fbr_lookup *fbr = rx_local->fbr[id];
1576
1577 if (id == 0) {
1578 num_des = &rx_dma->fbr0_num_des;
1579 full_offset = &rx_dma->fbr0_full_offset;
1580 min_des = &rx_dma->fbr0_min_des;
1581 base_hi = &rx_dma->fbr0_base_hi;
1582 base_lo = &rx_dma->fbr0_base_lo;
1583 } else {
1584 num_des = &rx_dma->fbr1_num_des;
1585 full_offset = &rx_dma->fbr1_full_offset;
1586 min_des = &rx_dma->fbr1_min_des;
1587 base_hi = &rx_dma->fbr1_base_hi;
1588 base_lo = &rx_dma->fbr1_base_lo;
1589 }
1590
1591
1592 fbr_entry = fbr->ring_virtaddr;
1593 for (entry = 0; entry < fbr->num_entries; entry++) {
1594 fbr_entry->addr_hi = fbr->bus_high[entry];
1595 fbr_entry->addr_lo = fbr->bus_low[entry];
1596 fbr_entry->word2 = entry;
1597 fbr_entry++;
1598 }
1599
1600
1601 writel(upper_32_bits(fbr->ring_physaddr), base_hi);
1602 writel(lower_32_bits(fbr->ring_physaddr), base_lo);
1603 writel(fbr->num_entries - 1, num_des);
1604 writel(ET_DMA10_WRAP, full_offset);
1605
1606
1607
1608
1609 fbr->local_full = ET_DMA10_WRAP;
1610 writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1611 min_des);
1612 }
1613
1614
1615
1616
1617
1618
1619 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1620
1621
1622
1623
1624
1625
1626 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1627
1628 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1629}
1630
1631
1632
1633
1634
1635
1636static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1637{
1638 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1639 struct tx_ring *tx_ring = &adapter->tx_ring;
1640
1641
1642 writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
1643 writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);
1644
1645
1646 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1647
1648
1649 writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
1650 writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);
1651
1652 *tx_ring->tx_status = 0;
1653
1654 writel(0, &txdma->service_request);
1655 tx_ring->send_idx = 0;
1656}
1657
1658
1659static void et131x_adapter_setup(struct et131x_adapter *adapter)
1660{
1661 et131x_configure_global_regs(adapter);
1662 et1310_config_mac_regs1(adapter);
1663
1664
1665
1666 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
1667
1668 et1310_config_rxmac_regs(adapter);
1669 et1310_config_txmac_regs(adapter);
1670
1671 et131x_config_rx_dma_regs(adapter);
1672 et131x_config_tx_dma_regs(adapter);
1673
1674 et1310_config_macstat_regs(adapter);
1675
1676 et1310_phy_power_switch(adapter, 0);
1677 et131x_xcvr_init(adapter);
1678}
1679
1680
1681static void et131x_soft_reset(struct et131x_adapter *adapter)
1682{
1683 u32 reg;
1684
1685
1686 reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
1687 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1688 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1689 writel(reg, &adapter->regs->mac.cfg1);
1690
1691 reg = ET_RESET_ALL;
1692 writel(reg, &adapter->regs->global.sw_reset);
1693
1694 reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1695 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1696 writel(reg, &adapter->regs->mac.cfg1);
1697 writel(0, &adapter->regs->mac.cfg1);
1698}
1699
1700static void et131x_enable_interrupts(struct et131x_adapter *adapter)
1701{
1702 u32 mask;
1703
1704 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
1705 mask = INT_MASK_ENABLE;
1706 else
1707 mask = INT_MASK_ENABLE_NO_FLOW;
1708
1709 writel(mask, &adapter->regs->global.int_mask);
1710}
1711
1712static void et131x_disable_interrupts(struct et131x_adapter *adapter)
1713{
1714 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
1715}
1716
1717static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
1718{
1719
1720 writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
1721 &adapter->regs->txdma.csr);
1722}
1723
1724static void et131x_enable_txrx(struct net_device *netdev)
1725{
1726 struct et131x_adapter *adapter = netdev_priv(netdev);
1727
1728 et131x_rx_dma_enable(adapter);
1729 et131x_tx_dma_enable(adapter);
1730
1731 if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
1732 et131x_enable_interrupts(adapter);
1733
1734 netif_start_queue(netdev);
1735}
1736
1737static void et131x_disable_txrx(struct net_device *netdev)
1738{
1739 struct et131x_adapter *adapter = netdev_priv(netdev);
1740
1741 netif_stop_queue(netdev);
1742
1743 et131x_rx_dma_disable(adapter);
1744 et131x_tx_dma_disable(adapter);
1745
1746 et131x_disable_interrupts(adapter);
1747}
1748
1749static void et131x_init_send(struct et131x_adapter *adapter)
1750{
1751 int i;
1752 struct tx_ring *tx_ring = &adapter->tx_ring;
1753 struct tcb *tcb = tx_ring->tcb_ring;
1754
1755 tx_ring->tcb_qhead = tcb;
1756
1757 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
1758
1759 for (i = 0; i < NUM_TCB; i++) {
1760 tcb->next = tcb + 1;
1761 tcb++;
1762 }
1763
1764 tcb--;
1765 tx_ring->tcb_qtail = tcb;
1766 tcb->next = NULL;
1767
1768 tx_ring->send_head = NULL;
1769 tx_ring->send_tail = NULL;
1770}
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
1783{
1784 u32 pmcsr = readl(&adapter->regs->global.pm_csr);
1785
1786
1787 adapter->flags |= FMP_ADAPTER_LOWER_POWER;
1788
1789
1790 et131x_disable_txrx(adapter->netdev);
1791
1792
1793 pmcsr &= ~ET_PMCSR_INIT;
1794 writel(pmcsr, &adapter->regs->global.pm_csr);
1795
1796
1797 pmcsr |= ET_PM_PHY_SW_COMA;
1798 writel(pmcsr, &adapter->regs->global.pm_csr);
1799}
1800
1801static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
1802{
1803 u32 pmcsr;
1804
1805 pmcsr = readl(&adapter->regs->global.pm_csr);
1806
1807
1808 pmcsr |= ET_PMCSR_INIT;
1809 pmcsr &= ~ET_PM_PHY_SW_COMA;
1810 writel(pmcsr, &adapter->regs->global.pm_csr);
1811
1812
1813
1814
1815
1816
1817 et131x_init_send(adapter);
1818
1819
1820
1821
1822
1823 et131x_soft_reset(adapter);
1824
1825 et131x_adapter_setup(adapter);
1826
1827
1828 adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;
1829
1830 et131x_enable_txrx(adapter->netdev);
1831}
1832
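/* bump_free_buff_ring - advance a 10-bit free buffer index that carries a
 * wrap flag in bit 10.  For example, on a 512-entry ring (limit 511) an
 * index of 511 advances to index 0 with the wrap bit toggled; the extra bit
 * distinguishes a full ring from an empty one when the producer and
 * consumer indices are otherwise equal.
 */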
1833static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
1834{
1835 u32 tmp_free_buff_ring = *free_buff_ring;
1836
1837 tmp_free_buff_ring++;
1838
1839
1840
1841
1842
1843 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
1844 tmp_free_buff_ring &= ~ET_DMA10_MASK;
1845 tmp_free_buff_ring ^= ET_DMA10_WRAP;
1846 }
1847
1848 tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
1849 *free_buff_ring = tmp_free_buff_ring;
1850 return tmp_free_buff_ring;
1851}
1852
1853
1854
1855
1856
1857
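/* et131x_rx_dma_memory_alloc - allocate the receive-side DMA resources:
 * the two free buffer ring lookup structures (ring and buffer sizes scaled
 * to the jumbo packet setting), the coherent buffer chunks behind them,
 * the packet status ring and the write-back status block.  Returns 0 or
 * -ENOMEM.
 */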
1858static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
1859{
1860 u8 id;
1861 u32 i, j;
1862 u32 bufsize;
1863 u32 psr_size;
1864 u32 fbr_chunksize;
1865 struct rx_ring *rx_ring = &adapter->rx_ring;
1866 struct fbr_lookup *fbr;
1867
1868
1869 rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL);
1870 if (rx_ring->fbr[0] == NULL)
1871 return -ENOMEM;
1872 rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL);
1873 if (rx_ring->fbr[1] == NULL)
1874 return -ENOMEM;
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893 if (adapter->registry_jumbo_packet < 2048) {
1894 rx_ring->fbr[0]->buffsize = 256;
1895 rx_ring->fbr[0]->num_entries = 512;
1896 rx_ring->fbr[1]->buffsize = 2048;
1897 rx_ring->fbr[1]->num_entries = 512;
1898 } else if (adapter->registry_jumbo_packet < 4096) {
1899 rx_ring->fbr[0]->buffsize = 512;
1900 rx_ring->fbr[0]->num_entries = 1024;
1901 rx_ring->fbr[1]->buffsize = 4096;
1902 rx_ring->fbr[1]->num_entries = 512;
1903 } else {
1904 rx_ring->fbr[0]->buffsize = 1024;
1905 rx_ring->fbr[0]->num_entries = 768;
1906 rx_ring->fbr[1]->buffsize = 16384;
1907 rx_ring->fbr[1]->num_entries = 128;
1908 }
1909
1910 rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
1911 rx_ring->fbr[1]->num_entries;
1912
1913 for (id = 0; id < NUM_FBRS; id++) {
1914 fbr = rx_ring->fbr[id];
1915
1916 bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
1917 fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
1918 bufsize,
1919 &fbr->ring_physaddr,
1920 GFP_KERNEL);
1921 if (!fbr->ring_virtaddr) {
1922 dev_err(&adapter->pdev->dev,
1923 "Cannot alloc memory for Free Buffer Ring %d\n",
1924 id);
1925 return -ENOMEM;
1926 }
1927 }
1928
1929 for (id = 0; id < NUM_FBRS; id++) {
1930 fbr = rx_ring->fbr[id];
1931 fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);
1932
1933 for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
1934 dma_addr_t fbr_physaddr;
1935
1936 fbr->mem_virtaddrs[i] = dma_alloc_coherent(
1937 &adapter->pdev->dev, fbr_chunksize,
1938 &fbr->mem_physaddrs[i],
1939 GFP_KERNEL);
1940
1941 if (!fbr->mem_virtaddrs[i]) {
1942 dev_err(&adapter->pdev->dev,
1943 "Could not alloc memory\n");
1944 return -ENOMEM;
1945 }
1946
1947
1948 fbr_physaddr = fbr->mem_physaddrs[i];
1949
1950 for (j = 0; j < FBR_CHUNKS; j++) {
1951 u32 k = (i * FBR_CHUNKS) + j;
1952
1953
1954
1955
1956 fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
1957 (j * fbr->buffsize);
1958
1959
1960
1961
1962 fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
1963 fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
1964 fbr_physaddr += fbr->buffsize;
1965 }
1966 }
1967 }
1968
1969
1970 psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
1971
1972 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
1973 psr_size,
1974 &rx_ring->ps_ring_physaddr,
1975 GFP_KERNEL);
1976
1977 if (!rx_ring->ps_ring_virtaddr) {
1978 dev_err(&adapter->pdev->dev,
1979 "Cannot alloc memory for Packet Status Ring\n");
1980 return -ENOMEM;
1981 }
1982
1983
1984 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
1985 sizeof(struct rx_status_block),
1986 &rx_ring->rx_status_bus,
1987 GFP_KERNEL);
1988 if (!rx_ring->rx_status_block) {
1989 dev_err(&adapter->pdev->dev,
1990 "Cannot alloc memory for Status Block\n");
1991 return -ENOMEM;
1992 }
1993 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
1994
1995
1996
1997
1998 INIT_LIST_HEAD(&rx_ring->recv_list);
1999 return 0;
2000}
2001
2002static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2003{
2004 u8 id;
2005 u32 ii;
2006 u32 bufsize;
2007 u32 psr_size;
2008 struct rfd *rfd;
2009 struct rx_ring *rx_ring = &adapter->rx_ring;
2010 struct fbr_lookup *fbr;
2011
2012
2013 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2014
2015 while (!list_empty(&rx_ring->recv_list)) {
2016 rfd = list_entry(rx_ring->recv_list.next,
2017 struct rfd, list_node);
2018
2019 list_del(&rfd->list_node);
2020 rfd->skb = NULL;
2021 kfree(rfd);
2022 }
2023
2024
2025 for (id = 0; id < NUM_FBRS; id++) {
2026 fbr = rx_ring->fbr[id];
2027
2028 if (!fbr || !fbr->ring_virtaddr)
2029 continue;
2030
2031
2032 for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) {
2033 if (fbr->mem_virtaddrs[ii]) {
2034 bufsize = fbr->buffsize * FBR_CHUNKS;
2035
2036 dma_free_coherent(&adapter->pdev->dev,
2037 bufsize,
2038 fbr->mem_virtaddrs[ii],
2039 fbr->mem_physaddrs[ii]);
2040
2041 fbr->mem_virtaddrs[ii] = NULL;
2042 }
2043 }
2044
2045 bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
2046
2047 dma_free_coherent(&adapter->pdev->dev,
2048 bufsize,
2049 fbr->ring_virtaddr,
2050 fbr->ring_physaddr);
2051
2052 fbr->ring_virtaddr = NULL;
2053 }
2054
2055
2056 if (rx_ring->ps_ring_virtaddr) {
2057 psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
2058
2059 dma_free_coherent(&adapter->pdev->dev, psr_size,
2060 rx_ring->ps_ring_virtaddr,
2061 rx_ring->ps_ring_physaddr);
2062
2063 rx_ring->ps_ring_virtaddr = NULL;
2064 }
2065
2066
2067 if (rx_ring->rx_status_block) {
2068 dma_free_coherent(&adapter->pdev->dev,
2069 sizeof(struct rx_status_block),
2070 rx_ring->rx_status_block,
2071 rx_ring->rx_status_bus);
2072 rx_ring->rx_status_block = NULL;
2073 }
2074
2075
2076 kfree(rx_ring->fbr[0]);
2077 kfree(rx_ring->fbr[1]);
2078
2079
2080 rx_ring->num_ready_recv = 0;
2081}
2082
2083
2084static int et131x_init_recv(struct et131x_adapter *adapter)
2085{
2086 struct rfd *rfd;
2087 u32 rfdct;
2088 struct rx_ring *rx_ring = &adapter->rx_ring;
2089
2090
2091 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2092 rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
2093 if (!rfd)
2094 return -ENOMEM;
2095
2096 rfd->skb = NULL;
2097
2098
2099 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2100
2101
2102 rx_ring->num_ready_recv++;
2103 }
2104
2105 return 0;
2106}
2107
2108
2109static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2110{
2111 struct phy_device *phydev = adapter->netdev->phydev;
2112
2113
2114
2115
2116 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2117 writel(0, &adapter->regs->rxdma.max_pkt_time);
2118 writel(1, &adapter->regs->rxdma.num_pkt_done);
2119 }
2120}
2121
2122
2123static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2124{
2125 struct rx_ring *rx_local = &adapter->rx_ring;
2126 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2127 u16 buff_index = rfd->bufferindex;
2128 u8 ring_index = rfd->ringindex;
2129 unsigned long flags;
2130 struct fbr_lookup *fbr = rx_local->fbr[ring_index];
2131
2132
2133
2134
2135 if (buff_index < fbr->num_entries) {
2136 u32 free_buff_ring;
2137 u32 __iomem *offset;
2138 struct fbr_desc *next;
2139
2140 if (ring_index == 0)
2141 offset = &rx_dma->fbr0_full_offset;
2142 else
2143 offset = &rx_dma->fbr1_full_offset;
2144
2145 next = (struct fbr_desc *)(fbr->ring_virtaddr) +
2146 INDEX10(fbr->local_full);
2147
2148
2149
2150
2151
2152 next->addr_hi = fbr->bus_high[buff_index];
2153 next->addr_lo = fbr->bus_low[buff_index];
2154 next->word2 = buff_index;
2155
2156 free_buff_ring = bump_free_buff_ring(&fbr->local_full,
2157 fbr->num_entries - 1);
2158 writel(free_buff_ring, offset);
2159 } else {
2160 dev_err(&adapter->pdev->dev,
2161 "%s illegal Buffer Index returned\n", __func__);
2162 }
2163
2164
2165
2166
2167 spin_lock_irqsave(&adapter->rcv_lock, flags);
2168 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2169 rx_local->num_ready_recv++;
2170 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2171
2172 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2173}
2174
2175
2176
2177
2178
2179
2180
2181
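/* nic_rx_pkts - process one completed receive.  Compares the write-back
 * status block with the driver's packet status ring index, pulls the next
 * PSR entry to get the ring index, buffer index and length, copies the
 * frame into a freshly allocated skb and passes it up the stack, then
 * recycles the RFD via nic_return_rfd().  Returns NULL when there is
 * nothing valid to process.
 */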
2182static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2183{
2184 struct rx_ring *rx_local = &adapter->rx_ring;
2185 struct rx_status_block *status;
2186 struct pkt_stat_desc *psr;
2187 struct rfd *rfd;
2188 unsigned long flags;
2189 struct list_head *element;
2190 u8 ring_index;
2191 u16 buff_index;
2192 u32 len;
2193 u32 word0;
2194 u32 word1;
2195 struct sk_buff *skb;
2196 struct fbr_lookup *fbr;
2197
2198
2199
2200
2201
2202 status = rx_local->rx_status_block;
2203 word1 = status->word1 >> 16;
2204
2205
2206 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2207 return NULL;
2208
2209
2210 psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
2211 (rx_local->local_psr_full & 0xFFF);
2212
2213
2214
2215
2216 len = psr->word1 & 0xFFFF;
2217 ring_index = (psr->word1 >> 26) & 0x03;
2218 fbr = rx_local->fbr[ring_index];
2219 buff_index = (psr->word1 >> 16) & 0x3FF;
2220 word0 = psr->word0;
2221
2222
2223
2224 add_12bit(&rx_local->local_psr_full, 1);
2225 if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) {
2226
2227 rx_local->local_psr_full &= ~0xFFF;
2228 rx_local->local_psr_full ^= 0x1000;
2229 }
2230
2231 writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
2232
2233 if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
2234
2235 dev_err(&adapter->pdev->dev,
2236 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
2237 rx_local->local_psr_full & 0xFFF, len, buff_index);
2238 return NULL;
2239 }
2240
2241
2242 spin_lock_irqsave(&adapter->rcv_lock, flags);
2243
2244 element = rx_local->recv_list.next;
2245 rfd = list_entry(element, struct rfd, list_node);
2246
2247 if (!rfd) {
2248 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2249 return NULL;
2250 }
2251
2252 list_del(&rfd->list_node);
2253 rx_local->num_ready_recv--;
2254
2255 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2256
2257 rfd->bufferindex = buff_index;
2258 rfd->ringindex = ring_index;
2259
2260
2261
2262
2263
2264 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2265 adapter->stats.rx_other_errs++;
2266 rfd->len = 0;
2267 goto out;
2268 }
2269
2270 if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT))
2271 adapter->stats.multicast_pkts_rcvd++;
2272
2273 rfd->len = len;
2274
2275 skb = dev_alloc_skb(rfd->len + 2);
2276 if (!skb)
2277 return NULL;
2278
2279 adapter->netdev->stats.rx_bytes += rfd->len;
2280
2281 skb_put_data(skb, fbr->virt[buff_index], rfd->len);
2282
2283 skb->protocol = eth_type_trans(skb, adapter->netdev);
2284 skb->ip_summed = CHECKSUM_NONE;
2285 netif_receive_skb(skb);
2286
2287out:
2288 nic_return_rfd(adapter, rfd);
2289 return rfd;
2290}
2291
2292static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
2293{
2294 struct rfd *rfd = NULL;
2295 int count = 0;
2296 int limit = budget;
2297 bool done = true;
2298 struct rx_ring *rx_ring = &adapter->rx_ring;
2299
2300 if (budget > MAX_PACKETS_HANDLED)
2301 limit = MAX_PACKETS_HANDLED;
2302
2303
2304 while (count < limit) {
2305 if (list_empty(&rx_ring->recv_list)) {
2306 WARN_ON(rx_ring->num_ready_recv != 0);
2307 done = false;
2308 break;
2309 }
2310
2311 rfd = nic_rx_pkts(adapter);
2312
2313 if (rfd == NULL)
2314 break;
2315
2316
2317
2318
2319
2320
2321 if (!adapter->packet_filter ||
2322 !netif_carrier_ok(adapter->netdev) ||
2323 rfd->len == 0)
2324 continue;
2325
2326 adapter->netdev->stats.rx_packets++;
2327
2328 if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
2329 dev_warn(&adapter->pdev->dev, "RFD's are running out\n");
2330
2331 count++;
2332 }
2333
2334 if (count == limit || !done) {
2335 rx_ring->unfinished_receives = true;
2336 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2337 &adapter->regs->global.watchdog_timer);
2338 } else {
2339
2340 rx_ring->unfinished_receives = false;
2341 }
2342
2343 return count;
2344}
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2355{
2356 int desc_size = 0;
2357 struct tx_ring *tx_ring = &adapter->tx_ring;
2358
2359
2360 tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
2361 GFP_KERNEL | GFP_DMA);
2362 if (!tx_ring->tcb_ring)
2363 return -ENOMEM;
2364
2365 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2366 tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
2367 desc_size,
2368 &tx_ring->tx_desc_ring_pa,
2369 GFP_KERNEL);
2370 if (!tx_ring->tx_desc_ring) {
2371 dev_err(&adapter->pdev->dev,
2372 "Cannot alloc memory for Tx Ring\n");
2373 return -ENOMEM;
2374 }
2375
2376 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
2377 sizeof(u32),
2378 &tx_ring->tx_status_pa,
2379 GFP_KERNEL);
2380 if (!tx_ring->tx_status) {
2381 dev_err(&adapter->pdev->dev,
2382 "Cannot alloc memory for Tx status block\n");
2383 return -ENOMEM;
2384 }
2385 return 0;
2386}
2387
2388static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
2389{
2390 int desc_size = 0;
2391 struct tx_ring *tx_ring = &adapter->tx_ring;
2392
2393 if (tx_ring->tx_desc_ring) {
2394
2395 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2396 dma_free_coherent(&adapter->pdev->dev,
2397 desc_size,
2398 tx_ring->tx_desc_ring,
2399 tx_ring->tx_desc_ring_pa);
2400 tx_ring->tx_desc_ring = NULL;
2401 }
2402
2403
2404 if (tx_ring->tx_status) {
2405 dma_free_coherent(&adapter->pdev->dev,
2406 sizeof(u32),
2407 tx_ring->tx_status,
2408 tx_ring->tx_status_pa);
2409
2410 tx_ring->tx_status = NULL;
2411 }
2412
2413 kfree(tx_ring->tcb_ring);
2414}
2415
2416
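/* nic_send_packet - map the skb and write its fragments into the TX
 * descriptor ring.  The linear header is split across two descriptors when
 * it exceeds 1514 bytes, the final descriptor gets the LASTPKT flag, and at
 * gigabit speed an interrupt is only requested every PARM_TX_NUM_BUFS_DEF
 * packets to keep the completion interrupt rate down.
 */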
2417static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
2418{
2419 u32 i;
2420 struct tx_desc desc[24];
2421 u32 frag = 0;
2422 u32 thiscopy, remainder;
2423 struct sk_buff *skb = tcb->skb;
2424 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
2425 skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
2426 struct phy_device *phydev = adapter->netdev->phydev;
2427 dma_addr_t dma_addr;
2428 struct tx_ring *tx_ring = &adapter->tx_ring;
2429
2430
2431
2432
2433
2434
2435
2436 BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
2437
2438 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
2439
2440 for (i = 0; i < nr_frags; i++) {
2441
2442
2443
2444 if (i == 0) {
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454 if (skb_headlen(skb) <= 1514) {
2455
2456
2457
2458 desc[frag].len_vlan = skb_headlen(skb);
2459 dma_addr = dma_map_single(&adapter->pdev->dev,
2460 skb->data,
2461 skb_headlen(skb),
2462 DMA_TO_DEVICE);
2463 desc[frag].addr_lo = lower_32_bits(dma_addr);
2464 desc[frag].addr_hi = upper_32_bits(dma_addr);
2465 frag++;
2466 } else {
2467 desc[frag].len_vlan = skb_headlen(skb) / 2;
2468 dma_addr = dma_map_single(&adapter->pdev->dev,
2469 skb->data,
2470 skb_headlen(skb) / 2,
2471 DMA_TO_DEVICE);
2472 desc[frag].addr_lo = lower_32_bits(dma_addr);
2473 desc[frag].addr_hi = upper_32_bits(dma_addr);
2474 frag++;
2475
2476 desc[frag].len_vlan = skb_headlen(skb) / 2;
2477 dma_addr = dma_map_single(&adapter->pdev->dev,
2478 skb->data +
2479 skb_headlen(skb) / 2,
2480 skb_headlen(skb) / 2,
2481 DMA_TO_DEVICE);
2482 desc[frag].addr_lo = lower_32_bits(dma_addr);
2483 desc[frag].addr_hi = upper_32_bits(dma_addr);
2484 frag++;
2485 }
2486 } else {
2487 desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
2488 dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
2489 &frags[i - 1],
2490 0,
2491 desc[frag].len_vlan,
2492 DMA_TO_DEVICE);
2493 desc[frag].addr_lo = lower_32_bits(dma_addr);
2494 desc[frag].addr_hi = upper_32_bits(dma_addr);
2495 frag++;
2496 }
2497 }
2498
2499 if (phydev && phydev->speed == SPEED_1000) {
2500 if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
2501
2502 desc[frag - 1].flags =
2503 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2504 tx_ring->since_irq = 0;
2505 } else {
2506 desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
2507 }
2508 } else {
2509 desc[frag - 1].flags =
2510 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2511 }
2512
2513 desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
2514
2515 tcb->index_start = tx_ring->send_idx;
2516 tcb->stale = 0;
2517
2518 thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);
2519
2520 if (thiscopy >= frag) {
2521 remainder = 0;
2522 thiscopy = frag;
2523 } else {
2524 remainder = frag - thiscopy;
2525 }
2526
2527 memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
2528 desc,
2529 sizeof(struct tx_desc) * thiscopy);
2530
2531 add_10bit(&tx_ring->send_idx, thiscopy);
2532
2533 if (INDEX10(tx_ring->send_idx) == 0 ||
2534 INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
2535 tx_ring->send_idx &= ~ET_DMA10_MASK;
2536 tx_ring->send_idx ^= ET_DMA10_WRAP;
2537 }
2538
2539 if (remainder) {
2540 memcpy(tx_ring->tx_desc_ring,
2541 desc + thiscopy,
2542 sizeof(struct tx_desc) * remainder);
2543
2544 add_10bit(&tx_ring->send_idx, remainder);
2545 }
2546
2547 if (INDEX10(tx_ring->send_idx) == 0) {
2548 if (tx_ring->send_idx)
2549 tcb->index = NUM_DESC_PER_RING_TX - 1;
2550 else
2551 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
2552 } else {
2553 tcb->index = tx_ring->send_idx - 1;
2554 }
2555
2556 spin_lock(&adapter->tcb_send_qlock);
2557
2558 if (tx_ring->send_tail)
2559 tx_ring->send_tail->next = tcb;
2560 else
2561 tx_ring->send_head = tcb;
2562
2563 tx_ring->send_tail = tcb;
2564
2565 WARN_ON(tcb->next != NULL);
2566
2567 tx_ring->used++;
2568
2569 spin_unlock(&adapter->tcb_send_qlock);
2570
2571
2572 writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);
2573
2574
2575
2576
2577 if (phydev && phydev->speed == SPEED_1000) {
2578 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2579 &adapter->regs->global.watchdog_timer);
2580 }
2581 return 0;
2582}
2583
2584static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
2585{
2586 int status;
2587 struct tcb *tcb;
2588 unsigned long flags;
2589 struct tx_ring *tx_ring = &adapter->tx_ring;
2590
2591
2592 if (skb->len < ETH_HLEN)
2593 return -EIO;
2594
2595 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2596
2597 tcb = tx_ring->tcb_qhead;
2598
2599 if (tcb == NULL) {
2600 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2601 return -ENOMEM;
2602 }
2603
2604 tx_ring->tcb_qhead = tcb->next;
2605
2606 if (tx_ring->tcb_qhead == NULL)
2607 tx_ring->tcb_qtail = NULL;
2608
2609 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2610
2611 tcb->skb = skb;
2612 tcb->next = NULL;
2613
2614 status = nic_send_packet(adapter, tcb);
2615
2616 if (status != 0) {
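/* The send failed; return the TCB to the ready queue */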
2617 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2618
2619 if (tx_ring->tcb_qtail)
2620 tx_ring->tcb_qtail->next = tcb;
2621 else
2622 /* The ready queue was empty */
2623 tx_ring->tcb_qhead = tcb;
2624
2625 tx_ring->tcb_qtail = tcb;
2626 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2627 return status;
2628 }
2629 WARN_ON(tx_ring->used > NUM_TCB);
2630 return 0;
2631}
2632
2633
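/* free_send_packet - unmap the DMA buffers used by a completed send,
 * free the skb and return the TCB to the ready queue.
 */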
2634static inline void free_send_packet(struct et131x_adapter *adapter,
2635 struct tcb *tcb)
2636{
2637 unsigned long flags;
2638 struct tx_desc *desc = NULL;
2639 struct net_device_stats *stats = &adapter->netdev->stats;
2640 struct tx_ring *tx_ring = &adapter->tx_ring;
2641 u64 dma_addr;
2642
2643 if (tcb->skb) {
2644 stats->tx_bytes += tcb->skb->len;
2645
2646 /* Walk the TX descriptors this packet used, from index_start up to
2647  * index, and unmap the DMA buffer referenced by each one.
2648  */
2650 do {
2651 desc = tx_ring->tx_desc_ring +
2652 INDEX10(tcb->index_start);
2653
2654 dma_addr = desc->addr_lo;
2655 dma_addr |= (u64)desc->addr_hi << 32;
2656
2657 dma_unmap_single(&adapter->pdev->dev,
2658 dma_addr,
2659 desc->len_vlan, DMA_TO_DEVICE);
2660
2661 add_10bit(&tcb->index_start, 1);
2662 if (INDEX10(tcb->index_start) >=
2663 NUM_DESC_PER_RING_TX) {
2664 tcb->index_start &= ~ET_DMA10_MASK;
2665 tcb->index_start ^= ET_DMA10_WRAP;
2666 }
2667 } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));
2668
2669 dev_kfree_skb_any(tcb->skb);
2670 }
2671
2672 memset(tcb, 0, sizeof(struct tcb));
2673
2674
2675 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2676
2677 stats->tx_packets++;
2678
2679 if (tx_ring->tcb_qtail)
2680 tx_ring->tcb_qtail->next = tcb;
2681 else
2682 tx_ring->tcb_qhead = tcb;
2683
2684 tx_ring->tcb_qtail = tcb;
2685
2686 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2687 WARN_ON(tx_ring->used < 0);
2688}
2689
2690 /* et131x_free_busy_send_packets - free and complete all in-flight sends */
2691static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
2692{
2693 struct tcb *tcb;
2694 unsigned long flags;
2695 u32 freed = 0;
2696 struct tx_ring *tx_ring = &adapter->tx_ring;
2697
2698
2699 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2700
2701 tcb = tx_ring->send_head;
2702
2703 while (tcb != NULL && freed < NUM_TCB) {
2704 struct tcb *next = tcb->next;
2705
2706 tx_ring->send_head = next;
2707
2708 if (next == NULL)
2709 tx_ring->send_tail = NULL;
2710
2711 tx_ring->used--;
2712
2713 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2714
2715 freed++;
2716 free_send_packet(adapter, tcb);
2717
2718 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2719
2720 tcb = tx_ring->send_head;
2721 }
2722
2723 WARN_ON(freed == NUM_TCB);
2724
2725 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2726
2727 tx_ring->used = 0;
2728}
2729
2730 /* et131x_handle_send_pkts - interrupt/NAPI handler for send completion
2731  *
2732  * Re-claim send resources and complete sends for descriptors that the
2733  * hardware reports as serviced.
2734  */
2735static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
2736{
2737 unsigned long flags;
2738 u32 serviced;
2739 struct tcb *tcb;
2740 u32 index;
2741 struct tx_ring *tx_ring = &adapter->tx_ring;
2742
2743 serviced = readl(&adapter->regs->txdma.new_service_complete);
2744 index = INDEX10(serviced);
2745
2746 /* Has the ring wrapped? Process any descriptors that do not have the
2747  * same "wrap" indicator as the current completion index first.
2748  */
2749 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2750
2751 tcb = tx_ring->send_head;
2752
2753 while (tcb &&
2754 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
2755 index < INDEX10(tcb->index)) {
2756 tx_ring->used--;
2757 tx_ring->send_head = tcb->next;
2758 if (tcb->next == NULL)
2759 tx_ring->send_tail = NULL;
2760
2761 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2762 free_send_packet(adapter, tcb);
2763 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2764
2765
2766 tcb = tx_ring->send_head;
2767 }
2768 while (tcb &&
2769 !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
2770 index > (tcb->index & ET_DMA10_MASK)) {
2771 tx_ring->used--;
2772 tx_ring->send_head = tcb->next;
2773 if (tcb->next == NULL)
2774 tx_ring->send_tail = NULL;
2775
2776 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2777 free_send_packet(adapter, tcb);
2778 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2779
2780
2781 tcb = tx_ring->send_head;
2782 }
2783
2784
2785 if (tx_ring->used <= NUM_TCB / 3)
2786 netif_wake_queue(adapter->netdev);
2787
2788 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2789}
2790
2791static int et131x_get_regs_len(struct net_device *netdev)
2792{
2793#define ET131X_REGS_LEN 256
2794 return ET131X_REGS_LEN * sizeof(u32);
2795}
2796
2797static void et131x_get_regs(struct net_device *netdev,
2798 struct ethtool_regs *regs, void *regs_data)
2799{
2800 struct et131x_adapter *adapter = netdev_priv(netdev);
2801 struct address_map __iomem *aregs = adapter->regs;
2802 u32 *regs_buff = regs_data;
2803 u32 num = 0;
2804 u16 tmp;
2805
2806 memset(regs_data, 0, et131x_get_regs_len(netdev));
2807
2808 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
2809 adapter->pdev->device;
2810
2811
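/* PHY registers */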
2812 et131x_mii_read(adapter, MII_BMCR, &tmp);
2813 regs_buff[num++] = tmp;
2814 et131x_mii_read(adapter, MII_BMSR, &tmp);
2815 regs_buff[num++] = tmp;
2816 et131x_mii_read(adapter, MII_PHYSID1, &tmp);
2817 regs_buff[num++] = tmp;
2818 et131x_mii_read(adapter, MII_PHYSID2, &tmp);
2819 regs_buff[num++] = tmp;
2820 et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
2821 regs_buff[num++] = tmp;
2822 et131x_mii_read(adapter, MII_LPA, &tmp);
2823 regs_buff[num++] = tmp;
2824 et131x_mii_read(adapter, MII_EXPANSION, &tmp);
2825 regs_buff[num++] = tmp;
2826
2827 et131x_mii_read(adapter, 0x07, &tmp);
2828 regs_buff[num++] = tmp;
2829
2830 et131x_mii_read(adapter, 0x08, &tmp);
2831 regs_buff[num++] = tmp;
2832 et131x_mii_read(adapter, MII_CTRL1000, &tmp);
2833 regs_buff[num++] = tmp;
2834 et131x_mii_read(adapter, MII_STAT1000, &tmp);
2835 regs_buff[num++] = tmp;
2836 et131x_mii_read(adapter, 0x0b, &tmp);
2837 regs_buff[num++] = tmp;
2838 et131x_mii_read(adapter, 0x0c, &tmp);
2839 regs_buff[num++] = tmp;
2840 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
2841 regs_buff[num++] = tmp;
2842 et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
2843 regs_buff[num++] = tmp;
2844 et131x_mii_read(adapter, MII_ESTATUS, &tmp);
2845 regs_buff[num++] = tmp;
2846
2847 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
2848 regs_buff[num++] = tmp;
2849 et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
2850 regs_buff[num++] = tmp;
2851 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
2852 regs_buff[num++] = tmp;
2853 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
2854 regs_buff[num++] = tmp;
2855 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
2856 regs_buff[num++] = tmp;
2857
2858 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
2859 regs_buff[num++] = tmp;
2860 et131x_mii_read(adapter, PHY_CONFIG, &tmp);
2861 regs_buff[num++] = tmp;
2862 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
2863 regs_buff[num++] = tmp;
2864 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
2865 regs_buff[num++] = tmp;
2866 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
2867 regs_buff[num++] = tmp;
2868 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
2869 regs_buff[num++] = tmp;
2870 et131x_mii_read(adapter, PHY_LED_1, &tmp);
2871 regs_buff[num++] = tmp;
2872 et131x_mii_read(adapter, PHY_LED_2, &tmp);
2873 regs_buff[num++] = tmp;
2874
2875 /* Global registers */
2876 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
2877 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
2878 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
2879 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
2880 regs_buff[num++] = readl(&aregs->global.pm_csr);
2881 regs_buff[num++] = adapter->stats.interrupt_status;
2882 regs_buff[num++] = readl(&aregs->global.int_mask);
2883 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
2884 regs_buff[num++] = readl(&aregs->global.int_status_alias);
2885 regs_buff[num++] = readl(&aregs->global.sw_reset);
2886 regs_buff[num++] = readl(&aregs->global.slv_timer);
2887 regs_buff[num++] = readl(&aregs->global.msi_config);
2888 regs_buff[num++] = readl(&aregs->global.loopback);
2889 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
2890
2891 /* TXDMA registers */
2892 regs_buff[num++] = readl(&aregs->txdma.csr);
2893 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
2894 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
2895 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
2896 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
2897 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
2898 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
2899 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
2900 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
2901 regs_buff[num++] = readl(&aregs->txdma.service_request);
2902 regs_buff[num++] = readl(&aregs->txdma.service_complete);
2903 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
2904 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
2905 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
2906 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
2907 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
2908 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
2909 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
2910 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
2911 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
2912 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
2913 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
2914 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
2915 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
2916 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
2917 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
2918
2919 /* RXDMA registers */
2920 regs_buff[num++] = readl(&aregs->rxdma.csr);
2921 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
2922 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
2923 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
2924 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
2925 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
2926 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
2927 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
2928 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
2929 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
2930 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
2931 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
2932 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
2933 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
2934 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
2935 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
2936 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
2937 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
2938 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
2939 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
2940 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
2941 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
2942 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
2943 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
2944 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
2945 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
2946 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
2947 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
2948 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
2949}
2950
2951static void et131x_get_drvinfo(struct net_device *netdev,
2952 struct ethtool_drvinfo *info)
2953{
2954 struct et131x_adapter *adapter = netdev_priv(netdev);
2955
2956 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
2957 strlcpy(info->bus_info, pci_name(adapter->pdev),
2958 sizeof(info->bus_info));
2959}
2960
2961static const struct ethtool_ops et131x_ethtool_ops = {
2962 .get_drvinfo = et131x_get_drvinfo,
2963 .get_regs_len = et131x_get_regs_len,
2964 .get_regs = et131x_get_regs,
2965 .get_link = ethtool_op_get_link,
2966 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2967 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2968};
2969
2970
2971static void et131x_hwaddr_init(struct et131x_adapter *adapter)
2972{
2973 /* If no MAC address has been read from EEPROM/config space (the ROM
2974  * address is all zeros), generate one from the default address.
2975  */
2977 if (is_zero_ether_addr(adapter->rom_addr)) {
2978 /* Randomise the last octet to reduce the chance of two cards in one
2979  * system ending up with the same MAC address.
2980  */
2982 get_random_bytes(&adapter->addr[5], 1);
2983
2984
2985
2986
2987 ether_addr_copy(adapter->rom_addr, adapter->addr);
2988 } else {
2989
2990
2991
2992
2993 ether_addr_copy(adapter->addr, adapter->rom_addr);
2994 }
2995}
2996
2997static int et131x_pci_init(struct et131x_adapter *adapter,
2998 struct pci_dev *pdev)
2999{
3000 u16 max_payload;
3001 int i, rc;
3002
3003 rc = et131x_init_eeprom(adapter);
3004 if (rc < 0)
3005 goto out;
3006
3007 if (!pci_is_pcie(pdev)) {
3008 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
3009 goto err_out;
3010 }
3011
3012 /* For small max payload sizes, program the ACK/NAK latency and replay timers explicitly */
3013 max_payload = pdev->pcie_mpss;
3014
3015 if (max_payload < 2) {
3016 static const u16 acknak[2] = { 0x76, 0xD0 };
3017 static const u16 replay[2] = { 0x1E0, 0x2ED };
3018
3019 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3020 acknak[max_payload])) {
3021 dev_err(&pdev->dev,
3022 "Could not write PCI config space for ACK/NAK\n");
3023 goto err_out;
3024 }
3025 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3026 replay[max_payload])) {
3027 dev_err(&pdev->dev,
3028 "Could not write PCI config space for Replay Timer\n");
3029 goto err_out;
3030 }
3031 }
3032
3033 /* Program the L0s/L1 entry latency timers with the driver's default
3034  * value.
3035  */
3036 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3037 dev_err(&pdev->dev,
3038 "Could not write PCI config space for Latency Timers\n");
3039 goto err_out;
3040 }
3041
3042 /* Raise the PCIe maximum read request size to 2K */
3043 if (pcie_set_readrq(pdev, 2048)) {
3044 dev_err(&pdev->dev,
3045 "Couldn't change PCI config space for Max read size\n");
3046 goto err_out;
3047 }
3048
3049 /* Without an EEPROM the MAC address in PCI config space is not valid,
3050  * so fall back to a generated address.
3051  */
3052 if (!adapter->has_eeprom) {
3053 et131x_hwaddr_init(adapter);
3054 return 0;
3055 }
3056
3057 for (i = 0; i < ETH_ALEN; i++) {
3058 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3059 adapter->rom_addr + i)) {
3060 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3061 goto err_out;
3062 }
3063 }
3064 ether_addr_copy(adapter->addr, adapter->rom_addr);
3065out:
3066 return rc;
3067err_out:
3068 rc = -EIO;
3069 goto out;
3070}
3071
3072 /* et131x_error_timer_handler - periodic housekeeping timer
3073  *
3074  * Updates the MAC statistics and moves the PHY into or out of coma
3075  * mode depending on how long the link has been down.
3076  */
3078static void et131x_error_timer_handler(struct timer_list *t)
3079{
3080 struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
3081 struct phy_device *phydev = adapter->netdev->phydev;
3082
3083 if (et1310_in_phy_coma(adapter)) {
3084
3085
3086
3087
3088 et1310_disable_phy_coma(adapter);
3089 adapter->boot_coma = 20;
3090 } else {
3091 et1310_update_macstat_host_counters(adapter);
3092 }
3093
3094 if (!phydev->link && adapter->boot_coma < 11)
3095 adapter->boot_coma++;
3096
3097 if (adapter->boot_coma == 10) {
3098 if (!phydev->link) {
3099 if (!et1310_in_phy_coma(adapter)) {
3100 /* The link has been down for a while; put the PHY into coma
3101  * mode to save power.
3102  */
3103 et131x_enable_interrupts(adapter);
3104 et1310_enable_phy_coma(adapter);
3105 }
3106 }
3107 }
3108
3109
3110 mod_timer(&adapter->error_timer, jiffies +
3111 msecs_to_jiffies(TX_ERROR_PERIOD));
3112}
3113
3114static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
3115{
3116 et131x_tx_dma_memory_free(adapter);
3117 et131x_rx_dma_memory_free(adapter);
3118}
3119
3120static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3121{
3122 int status;
3123
3124 status = et131x_tx_dma_memory_alloc(adapter);
3125 if (status) {
3126 dev_err(&adapter->pdev->dev,
3127 "et131x_tx_dma_memory_alloc FAILED\n");
3128 et131x_tx_dma_memory_free(adapter);
3129 return status;
3130 }
3131
3132 status = et131x_rx_dma_memory_alloc(adapter);
3133 if (status) {
3134 dev_err(&adapter->pdev->dev,
3135 "et131x_rx_dma_memory_alloc FAILED\n");
3136 et131x_adapter_memory_free(adapter);
3137 return status;
3138 }
3139
3140 status = et131x_init_recv(adapter);
3141 if (status) {
3142 dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
3143 et131x_adapter_memory_free(adapter);
3144 }
3145 return status;
3146}
3147
3148static void et131x_adjust_link(struct net_device *netdev)
3149{
3150 struct et131x_adapter *adapter = netdev_priv(netdev);
3151 struct phy_device *phydev = netdev->phydev;
3152
3153 if (!phydev)
3154 return;
3155 if (phydev->link == adapter->link)
3156 return;
3157
3158 /* The link state has changed; if the device is in PHY coma mode,
3159  * bring it out before acting on the new state.
3160  */
3162 if (et1310_in_phy_coma(adapter))
3163 et1310_disable_phy_coma(adapter);
3164
3165 adapter->link = phydev->link;
3166 phy_print_status(phydev);
3167
3168 if (phydev->link) {
3169 adapter->boot_coma = 20;
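/* Apply a PHY register tweak at 10 Mb/s (register 18 plus the
 * index/data registers); this appears to be a workaround specific to
 * 10Base-T operation.
 */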
3170 if (phydev->speed == SPEED_10) {
3171 u16 register18;
3172
3173 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3174 &register18);
3175 et131x_mii_write(adapter, phydev->mdio.addr,
3176 PHY_MPHY_CONTROL_REG,
3177 register18 | 0x4);
3178 et131x_mii_write(adapter, phydev->mdio.addr,
3179 PHY_INDEX_REG, register18 | 0x8402);
3180 et131x_mii_write(adapter, phydev->mdio.addr,
3181 PHY_DATA_REG, register18 | 511);
3182 et131x_mii_write(adapter, phydev->mdio.addr,
3183 PHY_MPHY_CONTROL_REG, register18);
3184 }
3185
3186 et1310_config_flow_control(adapter);
3187
3188 if (phydev->speed == SPEED_1000 &&
3189 adapter->registry_jumbo_packet > 2048) {
3190 u16 reg;
3191
3192 et131x_mii_read(adapter, PHY_CONFIG, &reg);
3193 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
3194 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
3195 et131x_mii_write(adapter, phydev->mdio.addr,
3196 PHY_CONFIG, reg);
3197 }
3198
3199 et131x_set_rx_dma_timer(adapter);
3200 et1310_config_mac_regs2(adapter);
3201 } else {
3202 adapter->boot_coma = 0;
3203
3204 if (phydev->speed == SPEED_10) {
3205 u16 register18;
3206
3207 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3208 &register18);
3209 et131x_mii_write(adapter, phydev->mdio.addr,
3210 PHY_MPHY_CONTROL_REG,
3211 register18 | 0x4);
3212 et131x_mii_write(adapter, phydev->mdio.addr,
3213 PHY_INDEX_REG, register18 | 0x8402);
3214 et131x_mii_write(adapter, phydev->mdio.addr,
3215 PHY_DATA_REG, register18 | 511);
3216 et131x_mii_write(adapter, phydev->mdio.addr,
3217 PHY_MPHY_CONTROL_REG, register18);
3218 }
3219
3220 et131x_free_busy_send_packets(adapter);
3221 et131x_init_send(adapter);
3222 /* Bring the device back to the state it was in during init, prior to
3223  * autonegotiation completing, so the MAC can be reconfigured cleanly
3224  * when the link next comes up.
3225  */
3228 et131x_soft_reset(adapter);
3229
3230 et131x_adapter_setup(adapter);
3231
3232 et131x_disable_txrx(netdev);
3233 et131x_enable_txrx(netdev);
3234 }
3235}
3236
3237static int et131x_mii_probe(struct net_device *netdev)
3238{
3239 struct et131x_adapter *adapter = netdev_priv(netdev);
3240 struct phy_device *phydev = NULL;
3241
3242 phydev = phy_find_first(adapter->mii_bus);
3243 if (!phydev) {
3244 dev_err(&adapter->pdev->dev, "no PHY found\n");
3245 return -ENODEV;
3246 }
3247
3248 phydev = phy_connect(netdev, phydev_name(phydev),
3249 &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
3250
3251 if (IS_ERR(phydev)) {
3252 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3253 return PTR_ERR(phydev);
3254 }
3255
3256 phy_set_max_speed(phydev, SPEED_100);
3257
3258 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3259 phy_set_max_speed(phydev, SPEED_1000);
3260
3261 phydev->autoneg = AUTONEG_ENABLE;
3262
3263 phy_attached_info(phydev);
3264
3265 return 0;
3266}
3267
3268static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3269 struct pci_dev *pdev)
3270{
3271 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3272
3273 struct et131x_adapter *adapter;
3274
3275 adapter = netdev_priv(netdev);
3276 adapter->pdev = pci_dev_get(pdev);
3277 adapter->netdev = netdev;
3278
3279 spin_lock_init(&adapter->tcb_send_qlock);
3280 spin_lock_init(&adapter->tcb_ready_qlock);
3281 spin_lock_init(&adapter->rcv_lock);
3282
3283 adapter->registry_jumbo_packet = 1514;
3284
3285 ether_addr_copy(adapter->addr, default_mac);
3286
3287 return adapter;
3288}
3289
3290static void et131x_pci_remove(struct pci_dev *pdev)
3291{
3292 struct net_device *netdev = pci_get_drvdata(pdev);
3293 struct et131x_adapter *adapter = netdev_priv(netdev);
3294
3295 unregister_netdev(netdev);
3296 netif_napi_del(&adapter->napi);
3297 phy_disconnect(netdev->phydev);
3298 mdiobus_unregister(adapter->mii_bus);
3299 mdiobus_free(adapter->mii_bus);
3300
3301 et131x_adapter_memory_free(adapter);
3302 iounmap(adapter->regs);
3303 pci_dev_put(pdev);
3304
3305 free_netdev(netdev);
3306 pci_release_regions(pdev);
3307 pci_disable_device(pdev);
3308}
3309
3310static void et131x_up(struct net_device *netdev)
3311{
3312 et131x_enable_txrx(netdev);
3313 phy_start(netdev->phydev);
3314}
3315
3316static void et131x_down(struct net_device *netdev)
3317{
3318 /* Refresh the transmit timestamp so the TX watchdog does not fire */
3319 netif_trans_update(netdev);
3320
3321 phy_stop(netdev->phydev);
3322 et131x_disable_txrx(netdev);
3323}
3324
3325#ifdef CONFIG_PM_SLEEP
3326static int et131x_suspend(struct device *dev)
3327{
3328 struct pci_dev *pdev = to_pci_dev(dev);
3329 struct net_device *netdev = pci_get_drvdata(pdev);
3330
3331 if (netif_running(netdev)) {
3332 netif_device_detach(netdev);
3333 et131x_down(netdev);
3334 pci_save_state(pdev);
3335 }
3336
3337 return 0;
3338}
3339
3340static int et131x_resume(struct device *dev)
3341{
3342 struct pci_dev *pdev = to_pci_dev(dev);
3343 struct net_device *netdev = pci_get_drvdata(pdev);
3344
3345 if (netif_running(netdev)) {
3346 pci_restore_state(pdev);
3347 et131x_up(netdev);
3348 netif_device_attach(netdev);
3349 }
3350
3351 return 0;
3352}
3353#endif
3354
3355static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
3356
3357static irqreturn_t et131x_isr(int irq, void *dev_id)
3358{
3359 bool handled = true;
3360 bool enable_interrupts = true;
3361 struct net_device *netdev = dev_id;
3362 struct et131x_adapter *adapter = netdev_priv(netdev);
3363 struct address_map __iomem *iomem = adapter->regs;
3364 struct rx_ring *rx_ring = &adapter->rx_ring;
3365 struct tx_ring *tx_ring = &adapter->tx_ring;
3366 u32 status;
3367
3368 if (!netif_device_present(netdev)) {
3369 handled = false;
3370 enable_interrupts = false;
3371 goto out;
3372 }
3373
3374 et131x_disable_interrupts(adapter);
3375
3376 status = readl(&adapter->regs->global.int_status);
3377
3378 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
3379 status &= ~INT_MASK_ENABLE;
3380 else
3381 status &= ~INT_MASK_ENABLE_NO_FLOW;
3382
3383 /* If none of the unmasked sources are set, the interrupt was not ours */
3384 if (!status) {
3385 handled = false;
3386 et131x_enable_interrupts(adapter);
3387 goto out;
3388 }
3389
3390 /* Handle a watchdog expiry: promote it to TX/RX work if any is pending, otherwise stop the watchdog */
3391 if (status & ET_INTR_WATCHDOG) {
3392 struct tcb *tcb = tx_ring->send_head;
3393
3394 if (tcb)
3395 if (++tcb->stale > 1)
3396 status |= ET_INTR_TXDMA_ISR;
3397
3398 if (rx_ring->unfinished_receives)
3399 status |= ET_INTR_RXDMA_XFR_DONE;
3400 else if (tcb == NULL)
3401 writel(0, &adapter->regs->global.watchdog_timer);
3402
3403 status &= ~ET_INTR_WATCHDOG;
3404 }
3405
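/* Defer RX/TX processing to NAPI; interrupts stay disabled until the
 * poll routine has finished.
 */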
3406 if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
3407 enable_interrupts = false;
3408 napi_schedule(&adapter->napi);
3409 }
3410
3411 status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
3412
3413 if (!status)
3414 goto out;
3415
3416 if (status & ET_INTR_TXDMA_ERR) {
3417 /* Read and report the cause of the TXDMA error */
3418 u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
3419
3420 dev_warn(&adapter->pdev->dev,
3421 "TXDMA_ERR interrupt, error = %d\n",
3422 txdma_err);
3423 }
3424
3425 if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
3426 /* One of the free-buffer rings is running low on entries; with TX flow
3427  * control enabled the hardware is asked to throttle the link partner
3428  * until the rings are replenished.
3429  */
3441 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
3442 /* Tell the TXMAC to assert back pressure (send pause), unless the
3443  * device is in PHY coma.
3444  */
3445 if (!et1310_in_phy_coma(adapter))
3446 writel(3, &iomem->txmac.bp_ctrl);
3447 }
3448 }
3449
3450
3451 if (status & ET_INTR_RXDMA_STAT_LOW) {
3452 /* The packet status ring is low on entries. Nothing is done here;
3453  * entries are returned to the hardware as packets are processed in
3454  * the receive path.
3455  */
3459 }
3460
3461 if (status & ET_INTR_RXDMA_ERR) {
3462 /* A request issued by the RXDMA engine timed out or completed with an
3463  * error status; report the error below.
3464  */
3478 dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n",
3479 readl(&iomem->txmac.tx_test));
3480 }
3481
3482
3483 if (status & ET_INTR_WOL) {
3484 /* Wake-on-LAN event. This is not expected during normal operation, so
3485  * log it as an error.
3486  */
3488 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
3489 }
3490
3491 if (status & ET_INTR_TXMAC) {
3492 u32 err = readl(&iomem->txmac.err);
3493
3494 /* The TXMAC has flagged an error; the cause read above is only
3495  * reported here, no recovery is attempted.
3496  */
3502 dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n",
3503 err);
3504
3505
3506
3507
3508 }
3509
3510 if (status & ET_INTR_RXMAC) {
3511 /* The RXMAC has flagged an error; log the cause along with the enable
3512  * and diagnostic registers.
3513  */
3515 dev_warn(&adapter->pdev->dev,
3516 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
3517 readl(&iomem->rxmac.err_reg));
3518
3519 dev_warn(&adapter->pdev->dev,
3520 "Enable 0x%08x, Diag 0x%08x\n",
3521 readl(&iomem->rxmac.ctrl),
3522 readl(&iomem->rxmac.rxq_diag));
3523
3524
3525
3526
3527 }
3528
3529 if (status & ET_INTR_MAC_STAT) {
3530 /* One of the MAC_STAT counters has wrapped; fold the hardware counters
3531  * into the driver's running statistics.
3532  */
3534 et1310_handle_macstat_interrupt(adapter);
3535 }
3536
3537 if (status & ET_INTR_SLV_TIMEOUT) {
3538 /* A slave (register access) timeout occurred; no recovery is attempted
3539  * here.
3540  */
3544 }
3545
3546out:
3547 if (enable_interrupts)
3548 et131x_enable_interrupts(adapter);
3549
3550 return IRQ_RETVAL(handled);
3551}
3552
3553static int et131x_poll(struct napi_struct *napi, int budget)
3554{
3555 struct et131x_adapter *adapter =
3556 container_of(napi, struct et131x_adapter, napi);
3557 int work_done = et131x_handle_recv_pkts(adapter, budget);
3558
3559 et131x_handle_send_pkts(adapter);
3560
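/* All pending work done; exit polling mode and re-enable interrupts */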
3561 if (work_done < budget) {
3562 napi_complete_done(&adapter->napi, work_done);
3563 et131x_enable_interrupts(adapter);
3564 }
3565
3566 return work_done;
3567}
3568
3569
3570static struct net_device_stats *et131x_stats(struct net_device *netdev)
3571{
3572 struct et131x_adapter *adapter = netdev_priv(netdev);
3573 struct net_device_stats *stats = &adapter->netdev->stats;
3574 struct ce_stats *devstat = &adapter->stats;
3575
3576 stats->rx_errors = devstat->rx_length_errs +
3577 devstat->rx_align_errs +
3578 devstat->rx_crc_errs +
3579 devstat->rx_code_violations +
3580 devstat->rx_other_errs;
3581 stats->tx_errors = devstat->tx_max_pkt_errs;
3582 stats->multicast = devstat->multicast_pkts_rcvd;
3583 stats->collisions = devstat->tx_collisions;
3584
3585 stats->rx_length_errors = devstat->rx_length_errs;
3586 stats->rx_over_errors = devstat->rx_overflows;
3587 stats->rx_crc_errors = devstat->rx_crc_errs;
3588 stats->rx_dropped = devstat->rcvd_pkts_dropped;
3589
3590 /* The remaining net_device_stats fields (FIFO, frame and similar error
3591  * counts) have no matching hardware counters and are left untouched.
3592  */
3600 return stats;
3601}
3602
3603static int et131x_open(struct net_device *netdev)
3604{
3605 struct et131x_adapter *adapter = netdev_priv(netdev);
3606 struct pci_dev *pdev = adapter->pdev;
3607 unsigned int irq = pdev->irq;
3608 int result;
3609
3610 /* Start the periodic error/housekeeping timer */
3611 timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
3612 adapter->error_timer.expires = jiffies +
3613 msecs_to_jiffies(TX_ERROR_PERIOD);
3614 add_timer(&adapter->error_timer);
3615
3616 result = request_irq(irq, et131x_isr,
3617 IRQF_SHARED, netdev->name, netdev);
3618 if (result) {
3619 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
3620 return result;
3621 }
3622
3623 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
3624
3625 napi_enable(&adapter->napi);
3626
3627 et131x_up(netdev);
3628
3629 return result;
3630}
3631
3632static int et131x_close(struct net_device *netdev)
3633{
3634 struct et131x_adapter *adapter = netdev_priv(netdev);
3635
3636 et131x_down(netdev);
3637 napi_disable(&adapter->napi);
3638
3639 adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
3640 free_irq(adapter->pdev->irq, netdev);
3641
3642 /* Stop the error timer */
3643 return del_timer_sync(&adapter->error_timer);
3644}
3645
3646
3647static int et131x_set_packet_filter(struct et131x_adapter *adapter)
3648{
3649 int filter = adapter->packet_filter;
3650 u32 ctrl;
3651 u32 pf_ctrl;
3652
3653 ctrl = readl(&adapter->regs->rxmac.ctrl);
3654 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
3655
3656
3657 ctrl |= 0x04;
3658
3659 /* In promiscuous mode, or with no filter configured, accept everything
3660  * by clearing all packet filter control bits.
3661  */
3662 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
3663 pf_ctrl &= ~7;
3664 else {
3665 /* Multicast: either accept all multicast frames or load the multicast
3666  * list into the device's filter.
3667  */
3669 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
3670 pf_ctrl &= ~2;
3671 else {
3672 et1310_setup_device_for_multicast(adapter);
3673 pf_ctrl |= 2;
3674 ctrl &= ~0x04;
3675 }
3676
3677
3678 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
3679 et1310_setup_device_for_unicast(adapter);
3680 pf_ctrl |= 4;
3681 ctrl &= ~0x04;
3682 }
3683
3684
3685 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
3686 pf_ctrl |= 1;
3687 ctrl &= ~0x04;
3688 } else {
3689 pf_ctrl &= ~1;
3690 }
3691
3692 /* Write the updated packet filter control and RXMAC control values
3693  * back to the hardware.
3694  */
3696 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
3697 writel(ctrl, &adapter->regs->rxmac.ctrl);
3698 }
3699 return 0;
3700}
3701
3702static void et131x_multicast(struct net_device *netdev)
3703{
3704 struct et131x_adapter *adapter = netdev_priv(netdev);
3705 int packet_filter;
3706 struct netdev_hw_addr *ha;
3707 int i;
3708
3709 /* Save the current filter flags so the hardware is only reprogrammed
3710  * if something actually changes.
3711  */
3713 packet_filter = adapter->packet_filter;
3714
3715
3716
3717
3718
3719
3720 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3721
3722 /* Derive the filter flags from the net_device flags and multicast list */
3725 if (netdev->flags & IFF_PROMISC)
3726 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
3727 else
3728 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
3729
3730 if ((netdev->flags & IFF_ALLMULTI) ||
3731 (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
3732 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
3733
3734 if (netdev_mc_count(netdev) < 1) {
3735 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
3736 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3737 } else {
3738 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
3739 }
3740
3741 /* Copy up to NIC_MAX_MCAST_LIST multicast addresses into the adapter */
3742 i = 0;
3743 netdev_for_each_mc_addr(ha, netdev) {
3744 if (i == NIC_MAX_MCAST_LIST)
3745 break;
3746 ether_addr_copy(adapter->multicast_list[i++], ha->addr);
3747 }
3748 adapter->multicast_addr_count = i;
3749
3750 /* Only reprogram the hardware if the filter flags actually changed */
3756 if (packet_filter != adapter->packet_filter)
3757 et131x_set_packet_filter(adapter);
3758}
3759
3760static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
3761{
3762 struct et131x_adapter *adapter = netdev_priv(netdev);
3763 struct tx_ring *tx_ring = &adapter->tx_ring;
3764
3765 /* Stop the queue when the TCB pool is almost exhausted */
3766 if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
3767 netif_stop_queue(netdev);
3768
3769
3770 netif_trans_update(netdev);
3771
3772
3773 if (tx_ring->used >= NUM_TCB)
3774 goto drop_err;
3775
3776 if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
3777 !netif_carrier_ok(netdev))
3778 goto drop_err;
3779
3780 if (send_packet(skb, adapter))
3781 goto drop_err;
3782
3783 return NETDEV_TX_OK;
3784
3785drop_err:
3786 dev_kfree_skb_any(skb);
3787 adapter->netdev->stats.tx_dropped++;
3788 return NETDEV_TX_OK;
3789}
3790
3791 /* et131x_tx_timeout - netdev watchdog timeout handler
3792  *
3793  * Called when a transmit has not completed within the watchdog period;
3794  * if a send really is stuck, the TX and RX paths are reset.
3795  */
3797static void et131x_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3798{
3799 struct et131x_adapter *adapter = netdev_priv(netdev);
3800 struct tx_ring *tx_ring = &adapter->tx_ring;
3801 struct tcb *tcb;
3802 unsigned long flags;
3803
3804
3805 if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
3806 return;
3807
3808 /* A non-recoverable error has been flagged; there is nothing useful to
3809  * do here.
3810  */
3811 if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
3812 return;
3813
3814
3815 if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
3816 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
3817 return;
3818 }
3819
3820 /* Is a send stuck? Look at the oldest TCB on the send list */
3821 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3822 tcb = tx_ring->send_head;
3823 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3824
3825 if (tcb) {
3826 tcb->count++;
3827
3828 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
3829 dev_warn(&adapter->pdev->dev,
3830 "Send stuck - reset. tcb->WrIndex %x\n",
3831 tcb->index);
3832
3833 adapter->netdev->stats.tx_errors++;
3834
3835
3836 et131x_disable_txrx(netdev);
3837 et131x_enable_txrx(netdev);
3838 }
3839 }
3840}
3841
3842static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
3843{
3844 int result = 0;
3845 struct et131x_adapter *adapter = netdev_priv(netdev);
3846
3847 et131x_disable_txrx(netdev);
3848
3849 netdev->mtu = new_mtu;
3850
3851 et131x_adapter_memory_free(adapter);
3852
3853 /* Update the jumbo packet size: the new MTU plus the Ethernet header */
3854 adapter->registry_jumbo_packet = new_mtu + 14;
3855 et131x_soft_reset(adapter);
3856
3857 result = et131x_adapter_memory_alloc(adapter);
3858 if (result != 0) {
3859 dev_warn(&adapter->pdev->dev,
3860 "Change MTU failed; couldn't re-alloc DMA memory\n");
3861 return result;
3862 }
3863
3864 et131x_init_send(adapter);
3865 et131x_hwaddr_init(adapter);
3866 ether_addr_copy(netdev->dev_addr, adapter->addr);
3867
3868
3869 et131x_adapter_setup(adapter);
3870 et131x_enable_txrx(netdev);
3871
3872 return result;
3873}
3874
3875static const struct net_device_ops et131x_netdev_ops = {
3876 .ndo_open = et131x_open,
3877 .ndo_stop = et131x_close,
3878 .ndo_start_xmit = et131x_tx,
3879 .ndo_set_rx_mode = et131x_multicast,
3880 .ndo_tx_timeout = et131x_tx_timeout,
3881 .ndo_change_mtu = et131x_change_mtu,
3882 .ndo_set_mac_address = eth_mac_addr,
3883 .ndo_validate_addr = eth_validate_addr,
3884 .ndo_get_stats = et131x_stats,
3885 .ndo_eth_ioctl = phy_do_ioctl,
3886};
3887
3888static int et131x_pci_setup(struct pci_dev *pdev,
3889 const struct pci_device_id *ent)
3890{
3891 struct net_device *netdev;
3892 struct et131x_adapter *adapter;
3893 int rc;
3894
3895 rc = pci_enable_device(pdev);
3896 if (rc < 0) {
3897 dev_err(&pdev->dev, "pci_enable_device() failed\n");
3898 goto out;
3899 }
3900
3901 /* The device registers live in BAR 0, which must be a memory resource */
3902 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3903 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
3904 rc = -ENODEV;
3905 goto err_disable;
3906 }
3907
3908 rc = pci_request_regions(pdev, DRIVER_NAME);
3909 if (rc < 0) {
3910 dev_err(&pdev->dev, "Can't get PCI resources\n");
3911 goto err_disable;
3912 }
3913
3914 pci_set_master(pdev);
3915
3916 /* Prefer 64-bit DMA addressing, falling back to 32-bit */
3917 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
3918 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
3919 dev_err(&pdev->dev, "No usable DMA addressing method\n");
3920 rc = -EIO;
3921 goto err_release_res;
3922 }
3923
3924 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
3925 if (!netdev) {
3926 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
3927 rc = -ENOMEM;
3928 goto err_release_res;
3929 }
3930
3931 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
3932 netdev->netdev_ops = &et131x_netdev_ops;
3933 netdev->min_mtu = ET131X_MIN_MTU;
3934 netdev->max_mtu = ET131X_MAX_MTU;
3935
3936 SET_NETDEV_DEV(netdev, &pdev->dev);
3937 netdev->ethtool_ops = &et131x_ethtool_ops;
3938
3939 adapter = et131x_adapter_init(netdev, pdev);
3940
3941 rc = et131x_pci_init(adapter, pdev);
3942 if (rc < 0)
3943 goto err_free_dev;
3944
3945 /* Map the device registers into kernel virtual memory */
3946 adapter->regs = pci_ioremap_bar(pdev, 0);
3947 if (!adapter->regs) {
3948 dev_err(&pdev->dev, "Cannot map device registers\n");
3949 rc = -ENOMEM;
3950 goto err_free_dev;
3951 }
3952
3953 /* Initialise the power-management control/status register before resetting the device */
3954 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
3955
3956 et131x_soft_reset(adapter);
3957 et131x_disable_interrupts(adapter);
3958
3959 rc = et131x_adapter_memory_alloc(adapter);
3960 if (rc < 0) {
3961 dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
3962 goto err_iounmap;
3963 }
3964
3965 et131x_init_send(adapter);
3966
3967 netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
3968
3969 ether_addr_copy(netdev->dev_addr, adapter->addr);
3970
3971 rc = -ENOMEM;
3972
3973 adapter->mii_bus = mdiobus_alloc();
3974 if (!adapter->mii_bus) {
3975 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
3976 goto err_mem_free;
3977 }
3978
3979 adapter->mii_bus->name = "et131x_eth_mii";
3980 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
3981 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
3982 adapter->mii_bus->priv = netdev;
3983 adapter->mii_bus->read = et131x_mdio_read;
3984 adapter->mii_bus->write = et131x_mdio_write;
3985
3986 rc = mdiobus_register(adapter->mii_bus);
3987 if (rc < 0) {
3988 dev_err(&pdev->dev, "failed to register MII bus\n");
3989 goto err_mdio_free;
3990 }
3991
3992 rc = et131x_mii_probe(netdev);
3993 if (rc < 0) {
3994 dev_err(&pdev->dev, "failed to probe MII bus\n");
3995 goto err_mdio_unregister;
3996 }
3997
3998 et131x_adapter_setup(adapter);
3999
4000
4001 adapter->boot_coma = 0;
4002 et1310_disable_phy_coma(adapter);
4003
4004 /* Interrupts stay disabled here; the IRQ handler is requested and
4005  * interrupts are enabled when the interface is opened. Register the
4006  * net_device with the network layer.
4007  */
4011 rc = register_netdev(netdev);
4012 if (rc < 0) {
4013 dev_err(&pdev->dev, "register_netdev() failed\n");
4014 goto err_phy_disconnect;
4015 }
4016
4017 /* Stash the net_device in the PCI driver data so it can be retrieved in
4018  * remove() and the PM hooks.
4019  */
4021 pci_set_drvdata(pdev, netdev);
4022out:
4023 return rc;
4024
4025err_phy_disconnect:
4026 phy_disconnect(netdev->phydev);
4027err_mdio_unregister:
4028 mdiobus_unregister(adapter->mii_bus);
4029err_mdio_free:
4030 mdiobus_free(adapter->mii_bus);
4031err_mem_free:
4032 et131x_adapter_memory_free(adapter);
4033err_iounmap:
4034 iounmap(adapter->regs);
4035err_free_dev:
4036 pci_dev_put(pdev);
4037 free_netdev(netdev);
4038err_release_res:
4039 pci_release_regions(pdev);
4040err_disable:
4041 pci_disable_device(pdev);
4042 goto out;
4043}
4044
4045static const struct pci_device_id et131x_pci_table[] = {
4046 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
4047 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
4048 { 0,}
4049};
4050MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4051
4052static struct pci_driver et131x_driver = {
4053 .name = DRIVER_NAME,
4054 .id_table = et131x_pci_table,
4055 .probe = et131x_pci_setup,
4056 .remove = et131x_pci_remove,
4057 .driver.pm = &et131x_pm_ops,
4058};
4059
4060module_pci_driver(et131x_driver);
4061