#include "e1000.h"

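/**
 *  e1000e_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines the PCIe link width from the PCIe capability Link Status
 *  register, stores it in hw->bus, and then determines the LAN function
 *  of this port.
 **/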
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_bus_info *bus = &hw->bus;
	struct e1000_adapter *adapter = hw->adapter;
	u16 pcie_link_status, cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset) {
		bus->width = e1000_bus_width_unknown;
	} else {
		pci_read_config_word(adapter->pdev,
				     cap_offset + PCIE_LINK_STATUS,
				     &pcie_link_status);
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCIE_LINK_WIDTH_MASK) >>
						    PCIE_LINK_WIDTH_SHIFT);
	}

	mac->ops.set_lan_id(hw);

	return 0;
}

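/**
 *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 *  @hw: pointer to the HW structure
 *
 *  Determines the LAN function id by reading the device status register
 *  and stores it in hw->bus.func.
 **/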
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	u32 reg;

	/* The status register reports the PCI function number of the port */
	reg = er32(STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
}

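/**
 *  e1000_set_lan_id_single_port - Set LAN id for a single port device
 *  @hw: pointer to the HW structure
 *
 *  A single port device is always function 0.
 **/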
void e1000_set_lan_id_single_port(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;

	bus->func = 0;
}

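/**
 *  e1000_clear_vfta_generic - Clear VLAN filter table
 *  @hw: pointer to the HW structure
 *
 *  Clears the register array which contains the VLAN filter table by
 *  setting all of the values to 0.
 **/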
void e1000_clear_vfta_generic(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
		e1e_flush();
	}
}

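/**
 *  e1000_write_vfta_generic - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table.
 **/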
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
{
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
	e1e_flush();
}

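/**
 *  e1000e_init_rx_addrs - Initialize receive addresses
 *  @hw: pointer to the HW structure
 *  @rar_count: number of receive address registers
 *
 *  Sets RAR[0] to the device's MAC address and zeroes the remaining
 *  receive address registers.
 **/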
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = { 0 };

	/* Setup the receive address */
	e_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_count - 1) receive addresses */
	e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}

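/**
 *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the NVM for an alternate MAC address.  If a valid (non-multicast)
 *  alternate address is found, it is programmed into RAR[0] so that it
 *  overrides the factory MAC address.
 **/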
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
	if (ret_val)
		return ret_val;

	/* Alternate MAC address is handled only for parts that support it;
	 * skip it on 82573.
	 */
	if (hw->mac.type == e1000_82573)
		return 0;

	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				 &nvm_alt_mac_addr_offset);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	/* no alternate MAC address programmed in the NVM */
	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		return 0;

	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
		if (ret_val) {
			e_dbg("NVM Read Error\n");
			return ret_val;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if the multicast bit is set, the alternate address is not used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		return 0;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR.  Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

	return 0;
}

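/**
 *  e1000e_rar_get_count_generic - Get the number of RAR entries
 *  @hw: pointer to the HW structure
 *
 *  Reads the number of available receive address registers.
 **/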
u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
{
	return hw->mac.rar_entry_count;
}

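/**
 *  e1000e_rar_set_generic - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/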
int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Flush between the RAL and RAH writes; some bridges may otherwise
	 * combine the consecutive 32-bit writes into one burst write, which
	 * can misbehave on some parts.
	 */
	ew32(RAL(index), rar_low);
	e1e_flush();
	ew32(RAH(index), rar_high);
	e1e_flush();

	return 0;
}

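/**
 *  e1000_hash_mc_addr - Generate a multicast hash value
 *  @hw: pointer to the HW structure
 *  @mc_addr: pointer to a multicast address
 *
 *  Generates a multicast address hash value which is used to determine
 *  the multicast filter table array address and new table value.
 **/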
static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.  There is a total
	 * of 8 bits of shifting between mc_addr[4] and mc_addr[5]; the
	 * higher the mc_filter_type, the more bits of mc_addr[5] are used.
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				   (((u16)mc_addr[5]) << bit_shift)));

	return hash_value;
}

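/**
 *  e1000e_update_mc_addr_list_generic - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates the entire Multicast Table Array.  The caller must pass a packed
 *  mc_addr_list of multicast addresses.
 **/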
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
					u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32)i < mc_addr_count; i++) {
		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
	e1e_flush();
}

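/**
 *  e1000e_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers
 *  (the counters are clear-on-read).
 **/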
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	er32(CRCERRS);
	er32(SYMERRS);
	er32(MPC);
	er32(SCC);
	er32(ECOL);
	er32(MCC);
	er32(LATECOL);
	er32(COLC);
	er32(DC);
	er32(SEC);
	er32(RLEC);
	er32(XONRXC);
	er32(XONTXC);
	er32(XOFFRXC);
	er32(XOFFTXC);
	er32(FCRUC);
	er32(GPRC);
	er32(BPRC);
	er32(MPRC);
	er32(GPTC);
	er32(GORCL);
	er32(GORCH);
	er32(GOTCL);
	er32(GOTCH);
	er32(RNBC);
	er32(RUC);
	er32(RFC);
	er32(ROC);
	er32(RJC);
	er32(TORL);
	er32(TORH);
	er32(TOTL);
	er32(TOTH);
	er32(TPR);
	er32(TPT);
	er32(MPTC);
	er32(BPTC);
}

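/**
 *  e1000e_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/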
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link)
		return 0;	/* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		e_dbg("Error configuring flow control\n");

	return ret_val;
}

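/**
 *  e1000e_check_for_fiber_link - Check for link (Fiber)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link up on the hardware.  If link is not up and we have
 *  a signal, then we need to force link up.
 **/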
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), the cable is plugged in (we have signal,
	 * SWDPIN1 is set), and our link partner is not trying to
	 * auto-negotiate with us (we are not receiving /C/ ordered sets),
	 * then we need to force link up.  We also need to give
	 * auto-negotiation time to complete, in case the cable was just
	 * plugged in.  The autoneg_failed flag does this.
	 */
	if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
	    !(rxcw & E1000_RXCW_C)) {
		if (!mac->autoneg_failed) {
			mac->autoneg_failed = true;
			return 0;
		}
		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			e_dbg("Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = true;
	}

	return 0;
}

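/**
 *  e1000e_check_for_serdes_link - Check for link (Serdes)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link up on the hardware.  If link is not up and we have
 *  a signal, then we need to force link up.
 **/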
s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), and our link partner is not trying to
	 * auto-negotiate with us (we are not receiving /C/ ordered sets),
	 * then we need to force link up.  We also need to give
	 * auto-negotiation time to complete.
	 */
	if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
		if (!mac->autoneg_failed) {
			mac->autoneg_failed = true;
			return 0;
		}
		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			e_dbg("Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = true;
	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
		/* If we are forcing link to a non-auto-negotiating partner,
		 * check link status based on MAC synchronization.
		 * The SYNCH and IV bits are sticky, so re-read RXCW.
		 */
		usleep_range(10, 20);
		rxcw = er32(RXCW);
		if (rxcw & E1000_RXCW_SYNCH) {
			if (!(rxcw & E1000_RXCW_IV)) {
				mac->serdes_has_link = true;
				e_dbg("SERDES: Link up - forced.\n");
			}
		} else {
			mac->serdes_has_link = false;
			e_dbg("SERDES: Link down - force failed.\n");
		}
	}

	if (E1000_TXCW_ANE & er32(TXCW)) {
		status = er32(STATUS);
		if (status & E1000_STATUS_LU) {
			/* SYNCH and IV bits are sticky, so re-read RXCW. */
			usleep_range(10, 20);
			rxcw = er32(RXCW);
			if (rxcw & E1000_RXCW_SYNCH) {
				if (!(rxcw & E1000_RXCW_IV)) {
					mac->serdes_has_link = true;
					e_dbg("SERDES: Link up - autoneg completed successfully.\n");
				} else {
					mac->serdes_has_link = false;
					e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n");
				}
			} else {
				mac->serdes_has_link = false;
				e_dbg("SERDES: Link down - no sync.\n");
			}
		} else {
			mac->serdes_has_link = false;
			e_dbg("SERDES: Link down - autoneg failed\n");
		}
	}

	return 0;
}

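/**
 *  e1000_set_default_fc_generic - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/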
static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 nvm_data;

	/* Read and store the Init Control 2 word of the EEPROM.  This word
	 * contains bits that determine the hardware's default PAUSE (flow
	 * control) mode.  If there is no software over-ride of the flow
	 * control setting, the requested mode is initialized from this
	 * EEPROM value.
	 */
	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);

	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

	return 0;
}

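/**
 *  e1000e_setup_link_generic - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function and initializes the flow control address, type, and timer
 *  registers.
 **/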
s32 e1000e_setup_link_generic(struct e1000_hw *hw)
{
	s32 ret_val;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
		return 0;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = e1000_set_default_fc_generic(hw);
		if (ret_val)
			return ret_val;
	}

	/* Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	e_dbg("Initializing the Flow Control address, type and timer regs\n");
	ew32(FCT, FLOW_CONTROL_TYPE);
	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);

	ew32(FCTTV, hw->fc.pause_time);

	return e1000e_set_fc_watermarks(hw);
}

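/**
 *  e1000_commit_fc_settings_generic - Configure flow control
 *  @hw: pointer to the HW structure
 *
 *  Write the flow control settings to the Transmit Config Word Register
 *  (TXCW) based on the requested mode.
 **/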
static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 txcw;

	/* Check for a software override of the flow control settings, and
	 * setup the device accordingly.  With auto-negotiation enabled,
	 * software sets the PAUSE and ASM_DIR bits in the Transmit Config
	 * Word Register (TXCW) that are advertised to the link partner.
	 *
	 * The possible values of hw->fc.current_mode are:
	 *      e1000_fc_none:     flow control completely disabled
	 *      e1000_fc_rx_pause: we can receive pause frames but do not
	 *                         send them
	 *      e1000_fc_tx_pause: we can send pause frames but do not
	 *                         receive them
	 *      e1000_fc_full:     both Rx and Tx flow control (symmetric)
	 */
	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		/* Flow control completely disabled by a software over-ride. */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
		break;
	case e1000_fc_rx_pause:
		/* Rx Flow control is enabled and Tx Flow control is disabled
		 * by a software over-ride.  Since there really isn't a way to
		 * advertise that we are capable of Rx Pause ONLY, we will
		 * advertise that we support both symmetric and asymmetric Rx
		 * PAUSE.  Later, we will disable the adapter's ability to send
		 * PAUSE frames.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
		break;
	case e1000_fc_tx_pause:
		/* Tx Flow control is enabled, and Rx Flow control is disabled,
		 * by a software over-ride.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
		break;
	case e1000_fc_full:
		/* Flow control (both Rx and Tx) is enabled by a software
		 * over-ride.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
		break;
	default:
		e_dbg("Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(TXCW, txcw);
	mac->txcw = txcw;

	return 0;
}

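/**
 *  e1000_poll_fiber_serdes_link_generic - Poll for link up
 *  @hw: pointer to the HW structure
 *
 *  Polls for link up by reading the status register.  If link fails to come
 *  up with auto-negotiation, the link is forced if a signal is detected.
 **/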
static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 i, status;
	s32 ret_val;

	/* If we have a signal (the cable is plugged in, or assumed true for
	 * serdes media) then poll for a "Link-Up" indication in the Device
	 * Status Register.  Poll up to FIBER_LINK_UP_LIMIT times, sleeping
	 * 10-20 ms between reads.
	 */
	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
		usleep_range(10000, 20000);
		status = er32(STATUS);
		if (status & E1000_STATUS_LU)
			break;
	}
	if (i == FIBER_LINK_UP_LIMIT) {
		e_dbg("Never got a valid link from auto-neg!!!\n");
		mac->autoneg_failed = true;
		/* AutoNeg failed to achieve a link, so we'll call
		 * mac->check_for_link.  This routine will force the
		 * link up if we detect a signal.  This will allow us to
		 * communicate with non-autonegotiating link partners.
		 */
		ret_val = mac->ops.check_for_link(hw);
		if (ret_val) {
			e_dbg("Error while checking for link\n");
			return ret_val;
		}
		mac->autoneg_failed = false;
	} else {
		mac->autoneg_failed = false;
		e_dbg("Valid Link Found\n");
	}

	return 0;
}

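/**
 *  e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
 *  @hw: pointer to the HW structure
 *
 *  Configures collision distance and flow control for fiber and serdes
 *  links.  Upon successful setup, poll for link.
 **/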
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	ctrl = er32(CTRL);

	/* Take the link out of reset */
	ctrl &= ~E1000_CTRL_LRST;

	hw->mac.ops.config_collision_dist(hw);

	ret_val = e1000_commit_fc_settings_generic(hw);
	if (ret_val)
		return ret_val;

	/* Since auto-negotiation is enabled, take the link out of reset (the
	 * link will be in reset, because we previously reset the chip).  This
	 * will restart auto-negotiation.  If auto-negotiation is successful
	 * then the link-up status bit will be set and the flow control enable
	 * bits (RFCE and TFCE) will be set according to their negotiated
	 * value.
	 */
	e_dbg("Auto-negotiation enabled\n");

	ew32(CTRL, ctrl);
	e1e_flush();
	usleep_range(1000, 2000);

	/* For fiber adapters, the SW definable pin 1 is set when the optics
	 * detect a signal.  If we have a signal (or serdes media), then poll
	 * for a "Link-Up" indication.
	 */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
	} else {
		e_dbg("No signal detected\n");
	}

	return ret_val;
}

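/**
 *  e1000e_config_collision_dist_generic - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Configures the collision distance to the default value and is used
 *  during link setup.
 **/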
void e1000e_config_collision_dist_generic(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = er32(TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	ew32(TCTL, tctl);
	e1e_flush();
}

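/**
 *  e1000e_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, then set XON frame
 *  transmission as well.
 **/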
s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
{
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	ew32(FCRTL, fcrtl);
	ew32(FCRTH, fcrth);

	return 0;
}

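/**
 *  e1000e_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in
 *  the device control register to reflect the adapter settings.
 **/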
s32 e1000e_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;

	ctrl = er32(CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit an
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disables flow control
	 * according to the "hw->fc.current_mode" parameter:
	 *      e1000_fc_none:     flow control completely disabled
	 *      e1000_fc_rx_pause: Rx flow control enabled only
	 *      e1000_fc_tx_pause: Tx flow control enabled only
	 *      e1000_fc_full:     both Rx and Tx flow control (symmetric)
	 *  No other values should be possible at this point.
	 */
	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		e_dbg("Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(CTRL, ctrl);

	return 0;
}

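/**
 *  e1000e_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/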
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = e1000e_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = e1000e_force_mac_fc(hw);
	}

	if (ret_val) {
		e_dbg("Error forcing flow control settings\n");
		return ret_val;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
		if (ret_val)
			return ret_val;

		if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) {
			e_dbg("Copper PHY and Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to read
		 * both the Auto Negotiation Advertisement Register and the
		 * link partner's Base Page Ability Register to determine how
		 * flow control was negotiated.
		 */
		ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg);
		if (ret_val)
			return ret_val;

		/* The PAUSE and ASM_DIR bits advertised locally and by the
		 * link partner determine the resolved flow control per the
		 * IEEE 802.3 PAUSE resolution rules:
		 *  - both sides advertise PAUSE               -> symmetric
		 *  - local ASM_DIR only, partner PAUSE+ASM_DIR -> Tx pause
		 *  - local PAUSE+ASM_DIR, partner ASM_DIR only -> Rx pause
		 *  - anything else                            -> none
		 */
		if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
		    (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				e_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				e_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY (local PAUSE=0, ASM_DIR=1;
		 * partner PAUSE=1, ASM_DIR=1).
		 */
		else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
			 (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			e_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY (local PAUSE=1,
		 * ASM_DIR=1; partner PAUSE=0, ASM_DIR=1).
		 */
		else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
			 (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
			 !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			e_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			e_dbg("Flow Control = NONE.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			e_dbg("Error getting link speed and duplex\n");
			return ret_val;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			e_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
	    mac->autoneg) {
		/* Read the PCS link status register and check to see if
		 * AutoNeg has completed.
		 */
		pcs_status_reg = er32(PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			e_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to read
		 * both the PCS Advertisement and the PCS link partner ability
		 * registers to determine how flow control was negotiated.
		 */
		pcs_adv_reg = er32(PCS_ANADV);
		pcs_lp_ability_reg = er32(PCS_LPAB);

		/* The same PAUSE/ASM_DIR resolution rules described above for
		 * the copper case apply here, using the PCS advertisement and
		 * link partner ability registers.
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				e_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				e_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY (local PAUSE=0, ASM_DIR=1;
		 * partner PAUSE=1, ASM_DIR=1).
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			e_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY (local PAUSE=1,
		 * ASM_DIR=1; partner PAUSE=0, ASM_DIR=1).
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			e_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			e_dbg("Flow Control = NONE.\n");
		}

		/* Force the MAC and PCS to use the resolved flow control
		 * settings.
		 */
		pcs_ctrl_reg = er32(PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		ew32(PCS_LCTL, pcs_ctrl_reg);

		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			e_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	return 0;
}

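/**
 *  e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Reads the status register for the current speed/duplex and stores the
 *  current speed and duplex for copper connections.
 **/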
s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				       u16 *duplex)
{
	u32 status;

	status = er32(STATUS);
	if (status & E1000_STATUS_SPEED_1000)
		*speed = SPEED_1000;
	else if (status & E1000_STATUS_SPEED_100)
		*speed = SPEED_100;
	else
		*speed = SPEED_10;

	if (status & E1000_STATUS_FD)
		*duplex = FULL_DUPLEX;
	else
		*duplex = HALF_DUPLEX;

	e_dbg("%u Mbps, %s Duplex\n",
	      *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
	      *duplex == FULL_DUPLEX ? "Full" : "Half");

	return 0;
}

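/**
 *  e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Fiber/serdes links always run at 1000 Mbps full duplex, so the values
 *  are set accordingly.
 **/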
s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused
					     *hw, u16 *speed, u16 *duplex)
{
	*speed = SPEED_1000;
	*duplex = FULL_DUPLEX;

	return 0;
}

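/**
 *  e1000e_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM.
 **/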
s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usleep_range(50, 100);
		i++;
	}

	if (i == timeout) {
		e_dbg("Driver can't access device - SMBI bit is set.\n");
		return -E1000_ERR_NVM;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		usleep_range(50, 100);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000e_put_hw_semaphore(hw);
		e_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

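/**
 *  e1000e_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Release the HW semaphore used to access the PHY or NVM.
 **/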
void e1000e_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = er32(SWSM);
	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
	ew32(SWSM, swsm);
}

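/**
 *  e1000e_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Check EEPROM for Auto Read done bit.
 **/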
s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (er32(EECD) & E1000_EECD_AUTO_RD)
			break;
		usleep_range(1000, 2000);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		e_dbg("Auto read by HW from NVM has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return 0;
}

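/**
 *  e1000e_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/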
s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT;

	return 0;
}

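/**
 *  e1000e_id_led_init_generic - Initialize ID LED settings
 *  @hw: pointer to the HW structure
 *
 *  Stores the default LEDCTL value and precomputes the LEDCTL values used
 *  for the "mode1" and "mode2" LED states based on the ID LED settings
 *  read from the NVM.
 **/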
s32 e1000e_id_led_init_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return 0;
}

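/**
 *  e1000e_setup_led_generic - Configures SW controllable LED
 *  @hw: pointer to the HW structure
 *
 *  Prepares the SW controllable LED for use.  For fiber media the current
 *  LEDCTL value is saved so it can be restored later and LED0 is turned
 *  off; for copper media the precomputed mode1 value is written.
 **/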
s32 e1000e_setup_led_generic(struct e1000_hw *hw)
{
	u32 ledctl;

	if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
		return -E1000_ERR_CONFIG;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		ledctl = er32(LEDCTL);
		hw->mac.ledctl_default = ledctl;
		/* Turn off LED0 */
		ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
			    E1000_LEDCTL_LED0_MODE_MASK);
		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
			   E1000_LEDCTL_LED0_MODE_SHIFT);
		ew32(LEDCTL, ledctl);
	} else if (hw->phy.media_type == e1000_media_type_copper) {
		ew32(LEDCTL, hw->mac.ledctl_mode1);
	}

	return 0;
}

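/**
 *  e1000e_cleanup_led_generic - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED config to default
 *  operation.
 **/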
s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
{
	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}

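/**
 *  e1000e_blink_led_generic - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/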
s32 e1000e_blink_led_generic(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		    (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that is "on" (or "off",
		 * if the LED is inverted) in ledctl_mode2.  The hardware
		 * blink logic only works when the mode is set to "on", so
		 * the mode is forced to "on" for those LEDs as well.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	ew32(LEDCTL, ledctl_blink);

	return 0;
}

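/**
 *  e1000e_led_on_generic - Turn LED on
 *  @hw: pointer to the HW structure
 *
 *  Turn LED on.
 **/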
s32 e1000e_led_on_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode2);
		break;
	default:
		break;
	}

	return 0;
}

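/**
 *  e1000e_led_off_generic - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turn LED off.
 **/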
s32 e1000e_led_off_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}

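/**
 *  e1000e_set_pcie_no_snoop - Set PCI-express capabilities
 *  @hw: pointer to the HW structure
 *  @no_snoop: bitmap of snoop events
 *
 *  Set the PCI-express register to snoop for events enabled in 'no_snoop'.
 **/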
void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
	u32 gcr;

	if (no_snoop) {
		gcr = er32(GCR);
		gcr &= ~(PCIE_NO_SNOOP_ALL);
		gcr |= no_snoop;
		ew32(GCR, gcr);
	}
}

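/**
 *  e1000e_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.  Returns -E1000_ERR_MASTER_REQUESTS_PENDING if the master
 *  disable did not take effect within the timeout.
 **/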
s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;

	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	ew32(CTRL, ctrl);

	while (timeout) {
		if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		usleep_range(100, 200);
		timeout--;
	}

	if (!timeout) {
		e_dbg("Master requests are pending.\n");
		return -E1000_ERR_MASTER_REQUESTS_PENDING;
	}

	return 0;
}

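/**
 *  e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Reset the Adaptive Interframe Spacing throttle to default values.
 **/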
void e1000e_reset_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	if (!mac->adaptive_ifs) {
		e_dbg("Not in Adaptive IFS mode!\n");
		return;
	}

	mac->current_ifs_val = 0;
	mac->ifs_min_val = IFS_MIN;
	mac->ifs_max_val = IFS_MAX;
	mac->ifs_step_size = IFS_STEP;
	mac->ifs_ratio = IFS_RATIO;

	mac->in_ifs_mode = false;
	ew32(AIT, 0);
}

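/**
 *  e1000e_update_adaptive - Update Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Update the Adaptive Interframe Spacing Throttle value based on the
 *  time between transmitted packets and time between collisions.
 **/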
void e1000e_update_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	if (!mac->adaptive_ifs) {
		e_dbg("Not in Adaptive IFS mode!\n");
		return;
	}

	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
			mac->in_ifs_mode = true;
			if (mac->current_ifs_val < mac->ifs_max_val) {
				if (!mac->current_ifs_val)
					mac->current_ifs_val = mac->ifs_min_val;
				else
					mac->current_ifs_val +=
					    mac->ifs_step_size;
				ew32(AIT, mac->current_ifs_val);
			}
		}
	} else {
		if (mac->in_ifs_mode &&
		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
			mac->current_ifs_val = 0;
			mac->in_ifs_mode = false;
			ew32(AIT, 0);
		}
	}
}