1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/if_ether.h>
25#include <linux/delay.h>
26#include <linux/pci.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29
30#include "e1000_mac.h"
31
32#include "igb.h"
33
34static s32 igb_set_default_fc(struct e1000_hw *hw);
35static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
36
37
38
39
40
41
42
43
44
/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface.  The following bus information is determined and
 *  stored: bus speed, bus width, type (PCIe), and PCIe function.
 *
 *  Always returns 0; speed and width are left "unknown" when the PCIe
 *  link-status capability register cannot be read.
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	/* Read the negotiated link status from PCIe config space. */
	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		/* The negotiated link width field maps directly onto the
		 * e1000_bus_width enumeration values.
		 */
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	/* The PCI function number is reported in the device STATUS register */
	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}
83
84
85
86
87
88
89
90
91void igb_clear_vfta(struct e1000_hw *hw)
92{
93 u32 offset;
94
95 for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
96 hw->mac.ops.write_vfta(hw, offset, 0);
97}
98
99
100
101
102
103
104
105
106
107
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table, then mirrors the value into the driver's
 *  shadow copy so later updates can avoid re-reading hardware.
 **/
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	struct igb_adapter *adapter = hw->back;

	array_wr32(E1000_VFTA, offset, value);
	wrfl();		/* flush the write before updating the shadow copy */

	adapter->shadow_vfta[offset] = value;
}
117
118
119
120
121
122
123
124
125
126
127void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
128{
129 u32 i;
130 u8 mac_addr[ETH_ALEN] = {0};
131
132
133 hw_dbg("Programming MAC Address into RAR[0]\n");
134
135 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
136
137
138 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
139 for (i = 1; i < rar_count; i++)
140 hw->mac.ops.rar_set(hw, mac_addr, i);
141}
142
143
144
145
146
147
148
149
150
151
/**
 *  igb_find_vlvf_slot - find the VLAN id or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  return the VLVF index where this VLAN id should be placed
 *
 **/
static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;

	/* Search for the VLAN id in the VLVF entries, saving off the first
	 * empty slot found along the way.  Slot 0 is skipped (reserved for
	 * VLAN 0), so the pre-decrement loop covers
	 * (E1000_VLVF_ARRAY_SIZE - 1)..1.  Note first_empty_slot is only
	 * recorded while still 0 (i.e. not bypassing and none found yet).
	 */
	for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
		bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* return the first empty slot if found, else report no space */
	return first_empty_slot ? : -E1000_ERR_NO_SPACE;
}
182
183
184
185
186
187
188
189
190
191
192
/**
 *  igb_vfta_set - enable or disable vlan in VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vlan: VLAN id to add or remove
 *  @vind: VMDq output index that maps queue to VLAN id
 *  @vlan_on: if true add filter, if false remove
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  Sets or clears a bit in the VLAN filter table array based on VLAN id
 *  and if we are adding or removing the filter
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
		 bool vlan_on, bool vlvf_bypass)
{
	struct igb_adapter *adapter = hw->back;
	u32 regidx, vfta_delta, vfta, bits;
	s32 vlvf_index;

	/* valid VLAN ids are 0..4095, valid pool indices are 0..7 */
	if ((vlan > 4095) || (vind > 7))
		return -E1000_ERR_PARAM;

	/* This is a 2 part operation - first the VFTA, then the
	 * VLVF if virtualization (VT mode) is enabled.
	 *
	 * Part 1
	 * The VFTA is a bitstring made up of 32-bit registers that
	 * enable the particular VLAN id:
	 *    upper bits: which register
	 *    lower 5 bits: which bit within the register
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = adapter->shadow_vfta[regidx];

	/* vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is a single bit we can simply XOR it with vfta to generate the
	 * new value to write.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * If VT mode is set:
	 *   if vlan_on, make sure the VLAN is in VLVF and set the pool bit
	 *   if !vlan_on, clear the pool bit and possibly the whole entry.
	 * Without VFs, only the VFTA update below is needed.
	 */
	if (!adapter->vfs_allocated_count)
		goto vfta_update;

	vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	bits = rd32(E1000_VLVF(vlvf_index));

	/* set the pool bit for this pool/VF */
	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit again - we are removing this pool */
	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);

	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
		/* No pools left using this VLVF entry.  Clear VFTA first,
		 * then disable VLVF.  Otherwise we run the risk of stray
		 * packets leaking into the PF via the default pool.
		 */
		if (vfta_delta)
			hw->mac.ops.write_vfta(hw, regidx, vfta);

		/* disable VLVF and clear remaining bit from pool */
		wr32(E1000_VLVF(vlvf_index), 0);

		return 0;
	}

	/* If there are still pools/VFs using this VLAN ID entry we cannot
	 * clear the VFTA bit: other pools may still be receiving on this
	 * VLAN.  Ignore the VFTA-clear part of the request by zeroing the
	 * delta; the pool-bit change above is still written below.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);

vfta_update:
	/* bit was set/cleared before we started */
	if (vfta_delta)
		hw->mac.ops.write_vfta(hw, regidx, vfta);

	return 0;
}
294
295
296
297
298
299
300
301
302
303
304
305
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the nvm for an alternate MAC address.  An alternate MAC address
 *  can be setup by pre-boot software and must be treated like a permanent
 *  address and must override the actual permanent MAC address.  If an
 *  alternate MAC address is found it is programmed into RAR0.  Returns 0
 *  unless an NVM read fails.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	/* The alternate-address block holds one address per LAN function;
	 * advance to this port's entry.
	 */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		/* each NVM word carries two address bytes, low byte first */
		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it
	 * the same as the normal permanent MAC address stored by the HW
	 * into the RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
365
366
367
368
369
370
371
372
373
374
/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes)
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Flush after each 32-bit write so RAL is committed before RAH;
	 * some bridges would otherwise combine the consecutive writes
	 * into a single burst.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
401
402
403
404
405
406
407
408
409
410
411
412void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
413{
414 u32 hash_bit, hash_reg, mta;
415
416
417
418
419
420
421
422
423
424
425 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
426 hash_bit = hash_value & 0x1F;
427
428 mta = array_rd32(E1000_MTA, hash_reg);
429
430 mta |= BIT(hash_bit);
431
432 array_wr32(E1000_MTA, hash_reg, mta);
433 wrfl();
434}
435
436
437
438
439
440
441
442
443
444
445static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
446{
447 u32 hash_value, hash_mask;
448 u8 bit_shift = 0;
449
450
451 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
452
453
454
455
456 while (hash_mask >> bit_shift != 0xFF)
457 bit_shift++;
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484 switch (hw->mac.mc_filter_type) {
485 default:
486 case 0:
487 break;
488 case 1:
489 bit_shift += 1;
490 break;
491 case 2:
492 bit_shift += 2;
493 break;
494 case 3:
495 bit_shift += 4;
496 break;
497 }
498
499 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
500 (((u16) mc_addr[5]) << bit_shift)));
501
502 return hash_value;
503}
504
505
506
507
508
509
510
511
512
513
514void igb_update_mc_addr_list(struct e1000_hw *hw,
515 u8 *mc_addr_list, u32 mc_addr_count)
516{
517 u32 hash_value, hash_bit, hash_reg;
518 int i;
519
520
521 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
522
523
524 for (i = 0; (u32) i < mc_addr_count; i++) {
525 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
526
527 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
528 hash_bit = hash_value & 0x1F;
529
530 hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
531 mc_addr_list += (ETH_ALEN);
532 }
533
534
535 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
536 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
537 wrfl();
538}
539
540
541
542
543
544
545
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers;
 *  these statistics registers are clear-on-read.
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}
586
587
588
589
590
591
592
593
594
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to determine if the link state has changed.  If link is up,
 *  downshift is checked, collision distance is configured, and flow
 *  control is configured for the link partner.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration, so we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
655
656
657
658
659
660
661
662
663
664
665
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assumes the hardware has previously been reset and the
 *  transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:

	return ret_val;
}
716
717
718
719
720
721
722
723
724
725void igb_config_collision_dist(struct e1000_hw *hw)
726{
727 u32 tctl;
728
729 tctl = rd32(E1000_TCTL);
730
731 tctl &= ~E1000_TCTL_COLD;
732 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
733
734 wr32(E1000_TCTL, tctl);
735 wrfl();
736}
737
738
739
740
741
742
743
744
745
746static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
747{
748 s32 ret_val = 0;
749 u32 fcrtl = 0, fcrth = 0;
750
751
752
753
754
755
756
757 if (hw->fc.current_mode & e1000_fc_tx_pause) {
758
759
760
761
762 fcrtl = hw->fc.low_water;
763 if (hw->fc.send_xon)
764 fcrtl |= E1000_FCRTL_XONE;
765
766 fcrth = hw->fc.high_water;
767 }
768 wr32(E1000_FCRTL, fcrtl);
769 wr32(E1000_FCRTH, fcrth);
770
771 return ret_val;
772}
773
774
775
776
777
778
779
780
/**
 *  igb_set_default_fc - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 lan_offset;
	u16 nvm_data;

	/* Read and store the Init Control 2 word of the EEPROM.  This word
	 * contains bits that determine the hardware's default PAUSE (flow
	 * control) mode.  If there is no software over-ride of the flow
	 * control setting, hw->fc.requested_mode is initialized from this
	 * value.
	 */
	if (hw->mac.type == e1000_i350) {
		/* i350 keeps a per-port copy of the Init Control words */
		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
					   + lan_offset, 1, &nvm_data);
	} else {
		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
					   1, &nvm_data);
	}

	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* Map the two PAUSE bits to a flow-control mode:
	 * 00 -> none, ASM_DIR only -> tx_pause, otherwise -> full.
	 */
	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
		 NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}
820
821
822
823
824
825
826
827
828
829
830
/**
 *  igb_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in
 *  the device control register to reflect the adapter settings.  TFCE and
 *  RFCE need to be explicitly set by software when a copper PHY is used
 *  because autonegotiation is managed by the PHY rather than the MAC.
 *  Software must also configure these bits when link is forced on a fiber
 *  connection.
 **/
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The switch below enables/disables flow control according to
	 * hw->fc.current_mode:
	 *      e1000_fc_none:     flow control completely disabled
	 *      e1000_fc_rx_pause: we can receive pause frames but do not
	 *                         send them
	 *      e1000_fc_tx_pause: we can send pause frames but do not
	 *                         receive them
	 *      e1000_fc_full:     both Rx and Tx flow control (symmetric)
	 *  Any other value is a configuration error.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}
883
884
885
886
887
888
889
890
891
892
893
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have serdes media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings
		 * (per IEEE 802.3 Annex 28B pause resolution).
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected RX ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could be
		 * connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely: if the link partner really didn't want flow
		 * control, enabling Rx does no harm since we won't receive
		 * any PAUSE frames anyway; if the partner intended flow
		 * control, at least we can receive and process pause
		 * frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}
	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PCS and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
	    && mac->autoneg) {
		/* Read the PCS link status register and check to see if
		 * AutoNeg has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* The same IEEE 802.3 Annex 28B pause resolution table as
		 * used for copper above applies here, with the PAUSE and
		 * ASM_DIR bits taken from the PCS registers.
		 *
		 * Are both PAUSE bits set to 1?  If so, Symmetric Flow
		 * Control is enabled at both ends; the ASM_DIR bits are
		 * irrelevant per the spec.
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 * LOCAL: PAUSE=0, ASM_DIR=1; PARTNER: PAUSE=1, ASM_DIR=1
		 *   -> e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 * LOCAL: PAUSE=1, ASM_DIR=1; PARTNER: PAUSE=0, ASM_DIR=1
		 *   -> e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Now we need to indicate to the MAC that we should force
		 * the PCS to use the resolved flow control setting.
		 */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		/* Force the MAC controller to use the correct flow
		 * control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1225 u16 *duplex)
1226{
1227 u32 status;
1228
1229 status = rd32(E1000_STATUS);
1230 if (status & E1000_STATUS_SPEED_1000) {
1231 *speed = SPEED_1000;
1232 hw_dbg("1000 Mbs, ");
1233 } else if (status & E1000_STATUS_SPEED_100) {
1234 *speed = SPEED_100;
1235 hw_dbg("100 Mbs, ");
1236 } else {
1237 *speed = SPEED_10;
1238 hw_dbg("10 Mbs, ");
1239 }
1240
1241 if (status & E1000_STATUS_FD) {
1242 *duplex = FULL_DUPLEX;
1243 hw_dbg("Full Duplex\n");
1244 } else {
1245 *duplex = HALF_DUPLEX;
1246 hw_dbg("Half Duplex\n");
1247 }
1248
1249 return 0;
1250}
1251
1252
1253
1254
1255
1256
1257
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM.  This is a
 *  two-phase handshake: first wait for firmware to release the SMBI
 *  bit, then claim the SWESMBI bit and verify it latched.
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
1304
1305
1306
1307
1308
1309
1310
1311void igb_put_hw_semaphore(struct e1000_hw *hw)
1312{
1313 u32 swsm;
1314
1315 swsm = rd32(E1000_SWSM);
1316
1317 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1318
1319 wr32(E1000_SWSM, swsm);
1320}
1321
1322
1323
1324
1325
1326
1327
1328s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1329{
1330 s32 i = 0;
1331 s32 ret_val = 0;
1332
1333
1334 while (i < AUTO_READ_DONE_TIMEOUT) {
1335 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1336 break;
1337 usleep_range(1000, 2000);
1338 i++;
1339 }
1340
1341 if (i == AUTO_READ_DONE_TIMEOUT) {
1342 hw_dbg("Auto read by HW from NVM has not completed.\n");
1343 ret_val = -E1000_ERR_RESET;
1344 goto out;
1345 }
1346
1347out:
1348 return ret_val;
1349}
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1360{
1361 s32 ret_val;
1362
1363 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1364 if (ret_val) {
1365 hw_dbg("NVM Read Error\n");
1366 goto out;
1367 }
1368
1369 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1370 switch (hw->phy.media_type) {
1371 case e1000_media_type_internal_serdes:
1372 *data = ID_LED_DEFAULT_82575_SERDES;
1373 break;
1374 case e1000_media_type_copper:
1375 default:
1376 *data = ID_LED_DEFAULT;
1377 break;
1378 }
1379 }
1380out:
1381 return ret_val;
1382}
1383
1384
1385
1386
1387
1388
/**
 *  igb_id_led_init - Initialize LED identification settings
 *  @hw: pointer to the HW structure
 *
 *  Reads the LED configuration word from the NVM (or device defaults)
 *  and precomputes the LEDCTL register values for the two ID states
 *  (ledctl_mode1 / ledctl_mode2) used by the LED blink/identify code.
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have a different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* Each nibble of 'data' describes one LED; each LED occupies one
	 * byte of the LEDCTL register (hence the (i << 3) shifts).
	 */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}
1454
1455
1456
1457
1458
1459
1460
1461
/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED config
 *  to the default operation.
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
1467
1468
1469
1470
1471
1472
1473
/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (or "off" if
		 * the LED is inverted) in ledctl_mode2.  The blink logic in
		 * hardware only works when the mode is set to "on", so the
		 * mode must be forced to "on" for inverted LEDs.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}
1512
1513
1514
1515
1516
1517
1518
1519s32 igb_led_off(struct e1000_hw *hw)
1520{
1521 switch (hw->phy.media_type) {
1522 case e1000_media_type_copper:
1523 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1524 break;
1525 default:
1526 break;
1527 }
1528
1529 return 0;
1530}
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
/**
 *  igb_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.  Returns 0 if successful, else
 *  -E1000_ERR_MASTER_REQUESTS_PENDING if the master-disable did not take
 *  effect within the timeout.
 **/
s32 igb_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	wr32(E1000_CTRL, ctrl);

	/* Wait for the GIO Master Enable status bit to clear, indicating
	 * that outstanding master requests have drained.
	 */
	while (timeout) {
		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}
1573
1574
1575
1576
1577
1578
1579
1580
1581s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1582{
1583 s32 ret_val = 0;
1584
1585
1586 if (hw->mac.type >= e1000_82580)
1587 goto out;
1588
1589 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1590 hw_dbg("Invalid MDI setting detected\n");
1591 hw->phy.mdix = 1;
1592 ret_val = -E1000_ERR_CONFIG;
1593 goto out;
1594 }
1595
1596out:
1597 return ret_val;
1598}
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
/**
 *  igb_write_8bit_ctrl_reg - Write a 8bit CTRL register
 *  @hw: pointer to the HW structure
 *  @reg: 32bit register offset such as E1000_SCTL
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Writes an address/data control type register.  These registers carry
 *  the address in the upper bits and the data byte in the low byte, and
 *  signal completion via a ready bit.
 **/
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the write completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
1637
1638
1639
1640
1641
1642
1643
1644
/**
 *  igb_enable_mng_pass_thru - Enable processing of ARP's
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to leave the interface enabled so that
 *  frames can be directed to and from the management interface.  Returns
 *  true when management pass-through must remain enabled.
 **/
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	/* Management must be receiving TCO traffic for pass-through */
	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		/* Firmware must be in pass-through mode and the management
		 * clock gate must not be asserted.
		 */
		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		/* No ARC subsystem: fall back to SMBus/ASF mode bits. */
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}
1680