1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/if_ether.h>
25#include <linux/delay.h>
26#include <linux/pci.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29
30#include "e1000_mac.h"
31
32#include "igb.h"
33
34static s32 igb_set_default_fc(struct e1000_hw *hw);
35static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
36
37
38
39
40
41
42
43
44
/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface.  The following bus information is determined and
 *  stored: bus speed, bus width, type (PCIe), and PCIe function.
 *  Always returns 0; a failed capability read only leaves speed/width
 *  as "unknown".
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		/* Negotiated link width comes straight from the NLW field */
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	/* PCI function number is reported by the device STATUS register */
	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}
83
84
85
86
87
88
89
90
91void igb_clear_vfta(struct e1000_hw *hw)
92{
93 u32 offset;
94
95 for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
96 hw->mac.ops.write_vfta(hw, offset, 0);
97}
98
99
100
101
102
103
104
105
106
107
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table, then mirrors it into the driver's software
 *  shadow copy.
 **/
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	struct igb_adapter *adapter = hw->back;

	array_wr32(E1000_VFTA, offset, value);
	wrfl();

	/* keep software shadow in sync with the hardware table */
	adapter->shadow_vfta[offset] = value;
}
117
118
119
120
121
122
123
124
125
126
/**
 *  igb_init_rx_addrs - Initialize receive address's
 *  @hw: pointer to the HW structure
 *  @rar_count: receive address registers
 *
 *  Setups the receive address registers by setting the base receive address
 *  register to the devices MAC address and clearing all the other receive
 *  address registers to 0.
 **/
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = {0};

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_count - 1) receive addresses */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}
142
143
144
145
146
147
148
149
150
151
/**
 *  igb_find_vlvf_slot - find the VLAN id or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  return the VLVF index where this VLAN id should be placed
 *
 **/
static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;

	/* Search for the VLAN id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
	 */
	for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
		bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN.  Return first empty
	 * slot we found during our search, else error.
	 * (GNU "?:" keeps first_empty_slot when it is non-zero, including
	 * the negative bypass sentinel set above.)
	 */
	return first_empty_slot ? : -E1000_ERR_NO_SPACE;
}
182
183
184
185
186
187
188
189
190
191
192
/**
 *  igb_vfta_set - enable or disable vlan in VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vlan: VLAN id to add or remove
 *  @vind: VMDq output index that maps queue to VLAN id
 *  @vlan_on: if true add filter, if false remove
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  Sets or clears a bit in the VLAN filter table array based on VLAN id
 *  and if we are adding or removing the filter.
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
		 bool vlan_on, bool vlvf_bypass)
{
	struct igb_adapter *adapter = hw->back;
	u32 regidx, vfta_delta, vfta, bits;
	s32 vlvf_index;

	if ((vlan > 4095) || (vind > 7))
		return -E1000_ERR_PARAM;

	/* this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 *
	 * Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit within that register
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = adapter->shadow_vfta[regidx];

	/* vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update vfta similarly.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the VLAN is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (!adapter->vfs_allocated_count)
		goto vfta_update;

	vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	bits = rd32(E1000_VLVF(vlvf_index));

	/* set the pool bit */
	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit */
	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);

	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (vfta_delta)
			hw->mac.ops.write_vfta(hw, regidx, vfta);

		/* disable VLVF and clear remaining bit from pool */
		wr32(E1000_VLVF(vlvf_index), 0);

		return 0;
	}

	/* If there are still bits set in the VLVFB registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the VFTA entry bit.
	 * If the caller has requested that we clear the VFTA
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the VFTA VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * VFTA bit until all pools/VFs using that VLAN ID have also
	 * been cleared.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);

vfta_update:
	/* bit was set/cleared before we started */
	if (vfta_delta)
		hw->mac.ops.write_vfta(hw, regidx, vfta);

	return 0;
}
294
295
296
297
298
299
300
301
302
303
304
305
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the nvm for an alternate MAC address.  An alternate MAC address
 *  can be setup by pre-boot software and must be treated like a permanent
 *  address and must override the actual permanent MAC address.  If an
 *  alternate MAC address is found it is programmed into RAR0, replacing
 *  the permanent address that was installed into RAR0 by the Si on reset.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	/* each LAN function has its own alternate address slot */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		/* NVM words are little-endian byte pairs */
		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
365
366
367
368
369
370
371
372
373
374
/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into
	 * a single burst write, which will malfunction on some parts.
	 * The flushes avoid this.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
401
402
403
404
405
406
407
408
409
410
411
412void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
413{
414 u32 hash_bit, hash_reg, mta;
415
416
417
418
419
420
421
422
423
424
425 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
426 hash_bit = hash_value & 0x1F;
427
428 mta = array_rd32(E1000_MTA, hash_reg);
429
430 mta |= BIT(hash_bit);
431
432 array_wr32(E1000_MTA, hash_reg, mta);
433 wrfl();
434}
435
436
437
438
439
440
441
442
443
444
/**
 *  igb_hash_mc_addr - Generate a multicast hash value
 *  @hw: pointer to the HW structure
 *  @mc_addr: pointer to a multicast address
 *
 *  Generates a multicast address hash value which is used to determine
 *  the multicast filter table array address and new table value.  See
 *  igb_mta_set()
 **/
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.
	 * The algorithm is such that there is a total of 8 bits of shifting.
	 * The bit_shift for a mc_filter_type of 0 represents the number of
	 * left-shifts where the MSB of mc_addr[5] would still fall within
	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
	 * of 8 bits of shifting, then mc_addr[4] will shift right the
	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
	 * cases are a variation of this algorithm...essentially raising the
	 * number of bits to shift mc_addr[5] left, while still keeping the
	 * 8-bit shifting total.
	 *
	 * For example, given the following Destination MAC Address and an
	 * MTA with 4096 entries (i.e. 128 registers with 32 bits each --
	 * we only care about the lower 12 bits of the hash), the
	 * filter-type values select different 12-bit windows of the
	 * bit-reversed address.
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				  (((u16) mc_addr[5]) << bit_shift)));

	return hash_value;
}
504
505
506
507
508
509
510
511
512
513
/**
 *  igb_update_mc_addr_list - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates entire Multicast Table Array.
 *  The caller must have a packed mc_addr_list of multicast addresses
 *  (ETH_ALEN bytes each, back to back).
 **/
void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table with the shadow copy */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
}
539
540
541
542
543
544
545
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers.
 *  These statistics registers are clear-on-read.
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}
586
587
588
589
590
591
592
593
594
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see of the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out;	/* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
655
656
657
658
659
660
661
662
663
664
665
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:

	return ret_val;
}
716
717
718
719
720
721
722
723
724
725void igb_config_collision_dist(struct e1000_hw *hw)
726{
727 u32 tctl;
728
729 tctl = rd32(E1000_TCTL);
730
731 tctl &= ~E1000_TCTL_COLD;
732 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
733
734 wr32(E1000_TCTL, tctl);
735 wrfl();
736}
737
738
739
740
741
742
743
744
745
/**
 *  igb_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, then set XON frame
 *  transmission as well.
 **/
static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	wr32(E1000_FCRTL, fcrtl);
	wr32(E1000_FCRTH, fcrth);

	return ret_val;
}
773
774
775
776
777
778
779
780
/**
 *  igb_set_default_fc - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 lan_offset;
	u16 nvm_data;

	/* Read and store word 0x0F of the EEPROM. This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins. If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	if (hw->mac.type == e1000_i350)
		/* i350 keeps per-port init words at a per-function offset */
		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
	else
		lan_offset = 0;

	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
				   1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}
817
818
819
820
821
822
823
824
825
826
827
/**
 *  igb_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in
 *  the device control register to reflect the adapter settings.  TFCE and
 *  RFCE need to be explicitly set by software when a copper PHY is used
 *  because autonegotiation is managed by the PHY rather than the MAC.
 *  Software must also configure these bits when link is forced on a fiber
 *  connection.
 **/
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit an
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disable flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          frames but we do not receive pause frames).
	 *      3:  Both Rx and TX flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}
880
881
882
883
884
885
886
887
888
889
890
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected RX ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF  the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling RX only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}
	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
		&& mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto_Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) determine flow control
		 * for both the PHY and the link partner.  The resolution
		 * table is the same IEEE 802.3ab PAUSE/ASM_DIR table used
		 * for the copper case above.
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			  (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			  (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			  (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1222 u16 *duplex)
1223{
1224 u32 status;
1225
1226 status = rd32(E1000_STATUS);
1227 if (status & E1000_STATUS_SPEED_1000) {
1228 *speed = SPEED_1000;
1229 hw_dbg("1000 Mbs, ");
1230 } else if (status & E1000_STATUS_SPEED_100) {
1231 *speed = SPEED_100;
1232 hw_dbg("100 Mbs, ");
1233 } else {
1234 *speed = SPEED_10;
1235 hw_dbg("10 Mbs, ");
1236 }
1237
1238 if (status & E1000_STATUS_FD) {
1239 *duplex = FULL_DUPLEX;
1240 hw_dbg("Full Duplex\n");
1241 } else {
1242 *duplex = HALF_DUPLEX;
1243 hw_dbg("Half Duplex\n");
1244 }
1245
1246 return 0;
1247}
1248
1249
1250
1251
1252
1253
1254
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM.  First waits for
 *  the SMBI bit to clear (hardware side), then sets SWESMBI and reads it
 *  back to confirm the software side was obtained.
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
1301
1302
1303
1304
1305
1306
1307
1308void igb_put_hw_semaphore(struct e1000_hw *hw)
1309{
1310 u32 swsm;
1311
1312 swsm = rd32(E1000_SWSM);
1313
1314 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1315
1316 wr32(E1000_SWSM, swsm);
1317}
1318
1319
1320
1321
1322
1323
1324
1325s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1326{
1327 s32 i = 0;
1328 s32 ret_val = 0;
1329
1330
1331 while (i < AUTO_READ_DONE_TIMEOUT) {
1332 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1333 break;
1334 usleep_range(1000, 2000);
1335 i++;
1336 }
1337
1338 if (i == AUTO_READ_DONE_TIMEOUT) {
1339 hw_dbg("Auto read by HW from NVM has not completed.\n");
1340 ret_val = -E1000_ERR_RESET;
1341 goto out;
1342 }
1343
1344out:
1345 return ret_val;
1346}
1347
1348
1349
1350
1351
1352
1353
1354
1355
/**
 *  igb_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration
 *  based on the media type.
 **/
static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* all-zeros and all-ones are reserved; fall back to defaults */
	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}
1380
1381
1382
1383
1384
1385
/**
 *  igb_id_led_init - set the LEDCTL modes from the NVM ID LED settings
 *  @hw: pointer to the HW structure
 *
 *  Reads the ID LED settings word from the NVM and builds the two cached
 *  LEDCTL values (mode1 for "on"/default state, mode2 for the alternate
 *  state).  Each of the four 4-bit nibbles in the NVM word selects the
 *  behavior for the corresponding 8-bit LED field in LEDCTL.
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* one 4-bit nibble per LED; one 8-bit LEDCTL field per LED */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}
1451
1452
1453
1454
1455
1456
1457
1458
/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED configuration
 *  to the default value, saved from the EEPROM.
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
1464
1465
1466
1467
1468
1469
1470
/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the led's which are set to be on.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (LED_ON)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on"
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}
1509
1510
1511
1512
1513
1514
1515
1516s32 igb_led_off(struct e1000_hw *hw)
1517{
1518 switch (hw->phy.media_type) {
1519 case e1000_media_type_copper:
1520 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1521 break;
1522 default:
1523 break;
1524 }
1525
1526 return 0;
1527}
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
/**
 *  igb_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Returns 0 (0) if successful, else returns -10
 *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
 *  the master requests to be disabled.
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.
 **/
s32 igb_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	wr32(E1000_CTRL, ctrl);

	/* poll until hardware reports master requests have drained */
	while (timeout) {
		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}
1570
1571
1572
1573
1574
1575
1576
1577
1578s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1579{
1580 s32 ret_val = 0;
1581
1582
1583 if (hw->mac.type >= e1000_82580)
1584 goto out;
1585
1586 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1587 hw_dbg("Invalid MDI setting detected\n");
1588 hw->phy.mdix = 1;
1589 ret_val = -E1000_ERR_CONFIG;
1590 goto out;
1591 }
1592
1593out:
1594 return ret_val;
1595}
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
/**
 *  igb_write_8bit_ctrl_reg - Write a 8bit CTRL register
 *  @hw: pointer to the HW structure
 *  @reg: 32bit register offset such as E1000_SCTL
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Writes an address/data control type register.  There are several of these
 *  and they all have the format address << 8 | data and bit 31 is polled for
 *  completion.
 **/
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the MDI read completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
1634
1635
1636
1637
1638
1639
1640
1641
/**
 *  igb_enable_mng_pass_thru - Enable processing of ARP's
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to allow ARPs to be processed by the host.
 *  Returns true when management pass-through to the host should be
 *  enabled, false otherwise.
 **/
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		/* firmware reports its mode via FWSM; require pass-through
		 * mode while management clock gating is not active
		 */
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		/* no ARC: SMBus-based management without ASF enabled */
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}
1677