1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/if_ether.h>
29#include <linux/delay.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33
34#include "e1000_mac.h"
35
36#include "igb.h"
37
38static s32 igb_set_default_fc(struct e1000_hw *hw);
39static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
40
41
42
43
44
45
46
47
48
/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a PCIe device:
 *  bus type, negotiated link speed and width, and the PCI function
 *  number of the port.  Always returns 0; if the config-space read
 *  fails, speed/width are recorded as unknown rather than failing.
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	/* Link status lives in the PCIe capability structure in config space */
	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		/* Negotiated link width field maps directly onto the enum */
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	/* The port's function number is reported in the device STATUS reg */
	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}
87
88
89
90
91
92
93
94
95void igb_clear_vfta(struct e1000_hw *hw)
96{
97 u32 offset;
98
99 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
100 array_wr32(E1000_VFTA, offset, 0);
101 wrfl();
102 }
103}
104
105
106
107
108
109
110
111
112
113
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table, then flushes the write.
 **/
static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	array_wr32(E1000_VFTA, offset, value);
	wrfl();
}
119
120
121
122
123
124
125
126
127
128
129
130
131
132void igb_clear_vfta_i350(struct e1000_hw *hw)
133{
134 u32 offset;
135 int i;
136
137 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
138 for (i = 0; i < 10; i++)
139 array_wr32(E1000_VFTA, offset, 0);
140
141 wrfl();
142 }
143}
144
145
146
147
148
149
150
151
152
153
/**
 *  igb_write_vfta_i350 - Write value to VLAN filter table (i350)
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the VLAN filter table register
 *  array.  The write is repeated 10 times before the flush; presumably
 *  a workaround for an i350 hardware issue where a single write may not
 *  take effect - TODO confirm against the i350 errata sheet.
 **/
static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
{
	int i;

	for (i = 0; i < 10; i++)
		array_wr32(E1000_VFTA, offset, value);

	wrfl();
}
163
164
165
166
167
168
169
170
171
172
173void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
174{
175 u32 i;
176 u8 mac_addr[ETH_ALEN] = {0};
177
178
179 hw_dbg("Programming MAC Address into RAR[0]\n");
180
181 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
182
183
184 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
185 for (i = 1; i < rar_count; i++)
186 hw->mac.ops.rar_set(hw, mac_addr, i);
187}
188
189
190
191
192
193
194
195
196
197
198s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
199{
200 u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
201 u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
202 u32 vfta;
203 struct igb_adapter *adapter = hw->back;
204 s32 ret_val = 0;
205
206 vfta = adapter->shadow_vfta[index];
207
208
209 if ((!!(vfta & mask)) == add) {
210 ret_val = -E1000_ERR_CONFIG;
211 } else {
212 if (add)
213 vfta |= mask;
214 else
215 vfta &= ~mask;
216 }
217 if (hw->mac.type == e1000_i350)
218 igb_write_vfta_i350(hw, index, vfta);
219 else
220 igb_write_vfta(hw, index, vfta);
221 adapter->shadow_vfta[index] = vfta;
222
223 return ret_val;
224}
225
226
227
228
229
230
231
232
233
234
235
236
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the nvm for an alternate MAC address.  An alternate MAC address
 *  can be set up by pre-boot software and must be treated like a
 *  permanent address, overriding the actual permanent MAC address.  If an
 *  alternate MAC address is found it is programmed into RAR0, replacing
 *  the address previously installed there.  Returns 0 (including when no
 *  alternate address exists) or an NVM read error code.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Skipped on 82580 and newer - presumably the alternate address
	 * is handled elsewhere (e.g. by option ROM) on those parts; TODO
	 * confirm against hardware documentation.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* All-ones or all-zeroes pointer word means no alternate MAC
	 * address is provisioned.
	 */
	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	/* Each LAN function's address lives at a fixed offset from the
	 * base pointer.
	 */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	/* Each 16-bit NVM word carries two address bytes, low byte first */
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it
	 * the same as the normal permanent MAC address stored by the HW
	 * into the RAR.  Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
298
299
300
301
302
303
304
305
306
307
/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (ETH_ALEN bytes)
 *  @index: receive address array register to write
 *
 *  Sets the receive address array register at @index to the address
 *  passed in by @addr.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV (Address Valid) bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into a
	 * single burst write, which can malfunction on some parts; the
	 * flush after each write avoids that.  Do not reorder or drop
	 * these flushes.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
336
337
338
339
340
341
342
343
344
345
346
347void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
348{
349 u32 hash_bit, hash_reg, mta;
350
351
352
353
354
355
356
357
358
359
360
361 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
362 hash_bit = hash_value & 0x1F;
363
364 mta = array_rd32(E1000_MTA, hash_reg);
365
366 mta |= (1 << hash_bit);
367
368 array_wr32(E1000_MTA, hash_reg, mta);
369 wrfl();
370}
371
372
373
374
375
376
377
378
379
380
381static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
382{
383 u32 hash_value, hash_mask;
384 u8 bit_shift = 0;
385
386
387 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
388
389
390
391
392
393 while (hash_mask >> bit_shift != 0xFF)
394 bit_shift++;
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422 switch (hw->mac.mc_filter_type) {
423 default:
424 case 0:
425 break;
426 case 1:
427 bit_shift += 1;
428 break;
429 case 2:
430 bit_shift += 2;
431 break;
432 case 3:
433 bit_shift += 4;
434 break;
435 }
436
437 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
438 (((u16) mc_addr[5]) << bit_shift)));
439
440 return hash_value;
441}
442
443
444
445
446
447
448
449
450
451
452void igb_update_mc_addr_list(struct e1000_hw *hw,
453 u8 *mc_addr_list, u32 mc_addr_count)
454{
455 u32 hash_value, hash_bit, hash_reg;
456 int i;
457
458
459 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
460
461
462 for (i = 0; (u32) i < mc_addr_count; i++) {
463 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
464
465 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
466 hash_bit = hash_value & 0x1F;
467
468 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
469 mc_addr_list += (ETH_ALEN);
470 }
471
472
473 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
474 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
475 wrfl();
476}
477
478
479
480
481
482
483
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware statistics counters by reading each counter
 *  register (these registers are clear-on-read); the values are
 *  intentionally discarded.
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}
524
525
526
527
528
529
530
531
532
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks whether the link status of the hardware has changed.  If a
 *  change is detected, configures collision distance and flow control
 *  now that auto-negotiation has completed.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift; presumably this must be checked
	 * immediately after link-up - keep the call here.
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care of MAC
	 * speed/duplex configuration, so we only need to configure the
	 * collision distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control settings
	 * because we may have had to re-autoneg with a different link
	 * partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
599
600
601
602
603
604
605
606
607
608
609
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assumes the hardware has previously been reset and the
 *  transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a
	 * link.  We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* Start from the requested mode so that, after a disconnect and
	 * reconnect to a partner with different capabilities, the
	 * original configuration is re-resolved.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:

	return ret_val;
}
664
665
666
667
668
669
670
671
672
673void igb_config_collision_dist(struct e1000_hw *hw)
674{
675 u32 tctl;
676
677 tctl = rd32(E1000_TCTL);
678
679 tctl &= ~E1000_TCTL_COLD;
680 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
681
682 wr32(E1000_TCTL, tctl);
683 wrfl();
684}
685
686
687
688
689
690
691
692
693
694static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
695{
696 s32 ret_val = 0;
697 u32 fcrtl = 0, fcrth = 0;
698
699
700
701
702
703
704
705
706 if (hw->fc.current_mode & e1000_fc_tx_pause) {
707
708
709
710
711
712 fcrtl = hw->fc.low_water;
713 if (hw->fc.send_xon)
714 fcrtl |= E1000_FCRTL_XONE;
715
716 fcrth = hw->fc.high_water;
717 }
718 wr32(E1000_FCRTL, fcrtl);
719 wr32(E1000_FCRTH, fcrth);
720
721 return ret_val;
722}
723
724
725
726
727
728
729
730
731static s32 igb_set_default_fc(struct e1000_hw *hw)
732{
733 s32 ret_val = 0;
734 u16 nvm_data;
735
736
737
738
739
740
741
742
743
744
745 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
746
747 if (ret_val) {
748 hw_dbg("NVM Read Error\n");
749 goto out;
750 }
751
752 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
753 hw->fc.requested_mode = e1000_fc_none;
754 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
755 NVM_WORD0F_ASM_DIR)
756 hw->fc.requested_mode = e1000_fc_tx_pause;
757 else
758 hw->fc.requested_mode = e1000_fc_full;
759
760out:
761 return ret_val;
762}
763
764
765
766
767
768
769
770
771
772
773
774s32 igb_force_mac_fc(struct e1000_hw *hw)
775{
776 u32 ctrl;
777 s32 ret_val = 0;
778
779 ctrl = rd32(E1000_CTRL);
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
800
801 switch (hw->fc.current_mode) {
802 case e1000_fc_none:
803 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
804 break;
805 case e1000_fc_rx_pause:
806 ctrl &= (~E1000_CTRL_TFCE);
807 ctrl |= E1000_CTRL_RFCE;
808 break;
809 case e1000_fc_tx_pause:
810 ctrl &= (~E1000_CTRL_RFCE);
811 ctrl |= E1000_CTRL_TFCE;
812 break;
813 case e1000_fc_full:
814 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
815 break;
816 default:
817 hw_dbg("Flow control param set incorrectly\n");
818 ret_val = -E1000_ERR_CONFIG;
819 goto out;
820 }
821
822 wr32(E1000_CTRL, ctrl);
823
824out:
825 return ret_val;
826}
827
828
829
830
831
832
833
834
835
836
837
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that
 *  the speed and duplex were not forced.  If the link was forced, flow
 *  control is forced to the requested mode.  If auto-negotiation is
 *  enabled and completed, flow control is resolved from the local and
 *  link-partner advertisements.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* If auto-neg failed (serdes) or we are on copper, force the MAC
	 * configuration to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register twice: this register has
		 * latched ("sticky") bits, so the second read reflects
		 * current state.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		/* Auto-neg not yet complete: leave flow control as-is
		 * and report success (0).
		 */
		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg "
			       "has not completed.\n");
			goto out;
		}

		/* Auto-neg has completed; read both the local
		 * advertisement register and the link partner ability
		 * register to determine how flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* The PAUSE and ASM_DIR bits of both devices resolve the
		 * flow control mode (IEEE 802.3 Annex 28B pause
		 * resolution):
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|-----------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Both PAUSE bits set implies symmetric flow control is
		 * enabled at both ends.
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* If the user asked for Rx-only pause we had to
			 * advertise FULL (Rx-only cannot be advertised),
			 * so drop back to Rx pause here unless full was
			 * actually requested.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\r\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = "
				       "RX PAUSE frames only.\r\n");
			}
		}

		/* For receiving PAUSE frames ONLY:
		 * local PAUSE=0, ASM_DIR=1; partner PAUSE=1, ASM_DIR=1
		 * resolves to tx_pause (we send, partner receives).
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
		}

		/* For transmitting PAUSE frames ONLY:
		 * local PAUSE=1, ASM_DIR=1; partner PAUSE=0, ASM_DIR=1
		 * resolves to rx_pause (partner sends, we receive).
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}

		/* Per the IEEE spec flow control should now be disabled.
		 * However, the partner may be a legacy switch that does
		 * not advertise flow control but can have it forced.  If
		 * we advertised no flow control (or strict IEEE was
		 * requested), resolve to none; otherwise, if we
		 * advertised some receive capability, enable Rx pause
		 * only - harmless if the partner never sends PAUSE
		 * frames, and useful if it does.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none ||
			  hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 hw->fc.strict_ieee) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\r\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}

		/* One last check: if we auto-negotiated to HALF DUPLEX,
		 * flow control must not be enabled per IEEE 802.3.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Finally, force the MAC to use the resolved flow
		 * control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}

out:
	return ret_val;
}
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1058 u16 *duplex)
1059{
1060 u32 status;
1061
1062 status = rd32(E1000_STATUS);
1063 if (status & E1000_STATUS_SPEED_1000) {
1064 *speed = SPEED_1000;
1065 hw_dbg("1000 Mbs, ");
1066 } else if (status & E1000_STATUS_SPEED_100) {
1067 *speed = SPEED_100;
1068 hw_dbg("100 Mbs, ");
1069 } else {
1070 *speed = SPEED_10;
1071 hw_dbg("10 Mbs, ");
1072 }
1073
1074 if (status & E1000_STATUS_FD) {
1075 *duplex = FULL_DUPLEX;
1076 hw_dbg("Full Duplex\n");
1077 } else {
1078 *duplex = HALF_DUPLEX;
1079 hw_dbg("Half Duplex\n");
1080 }
1081
1082 return 0;
1083}
1084
1085
1086
1087
1088
1089
1090
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore used to access the PHY or NVM.  First waits
 *  for the firmware-owned SMBI bit to clear, then claims the software
 *  side by setting SWESMBI and reading it back to confirm the latch.
 *  Returns 0 on success or -E1000_ERR_NVM on timeout.
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore: wait for firmware to release SMBI */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore: set SWESMBI and verify it latched (it
	 * will read back clear if firmware grabbed the semaphore first).
	 */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if the bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release the SW semaphore taken above before failing */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
1137
1138
1139
1140
1141
1142
1143
1144void igb_put_hw_semaphore(struct e1000_hw *hw)
1145{
1146 u32 swsm;
1147
1148 swsm = rd32(E1000_SWSM);
1149
1150 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1151
1152 wr32(E1000_SWSM, swsm);
1153}
1154
1155
1156
1157
1158
1159
1160
1161s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1162{
1163 s32 i = 0;
1164 s32 ret_val = 0;
1165
1166
1167 while (i < AUTO_READ_DONE_TIMEOUT) {
1168 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1169 break;
1170 msleep(1);
1171 i++;
1172 }
1173
1174 if (i == AUTO_READ_DONE_TIMEOUT) {
1175 hw_dbg("Auto read by HW from NVM has not completed.\n");
1176 ret_val = -E1000_ERR_RESET;
1177 goto out;
1178 }
1179
1180out:
1181 return ret_val;
1182}
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1193{
1194 s32 ret_val;
1195
1196 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1197 if (ret_val) {
1198 hw_dbg("NVM Read Error\n");
1199 goto out;
1200 }
1201
1202 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1203 switch(hw->phy.media_type) {
1204 case e1000_media_type_internal_serdes:
1205 *data = ID_LED_DEFAULT_82575_SERDES;
1206 break;
1207 case e1000_media_type_copper:
1208 default:
1209 *data = ID_LED_DEFAULT;
1210 break;
1211 }
1212 }
1213out:
1214 return ret_val;
1215}
1216
1217
1218
1219
1220
1221
/**
 *  igb_id_led_init - Initialize id led settings
 *  @hw: pointer to the HW structure
 *
 *  Reads the ID LED default word from the NVM and derives two LEDCTL
 *  images from the power-on LEDCTL value: ledctl_mode1 (written by
 *  igb_led_off()) and ledctl_mode2 (the base image used by
 *  igb_blink_led()).
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = igb_valid_led_default(hw, &data);
	if (ret_val)
		goto out;

	/* Save the power-on LEDCTL value; both modes start from it */
	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* Each nibble of the NVM word describes one of four LEDs; each
	 * LED owns one byte of the LEDCTL register (hence the i << 3
	 * shifts below).
	 */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		/* Program mode1 from the "...1" half of the nibble */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Default ("DEF1"): keep the power-on byte */
			break;
		}
		/* Program mode2 from the "...2" half of the nibble */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Default ("DEF2"): keep the power-on byte */
			break;
		}
	}

out:
	return ret_val;
}
1281
1282
1283
1284
1285
1286
1287
1288
/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Removes the current LED configuration by restoring the LEDCTL value
 *  saved at initialization time (hw->mac.ledctl_default).
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
1294
1295
1296
1297
1298
1299
1300
1301s32 igb_blink_led(struct e1000_hw *hw)
1302{
1303 u32 ledctl_blink = 0;
1304 u32 i;
1305
1306
1307
1308
1309
1310 ledctl_blink = hw->mac.ledctl_mode2;
1311 for (i = 0; i < 4; i++)
1312 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1313 E1000_LEDCTL_MODE_LED_ON)
1314 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1315 (i * 8));
1316
1317 wr32(E1000_LEDCTL, ledctl_blink);
1318
1319 return 0;
1320}
1321
1322
1323
1324
1325
1326
1327
1328s32 igb_led_off(struct e1000_hw *hw)
1329{
1330 switch (hw->phy.media_type) {
1331 case e1000_media_type_copper:
1332 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1333 break;
1334 default:
1335 break;
1336 }
1337
1338 return 0;
1339}
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352s32 igb_disable_pcie_master(struct e1000_hw *hw)
1353{
1354 u32 ctrl;
1355 s32 timeout = MASTER_DISABLE_TIMEOUT;
1356 s32 ret_val = 0;
1357
1358 if (hw->bus.type != e1000_bus_type_pci_express)
1359 goto out;
1360
1361 ctrl = rd32(E1000_CTRL);
1362 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1363 wr32(E1000_CTRL, ctrl);
1364
1365 while (timeout) {
1366 if (!(rd32(E1000_STATUS) &
1367 E1000_STATUS_GIO_MASTER_ENABLE))
1368 break;
1369 udelay(100);
1370 timeout--;
1371 }
1372
1373 if (!timeout) {
1374 hw_dbg("Master requests are pending.\n");
1375 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1376 goto out;
1377 }
1378
1379out:
1380 return ret_val;
1381}
1382
1383
1384
1385
1386
1387
1388
1389
1390s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1391{
1392 s32 ret_val = 0;
1393
1394 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1395 hw_dbg("Invalid MDI setting detected\n");
1396 hw->phy.mdix = 1;
1397 ret_val = -E1000_ERR_CONFIG;
1398 goto out;
1399 }
1400
1401out:
1402 return ret_val;
1403}
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1417 u32 offset, u8 data)
1418{
1419 u32 i, regvalue = 0;
1420 s32 ret_val = 0;
1421
1422
1423 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1424 wr32(reg, regvalue);
1425
1426
1427 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1428 udelay(5);
1429 regvalue = rd32(reg);
1430 if (regvalue & E1000_GEN_CTL_READY)
1431 break;
1432 }
1433 if (!(regvalue & E1000_GEN_CTL_READY)) {
1434 hw_dbg("Reg %08x did not indicate ready\n", reg);
1435 ret_val = -E1000_ERR_PHY;
1436 goto out;
1437 }
1438
1439out:
1440 return ret_val;
1441}
1442
1443
1444
1445
1446
1447
1448
1449
1450bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1451{
1452 u32 manc;
1453 u32 fwsm, factps;
1454 bool ret_val = false;
1455
1456 if (!hw->mac.asf_firmware_present)
1457 goto out;
1458
1459 manc = rd32(E1000_MANC);
1460
1461 if (!(manc & E1000_MANC_RCV_TCO_EN))
1462 goto out;
1463
1464 if (hw->mac.arc_subsystem_valid) {
1465 fwsm = rd32(E1000_FWSM);
1466 factps = rd32(E1000_FACTPS);
1467
1468 if (!(factps & E1000_FACTPS_MNGCG) &&
1469 ((fwsm & E1000_FWSM_MODE_MASK) ==
1470 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1471 ret_val = true;
1472 goto out;
1473 }
1474 } else {
1475 if ((manc & E1000_MANC_SMBUS_EN) &&
1476 !(manc & E1000_MANC_ASF_EN)) {
1477 ret_val = true;
1478 goto out;
1479 }
1480 }
1481
1482out:
1483 return ret_val;
1484}
1485