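/* MAC, flow control and multicast filter helper routines for the
 * Intel(R) igb Gigabit Ethernet driver.
 */
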
#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include "e1000_mac.h"

#include "igb.h"

static s32 igb_set_default_fc(struct e1000_hw *hw);
static s32 igb_set_fc_watermarks(struct e1000_hw *hw);

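/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface.  The following bus information is determined and
 *  stored: bus speed, bus width, type (PCIe), and PCIe function.
 **/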
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}

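/**
 *  igb_clear_vfta - Clear VLAN filter table array
 *  @hw: pointer to the HW structure
 *
 *  Clears the register array which contains the VLAN filter table by
 *  setting all the values to 0.
 **/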
void igb_clear_vfta(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		array_wr32(E1000_VFTA, offset, 0);
		wrfl();
	}
}

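/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table.
 **/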
static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	array_wr32(E1000_VFTA, offset, value);
	wrfl();
}

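/**
 *  igb_init_rx_addrs - Initialize receive addresses
 *  @hw: pointer to the HW structure
 *  @rar_count: number of receive address registers
 *
 *  Sets up the receive address registers by programming the device's MAC
 *  address into the base receive address register (RAR0) and clearing all
 *  the other receive address registers.
 **/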
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = {0};

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_count - 1) receive addresses */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}

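/**
 *  igb_vfta_set - enable or disable vlan in VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vid: VLAN id to add or remove
 *  @add: if true add filter, if false remove
 *
 *  Sets or clears a bit in the VLAN filter table array based on VLAN id
 *  and whether we are adding or removing the filter.
 **/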
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
{
	u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
	u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
	u32 vfta = array_rd32(E1000_VFTA, index);
	s32 ret_val = 0;

	/* bit was already set/cleared before we started */
	if ((!!(vfta & mask)) == add) {
		ret_val = -E1000_ERR_CONFIG;
	} else {
		if (add)
			vfta |= mask;
		else
			vfta &= ~mask;
	}

	igb_write_vfta(hw, index, vfta);

	return ret_val;
}

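/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the NVM for an alternate MAC address.  An alternate MAC address
 *  can be set up by pre-boot software and must be treated like a permanent
 *  address, overriding the actual permanent MAC address.  If a valid
 *  alternate MAC address is found it is programmed into RAR0.  Returns an
 *  error only if the NVM cannot be read.
 **/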
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (nvm_alt_mac_addr_offset == 0xFFFF) {
		/* There is no Alternate MAC Address */
		goto out;
	}

	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (alt_mac_addr[0] & 0x01) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR.  Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}

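/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/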
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into
	 * a single burst write, which will malfunction on some parts.
	 * The flushes avoid this.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

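/**
 *  igb_mta_set - Set multicast filter table address
 *  @hw: pointer to the HW structure
 *  @hash_value: determines the MTA register and bit to set
 *
 *  The multicast table address is a register array of 32-bit registers.
 *  The hash_value is used to determine what register the bit is in; the
 *  current value is read, the new bit is OR'd in and the new value is
 *  written back into the register.
 **/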
void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers.  It is
	 * treated like an array of (32*mta_reg_count) bits.  We want to
	 * set bit BitArray[hash_value].  So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write back the
	 * new value.  The register is determined by the upper bits of
	 * the hash value and the bit within that register is determined
	 * by the lower 5 bits of the value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);

	mta |= (1 << hash_bit);

	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();
}

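/**
 *  igb_hash_mc_addr - Generate a multicast hash value
 *  @hw: pointer to the HW structure
 *  @mc_addr: pointer to a multicast address
 *
 *  Generates a multicast address hash value which is used to determine
 *  the multicast filter table array address and new table value.  See
 *  igb_mta_set().
 **/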
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.  There is a total
	 * of 8 bits of shifting: mc_addr[5] is shifted left by bit_shift
	 * and mc_addr[4] is shifted right by (8 - bit_shift), so that a
	 * window of the upper address bits lands within hash_mask.
	 * Raising mc_filter_type increases bit_shift, moving the window
	 * down the address.  For example, with a 4096-entry MTA (12-bit
	 * hash), type 0 uses address bits [47:36], type 1 uses [46:35],
	 * type 2 uses [45:34] and type 3 uses [43:32].
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				  (((u16) mc_addr[5]) << bit_shift)));

	return hash_value;
}

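/**
 *  igb_update_mc_addr_list - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates the entire Multicast Table Array.  The caller must pass a
 *  packed mc_addr_list of multicast addresses.
 **/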
void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
}

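/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers,
 *  which are clear-on-read.
 **/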
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}

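/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to determine if the link state has changed and, if so, configures
 *  the MAC (collision distance and flow control) to match the negotiated
 *  link.  Looks at the link status of the PHY to determine the current link
 *  status of the adapter.
 **/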
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out;

	mac->get_link_status = false;

	/* Check if there was DownShift; must be checked
	 * immediately after link-up.
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}

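/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/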
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media-specific subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:
	return ret_val;
}

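/**
 *  igb_config_collision_dist - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Configures the collision distance to the default value and is used
 *  during link setup.
 **/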
void igb_config_collision_dist(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = rd32(E1000_TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	wr32(E1000_TCTL, tctl);
	wrfl();
}

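/**
 *  igb_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, then set XON frame
 *  transmission as well.
 **/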
static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	wr32(E1000_FCRTL, fcrtl);
	wr32(E1000_FCRTH, fcrth);

	return ret_val;
}

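/**
 *  igb_set_default_fc - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/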
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 nvm_data;

	/* Read and store word 0x0F of the EEPROM.  This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins.  If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);

	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
		 NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}

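/**
 *  igb_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Forces the MAC's flow control settings by setting the TFCE and RFCE
 *  bits in the device control register to reflect the adapter settings.
 *  TFCE and RFCE need to be explicitly set by software when a copper PHY
 *  is used because autonegotiation is managed by the PHY rather than the
 *  MAC.  Software must also configure these bits when link is forced on
 *  a fiber connection.
 **/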
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enumerates all four possible rules
	 * (flow control modes) which can be selected:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}

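/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/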
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */

		/* Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could be
		 * connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do this
		 * safely for two reasons: if the link partner really didn't
		 * want flow control enabled and we enable Rx, no harm done
		 * since we won't be receiving any PAUSE frames anyway; if
		 * the intent on the link partner was to have flow control
		 * enabled, then by enabling Rx only we can at least receive
		 * pause frames and process them.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none ||
			  hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 hw->fc.strict_ieee) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}

out:
	return ret_val;
}

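/**
 *  igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Reads the status register for the current speed/duplex and stores the
 *  current speed and duplex for copper connections.
 **/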
s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				    u16 *duplex)
{
	u32 status;

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg("1000 Mbps, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg("100 Mbps, ");
	} else {
		*speed = SPEED_10;
		hw_dbg("10 Mbps, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg("Half Duplex\n");
	}

	return 0;
}

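/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM.
 **/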
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}

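/**
 *  igb_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Release the hardware semaphore used to access the PHY or NVM.
 **/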
void igb_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = rd32(E1000_SWSM);

	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	wr32(E1000_SWSM, swsm);
}

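/**
 *  igb_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Check EEPROM for the Auto Read done bit.
 **/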
s32 igb_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;
	s32 ret_val = 0;

	/* Poll until the Auto Read Done bit is set or we time out */
	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
			break;
		msleep(1);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg("Auto read by HW from NVM has not completed.\n");
		ret_val = -E1000_ERR_RESET;
		goto out;
	}

out:
	return ret_val;
}

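/**
 *  igb_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the LED settings read from the NVM
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/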
static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}

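/**
 *  igb_id_led_init - Initialize LED identification registers
 *  @hw: pointer to the HW structure
 *
 *  Reads the LED default settings from the NVM and builds the LEDCTL
 *  values for the LED "on" and "off" identify states, saving them in the
 *  mac struct for later use by the LED control routines.
 **/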
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = igb_valid_led_default(hw, &data);
	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}

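/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED configuration
 *  to the default value, saved from the EEPROM.
 **/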
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}

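/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/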
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	/* Set the blink bit for each LED that's "on" (0x0E)
	 * in ledctl_mode2.
	 */
	ledctl_blink = hw->mac.ledctl_mode2;
	for (i = 0; i < 4; i++)
		if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
		    E1000_LEDCTL_MODE_LED_ON)
			ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
					 (i * 8));

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}

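/**
 *  igb_led_off - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turn LED off.
 **/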
s32 igb_led_off(struct e1000_hw *hw)
{
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}

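/**
 *  igb_disable_pcie_master - Disables PCI-Express master access
 *  @hw: pointer to the HW structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.  Returns 0 if successful, otherwise
 *  -E1000_ERR_MASTER_REQUESTS_PENDING if the master disable bit has not
 *  caused the master requests to be disabled within the timeout.
 **/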
s32 igb_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	wr32(E1000_CTRL, ctrl);

	while (timeout) {
		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}

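/**
 *  igb_validate_mdi_setting - Verify MDI/MDIx settings
 *  @hw: pointer to the HW structure
 *
 *  Verify that when not using auto-negotiation, MDI/MDIx is correctly
 *  set; only fixed MDI mode is allowed, so an auto-crossover setting is
 *  forced back to MDI.
 **/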
s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
		hw_dbg("Invalid MDI setting detected\n");
		hw->phy.mdix = 1;
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}

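/**
 *  igb_write_8bit_ctrl_reg - Write a 8bit CTRL register
 *  @hw: pointer to the HW structure
 *  @reg: 32bit register offset such as E1000_SCTL
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Writes an address/data control type register.  There are several of
 *  these and they all have the format address << 8 | data, with bit 31
 *  polled for completion.
 **/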
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the operation completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

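/**
 *  igb_enable_mng_pass_thru - Enable processing of ARP's
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to leave the interface enabled so that
 *  frames can be directed to and from the management interface.
 **/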
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}