1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/if_ether.h>
25#include <linux/delay.h>
26#include <linux/pci.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29
30#include "e1000_mac.h"
31
32#include "igb.h"
33
34static s32 igb_set_default_fc(struct e1000_hw *hw);
35static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
36
37
38
39
40
41
42
43
44
/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for this network
 *  interface: bus type (always PCIe here), negotiated link speed and
 *  width, and the PCI function number.  Always returns 0; if the PCIe
 *  capability register cannot be read, speed/width are recorded as
 *  unknown instead of failing.
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		/* Can't read the link status; report everything unknown */
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		/* Negotiated link width field of the PCIe link status reg */
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	/* The device's function number is reported in the STATUS register */
	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}
83
84
85
86
87
88
89
90
91void igb_clear_vfta(struct e1000_hw *hw)
92{
93 u32 offset;
94
95 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
96 array_wr32(E1000_VFTA, offset, 0);
97 wrfl();
98 }
99}
100
101
102
103
104
105
106
107
108
109
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes @value at the given @offset in the register array which
 *  stores the VLAN filter table, then flushes the write.
 **/
static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	array_wr32(E1000_VFTA, offset, value);
	wrfl();
}
115
116
117
118
119
120
121
122
123
124
125
126
127
/**
 *  igb_clear_vfta_i350 - Clear VLAN filter table
 *  @hw: pointer to the HW structure
 *
 *  Clears the register array which contains the VLAN filter table by
 *  setting all the values to 0.  Each entry is written ten times before
 *  the flush — NOTE(review): presumably an i350/i354 hardware workaround
 *  required for the write to take effect; confirm against the datasheet.
 **/
void igb_clear_vfta_i350(struct e1000_hw *hw)
{
	u32 offset;
	int i;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		for (i = 0; i < 10; i++)
			array_wr32(E1000_VFTA, offset, 0);

		wrfl();
	}
}
140
141
142
143
144
145
146
147
148
149
/**
 *  igb_write_vfta_i350 - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes @value at the given @offset in the VLAN filter table register
 *  array.  The write is repeated ten times before the flush —
 *  NOTE(review): presumably an i350/i354 hardware workaround; confirm
 *  against the datasheet.
 **/
static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
{
	int i;

	for (i = 0; i < 10; i++)
		array_wr32(E1000_VFTA, offset, value);

	wrfl();
}
159
160
161
162
163
164
165
166
167
168
/**
 *  igb_init_rx_addrs - Initialize receive address registers
 *  @hw: pointer to the HW structure
 *  @rar_count: number of receive address registers to initialize
 *
 *  Sets up the receive address registers by programming RAR[0] with the
 *  device's MAC address (hw->mac.addr) and clearing RAR[1] through
 *  RAR[rar_count - 1] with an all-zero address.
 **/
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = {0};

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_count - 1) receive addresses */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}
184
185
186
187
188
189
190
191
192
193
/**
 *  igb_vfta_set - enable or disable a VLAN in the VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vid: VLAN id to add or remove
 *  @add: true to set the filter bit, false to clear it
 *
 *  Sets or clears the bit for @vid in the shadow copy of the VLAN filter
 *  table, writes the updated word to hardware (via the i350-specific
 *  helper on i350/i354 parts), and keeps the shadow in sync.
 *
 *  Returns 0 on success, or -E1000_ERR_CONFIG if the bit was already in
 *  the requested state.  Note the hardware write and shadow update are
 *  performed unconditionally, which re-syncs hardware with the shadow
 *  even in the already-set/already-clear case.
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
{
	u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
	u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
	u32 vfta;
	struct igb_adapter *adapter = hw->back;
	s32 ret_val = 0;

	vfta = adapter->shadow_vfta[index];

	/* bit was set/cleared before we started */
	if ((!!(vfta & mask)) == add) {
		ret_val = -E1000_ERR_CONFIG;
	} else {
		if (add)
			vfta |= mask;
		else
			vfta &= ~mask;
	}
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_write_vfta_i350(hw, index, vfta);
	else
		igb_write_vfta(hw, index, vfta);
	adapter->shadow_vfta[index] = vfta;

	return ret_val;
}
221
222
223
224
225
226
227
228
229
230
231
232
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the NVM for an alternate MAC address.  An alternate MAC address
 *  can be set up by pre-boot software and must be treated like a
 *  permanent address, overriding the actual permanent MAC address.  If a
 *  valid alternate MAC address is found it is programmed into RAR[0].
 *
 *  Returns 0 on success (including "no alternate address present"), or a
 *  negative error code on NVM read failure.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	/* Each LAN function has its own alternate address area in NVM */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		/* NVM words are little-endian: low byte first */
		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
292
293
294
295
296
297
298
299
300
301
/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes)
 *  @index: receive address array register to write
 *
 *  Programs the receive address register pair RAL/RAH at @index with
 *  @addr and marks the entry valid (unless the address is all zero).
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges may combine consecutive 32-bit writes into a single
	 * burst write, which can malfunction on some parts.  Write low then
	 * high with an explicit flush after each to prevent that.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
328
329
330
331
332
333
334
335
336
337
338
/**
 *  igb_mta_set - Set multicast filter table address
 *  @hw: pointer to the HW structure
 *  @hash_value: determines the MTA register and bit to set
 *
 *  The multicast table address is a register array of 32-bit registers
 *  treated like an array of (32 * mta_reg_count) bits.  This sets bit
 *  BitArray[hash_value]: figure out which register the bit lives in,
 *  read it, OR in the new bit, and write the value back.
 **/
void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* (mta_reg_count - 1) masks bits 31:5 of the hash value, giving
	 * the register index; the bit within that register is selected
	 * by the lower 5 bits of the hash value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);

	mta |= (1 << hash_bit);

	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();
}
362
363
364
365
366
367
368
369
370
371
372static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
373{
374 u32 hash_value, hash_mask;
375 u8 bit_shift = 0;
376
377
378 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
379
380
381
382
383 while (hash_mask >> bit_shift != 0xFF)
384 bit_shift++;
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411 switch (hw->mac.mc_filter_type) {
412 default:
413 case 0:
414 break;
415 case 1:
416 bit_shift += 1;
417 break;
418 case 2:
419 bit_shift += 2;
420 break;
421 case 3:
422 bit_shift += 4;
423 break;
424 }
425
426 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
427 (((u16) mc_addr[5]) << bit_shift)));
428
429 return hash_value;
430}
431
432
433
434
435
436
437
438
439
440
/**
 *  igb_update_mc_addr_list - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program (ETH_ALEN
 *                 bytes each, packed back to back)
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Rebuilds the shadow multicast table from the given address list,
 *  then writes the whole shadow table to the hardware MTA registers.
 **/
void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		/* Register index from bits 31:5, bit index from bits 4:0 */
		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table; single flush after all writes */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
}
466
467
468
469
470
471
472
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware statistics counters.  These counters are
 *  read-to-clear, so reading each register resets it; the values read
 *  here are intentionally discarded.
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}
513
514
515
516
517
518
519
520
521
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks whether the link status of the hardware has changed.  If a
 *  change is detected, completes the post-link-up configuration:
 *  downshift check, collision distance, and flow control.
 *
 *  Returns 0 on success (including "no link yet"), or a negative error
 *  code; -E1000_ERR_CONFIG is returned when autoneg is disabled.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift; must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forced to speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care of
	 * MAC speed/duplex configuration, so we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
582
583
584
585
586
587
588
589
590
591
592
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control: calls the media-specific physical interface setup and
 *  programs the flow control address, type, timer, and watermark
 *  registers.  Skipped entirely when a manageability/reset block is
 *  active.
 *
 *  Returns 0 on success or a negative error code.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have
	 * link.  We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then connected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:

	return ret_val;
}
643
644
645
646
647
648
649
650
651
/**
 *  igb_config_collision_dist - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Programs the collision distance (COLD field) in the Transmit Control
 *  register.  This is used during link setup.
 **/
void igb_config_collision_dist(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = rd32(E1000_TCTL);

	/* Replace the COLD field with the default collision distance */
	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	wr32(E1000_TCTL, tctl);
	wrfl();
}
664
665
666
667
668
669
670
671
672
/**
 *  igb_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, the XON frame
 *  transmission bit is also set in the low threshold register.
 *
 *  Always returns 0.
 **/
static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0 (disabled).
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission
		 * of XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	wr32(E1000_FCRTL, fcrtl);
	wr32(E1000_FCRTH, fcrth);

	return ret_val;
}
700
701
702
703
704
705
706
707
/**
 *  igb_set_default_fc - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Reads the EEPROM/NVM INIT_CONTROL2 word and, from its PAUSE bits,
 *  stores the default flow control mode in hw->fc.requested_mode:
 *  none, tx_pause (ASM_DIR only), or full.
 *
 *  Returns 0 on success or a negative error code on NVM read failure.
 **/
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 lan_offset;
	u16 nvm_data;

	/* Read and store word 0x0F of the EEPROM. This word contains bits
	 * that determine the hardware's default PAUSE (flow control)
	 * mode, a bit that determines whether the HW defaults to enabling
	 * or disabling auto-negotiation, and the direction of the
	 * SW defined pins. If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	if (hw->mac.type == e1000_i350) {
		/* i350 keeps a separate copy per LAN function */
		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
					   + lan_offset, 1, &nvm_data);
	} else {
		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
					   1, &nvm_data);
	}

	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
		 NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}
747
748
749
750
751
752
753
754
755
756
757
/**
 *  igb_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Forces the MAC's flow control settings from hw->fc.current_mode by
 *  setting/clearing the TFCE and RFCE bits in the device control
 *  register.  Used when auto-negotiation is disabled, fails, or when
 *  the PHY-negotiated result must be applied to the MAC.
 *
 *  The possible values of the "fc" parameter are:
 *      0:  Flow control is completely disabled
 *      1:  Rx flow control is enabled (we can receive pause
 *          frames but not send pause frames).
 *      2:  Tx flow control is enabled (we can send pause frames
 *          but we do not receive pause frames).
 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
 *
 *  Returns 0 on success or -E1000_ERR_CONFIG for an invalid mode.
 **/
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}
810
811
812
813
814
815
816
817
818
819
820
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link-up to ensure that
 *  the speed and duplex were not forced.  If the link needed to be
 *  forced, the flow control settings are forced directly at the MAC;
 *  otherwise the negotiated PAUSE/ASM_DIR advertisements of both link
 *  partners are resolved into the final flow control mode (per the
 *  IEEE 802.3 Annex 28B resolution table) and applied to the MAC.
 *  Handles both copper (MII) and internal-serdes (PCS) media.
 *
 *  Returns 0 on success or a negative error code.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PAUSE and ASM_DIR) and two bits in the Auto Negotiation
		 * Base Page Ability Register (same bits) determine flow
		 * control for both the local device and the link partner.
		 * The resolution below follows the IEEE 802.3 Annex 28B
		 * PAUSE resolution table.
		 */

		/* Both partners advertise PAUSE: symmetric flow control is
		 * possible; honor our requested mode (full vs rx-only).
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected RX ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY:
		 *   LOCAL DEVICE: PAUSE=0, ASM_DIR=1
		 *   LINK PARTNER: PAUSE=1, ASM_DIR=1
		 * resolves to Tx-only pause on our side.
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY:
		 *   LOCAL DEVICE: PAUSE=1, ASM_DIR=1
		 *   LINK PARTNER: PAUSE=0, ASM_DIR=1
		 * resolves to Rx-only pause on our side.
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling Rx only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}

	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
		&& mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto_Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* Same IEEE 802.3 Annex 28B resolution as the copper
		 * path above, but using the PCS PAUSE/ASM_DIR bits.
		 */

		/* Both partners advertise PAUSE */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* If we requested Rx-only pause, we had to
			 * advertise FULL; drop Tx pause back off now.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* LOCAL: PAUSE=0, ASM_DIR=1; PARTNER: PAUSE=1, ASM_DIR=1
		 * -> Tx-only pause on our side
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* LOCAL: PAUSE=1, ASM_DIR=1; PARTNER: PAUSE=0, ASM_DIR=1
		 * -> Rx-only pause on our side
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1152 u16 *duplex)
1153{
1154 u32 status;
1155
1156 status = rd32(E1000_STATUS);
1157 if (status & E1000_STATUS_SPEED_1000) {
1158 *speed = SPEED_1000;
1159 hw_dbg("1000 Mbs, ");
1160 } else if (status & E1000_STATUS_SPEED_100) {
1161 *speed = SPEED_100;
1162 hw_dbg("100 Mbs, ");
1163 } else {
1164 *speed = SPEED_10;
1165 hw_dbg("10 Mbs, ");
1166 }
1167
1168 if (status & E1000_STATUS_FD) {
1169 *duplex = FULL_DUPLEX;
1170 hw_dbg("Full Duplex\n");
1171 } else {
1172 *duplex = HALF_DUPLEX;
1173 hw_dbg("Half Duplex\n");
1174 }
1175
1176 return 0;
1177}
1178
1179
1180
1181
1182
1183
1184
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquires the HW semaphore used for software/firmware and
 *  software/software synchronization of NVM access: first waits for the
 *  SMBI bit to clear, then claims the SWESMBI bit (write-then-readback,
 *  since firmware may win the race).
 *
 *  Returns 0 on success or -E1000_ERR_NVM on timeout.
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
1231
1232
1233
1234
1235
1236
1237
/**
 *  igb_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Releases the HW semaphore used for NVM access by clearing both the
 *  SMBI and SWESMBI bits.
 **/
void igb_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = rd32(E1000_SWSM);

	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	wr32(E1000_SWSM, swsm);
}
1248
1249
1250
1251
1252
1253
1254
/**
 *  igb_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Polls the EECD register until the hardware's autonomous read of the
 *  NVM completes (EECD.AUTO_RD set) or the timeout expires.
 *
 *  Returns 0 on success or -E1000_ERR_RESET on timeout.
 **/
s32 igb_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;
	s32 ret_val = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
			break;
		usleep_range(1000, 2000);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg("Auto read by HW from NVM has not completed.\n");
		ret_val = -E1000_ERR_RESET;
		goto out;
	}

out:
	return ret_val;
}
1277
1278
1279
1280
1281
1282
1283
1284
1285
/**
 *  igb_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM) ID LED settings word
 *
 *  Reads the LED default settings from the NVM to @data.  If the NVM
 *  value is a reserved pattern (0x0000 or 0xFFFF), substitutes the
 *  media-appropriate built-in default.
 *
 *  Returns 0 on success or a negative error code on NVM read failure.
 **/
static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}
1310
1311
1312
1313
1314
1315
/**
 *  igb_id_led_init - Initialize LED identification modes
 *  @hw: pointer to the HW structure
 *
 *  Reads the LED configuration word from the NVM and derives the two
 *  cached LEDCTL values (ledctl_mode1 / ledctl_mode2) used for LED
 *  identification, starting from the current LEDCTL register contents.
 *  Each of the four LEDs occupies one nibble in the NVM word and one
 *  byte in the LEDCTL register.
 *
 *  Returns 0 on success or a negative error code.
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		/* nibble i of the NVM word configures LED i */
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}
1381
1382
1383
1384
1385
1386
1387
1388
/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Restores the saved pre-identification LED configuration by writing
 *  the default LEDCTL value back to the register.  Always returns 0.
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
1394
1395
1396
1397
1398
1399
1400
/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Makes the activity LED blink.  For fiber media LED0 is set to blink
 *  directly; otherwise every LED whose mode2 setting amounts to "on"
 *  (accounting for the invert bit in the default config) is switched to
 *  forced-on with blink enabled.  Always returns 0.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (LED_ON)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on",
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}
1439
1440
1441
1442
1443
1444
1445
/**
 *  igb_led_off - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turns the identification LED off by restoring the mode1 LEDCTL
 *  configuration.  Only copper media is handled; other media types are
 *  a no-op.  Always returns 0.
 **/
s32 igb_led_off(struct e1000_hw *hw)
{
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
/**
 *  igb_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Requests that the device stop issuing PCIe master transactions, then
 *  polls the STATUS register until outstanding master requests drain or
 *  the timeout expires.  Non-PCIe bus types are a no-op.
 *
 *  Returns 0 on success, or -E1000_ERR_MASTER_REQUESTS_PENDING if the
 *  master requests are still pending after the timeout.
 **/
s32 igb_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	wr32(E1000_CTRL, ctrl);

	while (timeout) {
		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}
1500
1501
1502
1503
1504
1505
1506
1507
/**
 *  igb_validate_mdi_setting - Verify MDI/MDIx settings
 *  @hw: pointer to the HW structure
 *
 *  Verifies that when not using auto-negotiation, the MDI/MDIx setting
 *  is not auto (0) or auto-crossover (3), since those require autoneg.
 *  If invalid, the setting is reset to 1 (forced MDI) and an error is
 *  returned.  All MDI settings are supported on 82580 and newer, so the
 *  check is skipped there.
 *
 *  Returns 0 on success or -E1000_ERR_CONFIG on an invalid setting.
 **/
s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* All MDI settings are supported on 82580 and newer. */
	if (hw->mac.type >= e1000_82580)
		goto out;

	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
		hw_dbg("Invalid MDI setting detected\n");
		hw->phy.mdix = 1;
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
/**
 *  igb_write_8bit_ctrl_reg - Write a control register via the 8-bit
 *  access mechanism
 *  @hw: pointer to the HW structure
 *  @reg: 32-bit register offset of the generic control register
 *  @offset: address within the register block to write to
 *  @data: 8-bit value to write
 *
 *  Writes an address/data pair to the generic control register and
 *  polls for the READY bit to confirm the write completed.
 *
 *  Returns 0 on success or -E1000_ERR_PHY if the register never
 *  indicates ready.
 **/
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the MDI read completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
1564
1565
1566
1567
1568
1569
1570
1571
/**
 *  igb_enable_mng_pass_thru - Enable processing of ARP's
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to leave interface enabled so that
 *  frames can be directed to and from the management interface.
 *
 *  Returns true when manageability pass-through should be enabled:
 *  ASF firmware is present, TCO receive is enabled, and either the ARC
 *  subsystem reports pass-through mode, or (without a valid ARC
 *  subsystem) management traffic goes over SMBus without ASF.
 **/
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		/* Pass-through mode requires management clock gating off
		 * and firmware in pass-through mode.
		 */
		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		/* No ARC subsystem: SMBus management without ASF */
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}
1607