1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/if_ether.h>
26#include <linux/delay.h>
27#include <linux/pci.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30
31#include "e1000_mac.h"
32
33#include "igb.h"
34
35static s32 igb_set_default_fc(struct e1000_hw *hw);
36static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
37
38
39
40
41
42
43
44
45
/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for the adapter:
 *  bus type (always PCIe here), link speed, link width and the PCIe
 *  function number.  Always returns 0; a failed capability-register
 *  read just leaves speed/width reported as unknown.
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	/* Read the PCIe Link Status capability register */
	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		/* Current Link Speed field */
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		/* Negotiated Link Width field maps directly onto the
		 * e1000_bus_width enumeration values.
		 */
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	/* The PCIe function number is reported in the STATUS register */
	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}
84
85
86
87
88
89
90
91
92void igb_clear_vfta(struct e1000_hw *hw)
93{
94 u32 offset;
95
96 for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
97 hw->mac.ops.write_vfta(hw, offset, 0);
98}
99
100
101
102
103
104
105
106
107
108
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes @value at @offset in the VLAN filter table register array and
 *  mirrors it into the driver's shadow copy so software can consult the
 *  table without touching hardware.
 **/
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	struct igb_adapter *adapter = hw->back;

	array_wr32(E1000_VFTA, offset, value);
	wrfl();		/* flush the register write before shadowing it */

	adapter->shadow_vfta[offset] = value;
}
118
119
120
121
122
123
124
125
126
127
128void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
129{
130 u32 i;
131 u8 mac_addr[ETH_ALEN] = {0};
132
133
134 hw_dbg("Programming MAC Address into RAR[0]\n");
135
136 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
137
138
139 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
140 for (i = 1; i < rar_count; i++)
141 hw->mac.ops.rar_set(hw, mac_addr, i);
142}
143
144
145
146
147
148
149
150
151
152
153static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
154{
155 s32 regindex, first_empty_slot;
156 u32 bits;
157
158
159 if (vlan == 0)
160 return 0;
161
162
163
164
165
166 first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;
167
168
169
170
171
172
173 for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
174 bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
175 if (bits == vlan)
176 return regindex;
177 if (!first_empty_slot && !bits)
178 first_empty_slot = regindex;
179 }
180
181 return first_empty_slot ? : -E1000_ERR_NO_SPACE;
182}
183
184
185
186
187
188
189
190
191
192
193
/**
 *  igb_vfta_set - enable or disable vlan in VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vlan: VLAN id to add or remove
 *  @vind: VMDq output index that maps queue to VLAN id
 *  @vlan_on: if true add filter, if false remove
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  Sets or clears a bit in the VLAN filter table array based on VLAN id
 *  and whether we are adding or removing the filter.  When VFs are in
 *  use the VLVF pool-select registers are maintained as well.
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
		 bool vlan_on, bool vlvf_bypass)
{
	struct igb_adapter *adapter = hw->back;
	u32 regidx, vfta_delta, vfta, bits;
	s32 vlvf_index;

	if ((vlan > 4095) || (vind > 7))
		return -E1000_ERR_PARAM;

	/* This is a 2-part operation: first the VFTA, then the VLVF if
	 * VT mode is enabled.  We do not write the VFTA until we know
	 * the VLVF part succeeded.
	 *
	 * Part 1
	 * The VFTA is a bitstring made up of 32-bit registers that
	 * enable a particular VLAN id:
	 *    vlan / 32: which register
	 *    vlan % 32: which bit in that register
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = adapter->shadow_vfta[regidx];

	/* vfta_delta represents the difference between the current value
	 * of vfta and what we want in the register.  Since the diff is a
	 * single-bit mask, XORing it gives the desired result.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * If VT mode is set:
	 *   when vlan_on - make sure the VLAN is in VLVF and set the
	 *                  vind pool bit
	 *   when !vlan_on - clear the pool bit and possibly the entry
	 */
	if (!adapter->vfs_allocated_count)
		goto vfta_update;

	vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	bits = rd32(E1000_VLVF(vlvf_index));

	/* set the pool bit for this pool/VF */
	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit again (it was set just above) */
	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);

	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
		/* Clear VFTA first, then disable VLVF.  Otherwise we run
		 * the risk of stray packets leaking into the PF via the
		 * default pool.
		 */
		if (vfta_delta)
			hw->mac.ops.write_vfta(hw, regidx, vfta);

		/* disable VLVF and clear the remaining bits from the pool */
		wr32(E1000_VLVF(vlvf_index), 0);

		return 0;
	}

	/* If there are still pool bits set for this VLAN id, other
	 * pools/VFs are still using it, so we must not clear the VFTA
	 * bit yet.  Suppress the VFTA update by zeroing the delta; the
	 * bit will be cleared once the last user releases the VLAN.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool change and enable the VLAN id if not already enabled */
	wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);

vfta_update:
	/* update the VFTA bit if it changed */
	if (vfta_delta)
		hw->mac.ops.write_vfta(hw, regidx, vfta);

	return 0;
}
295
296
297
298
299
300
301
302
303
304
305
306
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the NVM for an alternate MAC address.  An alternate MAC address
 *  can be set up by pre-boot software and must be treated like a permanent
 *  address, overriding the actual permanent MAC address.  If a valid
 *  alternate MAC address is found it is programmed into RAR0, replacing
 *  the address installed there by hardware at reset.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer - no software support required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	/* Adjust the pointer to this port's copy of the address.
	 * Note: deliberately independent "if"s, not else-if; only one
	 * can match for a given function number.
	 */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	/* Each 16-bit NVM word holds two address bytes, low byte first */
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address; treat it the same as the
	 * normal permanent MAC address stored by the HW in the RAR by
	 * mapping it into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
366
367
368
369
370
371
372
373
374
375
/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes)
 *  @index: receive address array register to program
 *
 *  Sets the receive address array register at @index to @addr.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian.
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV (address valid) bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into a
	 * single burst write, which will malfunction on some parts.
	 * The flushes between the writes avoid this.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
402
403
404
405
406
407
408
409
410
411
412
413void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
414{
415 u32 hash_bit, hash_reg, mta;
416
417
418
419
420
421
422
423
424
425
426 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
427 hash_bit = hash_value & 0x1F;
428
429 mta = array_rd32(E1000_MTA, hash_reg);
430
431 mta |= BIT(hash_bit);
432
433 array_wr32(E1000_MTA, hash_reg, mta);
434 wrfl();
435}
436
437
438
439
440
441
442
443
444
445
446static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
447{
448 u32 hash_value, hash_mask;
449 u8 bit_shift = 0;
450
451
452 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
453
454
455
456
457 while (hash_mask >> bit_shift != 0xFF)
458 bit_shift++;
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485 switch (hw->mac.mc_filter_type) {
486 default:
487 case 0:
488 break;
489 case 1:
490 bit_shift += 1;
491 break;
492 case 2:
493 bit_shift += 2;
494 break;
495 case 3:
496 bit_shift += 4;
497 break;
498 }
499
500 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
501 (((u16) mc_addr[5]) << bit_shift)));
502
503 return hash_value;
504}
505
506
507
508
509
510
511
512
513
514
515void igb_update_mc_addr_list(struct e1000_hw *hw,
516 u8 *mc_addr_list, u32 mc_addr_count)
517{
518 u32 hash_value, hash_bit, hash_reg;
519 int i;
520
521
522 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
523
524
525 for (i = 0; (u32) i < mc_addr_count; i++) {
526 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
527
528 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
529 hash_bit = hash_value & 0x1F;
530
531 hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
532 mc_addr_list += (ETH_ALEN);
533 }
534
535
536 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
537 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
538 wrfl();
539}
540
541
542
543
544
545
546
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers;
 *  the returned values are intentionally discarded (the counters are
 *  presumably clear-on-read — see the device datasheet).
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}
587
588
589
590
591
592
593
594
595
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks whether the link state has changed.  If a change is detected,
 *  the PHY registers are read and the MAC/flow control configuration is
 *  updated accordingly.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers if the
	 * get_link_status flag is set (it is raised elsewhere in the
	 * driver when a link status change is signalled); otherwise
	 * there is nothing to do.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First see if the PHY reports link.  If not, there is nothing
	 * further to configure.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out;

	mac->get_link_status = false;

	/* Check if there was a DownShift; must be checked immediately
	 * after link-up.
	 */
	igb_check_downshift(hw);

	/* If speed/duplex are forced there is nothing more to do here;
	 * flag the configuration so the caller knows.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled, so the hardware takes care of MAC
	 * speed/duplex; we only need to configure collision distance.
	 */
	igb_config_collision_dist(hw);

	/* Configure flow control now that Auto-Neg has completed; we may
	 * have re-autonegotiated with a different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
656
657
658
659
660
661
662
663
664
665
666
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control and calls the media-specific link configuration routine.
 *  NOTE(review): presumably assumes the hardware has been reset and the
 *  transmitter/receiver are not yet enabled — confirm against callers.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* If the PHY reset is blocked we already have a link and must not
	 * reconfigure it.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is "default", derive the setting from
	 * the NVM.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* Start from the requested mode; later negotiation may adjust
	 * current_mode without losing the original request.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the media-specific subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers; done unconditionally since it is harmless even when
	 * flow control is disabled.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:

	return ret_val;
}
717
718
719
720
721
722
723
724
725
726void igb_config_collision_dist(struct e1000_hw *hw)
727{
728 u32 tctl;
729
730 tctl = rd32(E1000_TCTL);
731
732 tctl &= ~E1000_TCTL_COLD;
733 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
734
735 wr32(E1000_TCTL, tctl);
736 wrfl();
737}
738
739
740
741
742
743
744
745
746
747static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
748{
749 s32 ret_val = 0;
750 u32 fcrtl = 0, fcrth = 0;
751
752
753
754
755
756
757
758 if (hw->fc.current_mode & e1000_fc_tx_pause) {
759
760
761
762
763 fcrtl = hw->fc.low_water;
764 if (hw->fc.send_xon)
765 fcrtl |= E1000_FCRTL_XONE;
766
767 fcrth = hw->fc.high_water;
768 }
769 wr32(E1000_FCRTL, fcrtl);
770 wr32(E1000_FCRTH, fcrth);
771
772 return ret_val;
773}
774
775
776
777
778
779
780
781
782static s32 igb_set_default_fc(struct e1000_hw *hw)
783{
784 s32 ret_val = 0;
785 u16 lan_offset;
786 u16 nvm_data;
787
788
789
790
791
792
793
794
795
796 if (hw->mac.type == e1000_i350)
797 lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
798 else
799 lan_offset = 0;
800
801 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
802 1, &nvm_data);
803 if (ret_val) {
804 hw_dbg("NVM Read Error\n");
805 goto out;
806 }
807
808 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
809 hw->fc.requested_mode = e1000_fc_none;
810 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
811 hw->fc.requested_mode = e1000_fc_tx_pause;
812 else
813 hw->fc.requested_mode = e1000_fc_full;
814
815out:
816 return ret_val;
817}
818
819
820
821
822
823
824
825
826
827
828
/**
 *  igb_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Forces the MAC's flow control settings by writing the TFCE and RFCE
 *  bits in the device control register to reflect hw->fc.current_mode.
 **/
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/* Because link was not obtained via the MAC's internal
	 * auto-negotiation mechanism, transmit and receive flow control
	 * must be enabled/disabled manually according to
	 * hw->fc.current_mode:
	 *
	 *   e1000_fc_none:     flow control completely disabled
	 *   e1000_fc_rx_pause: we can receive pause frames but do not
	 *                      send them
	 *   e1000_fc_tx_pause: we can send pause frames but do not
	 *                      receive them
	 *   e1000_fc_full:     both Rx and Tx flow control (symmetric)
	 *   other:             invalid - rejected below
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}
881
882
883
884
885
886
887
888
889
890
891
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up.  If the link
 *  had to be forced, flow control is forced too.  If auto-negotiation
 *  succeeded, flow control is resolved from the local and link-partner
 *  advertisement (copper via MII registers, SerDes via PCS registers).
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* If auto-neg failed on SerDes media the link was forced, so the
	 * MAC flow control must be forced to match "fc".  For copper the
	 * MAC bits are always forced here because the PHY, not the MAC,
	 * runs auto-negotiation.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Copper media with auto-neg enabled: check whether auto-neg has
	 * completed and, if so, how the PHY and link partner negotiated
	 * flow control.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register twice because it contains
		 * latched ("sticky") bits; the second read reflects the
		 * current state.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* Auto-neg completed: read the local advertisement and
		 * the link partner ability registers to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Flow control is resolved from the PAUSE and ASM_DIR
		 * bits of both ends per the IEEE 802.3 pause resolution
		 * rules (DC = Don't Care):
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|----------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */

		/* Both PAUSE bits set: symmetric flow control at both
		 * ends; ASM_DIR is irrelevant.
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* If the user requested Rx-only pause we had to
			 * advertise FULL (Rx-only cannot be advertised),
			 * so now turn off PAUSE transmission unless full
			 * flow control was actually requested.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* Receiving PAUSE frames only:
		 *   local 0/1, partner 1/1 -> e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* Transmitting PAUSE frames only:
		 *   local 1/1, partner 0/1 -> e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec flow control should now be disabled.
		 * However, if we advertised some Rx capability and the
		 * partner advertised none, resolve to Rx-only anyway:
		 * enabling Rx is harmless if the partner really wants no
		 * flow control (no PAUSE frames will arrive), and if the
		 * partner's intent was to have flow control we can at
		 * least honor its PAUSE frames.  Strict IEEE mode keeps
		 * the spec behavior.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Final check: if we negotiated to HALF duplex, flow
		 * control must not be enabled (IEEE 802.3).
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Force the MAC to use the resolved flow control mode. */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}

	/* SerDes media with auto-neg enabled: same resolution as above
	 * but using the PCS advertisement/ability registers.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
		&& mac->autoneg) {
		/* Check whether PCS auto-neg has completed. */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* Read the local PCS advertisement and the link partner
		 * ability registers.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* Resolution follows the same PAUSE/ASM_DIR table as the
		 * copper case above.
		 */

		/* Both PAUSE bits set: symmetric flow control. */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Honor an Rx-only request that was advertised as
			 * FULL by disabling PAUSE transmission.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* Receiving PAUSE frames only. */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* Transmitting PAUSE frames only. */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec flow control is disabled. */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Force the PCS and MAC to use the resolved mode. */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
/**
 *  igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed (SPEED_10/100/1000)
 *  @duplex: stores the current duplex (HALF_DUPLEX/FULL_DUPLEX)
 *
 *  Reads the device STATUS register and decodes the current link speed
 *  and duplex for copper connections.  Always returns 0.
 **/
s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				    u16 *duplex)
{
	u32 status;

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg("1000 Mbs, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg("100 Mbs, ");
	} else {
		/* neither speed bit set: link is at 10 Mb/s */
		*speed = SPEED_10;
		hw_dbg("10 Mbs, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg("Half Duplex\n");
	}

	return 0;
}
1249
1250
1251
1252
1253
1254
1255
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquires the HW semaphore used to arbitrate access between software
 *  and firmware: first waits for the SMBI bit to clear, then sets and
 *  verifies the SWESMBI bit.  Returns 0 on success or -E1000_ERR_NVM on
 *  timeout.
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore: poll until SMBI is clear */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore: set SWESMBI and read it back; the
	 * semaphore is ours only if the bit latched.
	 */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if the bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release whatever we hold so others are not blocked */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
1302
1303
1304
1305
1306
1307
1308
1309void igb_put_hw_semaphore(struct e1000_hw *hw)
1310{
1311 u32 swsm;
1312
1313 swsm = rd32(E1000_SWSM);
1314
1315 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1316
1317 wr32(E1000_SWSM, swsm);
1318}
1319
1320
1321
1322
1323
1324
1325
1326s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1327{
1328 s32 i = 0;
1329 s32 ret_val = 0;
1330
1331
1332 while (i < AUTO_READ_DONE_TIMEOUT) {
1333 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1334 break;
1335 usleep_range(1000, 2000);
1336 i++;
1337 }
1338
1339 if (i == AUTO_READ_DONE_TIMEOUT) {
1340 hw_dbg("Auto read by HW from NVM has not completed.\n");
1341 ret_val = -E1000_ERR_RESET;
1342 goto out;
1343 }
1344
1345out:
1346 return ret_val;
1347}
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1358{
1359 s32 ret_val;
1360
1361 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1362 if (ret_val) {
1363 hw_dbg("NVM Read Error\n");
1364 goto out;
1365 }
1366
1367 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1368 switch (hw->phy.media_type) {
1369 case e1000_media_type_internal_serdes:
1370 *data = ID_LED_DEFAULT_82575_SERDES;
1371 break;
1372 case e1000_media_type_copper:
1373 default:
1374 *data = ID_LED_DEFAULT;
1375 break;
1376 }
1377 }
1378out:
1379 return ret_val;
1380}
1381
1382
1383
1384
1385
1386
/**
 *  igb_id_led_init - initialize the LED ID settings
 *  @hw: pointer to the HW structure
 *
 *  Reads the NVM ID-LED word and derives the ledctl_mode1 and
 *  ledctl_mode2 register values from the current LEDCTL contents.
 *  mode1 is used to restore LEDs (see igb_led_off) and mode2 is the
 *  "on" configuration used for identify/blink.
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have a different default-LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* Each of the four LEDs is described by a 4-bit nibble in "data"
	 * (i << 2) and occupies an 8-bit field in LEDCTL (i << 3).
	 */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		/* first switch: derive the mode1 value for this LED */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing - keep the hardware default */
			break;
		}
		/* second switch: derive the mode2 value for this LED */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing - keep the hardware default */
			break;
		}
	}

out:
	return ret_val;
}
1452
1453
1454
1455
1456
1457
1458
1459
/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Restores the saved (power-on default) state of the LED control
 *  register.  Always returns 0.
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
1465
1466
1467
1468
1469
1470
1471
/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blinks the LEDs which are configured to be on.  Always returns 0.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that is "on" in
		 * ledctl_mode2.  The hardware blink logic only works when
		 * the mode field is LED_ON, so for LEDs whose default is
		 * inverted (IVRT set) and whose mode is LED_OFF, the mode
		 * must be rewritten to LED_ON as well.  Each LED occupies
		 * an 8-bit field, hence the step of 8.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}
1510
1511
1512
1513
1514
1515
1516
1517s32 igb_led_off(struct e1000_hw *hw)
1518{
1519 switch (hw->phy.media_type) {
1520 case e1000_media_type_copper:
1521 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1522 break;
1523 default:
1524 break;
1525 }
1526
1527 return 0;
1528}
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541s32 igb_disable_pcie_master(struct e1000_hw *hw)
1542{
1543 u32 ctrl;
1544 s32 timeout = MASTER_DISABLE_TIMEOUT;
1545 s32 ret_val = 0;
1546
1547 if (hw->bus.type != e1000_bus_type_pci_express)
1548 goto out;
1549
1550 ctrl = rd32(E1000_CTRL);
1551 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1552 wr32(E1000_CTRL, ctrl);
1553
1554 while (timeout) {
1555 if (!(rd32(E1000_STATUS) &
1556 E1000_STATUS_GIO_MASTER_ENABLE))
1557 break;
1558 udelay(100);
1559 timeout--;
1560 }
1561
1562 if (!timeout) {
1563 hw_dbg("Master requests are pending.\n");
1564 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1565 goto out;
1566 }
1567
1568out:
1569 return ret_val;
1570}
1571
1572
1573
1574
1575
1576
1577
1578
1579s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1580{
1581 s32 ret_val = 0;
1582
1583
1584 if (hw->mac.type >= e1000_82580)
1585 goto out;
1586
1587 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1588 hw_dbg("Invalid MDI setting detected\n");
1589 hw->phy.mdix = 1;
1590 ret_val = -E1000_ERR_CONFIG;
1591 goto out;
1592 }
1593
1594out:
1595 return ret_val;
1596}
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
/**
 *  igb_write_8bit_ctrl_reg - Write an 8-bit control register
 *  @hw: pointer to the HW structure
 *  @reg: 32-bit register offset to write to
 *  @offset: sub-register address placed in the upper bits
 *  @data: data byte to write at @offset
 *
 *  Writes an address/data style control register (value is formed as
 *  offset << E1000_GEN_CTL_ADDRESS_SHIFT | data) and polls the ready
 *  bit for completion.  Returns -E1000_ERR_PHY on timeout.
 **/
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit until the operation completes */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
1635
1636
1637
1638
1639
1640
1641
1642
1643bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1644{
1645 u32 manc;
1646 u32 fwsm, factps;
1647 bool ret_val = false;
1648
1649 if (!hw->mac.asf_firmware_present)
1650 goto out;
1651
1652 manc = rd32(E1000_MANC);
1653
1654 if (!(manc & E1000_MANC_RCV_TCO_EN))
1655 goto out;
1656
1657 if (hw->mac.arc_subsystem_valid) {
1658 fwsm = rd32(E1000_FWSM);
1659 factps = rd32(E1000_FACTPS);
1660
1661 if (!(factps & E1000_FACTPS_MNGCG) &&
1662 ((fwsm & E1000_FWSM_MODE_MASK) ==
1663 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1664 ret_val = true;
1665 goto out;
1666 }
1667 } else {
1668 if ((manc & E1000_MANC_SMBUS_EN) &&
1669 !(manc & E1000_MANC_ASF_EN)) {
1670 ret_val = true;
1671 goto out;
1672 }
1673 }
1674
1675out:
1676 return ret_val;
1677}
1678