/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include "e1000.h"

enum e1000_mng_mode {
	e1000_mng_mode_none = 0,
	e1000_mng_mode_asf,
	e1000_mng_mode_pt,
	e1000_mng_mode_ipmi,
	e1000_mng_mode_host_if_only
};

#define E1000_FACTPS_MNGCG	0x20000000

#define E1000_IAMT_SIGNATURE	0x544D4149

/**
 *  e1000e_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface.  The following bus information is determined and
 *  stored: bus width, type (PCIe), and PCIe function.
 **/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	struct e1000_adapter *adapter = hw->adapter;
	u32 status;
	u16 pcie_link_status, pci_header_type, cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset) {
		bus->width = e1000_bus_width_unknown;
	} else {
		pci_read_config_word(adapter->pdev,
				     cap_offset + PCIE_LINK_STATUS,
				     &pcie_link_status);
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCIE_LINK_WIDTH_MASK) >>
						    PCIE_LINK_WIDTH_SHIFT);
	}

	pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
			     &pci_header_type);
	if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
		status = er32(STATUS);
		bus->func = (status & E1000_STATUS_FUNC_MASK)
			    >> E1000_STATUS_FUNC_SHIFT;
	} else {
		bus->func = 0;
	}

	return 0;
}

/**
 *  e1000e_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table.
 **/
void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
	e1e_flush();
}

/**
 *  e1000e_init_rx_addrs - Initialize receive address registers
 *  @hw: pointer to the HW structure
 *  @rar_count: number of receive address registers
 *
 *  Sets up the receive address registers by setting the base receive address
 *  register (RAR[0]) to the device's MAC address and clearing all the other
 *  receive address registers to 0.
 **/
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;

	/* Setup the receive address */
	hw_dbg(hw, "Programming MAC Address into RAR[0]\n");

	e1000e_rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_count - 1) receive addresses */
	hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
	for (i = 1; i < rar_count; i++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
		e1e_flush();
		E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
		e1e_flush();
	}
}

/**
 *  e1000e_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/
void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Mark the register entry as valid */
	rar_high |= E1000_RAH_AV;

	E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
	E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
}
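
/* Worked example (illustrative only, not from the original source): for the
 * MAC address 00:AA:BB:CC:DD:EE, addr[0]..addr[5] are 0x00, 0xAA, 0xBB,
 * 0xCC, 0xDD, 0xEE, so the values written above would be:
 *
 *	rar_low  = 0x00 | (0xAA << 8) | (0xBB << 16) | (0xCC << 24)
 *		 = 0xCCBBAA00;
 *	rar_high = 0xDD | (0xEE << 8)		// 0x0000EEDD
 *		 | E1000_RAH_AV;		// address-valid bit
 */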

/**
 *  e1000_mta_set - Set multicast filter table address
 *  @hw: pointer to the HW structure
 *  @hash_value: determines the MTA register and bit to set
 *
 *  The multicast table address is a register array of 32-bit registers.
 *  The hash_value is used to determine what register the bit is in, the
 *  current value is read, the new bit is OR'd in and the new value is
 *  written back into the register.
 **/
static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers.  It is
	 * treated like an array of (32*mta_reg_count) bits.  We want to
	 * set bit BitArray[hash_value].  So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The register is determined by the
	 * upper bits of the hash value and the bit within that
	 * register is determined by the lower 5 bits of the value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);

	mta |= (1 << hash_bit);

	E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
	e1e_flush();
}

/**
 *  e1000_hash_mc_addr - Generate a multicast hash value
 *  @hw: pointer to the HW structure
 *  @mc_addr: pointer to a multicast address
 *
 *  Generates a multicast address hash value which is used to determine
 *  the multicast filter table array address and new table value.  See
 *  e1000_mta_set().
 **/
static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.
	 * The algorithm is such that there is a total of 8 bits of shifting.
	 * The bit_shift for a mc_filter_type of 0 represents the number of
	 * left-shifts where the MSB of mc_addr[5] would still fall within
	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
	 * of 8 bits of shifting, then mc_addr[4] will shift right the
	 * remaining number of bits.  Thus 8 - bit_shift.  The rest of the
	 * cases are a variation of this algorithm...essentially raising the
	 * number of bits to shift mc_addr[5] left, while still keeping the
	 * 8-bit shifting total.
	 *
	 * For example, given the following Destination MAC Address and an
	 * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask),
	 * we can see that the bit_shift for case 0 is 4.  These are the hash
	 * values resulting from each mc_filter_type...
	 * [0] [1] [2] [3] [4] [5]
	 * 01  AA  00  12  34  56
	 * LSB		 MSB
	 *
	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D
	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				  (((u16) mc_addr[5]) << bit_shift)));

	return hash_value;
}

/**
 *  e1000e_mc_addr_list_update_generic - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *  @rar_used_count: the first RAR register free to program
 *  @rar_count: total number of supported Receive Address Registers
 *
 *  Updates the Receive Address Registers and Multicast Table Array.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 *  The parameter rar_count will usually be hw->mac.rar_entry_count
 *  unless there are workarounds that change this.
 **/
void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
					u8 *mc_addr_list, u32 mc_addr_count,
					u32 rar_used_count, u32 rar_count)
{
	u32 hash_value;
	u32 i;

	/* Load the first set of multicast addresses into the exact
	 * filters (RAR).  If there are not enough to fill the RAR
	 * array, clear the filters.
	 */
	for (i = rar_used_count; i < rar_count; i++) {
		if (mc_addr_count) {
			e1000e_rar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			mc_addr_list += ETH_ALEN;
		} else {
			E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
			e1e_flush();
			E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
			e1e_flush();
		}
	}

	/* Clear the old settings from the MTA */
	hw_dbg(hw, "Clearing MTA\n");
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
		e1e_flush();
	}

	/* Load any remaining multicast addresses into the hash table. */
	for (; mc_addr_count > 0; mc_addr_count--) {
		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
		hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
		e1000_mta_set(hw, hash_value);
		mc_addr_list += ETH_ALEN;
	}
}

/**
 *  e1000e_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers
 *  (the counters are clear-on-read).
 **/
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	u32 temp;

	temp = er32(CRCERRS);
	temp = er32(SYMERRS);
	temp = er32(MPC);
	temp = er32(SCC);
	temp = er32(ECOL);
	temp = er32(MCC);
	temp = er32(LATECOL);
	temp = er32(COLC);
	temp = er32(DC);
	temp = er32(SEC);
	temp = er32(RLEC);
	temp = er32(XONRXC);
	temp = er32(XONTXC);
	temp = er32(XOFFRXC);
	temp = er32(XOFFTXC);
	temp = er32(FCRUC);
	temp = er32(GPRC);
	temp = er32(BPRC);
	temp = er32(MPRC);
	temp = er32(GPTC);
	temp = er32(GORCL);
	temp = er32(GORCH);
	temp = er32(GOTCL);
	temp = er32(GOTCH);
	temp = er32(RNBC);
	temp = er32(RUC);
	temp = er32(RFC);
	temp = er32(ROC);
	temp = er32(RJC);
	temp = er32(TORL);
	temp = er32(TORH);
	temp = er32(TOTL);
	temp = er32(TOTH);
	temp = er32(TPR);
	temp = er32(TPT);
	temp = er32(MPTC);
	temp = er32(BPTC);
}

/**
 *  e1000e_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to determine if the link state is up.  If link came up, the
 *  collision distance and flow control settings are (re)configured based
 *  on the results of auto-negotiation.
 **/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link)
		return ret_val; /* No link detected */

	mac->get_link_status = 0;

	/* Check if there was DownShift; must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration, so we only need to
	 * configure Collision Distance in the MAC.
	 */
	e1000e_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg(hw, "Error configuring flow control\n");

	return ret_val;
}

/**
 *  e1000e_check_for_fiber_link - Check for link (Fiber)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link up on the hardware.  If link is not up and we have
 *  a signal, then we need to force link up.
 **/
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), the cable is plugged in (we have signal),
	 * and our link partner is not trying to auto-negotiate with us (we
	 * are receiving idles or data), we need to force link up.  We also
	 * need to give auto-negotiation time to complete, in case the cable
	 * was just plugged in.  The autoneg_failed flag does this.
	 */
	if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
	    (!(rxcw & E1000_RXCW_C))) {
		if (mac->autoneg_failed == 0) {
			mac->autoneg_failed = 1;
			return 0;
		}
		hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			hw_dbg(hw, "Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = 1;
	}

	return 0;
}

/**
 *  e1000e_check_for_serdes_link - Check for link (Serdes)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link up on the hardware.  If link is not up and we have
 *  a signal, then we need to force link up.
 **/
s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), and our link partner is not trying to
	 * auto-negotiate with us (we are receiving idles or data),
	 * we need to force link up.  We also need to give auto-negotiation
	 * time to complete.
	 */
	if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
		if (mac->autoneg_failed == 0) {
			mac->autoneg_failed = 1;
			return 0;
		}
		hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			hw_dbg(hw, "Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = 1;
	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
		/* If we force link for a non-auto-negotiating switch, check
		 * link status based on MAC synchronization for the internal
		 * serdes media type.  The SYNCH and IV bits are sticky.
		 */
		udelay(10);
		if (E1000_RXCW_SYNCH & er32(RXCW)) {
			if (!(rxcw & E1000_RXCW_IV)) {
				mac->serdes_has_link = 1;
				hw_dbg(hw, "SERDES: Link is up.\n");
			}
		} else {
			mac->serdes_has_link = 0;
			hw_dbg(hw, "SERDES: Link is down.\n");
		}
	}

	if (E1000_TXCW_ANE & er32(TXCW)) {
		status = er32(STATUS);
		mac->serdes_has_link = (status & E1000_STATUS_LU);
	}

	return 0;
}

/**
 *  e1000_set_default_fc_generic - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/
static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 nvm_data;

	if (mac->fc != e1000_fc_default)
		return 0;

	/* Read and store word 0x0F of the EEPROM.  This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins.  If there is no SW over-ride of the flow
	 * control setting, then mac->fc is initialized based on the
	 * value in the EEPROM.
	 */
	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		mac->fc = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
		mac->fc = e1000_fc_tx_pause;
	else
		mac->fc = e1000_fc_full;

	return 0;
}

/**
 *  e1000e_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
s32 e1000e_setup_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (e1000_check_reset_block(hw))
		return 0;

	/* If flow control is set to default, set flow control based on
	 * the EEPROM flow control settings.
	 */
	if (mac->fc == e1000_fc_default) {
		ret_val = e1000_set_default_fc_generic(hw);
		if (ret_val)
			return ret_val;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	mac->original_fc = mac->fc;

	hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = mac->ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
	ew32(FCT, FLOW_CONTROL_TYPE);
	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);

	ew32(FCTTV, mac->fc_pause_time);

	return e1000e_set_fc_watermarks(hw);
}

/**
 *  e1000_commit_fc_settings_generic - Configure flow control
 *  @hw: pointer to the HW structure
 *
 *  Write the flow control settings to the Transmit Config Word Register
 *  (TXCW) based on the flow control settings in use.
 **/
static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 txcw;

	/* Check for a software override of the flow control settings, and
	 * setup the device accordingly.  If auto-negotiation is enabled,
	 * then software will have to set the "PAUSE" bits to the correct
	 * value in the Transmit Config Word Register (TXCW) and re-start
	 * auto-negotiation.  However, if auto-negotiation is disabled, then
	 * software will have to manually configure the two flow control
	 * enable bits in the CTRL register.
	 *
	 * The possible values of the "fc" parameter are:
	 *	0:  Flow control is completely disabled
	 *	1:  Rx flow control is enabled (we can receive pause frames,
	 *	    but not send pause frames).
	 *	2:  Tx flow control is enabled (we can send pause frames but
	 *	    we do not support receiving pause frames).
	 *	3:  Both Rx and Tx flow control (symmetric) are enabled.
	 */
	switch (mac->fc) {
	case e1000_fc_none:
		/* Flow control completely disabled by a software over-ride. */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
		break;
	case e1000_fc_rx_pause:
		/* Rx Flow control is enabled and Tx Flow control is disabled
		 * by a software over-ride.  Since there really isn't a way to
		 * advertise that we are capable of Rx Pause ONLY, we will
		 * advertise that we support both symmetric and asymmetric Rx
		 * PAUSE.  Later, we will disable the adapter's ability to
		 * send PAUSE frames.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
		break;
	case e1000_fc_tx_pause:
		/* Tx Flow control is enabled, and Rx Flow control is disabled,
		 * by a software over-ride.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
		break;
	case e1000_fc_full:
		/* Flow control (both Rx and Tx) is enabled by a software
		 * over-ride.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(TXCW, txcw);
	mac->txcw = txcw;

	return 0;
}

/**
 *  e1000_poll_fiber_serdes_link_generic - Poll for link up
 *  @hw: pointer to the HW structure
 *
 *  Polls for link up by reading the status register.  If link fails to come
 *  up with auto-negotiation, then the link is forced if a signal is detected.
 **/
static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 i, status;
	s32 ret_val;

	/* If we have a signal (the cable is plugged in, or assumed true for
	 * serdes media) then poll for a "Link-Up" indication in the Device
	 * Status Register.  Time out if a link isn't seen in 500 milliseconds
	 * (auto-negotiation should complete in less than 500 milliseconds
	 * even if the other end is doing it in SW).
	 */
	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
		msleep(10);
		status = er32(STATUS);
		if (status & E1000_STATUS_LU)
			break;
	}
	if (i == FIBER_LINK_UP_LIMIT) {
		hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
		mac->autoneg_failed = 1;

		/* AutoNeg failed to achieve a link, so we'll call
		 * mac->check_for_link.  This routine will force the link up
		 * if we detect a signal.  This will allow us to communicate
		 * with non-autonegotiating link partners.
		 */
		ret_val = mac->ops.check_for_link(hw);
		if (ret_val) {
			hw_dbg(hw, "Error while checking for link\n");
			return ret_val;
		}
		mac->autoneg_failed = 0;
	} else {
		mac->autoneg_failed = 0;
		hw_dbg(hw, "Valid Link Found\n");
	}

	return 0;
}

/**
 *  e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
 *  @hw: pointer to the HW structure
 *
 *  Configures collision distance and flow control for fiber and serdes
 *  links.  Upon successful setup, poll for link.
 **/
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	ctrl = er32(CTRL);

	/* Take the link out of reset */
	ctrl &= ~E1000_CTRL_LRST;

	e1000e_config_collision_dist(hw);

	ret_val = e1000_commit_fc_settings_generic(hw);
	if (ret_val)
		return ret_val;

	/* Since auto-negotiation is enabled, take the link out of reset (the
	 * link will be in reset, because we previously reset the chip).  This
	 * will restart auto-negotiation.  If auto-negotiation is successful
	 * then the link-up status bit will be set and the flow control enable
	 * bits (RFCE and TFCE) will be set according to their negotiated
	 * value.
	 */
	hw_dbg(hw, "Auto-negotiation enabled\n");

	ew32(CTRL, ctrl);
	e1e_flush();
	msleep(1);

	/* For these adapters, the SW definable pin 1 is set when the optics
	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
	 * indication.
	 */
	if (hw->media_type == e1000_media_type_internal_serdes ||
	    (er32(CTRL) & E1000_CTRL_SWDPIN1))
		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
	else
		hw_dbg(hw, "No signal detected\n");

	return ret_val;
}

/**
 *  e1000e_config_collision_dist - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Configures the collision distance to the default value and is used
 *  during link setup.  Currently no func pointer exists and all
 *  implementations are handled in the generic version of this function.
 **/
void e1000e_config_collision_dist(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = er32(TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	ew32(TCTL, tctl);
	e1e_flush();
}
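
/* For illustration (values assumed from the e1000e register layout, where
 * E1000_COLLISION_DISTANCE is 63 and E1000_COLD_SHIFT is 12): the function
 * above clears the COLD field of TCTL and writes
 *
 *	tctl |= 63 << 12;	// COLD field = 0x3F000
 *
 * which is the default collision distance programmed during link setup.
 */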

/**
 *  e1000e_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, then set XON frame
 *  transmission as well.
 **/
s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (mac->fc & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = mac->fc_low_water;
		fcrtl |= E1000_FCRTL_XONE;
		fcrth = mac->fc_high_water;
	}
	ew32(FCRTL, fcrtl);
	ew32(FCRTH, fcrth);

	return 0;
}

/**
 *  e1000e_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in
 *  the device control register to reflect the adapter settings.  TFCE and
 *  RFCE need to be explicitly set by software when a copper PHY is used
 *  because autonegotiation is managed by the PHY rather than the MAC.
 *  Software must also configure these bits when link partner capability
 *  requirements are met.
 **/
s32 e1000e_force_mac_fc(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl;

	ctrl = er32(CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "case" statement below enables/disables flow control
	 * according to the "mac->fc" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *	0:  Flow control is completely disabled
	 *	1:  Rx flow control is enabled (we can receive pause
	 *	    frames but not send pause frames).
	 *	2:  Tx flow control is enabled (we can send pause frames
	 *	    but we do not receive pause frames).
	 *	3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	hw_dbg(hw, "mac->fc = %u\n", mac->fc);

	switch (mac->fc) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(CTRL, ctrl);

	return 0;
}

/**
 *  e1000e_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes)
			ret_val = e1000e_force_mac_fc(hw);
	} else {
		if (hw->media_type == e1000_media_type_copper)
			ret_val = e1000e_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg(hw, "Error forcing flow control settings\n");
		return ret_val;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg(hw, "Copper PHY and Auto Neg "
				 "has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, PHY_LP_ABILITY,
				   &mii_nway_lp_ability_reg);
		if (ret_val)
			return ret_val;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (mac->original_fc == e1000_fc_full) {
				mac->fc = e1000_fc_full;
				hw_dbg(hw, "Flow Control = FULL.\r\n");
			} else {
				mac->fc = e1000_fc_rx_pause;
				hw_dbg(hw, "Flow Control = "
					 "RX PAUSE frames only.\r\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			mac->fc = e1000_fc_tx_pause;
			hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			mac->fc = e1000_fc_rx_pause;
			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling Rx only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((mac->original_fc == e1000_fc_none) ||
			 (mac->original_fc == e1000_fc_tx_pause)) {
			mac->fc = e1000_fc_none;
			hw_dbg(hw, "Flow Control = NONE.\r\n");
		} else {
			mac->fc = e1000_fc_rx_pause;
			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg(hw, "Error getting link speed and duplex\n");
			return ret_val;
		}

		if (duplex == HALF_DUPLEX)
			mac->fc = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg(hw, "Error forcing flow control settings\n");
			return ret_val;
		}
	}

	return 0;
}

/**
 *  e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Read the status register for the current speed/duplex and store the
 *  current speed and duplex for copper connections.
 **/
s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				       u16 *duplex)
{
	u32 status;

	status = er32(STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg(hw, "1000 Mbs, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg(hw, "100 Mbs, ");
	} else {
		*speed = SPEED_10;
		hw_dbg(hw, "10 Mbs, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg(hw, "Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg(hw, "Half Duplex\n");
	}

	return 0;
}

/**
 *  e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Sets the speed and duplex to gigabit full duplex (the only possible
 *  option) for fiber/serdes links.
 **/
s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed,
					     u16 *duplex)
{
	*speed = SPEED_1000;
	*duplex = FULL_DUPLEX;

	return 0;
}

/**
 *  e1000e_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
		return -E1000_ERR_NVM;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000e_put_hw_semaphore(hw);
		hw_dbg(hw, "Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 *  e1000e_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Release hardware semaphore used to access the PHY or NVM
 **/
void e1000e_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = er32(SWSM);
	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
	ew32(SWSM, swsm);
}
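
/* Typical usage pattern (a sketch, not a call site from this file): code
 * touching the PHY or NVM directly brackets the access with the semaphore
 * helpers above:
 *
 *	ret_val = e1000e_get_hw_semaphore(hw);
 *	if (ret_val)
 *		return ret_val;
 *	... access the PHY or NVM ...
 *	e1000e_put_hw_semaphore(hw);
 */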

/**
 *  e1000e_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Check EEPROM for Auto Read done bit.
 **/
s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (er32(EECD) & E1000_EECD_AUTO_RD)
			break;
		msleep(1);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return 0;
}

/**
 *  e1000e_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT;

	return 0;
}

/**
 *  e1000e_id_led_init - Initialize LED identification settings
 *  @hw: pointer to the HW structure
 *
 *  Initializes the LED control (LEDCTL) values used for LED identification
 *  from the ID LED settings in the NVM.
 **/
s32 e1000e_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return 0;
}

/**
 *  e1000e_cleanup_led_generic - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED configuration
 *  to the default value, saved from the EEPROM.
 **/
s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
{
	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 *  e1000e_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/
s32 e1000e_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
			       (E1000_LEDCTL_MODE_LED_ON <<
				E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * in ledctl_mode2
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 4; i++)
			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
			    E1000_LEDCTL_MODE_LED_ON)
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
						 (i * 8));
	}

	ew32(LEDCTL, ledctl_blink);

	return 0;
}

/**
 *  e1000e_led_on_generic - Turn LED on
 *  @hw: pointer to the HW structure
 *
 *  Turn LED on.
 **/
s32 e1000e_led_on_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode2);
		break;
	default:
		break;
	}

	return 0;
}

/**
 *  e1000e_led_off_generic - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turn LED off.
 **/
s32 e1000e_led_off_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}

/**
 *  e1000e_set_pcie_no_snoop - Set PCI-express capabilities
 *  @hw: pointer to the HW structure
 *  @no_snoop: bitmap of snoop events
 *
 *  Set the PCI-express register to snoop for events enabled in 'no_snoop'.
 **/
void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
	u32 gcr;

	if (no_snoop) {
		gcr = er32(GCR);
		gcr &= ~(PCIE_NO_SNOOP_ALL);
		gcr |= no_snoop;
		ew32(GCR, gcr);
	}
}

/**
 *  e1000e_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Returns 0 if successful, else -E1000_ERR_MASTER_REQUESTS_PENDING if the
 *  master disable bit has not caused the master requests to be disabled.
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.
 **/
s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;

	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	ew32(CTRL, ctrl);

	while (timeout) {
		if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg(hw, "Master requests are pending.\n");
		return -E1000_ERR_MASTER_REQUESTS_PENDING;
	}

	return 0;
}

/**
 *  e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Reset the Adaptive Interframe Spacing throttle to default values.
 **/
void e1000e_reset_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	mac->current_ifs_val = 0;
	mac->ifs_min_val = IFS_MIN;
	mac->ifs_max_val = IFS_MAX;
	mac->ifs_step_size = IFS_STEP;
	mac->ifs_ratio = IFS_RATIO;

	mac->in_ifs_mode = 0;
	ew32(AIT, 0);
}

/**
 *  e1000e_update_adaptive - Update Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Update the Adaptive Interframe Spacing Throttle value based on the
 *  time between transmitted packets and time between collisions.
 **/
void e1000e_update_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
			mac->in_ifs_mode = 1;
			if (mac->current_ifs_val < mac->ifs_max_val) {
				if (!mac->current_ifs_val)
					mac->current_ifs_val = mac->ifs_min_val;
				else
					mac->current_ifs_val +=
						mac->ifs_step_size;
				ew32(AIT, mac->current_ifs_val);
			}
		}
	} else {
		if (mac->in_ifs_mode &&
		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
			mac->current_ifs_val = 0;
			mac->in_ifs_mode = 0;
			ew32(AIT, 0);
		}
	}
}

/**
 *  e1000_raise_eec_clk - Raise EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the EEPROM
 *
 *  Enable/Raise the EEPROM clock bit.
 **/
static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd | E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}

/**
 *  e1000_lower_eec_clk - Lower EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the EEPROM
 *
 *  Clear/Lower the EEPROM clock bit.
 **/
static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd & ~E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}

/**
 *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
 *  @hw: pointer to the HW structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 *
 *  We need to shift 'count' bits out to the EEPROM, so the value in the
 *  "data" parameter will be shifted out to the EEPROM one bit at a time.
 *  In order to do this, "data" must be broken down into bits.
 **/
static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u32 mask;

	mask = 0x01 << (count - 1);
	if (nvm->type == e1000_nvm_eeprom_spi)
		eecd |= E1000_EECD_DO;

	do {
		eecd &= ~E1000_EECD_DI;

		if (data & mask)
			eecd |= E1000_EECD_DI;

		ew32(EECD, eecd);
		e1e_flush();

		udelay(nvm->delay_usec);

		e1000_raise_eec_clk(hw, &eecd);
		e1000_lower_eec_clk(hw, &eecd);

		mask >>= 1;
	} while (mask);

	eecd &= ~E1000_EECD_DI;
	ew32(EECD, eecd);
}

/**
 *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to the HW structure
 *  @count: number of bits to shift in
 *
 *  In order to read a register from the EEPROM, we need to shift 'count'
 *  bits in from the EEPROM.  Bits are "shifted in" by raising the clock
 *  input to the EEPROM (setting the SK bit), and then reading the value of
 *  the data out "DO" bit.  During this "shifting in" process the data in
 *  "DI" bit should always be clear.
 **/
static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
	u32 eecd;
	u32 i;
	u16 data;

	eecd = er32(EECD);

	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
	data = 0;

	for (i = 0; i < count; i++) {
		data <<= 1;
		e1000_raise_eec_clk(hw, &eecd);

		eecd = er32(EECD);

		eecd &= ~E1000_EECD_DI;
		if (eecd & E1000_EECD_DO)
			data |= 1;

		e1000_lower_eec_clk(hw, &eecd);
	}

	return data;
}

/**
 *  e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
 *  @hw: pointer to the HW structure
 *  @ee_reg: EEPROM flag for polling
 *
 *  Polls the EEPROM status bit for either read or write completion based
 *  upon the value of 'ee_reg'.
 **/
s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
{
	u32 attempts = 100000;
	u32 i, reg = 0;

	for (i = 0; i < attempts; i++) {
		if (ee_reg == E1000_NVM_POLL_READ)
			reg = er32(EERD);
		else
			reg = er32(EEWR);

		if (reg & E1000_NVM_RW_REG_DONE)
			return 0;

		udelay(5);
	}

	return -E1000_ERR_NVM;
}

/**
 *  e1000e_acquire_nvm - Generic request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM.
 **/
s32 e1000e_acquire_nvm(struct e1000_hw *hw)
{
	u32 eecd = er32(EECD);
	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;

	ew32(EECD, eecd | E1000_EECD_REQ);
	eecd = er32(EECD);

	while (timeout) {
		if (eecd & E1000_EECD_GNT)
			break;
		udelay(5);
		eecd = er32(EECD);
		timeout--;
	}

	if (!timeout) {
		eecd &= ~E1000_EECD_REQ;
		ew32(EECD, eecd);
		hw_dbg(hw, "Could not acquire NVM grant\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 *  e1000_standby_nvm - Return EEPROM to standby state
 *  @hw: pointer to the HW structure
 *
 *  Return the EEPROM to a standby state.
 **/
static void e1000_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Toggle CS to flush commands */
		eecd |= E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
		eecd &= ~E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
	}
}

/**
 *  e1000_stop_nvm - Terminate EEPROM command
 *  @hw: pointer to the HW structure
 *
 *  Terminates the current command by inverting the EEPROM's chip select pin.
 **/
static void e1000_stop_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	eecd = er32(EECD);
	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		/* Pull CS high */
		eecd |= E1000_EECD_CS;
		e1000_lower_eec_clk(hw, &eecd);
	}
}

/**
 *  e1000e_release_nvm - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
 **/
void e1000e_release_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	e1000_stop_nvm(hw);

	eecd = er32(EECD);
	eecd &= ~E1000_EECD_REQ;
	ew32(EECD, eecd);
}

/**
 *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
 *  @hw: pointer to the HW structure
 *
 *  Sets up the EEPROM for reading and writing.
 **/
static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u16 timeout = 0;
	u8 spi_stat_reg;

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Clear SK and CS */
		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
		ew32(EECD, eecd);
		udelay(1);
		timeout = NVM_MAX_RETRY_SPI;

		/* Read "Status Register" repeatedly until the LSB is cleared.
		 * The EEPROM will signal that the command has been completed
		 * by clearing bit 0 of the internal status register.  If it's
		 * not cleared within 'timeout', then error out.
		 */
		while (timeout) {
			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
						 hw->nvm.opcode_bits);
			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
				break;

			udelay(5);
			e1000_standby_nvm(hw);
			timeout--;
		}

		if (!timeout) {
			hw_dbg(hw, "SPI NVM Status error\n");
			return -E1000_ERR_NVM;
		}
	}

	return 0;
}

/**
 *  e1000e_read_nvm_spi - Read EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words to read
 *  @data: word(s) read from the EEPROM
 *
 *  Reads 16 bit word(s) from the EEPROM.
 **/
s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i = 0;
	s32 ret_val;
	u16 word_in;
	u8 read_opcode = NVM_READ_OPCODE_SPI;

	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */
	if ((offset >= nvm->word_size) ||
	    (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	ret_val = nvm->ops.acquire_nvm(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1000_ready_nvm_eeprom(hw);
	if (ret_val) {
		nvm->ops.release_nvm(hw);
		return ret_val;
	}

	e1000_standby_nvm(hw);

	/* Some SPI eeproms use the 8th address bit embedded in the opcode */
	if ((nvm->address_bits == 8) && (offset >= 128))
		read_opcode |= NVM_A8_OPCODE_SPI;

	/* Send the READ command (opcode + addr) */
	e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
	e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);

	/* Read the data.  SPI NVMs increment the address with each byte
	 * read and will roll over if reading beyond the size of the NVM.
	 * This means that we can read the whole NVM from any offset.
	 */
	for (i = 0; i < words; i++) {
		word_in = e1000_shift_in_eec_bits(hw, 16);
		data[i] = (word_in >> 8) | (word_in << 8);
	}

	nvm->ops.release_nvm(hw);
	return 0;
}

/**
 *  e1000e_read_nvm_eerd - Read EEPROM using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words to read
 *  @data: word(s) read from the EEPROM
 *
 *  Reads 16 bit word(s) from the EEPROM using the EERD register.
 **/
s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, eerd = 0;
	s32 ret_val = 0;

	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */
	if ((offset >= nvm->word_size) ||
	    (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	for (i = 0; i < words; i++) {
		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
		       E1000_NVM_RW_REG_START;

		ew32(EERD, eerd);
		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
		if (ret_val)
			break;

		data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
	}

	return ret_val;
}
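
/* Usage sketch (illustrative, not a call site from this file): reading the
 * NVM checksum word through the EERD interface:
 *
 *	u16 checksum_word;
 *	s32 ret_val;
 *
 *	ret_val = e1000e_read_nvm_eerd(hw, NVM_CHECKSUM_REG, 1,
 *				       &checksum_word);
 *	if (!ret_val)
 *		hw_dbg(hw, "checksum word = 0x%04X\n", checksum_word);
 */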

/**
 *  e1000e_write_nvm_spi - Write to EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  Writes data to EEPROM at offset using SPI interface.
 *
 *  If e1000e_update_nvm_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 **/
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val;
	u16 widx = 0;

	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */
	if ((offset >= nvm->word_size) ||
	    (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	ret_val = nvm->ops.acquire_nvm(hw);
	if (ret_val)
		return ret_val;

	msleep(10);

	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		ret_val = e1000_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release_nvm(hw);
			return ret_val;
		}

		e1000_standby_nvm(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
					 nvm->opcode_bits);

		e1000_standby_nvm(hw);

		/* Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
					 nvm->address_bits);

		/* Loop to allow for up to whole page write of eeprom */
		while (widx < words) {
			u16 word_out = data[widx];
			word_out = (word_out >> 8) | (word_out << 8);
			e1000_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				e1000_standby_nvm(hw);
				break;
			}
		}
	}

	msleep(10);
	nvm->ops.release_nvm(hw);
	return 0;
}
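
/* Usage sketch (illustrative): a write through the SPI interface followed
 * by the checksum update the note above calls for.  The offset and value
 * here are hypothetical:
 *
 *	u16 word = 0x1234;
 *	s32 ret_val;
 *
 *	ret_val = e1000e_write_nvm_spi(hw, 0x0010, 1, &word);
 *	if (!ret_val)
 *		ret_val = e1000e_update_nvm_checksum_generic(hw);
 */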

/**
 *  e1000e_read_mac_addr - Read device MAC address
 *  @hw: pointer to the HW structure
 *
 *  Reads the device MAC address from the EEPROM and stores the value.
 *  Since devices with two ports use the same EEPROM, we increment the
 *  last bit in the MAC address for the second port.
 **/
s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 offset, nvm_data, i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = i >> 1;
		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
		hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
	}

	/* Flip last bit of mac address if we're on second port */
	if (hw->bus.func == E1000_FUNC_1)
		hw->mac.perm_addr[5] ^= 1;

	for (i = 0; i < ETH_ALEN; i++)
		hw->mac.addr[i] = hw->mac.perm_addr[i];

	return 0;
}

/**
 *  e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		checksum += nvm_data;
	}

	if (checksum != (u16) NVM_SUM) {
		hw_dbg(hw, "NVM Checksum Invalid\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
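
/* Worked example (illustrative): NVM_SUM is 0xBABA.  If words 0x00-0x3E of
 * the EEPROM sum to 0x1234 (mod 2^16), the checksum word at NVM_CHECKSUM_REG
 * (0x3F) must contain
 *
 *	0xBABA - 0x1234 = 0xA886
 *
 * so that the 16-bit sum over words 0x00-0x3F equals 0xBABA, which is
 * exactly what e1000e_update_nvm_checksum_generic() below writes.
 */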

/**
 *  e1000e_update_nvm_checksum_generic - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.
 **/
s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error while updating checksum.\n");
			return ret_val;
		}
		checksum += nvm_data;
	}
	checksum = (u16) NVM_SUM - checksum;
	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
	if (ret_val)
		hw_dbg(hw, "NVM Write Error while updating checksum.\n");

	return ret_val;
}

/**
 *  e1000e_reload_nvm - Reloads EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
 *  extended control register.
 **/
void e1000e_reload_nvm(struct e1000_hw *hw)
{
	u32 ctrl_ext;

	udelay(10);
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}

/**
 *  e1000_calculate_checksum - Calculate checksum for buffer
 *  @buffer: pointer to the buffer
 *  @length: size of the buffer
 *
 *  Calculates the checksum for some buffer on a specified length.  The
 *  checksum calculated is returned.
 **/
static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
{
	u32 i;
	u8 sum = 0;

	if (!buffer)
		return 0;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (u8) (0 - sum);
}
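
/* Worked example (illustrative): for the bytes { 0x10, 0x20, 0x30 } the
 * running sum is 0x60, so the function returns (u8)(0 - 0x60) = 0xA0.
 * Adding the returned checksum byte back into the sum gives
 * (0x60 + 0xA0) & 0xFF = 0, which is how the cookie and command-header
 * checksums below are verified.
 */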

/**
 *  e1000_mng_enable_host_if - Checks host interface is enabled
 *  @hw: pointer to the HW structure
 *
 *  Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
 *
 *  This function checks whether the HOST IF is enabled for command operation
 *  and also checks whether the previous command is completed.  It busy-waits
 *  in case the previous command is not completed.
 **/
static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
{
	u32 hicr;
	u8 i;

	/* Check that the host interface is enabled. */
	hicr = er32(HICR);
	if ((hicr & E1000_HICR_EN) == 0) {
		hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}
	/* Check that the previous command is completed. */
	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
		hicr = er32(HICR);
		if (!(hicr & E1000_HICR_C))
			break;
		mdelay(1);
	}

	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
		hw_dbg(hw, "Previous command timeout failed.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	return 0;
}

/**
 *  e1000e_check_mng_mode - Check management mode
 *  @hw: pointer to the HW structure
 *
 *  Reads the firmware semaphore register and returns true (>0) if
 *  manageability is enabled, else false (0).
 **/
bool e1000e_check_mng_mode(struct e1000_hw *hw)
{
	u32 fwsm = er32(FWSM);

	return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab;
}

/**
 *  e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
 *  @hw: pointer to the HW structure
 *
 *  Enables packet filtering on transmit packets if manageability is enabled
 *  and host interface is enabled.
 **/
bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
{
	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
	u32 *buffer = (u32 *)&hw->mng_cookie;
	u32 offset;
	s32 ret_val, hdr_csum, csum;
	u8 i, len;

	/* No manageability, no filtering */
	if (!e1000e_check_mng_mode(hw)) {
		hw->mac.tx_pkt_filtering = 0;
		return 0;
	}

	/* If we can't read from the host interface for whatever
	 * reason, disable filtering.
	 */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val != 0) {
		hw->mac.tx_pkt_filtering = 0;
		return ret_val;
	}

	/* Read in the header.  Length and offset are in dwords. */
	len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
	for (i = 0; i < len; i++)
		*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF,
						     offset + i);
	hdr_csum = hdr->checksum;
	hdr->checksum = 0;
	csum = e1000_calculate_checksum((u8 *)hdr,
					E1000_MNG_DHCP_COOKIE_LENGTH);
	/* If either the checksums or signature don't match, then
	 * the cookie area isn't considered valid, in which case we
	 * take the safe route of assuming Tx filtering is enabled.
	 */
	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
		hw->mac.tx_pkt_filtering = 1;
		return 1;
	}

	/* Cookie area is valid, make the final check for filtering. */
	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
		hw->mac.tx_pkt_filtering = 0;
		return 0;
	}

	hw->mac.tx_pkt_filtering = 1;
	return 1;
}

/**
 *  e1000_mng_write_cmd_header - Writes manageability command header
 *  @hw: pointer to the HW structure
 *  @hdr: pointer to the host interface command header
 *
 *  Writes the command header after doing the checksum calculation.
 **/
static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
				      struct e1000_host_mng_command_header *hdr)
{
	u16 i, length = sizeof(struct e1000_host_mng_command_header);

	/* Write the whole command header structure with a new checksum. */
	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);

	length >>= 2;
	/* Write the relevant command block into the ram area. */
	for (i = 0; i < length; i++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i));
		e1e_flush();
	}

	return 0;
}

/**
 *  e1000_mng_host_if_write - Writes to the manageability host interface
 *  @hw: pointer to the HW structure
 *  @buffer: pointer to the host interface buffer
 *  @length: size of the buffer
 *  @offset: location in the buffer to write to
 *  @sum: sum of the data (not checksum)
 *
 *  This function writes the buffer content at the given offset on the host
 *  interface.  It handles alignment so the writes are done in the most
 *  efficient way, and it accumulates the sum of the data in *sum.
 **/
static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
				   u16 length, u16 offset, u8 *sum)
{
	u8 *tmp;
	u8 *bufptr = buffer;
	u32 data = 0;
	u16 remaining, i, j, prev_bytes;

	/* sum = only sum of the data and it is not checksum */

	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
		return -E1000_ERR_PARAM;

	tmp = (u8 *)&data;
	prev_bytes = offset & 0x3;
	offset >>= 2;

	if (prev_bytes) {
		data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
		for (j = prev_bytes; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
		length -= j - prev_bytes;
		offset++;
	}

	remaining = length & 0x3;
	length -= remaining;

	/* Calculate length in DWORDs */
	length >>= 2;

	/* The device driver writes the relevant command block into the
	 * ram area.
	 */
	for (i = 0; i < length; i++) {
		for (j = 0; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}

		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}
	if (remaining) {
		for (j = 0; j < sizeof(u32); j++) {
			if (j < remaining)
				*(tmp + j) = *bufptr++;
			else
				*(tmp + j) = 0;

			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}

	return 0;
}

/**
 *  e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
 *  @hw: pointer to the HW structure
 *  @buffer: pointer to the host interface
 *  @length: size of the buffer
 *
 *  Writes the DHCP information to the host interface.
 **/
s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
{
	struct e1000_host_mng_command_header hdr;
	s32 ret_val;
	u32 hicr;

	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
	hdr.command_length = length;
	hdr.reserved1 = 0;
	hdr.reserved2 = 0;
	hdr.checksum = 0;

	/* Enable the host interface */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val)
		return ret_val;

	/* Populate the host interface with the contents of "buffer" */
	ret_val = e1000_mng_host_if_write(hw, buffer, length,
					  sizeof(hdr), &(hdr.checksum));
	if (ret_val)
		return ret_val;

	/* Write the manageability command header */
	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
	if (ret_val)
		return ret_val;

	/* Tell the ARC a new command is pending. */
	hicr = er32(HICR);
	ew32(HICR, hicr | E1000_HICR_C);

	return 0;
}

/**
 *  e1000e_enable_mng_pass_thru - Enable processing of ARP's
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to allow ARPs to be processed by the host.
 **/
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = 0;

	manc = er32(MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
		return ret_val;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = er32(FWSM);
		factps = er32(FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = 1;
			return ret_val;
		}
	} else {
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = 1;
			return ret_val;
		}
	}

	return ret_val;
}

/**
 *  e1000e_read_part_num - Read device part number
 *  @hw: pointer to the HW structure
 *  @part_num: pointer to device part number
 *
 *  Reads the product board assembly (PBA) number from the EEPROM and stores
 *  the value in part_num.
 **/
s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
{
	s32 ret_val;
	u16 nvm_data;

	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}
	*part_num = (u32)(nvm_data << 16);

	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}
	*part_num |= nvm_data;

	return 0;
}
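
/* Worked example (illustrative): if the word at NVM_PBA_OFFSET_0 reads
 * 0x1234 and the word at NVM_PBA_OFFSET_1 reads 0x5678, the function
 * returns *part_num == 0x12345678 (high PBA word in the upper 16 bits,
 * low PBA word in the lower 16 bits).
 */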