// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

/* e1000_i210
 * e1000_i211
 */

#include <linux/types.h>
#include <linux/if_ether.h>

#include "e1000_hw.h"
#include "e1000_i210.h"

static s32 igb_update_flash_i210(struct e1000_hw *hw);

/**
 *  igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
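
/* Note: acquisition is two-stage - the SMBI bit arbitrates between software
 * agents, then the SWESMBI bit arbitrates with firmware. A typical
 * (illustrative) pairing with the release helper looks like:
 *
 *	if (!igb_get_hw_semaphore_i210(hw)) {
 *		... access the shared resource ...
 *		igb_put_hw_semaphore(hw);
 *	}
 */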

/**
 *  igb_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  igb_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200;

	while (i < timeout) {
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}

/**
 *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore_i210(hw))
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}
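
/* SW_FW_SYNC keeps software lock bits in its low 16 bits and the matching
 * firmware lock bits in the high 16 bits (mask << 16). An illustrative
 * guarded access using these helpers:
 *
 *	if (!igb_acquire_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM)) {
 *		... access the PHY ...
 *		igb_release_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM);
 *	}
 */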

/**
 *  igb_read_nvm_srrd_i210 - Reads Shadow RAM using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow RAM to read
 *  @words: number of words to read
 *  @data: word read from the Shadow RAM
 *
 *  Reads a 16 bit word from the Shadow RAM using the EERD register.
 *  Uses necessary synchronization semaphores.
 **/
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_read_nvm_eerd(hw, offset + i, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}

/**
 *  igb_write_nvm_srwr - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to Shadow RAM at offset using EEWR register.
 *
 *  If igb_update_nvm_checksum is not called after this function, the
 *  Shadow RAM will most likely contain an invalid checksum.
 **/
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = 0;

	/* A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		wr32(E1000_SRWR, eewr);

		/* Assume a timeout; the done bit clears it below. Without
		 * this the timeout check after the loop can never trigger.
		 */
		ret_val = -E1000_ERR_NVM;
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    rd32(E1000_SRWR)) {
				ret_val = 0;
				break;
			}
			udelay(5);
		}

		if (ret_val) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
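
/* Note: igb_write_nvm_srwr() only updates the Shadow RAM image. The change
 * is not committed to flash (and the checksum word is stale) until
 * igb_update_nvm_checksum_i210() runs afterwards.
 */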

/**
 *  igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to Shadow RAM at offset using EEWR register.
 *
 *  If igb_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and also Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
 *  partially written.
 **/
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_write_nvm_srwr(hw, offset + i, count,
						    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}

/**
 *  igb_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 **/
static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				hw_dbg("Read INVM Word 0x%02x = %x\n",
				       address, *data);
				status = 0;
				break;
			}
		}
	}
	if (status)
		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
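
/* The iNVM (OTP) is a flat array of 32-bit records. Each record encodes its
 * type in its low bits; CSR- and RSA-autoload structures are skipped by
 * advancing the index past their payload, while a word-autoload record
 * supplies one 16-bit word of the emulated EEPROM.
 */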

/**
 *  igb_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: offset to read from
 *  @words: number of words to read (unused)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
			      u16 __always_unused words, u16 *data)
{
	s32 ret_val = 0;

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 1,
						   &data[1]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 2,
						   &data[2]);
		if (ret_val)
			hw_dbg("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = 0;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
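
/* Flash-less i211 parts keep only a handful of words (notably the MAC
 * address) in OTP; other well-known configuration words fall back to the
 * documented power-on defaults above, so those reads do not fail.
 */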

/**
 *  igb_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = 0;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = 0;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
	}

	if (!status) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = 0;
			break;
		}
		/* Check if we have image type in a later location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = 0;
			break;
		}
	}
	return status;
}

/**
 *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = 0;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	if (!(hw->nvm.ops.acquire(hw))) {

		/* Replace the read function with one that skips the
		 * semaphore grabbing for a while.
		 * We have the semaphore taken already here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igb_read_nvm_eerd;

		status = igb_validate_nvm_checksum(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16)NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					     &checksum);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
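
/* Commit path: sum words 0x00 through NVM_CHECKSUM_REG - 1, write
 * NVM_SUM (0xBABA) minus that sum into the checksum word, then trigger the
 * flash update so the Shadow RAM contents become persistent.
 */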

/**
 *  igb_pool_flash_update_done_i210 - Poll FLUDONE status
 *  @hw: pointer to the HW structure
 *
 *  Poll the EECD register until the flash update is done or the attempt
 *  limit is reached.
 **/
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = 0;
			break;
		}
		udelay(5);
	}

	return ret_val;
}

/**
 *  igb_get_flash_presence_i210 - Check if flash device is detected
 *  @hw: pointer to the HW structure
 *
 *  Returns true when the EECD register reports an attached flash device.
 **/
bool igb_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	eec = rd32(E1000_EECD);
	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = true;

	return ret_val;
}

/**
 *  igb_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 *  Wait for any pending flash update to finish, then start a new update
 *  cycle and poll for its completion.
 **/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		hw_dbg("Flash update time out\n");
		goto out;
	}

	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
	wr32(E1000_EECD, flup);

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val)
		hw_dbg("Flash update time out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}
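
/* Setting E1000_EECD_FLUPD_I210 asks the hardware to copy the Shadow RAM
 * image into flash; E1000_EECD_FLUDONE_I210 latches when the copy finishes,
 * which is what igb_pool_flash_update_done_i210() polls for.
 */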

/**
 *  igb_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 *  __igb_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 **/
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = 0;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
					dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}

/**
 *  igb_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the EMI address
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}

/**
 *  igb_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
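
/* The two wrappers above implement the standard Clause 45-over-Clause 22
 * indirect MMD access: E1000_MMDAC selects the MMD device (first in address
 * mode, then in data mode) and E1000_MMDAAD carries the register address or
 * the data word.
 */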

/**
 *  igb_init_nvm_params_i210 - Init NVM func ptrs.
 *  @hw: pointer to the HW structure
 **/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	struct e1000_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igb_acquire_nvm_i210;
	nvm->ops.release = igb_release_nvm_i210;
	nvm->ops.valid_led_default = igb_valid_led_default_i210;

	/* NVM Function Pointers */
	if (igb_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read = igb_read_nvm_srrd_i210;
		nvm->ops.write = igb_write_nvm_srwr_i210;
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update = igb_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read = igb_read_invm_i210;
		nvm->ops.write = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
	}
	return ret_val;
}
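
/* On flash-less (iNVM-only) parts the write/validate/update pointers stay
 * NULL, so callers are expected to check for a NULL op before invoking it.
 */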

/**
 *  igb_pll_workaround_i210 - Works around an errata in the PLL circuit
 *  @hw: pointer to the HW structure
 *
 *  Works around an errata in the PLL circuit where it occasionally
 *  provides the wrong clock frequency after power up.
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
	phy_word = E1000_PHY_PLL_UNCONF;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = 0;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);

		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);

		wr32(E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}
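
/* Each retry forces the PLL to relock: the PHY is reset, the autoload word
 * is temporarily overridden via EEARBC_I210, and the device is bounced
 * through D3hot and back so the PHY powers up with the patched value.
 */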

/**
 *  igb_get_cfg_done_i210 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register for the config done bit for
 *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
 *  to read the config done bit, so an error is *ONLY* logged and returns
 *  0.  If we were to return with error, EEPROM-less silicon
 *  would not be able to be reset or change link.
 **/
s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL_I210) & mask)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	return 0;
}