#include <linux/types.h>
#include <linux/if_ether.h>

#include "e1000_hw.h"
#include "e1000_i210.h"

static s32 igb_update_flash_i210(struct e1000_hw *hw);
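
/**
 *  igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/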
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
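
/**
 *  igb_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 **/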
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
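
/**
 *  igb_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Release the semaphores acquired for EEPROM access.
 **/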
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
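
/**
 *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  also specifies which port we are acquiring the lock for.
 **/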
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200;

	while (i < timeout) {
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}
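
/**
 *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  also specifies which port we are releasing the lock for.
 **/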
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* Retry until the HW semaphore is obtained */
	while (igb_get_hw_semaphore_i210(hw))
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}
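
/**
 *  igb_read_nvm_srrd_i210 - Read Shadow RAM
 *  @hw: pointer to the HW structure
 *  @offset: offset of the word in the Shadow RAM to read
 *  @words: number of words to read
 *  @data: buffer for the words read from the Shadow RAM
 *
 *  Reads 16 bit words from the Shadow RAM using the EERD register.
 *  Uses the necessary synchronization semaphores.
 **/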
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure. However, it is more efficient
	 * to read in bursts than to synchronize access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_read_nvm_eerd(hw, offset, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}
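
/**
 *  igb_write_nvm_srwr - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to the Shadow RAM at offset using the EEWR register.
 *
 *  If the checksum is not updated and committed afterwards, the Shadow RAM
 *  will most likely contain an invalid checksum.
 **/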
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = 0;

	/* A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		wr32(E1000_SRWR, eewr);

		/* Assume a timeout until the DONE bit is observed, so a write
		 * that never completes is reported as an error.
		 */
		ret_val = -E1000_ERR_NVM;
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    rd32(E1000_SRWR)) {
				ret_val = 0;
				break;
			}
			udelay(5);
		}

		if (ret_val) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
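
/**
 *  igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to the Shadow RAM at offset using the EEWR register and the
 *  necessary synchronization semaphores.  The data is not committed to the
 *  flash until the checksum is updated.
 **/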
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure. However, it is more efficient
	 * to write in bursts than to synchronize access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_write_nvm_srwr(hw, offset, count,
						    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}
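
/**
 *  igb_read_invm_word_i210 - Read a word from the iNVM (OTP)
 *  @hw: pointer to the HW structure
 *  @address: the word address (EEPROM offset) to read
 *  @data: pointer to the data read
 *
 *  Reads a 16-bit word from the OTP. Returns an error when the word is not
 *  stored in the OTP.
 **/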
static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				hw_dbg("Read INVM Word 0x%02x = %x\n",
				       address, *data);
				status = 0;
				break;
			}
		}
	}
	if (status)
		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
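
/**
 *  igb_read_invm_i210 - Read iNVM wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: offset to read from
 *  @words: number of words to read (unused)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data that was formerly found in the NVM.
 **/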
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
			      u16 words __always_unused, u16 *data)
{
	s32 ret_val = 0;

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 1,
						   &data[1]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 2,
						   &data[2]);
		if (ret_val)
			hw_dbg("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = 0;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
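
/**
 *  igb_read_invm_version - Read iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads the iNVM version and image type fields.
 **/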
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM sector */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = 0;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			  (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = 0;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
	}

	if (!status) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}

	/* Read image type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = 0;
			break;
		}
		/* Check if we have image type in a later location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 (((*record & 0x3) != 0) && (i != 1))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = 0;
			break;
		}
	}
	return status;
}
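
/**
 *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/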
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = 0;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Replace the read function with one that skips the
		 * semaphore grabbing; the semaphore is already taken here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igb_read_nvm_eerd;

		status = igb_validate_nvm_checksum(hw);

		/* Restore the previous read operation */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}
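
/**
 *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum word, writes the computed checksum to the EEPROM and
 *  then commits the EEPROM data onto the flash.
 **/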
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails.
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16)NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					     &checksum);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
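
/**
 *  igb_pool_flash_update_done_i210 - Poll for flash update done
 *  @hw: pointer to the HW structure
 *
 *  Polls the EECD register until the FLUDONE bit is set or the attempt
 *  limit is reached.
 **/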
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = 0;
			break;
		}
		udelay(5);
	}

	return ret_val;
}
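
/**
 *  igb_get_flash_presence_i210 - Check if flash device is detected
 *  @hw: pointer to the HW structure
 **/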
bool igb_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	eec = rd32(E1000_EECD);
	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = true;

	return ret_val;
}
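
/**
 *  igb_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 **/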
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		hw_dbg("Flash update time out\n");
		goto out;
	}

	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
	wr32(E1000_EECD, flup);

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val)
		hw_dbg("Flash update time out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}
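
/**
 *  igb_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/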
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}
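
/**
 *  __igb_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 **/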
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = 0;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
					dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Disable access to mmd device */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}
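
/**
 *  igb_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the XMDIO address
 **/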
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
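
/**
 *  igb_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 **/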
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
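
/**
 *  igb_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210/i211 NVM parameters and function pointers.
 **/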
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	struct e1000_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igb_acquire_nvm_i210;
	nvm->ops.release = igb_release_nvm_i210;
	nvm->ops.valid_led_default = igb_valid_led_default_i210;

	/* NVM Function Pointers */
	if (igb_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read = igb_read_nvm_srrd_i210;
		nvm->ops.write = igb_write_nvm_srwr_i210;
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update = igb_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read = igb_read_invm_i210;
		nvm->ops.write = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
	}
	return ret_val;
}
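
/**
 *  igb_pll_workaround_i210 - Work around an errata in the PLL circuit
 *  @hw: pointer to the HW structure
 *
 *  Works around an errata in the PLL circuit where it occasionally
 *  provides the wrong clock frequency after power up.
 **/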
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = 0;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);

		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);

		wr32(E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}
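
/**
 *  igb_get_cfg_done_i210 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register for the config done bit of
 *  completion status.  Even on timeout only a debug message is logged and
 *  0 is returned, so that EEPROM-less silicon can still be reset and
 *  change link.
 **/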
s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL_I210) & mask)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	return 0;
}