1
2
3
4
5#include "igc_api.h"
6
7static s32 igc_init_nvm_params_i225(struct igc_hw *hw);
8static s32 igc_init_mac_params_i225(struct igc_hw *hw);
9static s32 igc_init_phy_params_i225(struct igc_hw *hw);
10static s32 igc_reset_hw_i225(struct igc_hw *hw);
11static s32 igc_acquire_nvm_i225(struct igc_hw *hw);
12static void igc_release_nvm_i225(struct igc_hw *hw);
13static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw);
14static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
15 u16 *data);
16static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw);
17static s32 igc_valid_led_default_i225(struct igc_hw *hw, u16 *data);
18
19
20
21
22
/**
 * igc_init_nvm_params_i225 - Initialize NVM function pointers and parameters
 * @hw: pointer to the HW structure
 *
 * Derives the EEPROM word size from the EECD register, fills in the SPI
 * access parameters, and installs either the Shadow-RAM handlers (flash
 * present) or the iNVM/null handlers (flashless part).
 */
static s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 eecd = IGC_READ_REG(hw, IGC_EECD);
	u16 size;

	DEBUGFUNC("igc_init_nvm_params_i225");

	size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
		     IGC_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size below.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Cap the shift so word_size never exceeds the largest supported
	 * EEPROM size (32K words).
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	nvm->type = igc_nvm_eeprom_spi;

	/* The EECD address-bits flag selects the SPI geometry. */
	nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
	nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
			    16 : 8;

	/* The largest supported EEPROM uses a 128-byte page. */
	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	nvm->ops.acquire = igc_acquire_nvm_i225;
	nvm->ops.release = igc_release_nvm_i225;
	nvm->ops.valid_led_default = igc_valid_led_default_i225;
	if (igc_get_flash_presence_i225(hw)) {
		/* Flash part: Shadow-RAM based read/write/checksum ops. */
		hw->nvm.type = igc_nvm_flash_hw;
		nvm->ops.read = igc_read_nvm_srrd_i225;
		nvm->ops.write = igc_write_nvm_srwr_i225;
		nvm->ops.validate = igc_validate_nvm_checksum_i225;
		nvm->ops.update = igc_update_nvm_checksum_i225;
	} else {
		/* Flashless part: iNVM is effectively read-only here;
		 * write/validate/update are null ops.
		 * NOTE(review): nvm->ops.read is not set on this path —
		 * presumably the generic/previous default applies; confirm.
		 */
		hw->nvm.type = igc_nvm_invm;
		nvm->ops.write = igc_null_write_nvm;
		nvm->ops.validate = igc_null_ops_generic;
		nvm->ops.update = igc_null_ops_generic;
	}

	return IGC_SUCCESS;
}
76
77
78
79
80
81static s32 igc_init_mac_params_i225(struct igc_hw *hw)
82{
83 struct igc_mac_info *mac = &hw->mac;
84 struct igc_dev_spec_i225 *dev_spec = &hw->dev_spec._i225;
85
86 DEBUGFUNC("igc_init_mac_params_i225");
87
88
89 igc_init_mac_ops_generic(hw);
90
91
92 hw->phy.media_type = igc_media_type_copper;
93
94 mac->mta_reg_count = 128;
95
96 mac->rar_entry_count = IGC_RAR_ENTRIES_BASE;
97
98
99 mac->ops.reset_hw = igc_reset_hw_i225;
100
101 mac->ops.init_hw = igc_init_hw_i225;
102
103 mac->ops.setup_link = igc_setup_link_generic;
104
105 mac->ops.check_for_link = igc_check_for_link_i225;
106
107 mac->ops.get_link_up_info = igc_get_speed_and_duplex_copper_generic;
108
109 mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
110
111 mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;
112
113
114 dev_spec->clear_semaphore_once = true;
115 mac->ops.setup_physical_interface = igc_setup_copper_link_i225;
116
117
118 mac->asf_firmware_present = true;
119
120
121 mac->ops.update_mc_addr_list = igc_update_mc_addr_list_generic;
122
123 mac->ops.write_vfta = igc_write_vfta_generic;
124
125 return IGC_SUCCESS;
126}
127
128
129
130
131
/**
 * igc_init_phy_params_i225 - Initialize PHY function pointers and parameters
 * @hw: pointer to the HW structure
 *
 * Installs the copper PHY callbacks, resets the PHY (saving and restoring
 * CTRL_EXT around the reset), identifies the PHY, and binds the I225 LPLU
 * handlers.  Returns -IGC_ERR_PHY for an unrecognized PHY id.
 */
static s32 igc_init_phy_params_i225(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	s32 ret_val = IGC_SUCCESS;
	u32 ctrl_ext;

	DEBUGFUNC("igc_init_phy_params_i225");

	phy->ops.read_i2c_byte = igc_read_i2c_byte_generic;
	phy->ops.write_i2c_byte = igc_write_i2c_byte_generic;

	/* Non-copper media: nothing else to set up. */
	if (hw->phy.media_type != igc_media_type_copper) {
		phy->type = igc_phy_none;
		goto out;
	}

	phy->ops.power_up = igc_power_up_phy_copper;
	phy->ops.power_down = igc_power_down_phy_copper_base;

	/* Advertise all speeds up to 2.5G by default. */
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;

	phy->reset_delay_us = 100;

	phy->ops.acquire = igc_acquire_phy_base;
	phy->ops.check_reset_block = igc_check_reset_block_generic;
	phy->ops.commit = igc_phy_sw_reset_generic;
	phy->ops.release = igc_release_phy_base;

	/* Save CTRL_EXT so it can be restored after the PHY reset below. */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* Reset the PHY into a known-good state before attempting to read
	 * its id; the reset callback was installed by the generic init.
	 */
	ret_val = hw->phy.ops.reset(hw);
	if (ret_val)
		goto out;

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext);
	phy->ops.read_reg = igc_read_phy_reg_gpy;
	phy->ops.write_reg = igc_write_phy_reg_gpy;

	ret_val = igc_get_phy_id(hw);

	/* Bind id-specific handlers; unknown ids are an error. */
	switch (phy->id) {
	case I225_I_PHY_ID:
		phy->type = igc_phy_i225;
		phy->ops.set_d0_lplu_state = igc_set_d0_lplu_state_i225;
		phy->ops.set_d3_lplu_state = igc_set_d3_lplu_state_i225;

		break;
	default:
		ret_val = -IGC_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
192
193
194
195
196
197
198
/**
 * igc_reset_hw_i225 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * Performs a global MAC reset: quiesce PCI-E master access, mask all
 * interrupts, disable Rx/Tx, assert CTRL.RST, wait for the NVM auto-read
 * to finish, clear pending interrupt events, and check for an alternate
 * MAC address.
 */
static s32 igc_reset_hw_i225(struct igc_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("igc_reset_hw_i225");

	/* Quiesce outstanding PCI-E master transactions before resetting
	 * the MAC; a failure here is logged but not fatal.
	 */
	ret_val = igc_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);

	/* Disable receive and stop transmit (pad short packets only). */
	IGC_WRITE_REG(hw, IGC_RCTL, 0);
	IGC_WRITE_REG(hw, IGC_TCTL, IGC_TCTL_PSP);
	IGC_WRITE_FLUSH(hw);

	/* Allow in-flight traffic to drain. */
	msec_delay(10);

	ctrl = IGC_READ_REG(hw, IGC_CTRL);

	DEBUGOUT("Issuing a global reset to MAC\n");
	IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_RST);

	ret_val = igc_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/* When auto-read does not complete, do not return an error:
		 * this can happen when there is no EEPROM and would
		 * otherwise prevent getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* Re-mask and clear any interrupt events raised by the reset. */
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
	IGC_READ_REG(hw, IGC_ICR);

	/* Pick up an alternate MAC address from NVM if one is provided. */
	ret_val = igc_check_alt_mac_addr_generic(hw);

	return ret_val;
}
247
248
249
250
251
252
253
254
255
256static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
257{
258 s32 ret_val;
259
260 DEBUGFUNC("igc_acquire_nvm_i225");
261
262 ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
263
264 return ret_val;
265}
266
267
268
269
270
271
272
/**
 * igc_release_nvm_i225 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Drops the EEPROM software/firmware semaphore taken by
 * igc_acquire_nvm_i225().
 */
static void igc_release_nvm_i225(struct igc_hw *hw)
{
	DEBUGFUNC("igc_release_nvm_i225");

	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}
279
280
281
282
283
284
285
286
/**
 * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore bit(s) to acquire
 *
 * Acquire the requested resource bits in SW_FW_SYNC.  Each attempt takes
 * the HW semaphore, checks that neither software (mask) nor firmware
 * (mask << 16) holds the resource, and retries up to 200 times with a
 * 5 ms pause between attempts.  Returns -IGC_ERR_SWFW_SYNC on failure.
 */
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = IGC_SUCCESS;
	s32 i = 0, timeout = 200;

	DEBUGFUNC("igc_acquire_swfw_sync_i225");

	while (i < timeout) {
		/* SW_FW_SYNC may only be touched under the HW semaphore. */
		if (igc_get_hw_semaphore_i225(hw)) {
			ret_val = -IGC_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware (fwmask) or another software thread (swmask) is
		 * currently using the resource; drop the HW semaphore and
		 * retry after a short delay.
		 */
		igc_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -IGC_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource, then release the HW semaphore. */
	swfw_sync |= swmask;
	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
329
330
331
332
333
334
335
336
/**
 * igc_release_swfw_sync_i225 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore bit(s) to release
 *
 * Clears the given software resource bits in SW_FW_SYNC, holding the HW
 * semaphore around the read-modify-write.
 */
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("igc_release_swfw_sync_i225");

	/* Release must not fail: spin until the HW semaphore is ours. */
	while (igc_get_hw_semaphore_i225(hw) != IGC_SUCCESS)
		;

	swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
	swfw_sync &= ~mask;
	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore_generic(hw);
}
352
353
354
355
356
357
358
359
360
361s32 igc_setup_copper_link_i225(struct igc_hw *hw)
362{
363 u32 phpm_reg;
364 s32 ret_val;
365 u32 ctrl;
366
367 DEBUGFUNC("igc_setup_copper_link_i225");
368
369 ctrl = IGC_READ_REG(hw, IGC_CTRL);
370 ctrl |= IGC_CTRL_SLU;
371 ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
372 IGC_WRITE_REG(hw, IGC_CTRL, ctrl);
373
374 phpm_reg = IGC_READ_REG(hw, IGC_I225_PHPM);
375 phpm_reg &= ~IGC_I225_PHPM_GO_LINKD;
376 IGC_WRITE_REG(hw, IGC_I225_PHPM, phpm_reg);
377
378 ret_val = igc_setup_copper_link_generic(hw);
379
380 return ret_val;
381}
382
383
384
385
386
387
/**
 * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore: wait for SWSM.SMBI to clear (SW semaphore),
 * then set and confirm SWSM.SWESMBI (FW semaphore).  On the very first
 * SMBI timeout (clear_semaphore_once), a possibly stale semaphore is
 * force-released once and the wait retried.  Returns -IGC_ERR_NVM when
 * either stage times out.
 */
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
	u32 swsm;
	/* Scale the poll budget with the EEPROM size. */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("igc_get_hw_semaphore_i225");

	/* Get the SW semaphore: wait for SMBI to clear. */
	while (i < timeout) {
		swsm = IGC_READ_REG(hw, IGC_SWSM);
		if (!(swsm & IGC_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* The semaphore may be held stale by a previous owner.
		 * Force-release it exactly once per driver lifetime and
		 * retry the wait.
		 */
		if (hw->dev_spec._i225.clear_semaphore_once) {
			hw->dev_spec._i225.clear_semaphore_once = false;
			igc_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = IGC_READ_REG(hw, IGC_SWSM);
				if (!(swsm & IGC_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* Still held after the forced release: give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device -\n");
			DEBUGOUT("SMBI bit is set.\n");
			return -IGC_ERR_NVM;
		}
	}

	/* Get the FW semaphore: set SWESMBI and check that it latched. */
	for (i = 0; i < timeout; i++) {
		swsm = IGC_READ_REG(hw, IGC_SWSM);
		IGC_WRITE_REG(hw, IGC_SWSM, swsm | IGC_SWSM_SWESMBI);

		/* Semaphore acquired if the bit reads back set. */
		if (IGC_READ_REG(hw, IGC_SWSM) & IGC_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release the SW semaphore we already hold. */
		igc_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -IGC_ERR_NVM;
	}

	return IGC_SUCCESS;
}
451
452
453
454
455
456
457
458
459
460
/**
 * igc_read_nvm_srrd_i225 - Read Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset of the word(s) to read
 * @words: number of words to read
 * @data: buffer receiving the word(s) read from the Shadow RAM
 *
 * Reads the Shadow RAM through EERD in bursts of at most
 * IGC_EERD_EEWR_MAX_COUNT words, taking and releasing the NVM semaphore
 * around each burst so it is never held for long.
 */
s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
			   u16 *data)
{
	s32 status = IGC_SUCCESS;
	u16 i, count;

	DEBUGFUNC("igc_read_nvm_srrd_i225");

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure, but reading in bursts is more
	 * efficient than synchronizing access for each word.
	 * NOTE(review): only "data" is advanced between bursts; "offset" is
	 * passed unchanged, which matters only when words exceeds one burst
	 * (IGC_EERD_EEWR_MAX_COUNT) — confirm intent against callers.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
			status = igc_read_nvm_eerd(hw, offset, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = IGC_ERR_SWFW_SYNC;
		}

		if (status != IGC_SUCCESS)
			break;
	}

	return status;
}
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
/**
 * igc_write_nvm_srwr_i225 - Write to Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes to the Shadow RAM in bursts of at most IGC_EERD_EEWR_MAX_COUNT
 * words, taking and releasing the NVM semaphore around each burst so it
 * is never held for long.
 */
s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
			    u16 *data)
{
	s32 status = IGC_SUCCESS;
	u16 i, count;

	DEBUGFUNC("igc_write_nvm_srwr_i225");

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure, but writing in bursts is more
	 * efficient than synchronizing access for each word.
	 * NOTE(review): only "data" is advanced between bursts; "offset" is
	 * passed unchanged, which matters only when words exceeds one burst
	 * (IGC_EERD_EEWR_MAX_COUNT) — confirm intent against callers.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
			status = __igc_write_nvm_srwr(hw, offset, count,
						      data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = IGC_ERR_SWFW_SYNC;
		}

		if (status != IGC_SUCCESS)
			break;
	}

	return status;
}
535
536
537
538
539
540
541
542
543
544
545
546
/**
 * __igc_write_nvm_srwr - Write to Shadow RAM using SRWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes one word at a time through the SRWR register, polling up to
 * 100000 * 5 us for each word to complete.  The caller must already hold
 * the NVM semaphore.  Only the Shadow RAM is modified; a separate flash
 * update commits the contents.
 */
static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("__igc_write_nvm_srwr");

	/* Reject out-of-range offsets, runs that would pass the end of the
	 * Shadow RAM, and zero-length writes.
	 */
	if (offset >= nvm->word_size || words > (nvm->word_size - offset) ||
	    words == 0) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -IGC_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Compose address, data and the START strobe. */
		eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
		       (data[i] << IGC_NVM_RW_REG_DATA) |
		       IGC_NVM_RW_REG_START;

		IGC_WRITE_REG(hw, IGC_SRWR, eewr);

		/* Poll for per-word completion (DONE bit). */
		for (k = 0; k < attempts; k++) {
			if (IGC_NVM_RW_REG_DONE &
			    IGC_READ_REG(hw, IGC_SRWR)) {
				ret_val = IGC_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != IGC_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
592
593
594
595
596
597
598
/**
 * igc_read_invm_version_i225 - Read iNVM version data
 * @hw: pointer to the HW structure
 * @invm_ver: version structure to fill in
 *
 * Copies the iNVM contents into a local buffer and scans the record area
 * from the end backwards, first for the version word and then for the
 * image-type word, decoding whichever record layout is found.  Returns
 * -IGC_ERR_INVM_VALUE_NOT_FOUND if no version record is located.
 */
s32 igc_read_invm_version_i225(struct igc_hw *hw,
			       struct igc_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Number of scannable blocks, excluding the trailing area of
	 * IGC_INVM_ULT_BYTES_SIZE bytes.
	 */
	u32 invm_blocks = IGC_INVM_SIZE - (IGC_INVM_ULT_BYTES_SIZE /
					   IGC_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[IGC_INVM_SIZE];
	s32 status = -IGC_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	DEBUGFUNC("igc_read_invm_version_i225");

	/* Read iNVM memory into a local buffer. */
	for (i = 0; i < IGC_INVM_SIZE; i++) {
		invm_dword = IGC_READ_REG(hw, IGC_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Find the version record, scanning from the last block backwards. */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Last block, first version field empty: version is 0. */
		if (i == 1 && (*record & IGC_INVM_VER_FIELD_ONE) == 0) {
			version = 0;
			status = IGC_SUCCESS;
			break;
		}
		/* Last block, second version field empty: take field one. */
		else if ((i == 1) &&
			 ((*record & IGC_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & IGC_INVM_VER_FIELD_ONE) >> 3;
			status = IGC_SUCCESS;
			break;
		}
		/* Odd version location is the last one used: the version
		 * lives in field two of the following block.
		 */
		else if ((((*record & IGC_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & IGC_INVM_VER_FIELD_TWO)
				  >> 13;
			status = IGC_SUCCESS;
			break;
		}
		/* Even version location is the last one used: the version
		 * lives in field one of this block.
		 */
		else if (((*record & IGC_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & IGC_INVM_VER_FIELD_ONE) >> 3;
			status = IGC_SUCCESS;
			break;
		}
	}

	if (status == IGC_SUCCESS) {
		invm_ver->invm_major = (version & IGC_INVM_MAJOR_MASK)
				       >> IGC_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & IGC_INVM_MINOR_MASK;
	}

	/* Find the image-type record with the same backwards scan. */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Last block, image-type field empty: image type is 0. */
		if (i == 1 && (*record & IGC_INVM_IMGTYPE_FIELD) == 0) {
			invm_ver->invm_img_type = 0;
			status = IGC_SUCCESS;
			break;
		}
		/* Otherwise take the image type from the following block. */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & IGC_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & IGC_INVM_IMGTYPE_FIELD) >> 23;
			status = IGC_SUCCESS;
			break;
		}
	}
	return status;
}
688
689
690
691
692
693
694
/**
 * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Runs the generic checksum validation over the EEPROM while holding the
 * NVM semaphore.  Returns IGC_ERR_SWFW_SYNC if the semaphore cannot be
 * acquired.
 */
s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 status = IGC_SUCCESS;
	s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset,
			   u16 count, u16 *data);

	DEBUGFUNC("igc_validate_nvm_checksum_i225");

	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
		/* While the semaphore is held, temporarily point the read
		 * op at the raw EERD accessor so the generic helper does not
		 * try to re-acquire the semaphore on every read.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igc_read_nvm_eerd;

		status = igc_validate_nvm_checksum_generic(hw);

		/* Restore the original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = IGC_ERR_SWFW_SYNC;
	}

	return status;
}
723
724
725
726
727
728
729
730
/**
 * igc_update_nvm_checksum_i225 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Sums the first NVM_CHECKSUM_REG words, writes NVM_SUM minus that sum to
 * the checksum word in the Shadow RAM, and commits the Shadow RAM to
 * flash via igc_update_flash_i225().
 */
s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("igc_update_nvm_checksum_i225");

	/* Probe-read the first word.  If this fails, bail out now rather
	 * than waiting on every subsequent failing EEPROM read.
	 */
	ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != IGC_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
		/* Use igc_read_nvm_eerd()/__igc_write_nvm_srwr() directly
		 * instead of hw->nvm.ops.read/write so the synchronization
		 * semaphore is not taken a second time.
		 */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating\n");
				DEBUGOUT("checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* The checksum word makes the sum of all words NVM_SUM. */
		checksum = (u16)NVM_SUM - checksum;
		ret_val = __igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					       &checksum);
		if (ret_val != IGC_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the Shadow RAM contents to flash. */
		ret_val = igc_update_flash_i225(hw);
	} else {
		ret_val = IGC_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
783
784
785
786
787bool igc_get_flash_presence_i225(struct igc_hw *hw)
788{
789 u32 eec = 0;
790 bool ret_val = false;
791
792 DEBUGFUNC("igc_get_flash_presence_i225");
793
794 eec = IGC_READ_REG(hw, IGC_EECD);
795
796 if (eec & IGC_EECD_FLASH_DETECTED_I225)
797 ret_val = true;
798
799 return ret_val;
800}
801
802
803
804
805
806
807
808s32 igc_set_flsw_flash_burst_counter_i225(struct igc_hw *hw,
809 u32 burst_counter)
810{
811 s32 ret_val = IGC_SUCCESS;
812
813 DEBUGFUNC("igc_set_flsw_flash_burst_counter_i225");
814
815
816 if (burst_counter < IGC_I225_SHADOW_RAM_SIZE) {
817
818 IGC_WRITE_REG(hw, IGC_I225_FLSWCNT, burst_counter);
819 } else {
820 ret_val = IGC_ERR_INVALID_ARGUMENT;
821 }
822
823 return ret_val;
824}
825
826
827
828
829
830
831
832
/**
 * igc_write_erase_flash_command_i225 - Issue a flash write/erase command
 * @hw: pointer to the HW structure
 * @opcode: operation to perform (write or erase command opcode)
 * @address: flash byte address the command applies to
 *
 * Waits for any previous flash transaction to report DONE, programs
 * address|opcode into FLSWCTL, and confirms the controller latched the
 * command (CMDV).  Returns -IGC_ERR_NVM on the initial timeout and
 * IGC_ERR_INVALID_ARGUMENT if the command is not accepted.
 */
s32 igc_write_erase_flash_command_i225(struct igc_hw *hw, u32 opcode,
				       u32 address)
{
	u32 flswctl = 0;
	s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("igc_write_erase_flash_command_i225");

	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
	/* Wait for the previous transaction to complete (DONE set). */
	while (timeout) {
		if (flswctl & IGC_FLSWCTL_DONE)
			break;
		usec_delay(5);
		flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Flash transaction was not done\n");
		return -IGC_ERR_NVM;
	}

	/* Build and issue the command word. */
	flswctl = address | opcode;
	IGC_WRITE_REG(hw, IGC_I225_FLSWCTL, flswctl);

	/* CMDV clear after the write means the command was rejected
	 * (e.g. the flash region is write protected).
	 */
	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
	if (!(flswctl & IGC_FLSWCTL_CMDV)) {
		DEBUGOUT("Write flash command failed\n");
		ret_val = IGC_ERR_INVALID_ARGUMENT;
	}

	return ret_val;
}
870
871
872
873
874
875
876
877
878
879s32 igc_update_flash_i225(struct igc_hw *hw)
880{
881 u16 current_offset_data = 0;
882 u32 block_sw_protect = 1;
883 u16 base_address = 0x0;
884 u32 i, fw_valid_bit;
885 u16 current_offset;
886 s32 ret_val = 0;
887 u32 flup;
888
889 DEBUGFUNC("igc_update_flash_i225");
890
891 block_sw_protect = IGC_READ_REG(hw, IGC_I225_FLSECU) &
892 IGC_FLSECU_BLK_SW_ACCESS_I225;
893 fw_valid_bit = IGC_READ_REG(hw, IGC_FWSM) &
894 IGC_FWSM_FW_VALID_I225;
895 if (fw_valid_bit) {
896 ret_val = igc_pool_flash_update_done_i225(hw);
897 if (ret_val == -IGC_ERR_NVM) {
898 DEBUGOUT("Flash update time out\n");
899 goto out;
900 }
901
902 flup = IGC_READ_REG(hw, IGC_EECD) | IGC_EECD_FLUPD_I225;
903 IGC_WRITE_REG(hw, IGC_EECD, flup);
904
905 ret_val = igc_pool_flash_update_done_i225(hw);
906 if (ret_val == IGC_SUCCESS)
907 DEBUGOUT("Flash update complete\n");
908 else
909 DEBUGOUT("Flash update time out\n");
910 } else if (!block_sw_protect) {
911
912
913
914
915
916
917 if (IGC_READ_REG(hw, IGC_EECD) & IGC_EECD_SEC1VAL_I225)
918 base_address = 0x1000;
919
920
921 ret_val = igc_write_erase_flash_command_i225(hw,
922 IGC_I225_ERASE_CMD_OPCODE,
923 base_address);
924 if (!ret_val) {
925 DEBUGOUT("Sector erase failed\n");
926 goto out;
927 }
928
929 current_offset = base_address;
930
931
932 for (i = 0; i < IGC_I225_SHADOW_RAM_SIZE / 2; i++) {
933
934 ret_val = igc_set_flsw_flash_burst_counter_i225(hw,
935 0x2);
936 if (ret_val != IGC_SUCCESS)
937 break;
938
939
940 ret_val = igc_write_erase_flash_command_i225(hw,
941 IGC_I225_WRITE_CMD_OPCODE,
942 2 * current_offset);
943 if (ret_val != IGC_SUCCESS)
944 break;
945
946 ret_val = igc_read_nvm_eerd(hw, current_offset,
947 1, ¤t_offset_data);
948 if (ret_val) {
949 DEBUGOUT("Failed to read from EEPROM\n");
950 goto out;
951 }
952
953
954 IGC_WRITE_REG(hw, IGC_I225_FLSWDATA,
955 current_offset_data);
956 current_offset++;
957
958
959 ret_val = igc_poll_eerd_eewr_done(hw,
960 IGC_NVM_POLL_READ);
961 if (ret_val)
962 break;
963
964 usec_delay(1000);
965 }
966 }
967out:
968 return ret_val;
969}
970
971
972
973
974s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
975{
976 s32 ret_val = -IGC_ERR_NVM;
977 u32 i, reg;
978
979 DEBUGFUNC("igc_pool_flash_update_done_i225");
980
981 for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
982 reg = IGC_READ_REG(hw, IGC_EECD);
983 if (reg & IGC_EECD_FLUDONE_I225) {
984 ret_val = IGC_SUCCESS;
985 break;
986 }
987 usec_delay(5);
988 }
989
990 return ret_val;
991}
992
993
994
995
996
997
998
999
1000static s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
1001{
1002 u16 speed, duplex;
1003 u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
1004 s32 size;
1005
1006 DEBUGFUNC("igc_set_ltr_i225");
1007
1008
1009 if (link) {
1010 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1011
1012
1013
1014
1015 if (hw->phy.media_type == igc_media_type_copper &&
1016 !hw->dev_spec._i225.eee_disable &&
1017 speed != SPEED_10) {
1018
1019 ltrc = IGC_READ_REG(hw, IGC_LTRC) |
1020 IGC_LTRC_EEEMS_EN;
1021 IGC_WRITE_REG(hw, IGC_LTRC, ltrc);
1022
1023
1024 if (speed == SPEED_100)
1025 tw_system = ((IGC_READ_REG(hw, IGC_EEE_SU) &
1026 IGC_TW_SYSTEM_100_MASK) >>
1027 IGC_TW_SYSTEM_100_SHIFT) * 500;
1028 else
1029 tw_system = (IGC_READ_REG(hw, IGC_EEE_SU) &
1030 IGC_TW_SYSTEM_1000_MASK) * 500;
1031 } else {
1032 tw_system = 0;
1033 }
1034
1035
1036 size = IGC_READ_REG(hw, IGC_RXPBS) &
1037 IGC_RXPBS_SIZE_I225_MASK;
1038
1039
1040 if (IGC_READ_REG(hw, IGC_DMACR) & IGC_DMACR_DMAC_EN) {
1041 size -= (IGC_READ_REG(hw, IGC_DMACR) &
1042 IGC_DMACR_DMACTHR_MASK) >>
1043 IGC_DMACR_DMACTHR_SHIFT;
1044
1045 size *= 1024 * 8;
1046 } else {
1047
1048
1049
1050 size *= 1024;
1051 size -= hw->dev_spec._i225.mtu;
1052 size *= 8;
1053 }
1054
1055 if (size < 0) {
1056 DEBUGOUT1("Invalid effective Rx buffer size %d\n",
1057 size);
1058 return -IGC_ERR_CONFIG;
1059 }
1060
1061
1062
1063
1064
1065
1066 ltr_min = (1000 * size) / speed;
1067 ltr_max = ltr_min + tw_system;
1068 scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
1069 IGC_LTRMINV_SCALE_32768;
1070 scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
1071 IGC_LTRMAXV_SCALE_32768;
1072 ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
1073 ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
1074
1075
1076 ltrv = IGC_READ_REG(hw, IGC_LTRMINV);
1077 if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
1078 ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
1079 (scale_min << IGC_LTRMINV_SCALE_SHIFT);
1080 IGC_WRITE_REG(hw, IGC_LTRMINV, ltrv);
1081 }
1082
1083 ltrv = IGC_READ_REG(hw, IGC_LTRMAXV);
1084 if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
1085 ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
1086 (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
1087 IGC_WRITE_REG(hw, IGC_LTRMAXV, ltrv);
1088 }
1089 }
1090
1091 return IGC_SUCCESS;
1092}
1093
1094
1095
1096
1097
1098
1099
1100
/**
 * igc_check_for_link_i225 - Check for link
 * @hw: pointer to the HW structure
 *
 * Checks the PHY for link when a link-status change is pending.  On a
 * link-up transition, records downshift, configures collision distance,
 * and (under autoneg) flow control.  The LTR thresholds are reprogrammed
 * for the current link state on every exit path.
 */
s32 igc_check_for_link_i225(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link = false;

	DEBUGFUNC("igc_check_for_link_i225");

	/* Only query the PHY when a link status change is pending;
	 * get_link_status is set elsewhere when a change is signalled.
	 */
	if (!mac->get_link_status) {
		ret_val = IGC_SUCCESS;
		goto out;
	}

	/* Poll the PHY once (no delay) for link. */
	ret_val = igc_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out;

	mac->get_link_status = false;

	/* Downshift must be checked immediately after link-up. */
	igc_check_downshift_generic(hw);

	/* With forced speed/duplex there is nothing more to configure. */
	if (!mac->autoneg)
		goto out;

	/* Auto-Neg is enabled; auto speed detection handles MAC
	 * speed/duplex, so only collision distance needs configuring.
	 */
	mac->ops.config_collision_dist(hw);

	/* Re-apply the desired flow control settings now that Auto-Neg
	 * has completed, since renegotiation with a different link
	 * partner may have changed them.
	 */
	ret_val = igc_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");
out:
	/* Reprogram LTR for the (possibly new) link state.  Note this
	 * overwrites ret_val from the paths above.
	 */
	ret_val = igc_set_ltr_i225(hw, link);

	return ret_val;
}
1165
1166
1167
1168
1169
1170
1171void igc_init_function_pointers_i225(struct igc_hw *hw)
1172{
1173 igc_init_mac_ops_generic(hw);
1174 igc_init_phy_ops_generic(hw);
1175 igc_init_nvm_ops_generic(hw);
1176 hw->mac.ops.init_params = igc_init_mac_params_i225;
1177 hw->nvm.ops.init_params = igc_init_nvm_params_i225;
1178 hw->phy.ops.init_params = igc_init_phy_params_i225;
1179}
1180
1181
1182
1183
1184
1185
1186
1187
1188static s32 igc_valid_led_default_i225(struct igc_hw *hw, u16 *data)
1189{
1190 s32 ret_val;
1191
1192 DEBUGFUNC("igc_valid_led_default_i225");
1193
1194 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1195 if (ret_val) {
1196 DEBUGOUT("NVM Read Error\n");
1197 goto out;
1198 }
1199
1200 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1201 switch (hw->phy.media_type) {
1202 case igc_media_type_internal_serdes:
1203 *data = ID_LED_DEFAULT_I225_SERDES;
1204 break;
1205 case igc_media_type_copper:
1206 default:
1207 *data = ID_LED_DEFAULT_I225;
1208 break;
1209 }
1210 }
1211out:
1212 return ret_val;
1213}
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224static s32 igc_get_cfg_done_i225(struct igc_hw *hw)
1225{
1226 s32 timeout = PHY_CFG_TIMEOUT;
1227 u32 mask = IGC_NVM_CFG_DONE_PORT_0;
1228
1229 DEBUGFUNC("igc_get_cfg_done_i225");
1230
1231 while (timeout) {
1232 if (IGC_READ_REG(hw, IGC_EEMNGCTL_I225) & mask)
1233 break;
1234 msec_delay(1);
1235 timeout--;
1236 }
1237 if (!timeout)
1238 DEBUGOUT("MNG configuration cycle has not completed.\n");
1239
1240 return IGC_SUCCESS;
1241}
1242
1243
1244
1245
1246
1247
1248s32 igc_init_hw_i225(struct igc_hw *hw)
1249{
1250 s32 ret_val;
1251
1252 DEBUGFUNC("igc_init_hw_i225");
1253
1254 hw->phy.ops.get_cfg_done = igc_get_cfg_done_i225;
1255 ret_val = igc_init_hw_base(hw);
1256 return ret_val;
1257}
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267s32 igc_set_d0_lplu_state_i225(struct igc_hw *hw, bool active)
1268{
1269 u32 data;
1270
1271 DEBUGFUNC("igc_set_d0_lplu_state_i225");
1272
1273 data = IGC_READ_REG(hw, IGC_I225_PHPM);
1274
1275 if (active) {
1276 data |= IGC_I225_PHPM_DIS_1000;
1277 data |= IGC_I225_PHPM_DIS_2500;
1278 } else {
1279 data &= ~IGC_I225_PHPM_DIS_1000;
1280 data &= ~IGC_I225_PHPM_DIS_2500;
1281 }
1282
1283 IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
1284 return IGC_SUCCESS;
1285}
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295s32 igc_set_d3_lplu_state_i225(struct igc_hw *hw, bool active)
1296{
1297 u32 data;
1298
1299 DEBUGFUNC("igc_set_d3_lplu_state_i225");
1300
1301 data = IGC_READ_REG(hw, IGC_I225_PHPM);
1302
1303 if (active) {
1304 data |= IGC_I225_PHPM_DIS_100_D3;
1305 data |= IGC_I225_PHPM_DIS_1000_D3;
1306 data |= IGC_I225_PHPM_DIS_2500_D3;
1307 } else {
1308 data &= ~IGC_I225_PHPM_DIS_100_D3;
1309 data &= ~IGC_I225_PHPM_DIS_1000_D3;
1310 data &= ~IGC_I225_PHPM_DIS_2500_D3;
1311 }
1312
1313 IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
1314 return IGC_SUCCESS;
1315}
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
/**
 * igc_set_eee_i225 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv2p5G: advertise EEE at 2.5 Gbps
 * @adv1G: advertise EEE at 1 Gbps
 * @adv100M: advertise EEE at 100 Mbps
 *
 * Programs IPCNFG and EEER so that the per-speed Energy Efficient
 * Ethernet advertisement and LPI enables match eee_disable and the
 * requested flags.  No-op for non-i225 MACs or non-copper media.
 */
s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
		     bool adv100M)
{
	u32 ipcnfg, eeer;

	DEBUGFUNC("igc_set_eee_i225");

	/* Only i225 copper parts support this. */
	if (hw->mac.type != igc_i225 ||
	    hw->phy.media_type != igc_media_type_copper)
		goto out;
	ipcnfg = IGC_READ_REG(hw, IGC_IPCNFG);
	eeer = IGC_READ_REG(hw, IGC_EEER);

	/* Enable or disable advertisement per 100M/1G/2.5G speed. */
	if (!(hw->dev_spec._i225.eee_disable)) {
		u32 eee_su = IGC_READ_REG(hw, IGC_EEE_SU);

		if (adv100M)
			ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;

		if (adv1G)
			ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;

		if (adv2p5G)
			ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;

		eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
			 IGC_EEER_LPI_FC);

		/* Warn if the LPI clock-stop bit is set in EEE_SU. */
		if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
			DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
	} else {
		/* EEE disabled: clear all advertisement and LPI bits. */
		ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
			    IGC_IPCNFG_EEE_100M_AN);
		eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
			  IGC_EEER_LPI_FC);
	}
	IGC_WRITE_REG(hw, IGC_IPCNFG, ipcnfg);
	IGC_WRITE_REG(hw, IGC_EEER, eeer);
	/* Read back to flush the writes. */
	IGC_READ_REG(hw, IGC_IPCNFG);
	IGC_READ_REG(hw, IGC_EEER);
out:

	return IGC_SUCCESS;
}
1379