1
2
3
4#include "igc_phy.h"
5
6
7
8
9
10
11
12
13
14s32 igc_check_reset_block(struct igc_hw *hw)
15{
16 u32 manc;
17
18 manc = rd32(IGC_MANC);
19
20 return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ?
21 IGC_ERR_BLK_PHY_RESET : 0;
22}
23
24
25
26
27
28
29
30
31s32 igc_get_phy_id(struct igc_hw *hw)
32{
33 struct igc_phy_info *phy = &hw->phy;
34 s32 ret_val = 0;
35 u16 phy_id;
36
37 ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
38 if (ret_val)
39 goto out;
40
41 phy->id = (u32)(phy_id << 16);
42 usleep_range(200, 500);
43 ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
44 if (ret_val)
45 goto out;
46
47 phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
48 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
49
50out:
51 return ret_val;
52}
53
54
55
56
57
58
59
60
61
62
/**
 * igc_phy_has_link - Polls PHY for link
 * @hw: pointer to the HW structure
 * @iterations: number of times to poll for link
 * @usec_interval: delay between polls in microseconds
 * @success: set to true if link is detected within @iterations polls
 *
 * Polls the PHY status register for link, @iterations times, with
 * @usec_interval delay between polls.
 *
 * Return: 0 on success, otherwise the error from the failing register read.
 */
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
		     u32 usec_interval, bool *success)
{
	u16 i, phy_status;
	s32 ret_val = 0;

	for (i = 0; i < iterations; i++) {
		/* PHY_STATUS is read twice on purpose: link status bits
		 * latch low, so the first read clears a stale latched
		 * value and the second read reflects the current state.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val && usec_interval > 0) {
			/* If the first read fails, another entity may have
			 * ownership of the resources; wait and retry to see
			 * if they have been relinquished.
			 */
			if (usec_interval >= 1000)
				mdelay(usec_interval / 1000);
			else
				udelay(usec_interval);
		}
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_LINK_STATUS)
			break;
		/* No link yet: wait out the poll interval. */
		if (usec_interval >= 1000)
			mdelay(usec_interval / 1000);
		else
			udelay(usec_interval);
	}

	/* i < iterations means we broke out early because link was seen. */
	*success = (i < iterations) ? true : false;

	return ret_val;
}
100
101
102
103
104
105
106
107
108void igc_power_up_phy_copper(struct igc_hw *hw)
109{
110 u16 mii_reg = 0;
111
112
113 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
114 mii_reg &= ~MII_CR_POWER_DOWN;
115 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
116}
117
118
119
120
121
122
123
124
/**
 * igc_power_down_phy_copper - Power down copper PHY
 * @hw: pointer to the HW structure
 *
 * Reads PHY_CONTROL, sets the power-down bit in the local copy, then
 * waits 1-2 ms.
 *
 * NOTE(review): the modified value is never written back to PHY_CONTROL,
 * so as written this function does not actually power the PHY down.
 * This may be a deliberate workaround for a PHY erratum — confirm against
 * hardware errata before adding the write_reg() call.
 */
void igc_power_down_phy_copper(struct igc_hw *hw)
{
	u16 mii_reg = 0;

	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;

	/* NOTE(review): write-back of mii_reg intentionally(?) omitted —
	 * verify before changing.
	 */
	usleep_range(1000, 2000);
}
139
140
141
142
143
144
145
146
147
148s32 igc_check_downshift(struct igc_hw *hw)
149{
150 struct igc_phy_info *phy = &hw->phy;
151 s32 ret_val;
152
153 switch (phy->type) {
154 case igc_phy_i225:
155 default:
156
157 phy->speed_downgraded = false;
158 ret_val = 0;
159 }
160
161 return ret_val;
162}
163
164
165
166
167
168
169
170
171
172
/**
 * igc_phy_hw_reset - PHY hardware reset
 * @hw: pointer to the HW structure
 *
 * Verifies that PHY resets are not blocked; if they are, the reset is
 * skipped and 0 is returned.  Otherwise, acquires the PHY, pulses the
 * PHY reset bit in CTRL, then polls IGC_I225_PHPM until the PHY signals
 * reset completion or the poll budget is exhausted.
 *
 * Return: 0 on success (including the blocked/skipped case), otherwise
 * the error from acquiring the PHY.
 */
s32 igc_phy_hw_reset(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u32 phpm = 0, timeout = 10000;
	s32 ret_val;
	u32 ctrl;

	/* NOTE(review): when the reset is blocked the error is deliberately
	 * replaced with 0 (reset silently skipped) — confirm callers do not
	 * need to distinguish this case.
	 */
	ret_val = igc_check_reset_block(hw);
	if (ret_val) {
		ret_val = 0;
		goto out;
	}

	ret_val = phy->ops.acquire(hw);
	if (ret_val)
		goto out;

	phpm = rd32(IGC_I225_PHPM);

	/* Assert the PHY reset bit, hold it for the PHY-specific delay,
	 * then deassert by restoring the original CTRL value.  wrfl()
	 * flushes each write before the delay starts.
	 */
	ctrl = rd32(IGC_CTRL);
	wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
	wrfl();

	udelay(phy->reset_delay_us);

	wr32(IGC_CTRL, ctrl);
	wrfl();

	/* Give the PHY a moment, then poll PHPM (up to 10000 x 1us) for
	 * the reset-complete indication.
	 */
	usleep_range(100, 150);
	do {
		phpm = rd32(IGC_I225_PHPM);
		timeout--;
		udelay(1);
	} while (!(phpm & IGC_PHY_RST_COMP) && timeout);

	if (!timeout)
		hw_dbg("Timeout is expired after a phy reset\n");

	/* Settle time after reset completion before releasing the PHY. */
	usleep_range(100, 150);

	phy->ops.release(hw);

out:
	return ret_val;
}
219
220
221
222
223
224
225
226
227
228
/**
 * igc_phy_setup_autoneg - Configure PHY for auto-negotiation
 * @hw: pointer to the HW structure
 *
 * Reads the MII auto-neg advertisement register and/or the 1000T control
 * register (and the 2.5G multi-gigabit AN control register when 2500FULL
 * is in the supported mask), rewrites the capability bits according to
 * phy->autoneg_advertised and hw->fc.current_mode, and writes the
 * registers back.
 *
 * Return: 0 on success, a register-access error, or -IGC_ERR_CONFIG when
 * the flow control mode is invalid.
 */
static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u16 aneg_multigbt_an_ctrl = 0;
	u16 mii_1000t_ctrl_reg = 0;
	u16 mii_autoneg_adv_reg;
	s32 ret_val;

	/* Never advertise more than the PHY supports. */
	phy->autoneg_advertised &= phy->autoneg_mask;

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
		/* Read the MII 1000Base-T Control Register (Address 9). */
		ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
					    &mii_1000t_ctrl_reg);
		if (ret_val)
			return ret_val;
	}

	if (phy->autoneg_mask & ADVERTISE_2500_FULL) {
		/* Read the MULTI GBT AN Control Register (MMD address). */
		ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
					    MMD_DEVADDR_SHIFT) |
					    ANEG_MULTIGBT_AN_CTRL,
					    &aneg_multigbt_an_ctrl);

		if (ret_val)
			return ret_val;
	}

	/* Clear all 10/100 capability bits first; they are re-set below
	 * strictly from autoneg_advertised.  1000T half/full bits are
	 * likewise cleared in the 1000T control copy.
	 */
	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
				 NWAY_AR_100TX_HD_CAPS |
				 NWAY_AR_10T_FD_CAPS |
				 NWAY_AR_10T_HD_CAPS);
	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);

	hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);

	/* Do we want to advertise 10 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
		hw_dbg("Advertise 10mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
	}

	/* Do we want to advertise 10 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
		hw_dbg("Advertise 10mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
	}

	/* Do we want to advertise 100 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
		hw_dbg("Advertise 100mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
	}

	/* Do we want to advertise 100 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
		hw_dbg("Advertise 100mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
	}

	/* 1000 Mb half duplex is never advertised. */
	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
		hw_dbg("Advertise 1000mb Half duplex request denied!\n");

	/* Do we want to advertise 1000 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
		hw_dbg("Advertise 1000mb Full duplex\n");
		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
	}

	/* 2500 Mb half duplex is never advertised. */
	if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
		hw_dbg("Advertise 2500mb Half duplex request denied!\n");

	/* Do we want to advertise 2500 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
		hw_dbg("Advertise 2500mb Full duplex\n");
		aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
	} else {
		aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
	}

	/* Map the requested flow-control mode onto the PAUSE and
	 * asymmetric-direction bits of the advertisement register.
	 */
	switch (hw->fc.current_mode) {
	case igc_fc_none:
		/* No flow control: clear both PAUSE bits. */
		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case igc_fc_rx_pause:
		/* Rx-only pause is advertised the same as full symmetric
		 * pause (both bits set); the link partner's advertisement
		 * determines the final resolution.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case igc_fc_tx_pause:
		/* Tx-only pause: asymmetric direction set, PAUSE clear. */
		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
		break;
	case igc_fc_full:
		/* Full flow control: set both bits. */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		return -IGC_ERR_CONFIG;
	}

	ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);

	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
		ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
					     mii_1000t_ctrl_reg);

	if (phy->autoneg_mask & ADVERTISE_2500_FULL)
		ret_val = phy->ops.write_reg(hw,
					     (STANDARD_AN_REG_MASK <<
					     MMD_DEVADDR_SHIFT) |
					     ANEG_MULTIGBT_AN_CTRL,
					     aneg_multigbt_an_ctrl);

	return ret_val;
}
401
402
403
404
405
406
407
408
/**
 * igc_wait_autoneg - Wait for auto-negotiation completion
 * @hw: pointer to the HW structure
 *
 * Polls the PHY status register for auto-negotiation completion, up to
 * PHY_AUTO_NEG_LIMIT attempts with 100 ms between attempts.  Timing out
 * is NOT treated as an error: only register-read failures are returned.
 *
 * Return: 0 on completion or timeout, otherwise the read error.
 */
static s32 igc_wait_autoneg(struct igc_hw *hw)
{
	u16 i, phy_status;
	s32 ret_val = 0;

	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		/* PHY_STATUS is read twice on purpose: its status bits
		 * latch, so the first read flushes any stale latched
		 * value before the second read is evaluated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msleep(100);
	}

	/* If autoneg did not complete in the allotted time, the caller
	 * still gets 0; it must re-check completion itself if it cares.
	 */
	return ret_val;
}
432
433
434
435
436
437
438
439
440
441
442static s32 igc_copper_link_autoneg(struct igc_hw *hw)
443{
444 struct igc_phy_info *phy = &hw->phy;
445 u16 phy_ctrl;
446 s32 ret_val;
447
448
449
450
451 phy->autoneg_advertised &= phy->autoneg_mask;
452
453
454
455
456 if (phy->autoneg_advertised == 0)
457 phy->autoneg_advertised = phy->autoneg_mask;
458
459 hw_dbg("Reconfiguring auto-neg advertisement params\n");
460 ret_val = igc_phy_setup_autoneg(hw);
461 if (ret_val) {
462 hw_dbg("Error Setting up Auto-Negotiation\n");
463 goto out;
464 }
465 hw_dbg("Restarting Auto-Neg\n");
466
467
468
469
470 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
471 if (ret_val)
472 goto out;
473
474 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
475 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
476 if (ret_val)
477 goto out;
478
479
480
481
482 if (phy->autoneg_wait_to_complete) {
483 ret_val = igc_wait_autoneg(hw);
484 if (ret_val) {
485 hw_dbg("Error while waiting for autoneg to complete\n");
486 goto out;
487 }
488 }
489
490 hw->mac.get_link_status = true;
491
492out:
493 return ret_val;
494}
495
496
497
498
499
500
501
502
503
504
505s32 igc_setup_copper_link(struct igc_hw *hw)
506{
507 s32 ret_val = 0;
508 bool link;
509
510 if (hw->mac.autoneg) {
511
512
513
514 ret_val = igc_copper_link_autoneg(hw);
515 if (ret_val)
516 goto out;
517 } else {
518
519
520
521 hw_dbg("Forcing Speed and Duplex\n");
522 ret_val = hw->phy.ops.force_speed_duplex(hw);
523 if (ret_val) {
524 hw_dbg("Error Forcing Speed and Duplex\n");
525 goto out;
526 }
527 }
528
529
530
531
532 ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
533 if (ret_val)
534 goto out;
535
536 if (link) {
537 hw_dbg("Valid link established!!!\n");
538 igc_config_collision_dist(hw);
539 ret_val = igc_config_fc_after_link_up(hw);
540 } else {
541 hw_dbg("Unable to establish link!!!\n");
542 }
543
544out:
545 return ret_val;
546}
547
548
549
550
551
552
553
554
555
556
/**
 * igc_read_phy_reg_mdic - Read MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the MDI control register in the PHY at @offset and stores the
 * information read in @data.  The caller must hold the PHY semaphore.
 *
 * Return: 0 on success, -IGC_ERR_PARAM for an out-of-range offset, or
 * -IGC_ERR_PHY when the MDIC transaction times out or reports an error.
 */
static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
{
	struct igc_phy_info *phy = &hw->phy;
	u32 i, mdic = 0;
	s32 ret_val = 0;

	if (offset > MAX_PHY_REG_ADDRESS) {
		hw_dbg("PHY Address %d is out of range\n", offset);
		ret_val = -IGC_ERR_PARAM;
		goto out;
	}

	/* Set up Op-code, PHY Address, and register offset in the MDI
	 * Control register, then kick off the read.  The MAC will perform
	 * the serial MDIO transaction.
	 */
	mdic = ((offset << IGC_MDIC_REG_SHIFT) |
		(phy->addr << IGC_MDIC_PHY_SHIFT) |
		(IGC_MDIC_OP_READ));

	wr32(IGC_MDIC, mdic);

	/* Poll the ready bit to determine when the MDI read completes.
	 * Up to IGC_GEN_POLL_TIMEOUT attempts, ~0.5-1 ms apart.
	 */
	for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
		usleep_range(500, 1000);
		mdic = rd32(IGC_MDIC);
		if (mdic & IGC_MDIC_READY)
			break;
	}
	if (!(mdic & IGC_MDIC_READY)) {
		hw_dbg("MDI Read did not complete\n");
		ret_val = -IGC_ERR_PHY;
		goto out;
	}
	if (mdic & IGC_MDIC_ERROR) {
		hw_dbg("MDI Error\n");
		ret_val = -IGC_ERR_PHY;
		goto out;
	}
	/* The low 16 bits of MDIC hold the register data. */
	*data = (u16)mdic;

out:
	return ret_val;
}
604
605
606
607
608
609
610
611
612
/**
 * igc_write_phy_reg_mdic - Write MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write to register at offset
 *
 * Writes @data to the MDI control register in the PHY at @offset.  The
 * caller must hold the PHY semaphore.
 *
 * Return: 0 on success, -IGC_ERR_PARAM for an out-of-range offset, or
 * -IGC_ERR_PHY when the MDIC transaction times out or reports an error.
 */
static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
{
	struct igc_phy_info *phy = &hw->phy;
	u32 i, mdic = 0;
	s32 ret_val = 0;

	if (offset > MAX_PHY_REG_ADDRESS) {
		hw_dbg("PHY Address %d is out of range\n", offset);
		ret_val = -IGC_ERR_PARAM;
		goto out;
	}

	/* Set up Op-code, PHY Address, register offset, and the data to
	 * write in the MDI Control register.  The MAC will perform the
	 * serial MDIO transaction.
	 */
	mdic = (((u32)data) |
		(offset << IGC_MDIC_REG_SHIFT) |
		(phy->addr << IGC_MDIC_PHY_SHIFT) |
		(IGC_MDIC_OP_WRITE));

	wr32(IGC_MDIC, mdic);

	/* Poll the ready bit to determine when the MDI write completes.
	 * Up to IGC_GEN_POLL_TIMEOUT attempts, ~0.5-1 ms apart.
	 */
	for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
		usleep_range(500, 1000);
		mdic = rd32(IGC_MDIC);
		if (mdic & IGC_MDIC_READY)
			break;
	}
	if (!(mdic & IGC_MDIC_READY)) {
		hw_dbg("MDI Write did not complete\n");
		ret_val = -IGC_ERR_PHY;
		goto out;
	}
	if (mdic & IGC_MDIC_ERROR) {
		hw_dbg("MDI Error\n");
		ret_val = -IGC_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
660
661
662
663
664
665
666
667
668
669static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
670 u8 dev_addr, u16 *data, bool read)
671{
672 s32 ret_val;
673
674 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
675 if (ret_val)
676 return ret_val;
677
678 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
679 if (ret_val)
680 return ret_val;
681
682 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA |
683 dev_addr);
684 if (ret_val)
685 return ret_val;
686
687 if (read)
688 ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
689 else
690 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
691 if (ret_val)
692 return ret_val;
693
694
695 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
696 if (ret_val)
697 return ret_val;
698
699 return ret_val;
700}
701
702
703
704
705
706
707
708
/**
 * igc_read_xmdio_reg - Read XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be read from the specified address
 *
 * Thin read-direction wrapper around __igc_access_xmdio_reg().
 *
 * Return: 0 on success, otherwise the underlying access error.
 */
static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr,
			      u8 dev_addr, u16 *data)
{
	return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
714
715
716
717
718
719
720
721
/**
 * igc_write_xmdio_reg - Write XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be written to the specified address
 *
 * Thin write-direction wrapper around __igc_access_xmdio_reg().
 *
 * Return: 0 on success, otherwise the underlying access error.
 */
static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
			       u8 dev_addr, u16 data)
{
	return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
727
728
729
730
731
732
733
734
735
736
737s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
738{
739 u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
740 s32 ret_val;
741
742 offset = offset & GPY_REG_MASK;
743
744 if (!dev_addr) {
745 ret_val = hw->phy.ops.acquire(hw);
746 if (ret_val)
747 return ret_val;
748 ret_val = igc_write_phy_reg_mdic(hw, offset, data);
749 if (ret_val)
750 return ret_val;
751 hw->phy.ops.release(hw);
752 } else {
753 ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
754 data);
755 }
756
757 return ret_val;
758}
759
760
761
762
763
764
765
766
767
768
769
770s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
771{
772 u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
773 s32 ret_val;
774
775 offset = offset & GPY_REG_MASK;
776
777 if (!dev_addr) {
778 ret_val = hw->phy.ops.acquire(hw);
779 if (ret_val)
780 return ret_val;
781 ret_val = igc_read_phy_reg_mdic(hw, offset, data);
782 if (ret_val)
783 return ret_val;
784 hw->phy.ops.release(hw);
785 } else {
786 ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
787 data);
788 }
789
790 return ret_val;
791}
792
793
794
795
796
797u16 igc_read_phy_fw_version(struct igc_hw *hw)
798{
799 struct igc_phy_info *phy = &hw->phy;
800 u16 gphy_version = 0;
801 u16 ret_val;
802
803
804 ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version);
805 if (ret_val)
806 hw_dbg("igc_phy: read wrong gphy version\n");
807
808 return gphy_version;
809}
810