1
2
3
4#include "igc_phy.h"
5
6
7
8
9
10
11
12
13
14s32 igc_check_reset_block(struct igc_hw *hw)
15{
16 u32 manc;
17
18 manc = rd32(IGC_MANC);
19
20 return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ?
21 IGC_ERR_BLK_PHY_RESET : 0;
22}
23
24
25
26
27
28
29
30
31s32 igc_get_phy_id(struct igc_hw *hw)
32{
33 struct igc_phy_info *phy = &hw->phy;
34 s32 ret_val = 0;
35 u16 phy_id;
36
37 ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
38 if (ret_val)
39 goto out;
40
41 phy->id = (u32)(phy_id << 16);
42 usleep_range(200, 500);
43 ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
44 if (ret_val)
45 goto out;
46
47 phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
48 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
49
50out:
51 return ret_val;
52}
53
54
55
56
57
58
59
60
61
62
/**
 * igc_phy_has_link - Polls PHY for link
 * @hw: pointer to the HW structure
 * @iterations: number of times to poll for link
 * @usec_interval: delay between polls
 * @success: pointer to whether polling was successful or not
 *
 * Polls the PHY status register for link, @iterations number of times.
 * Returns 0 on success (with *@success indicating whether link was seen),
 * or the PHY read error.
 */
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
		     u32 usec_interval, bool *success)
{
	u16 i, phy_status;
	s32 ret_val = 0;

	for (i = 0; i < iterations; i++) {
		/* Some PHYs require the PHY_STATUS register to be read
		 * twice due to the link bit being sticky.  No harm doing
		 * it across the board.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val && usec_interval > 0) {
			/* If the first read fails, another entity may have
			 * ownership of the resources, wait and try again to
			 * see if they have relinquished the resources yet.
			 */
			if (usec_interval >= 1000)
				mdelay(usec_interval / 1000);
			else
				udelay(usec_interval);
		}
		/* Second (authoritative) read of the latched status. */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_LINK_STATUS)
			break;
		if (usec_interval >= 1000)
			mdelay(usec_interval / 1000);
		else
			udelay(usec_interval);
	}

	/* i < iterations means we broke out early, i.e. link was found. */
	*success = (i < iterations) ? true : false;

	return ret_val;
}
100
101
102
103
104
105
106
107
108void igc_power_up_phy_copper(struct igc_hw *hw)
109{
110 u16 mii_reg = 0;
111
112
113 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
114 mii_reg &= ~MII_CR_POWER_DOWN;
115 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
116}
117
118
119
120
121
122
123
124
/**
 * igc_power_down_phy_copper - Power down copper PHY
 * @hw: pointer to the HW structure
 *
 * Power down PHY to save power when interface is down and wake on LAN
 * is not enabled.
 */
void igc_power_down_phy_copper(struct igc_hw *hw)
{
	u16 mii_reg = 0;

	/* The PHY will retain its settings across a power down/up cycle */
	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;

	/* NOTE(review): the PHY_CONTROL write-back of mii_reg is apparently
	 * intentionally omitted here (temporary workaround until the PHY
	 * implements the IEEE registers properly), leaving mii_reg computed
	 * but unused — confirm against the driver's change history before
	 * "fixing" this by adding the write.
	 */
	usleep_range(1000, 2000);
}
139
140
141
142
143
144
145
146
147
148s32 igc_check_downshift(struct igc_hw *hw)
149{
150 struct igc_phy_info *phy = &hw->phy;
151 s32 ret_val;
152
153 switch (phy->type) {
154 case igc_phy_i225:
155 default:
156
157 phy->speed_downgraded = false;
158 ret_val = 0;
159 }
160
161 return ret_val;
162}
163
164
165
166
167
168
169
170
171
172
/**
 * igc_phy_hw_reset - PHY hardware reset
 * @hw: pointer to the HW structure
 *
 * Verify the reset block is not blocking us from resetting.  Acquire
 * the semaphore, toggle the PHY reset bit in the device control register,
 * wait the appropriate delay for the device to reset, poll for reset
 * completion, and release the semaphore.
 */
s32 igc_phy_hw_reset(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u32 phpm = 0, timeout = 10000;
	s32 ret_val;
	u32 ctrl;

	/* If a reset is blocked (e.g. by manageability), treat it as
	 * success and skip the reset entirely.
	 */
	ret_val = igc_check_reset_block(hw);
	if (ret_val) {
		ret_val = 0;
		goto out;
	}

	ret_val = phy->ops.acquire(hw);
	if (ret_val)
		goto out;

	phpm = rd32(IGC_I225_PHPM);

	/* Assert the PHY reset bit, flush, hold for the PHY-specified
	 * delay, then deassert and flush again.
	 */
	ctrl = rd32(IGC_CTRL);
	wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
	wrfl();

	udelay(phy->reset_delay_us);

	wr32(IGC_CTRL, ctrl);
	wrfl();

	/* SW should guarantee 100us for the completion of the PHY reset */
	usleep_range(100, 150);
	do {
		phpm = rd32(IGC_I225_PHPM);
		timeout--;
		udelay(1);
	} while (!(phpm & IGC_PHY_RST_COMP) && timeout);

	if (!timeout)
		hw_dbg("Timeout is expired after a phy reset\n");

	usleep_range(100, 150);

	phy->ops.release(hw);

out:
	return ret_val;
}
219
220
221
222
223
224
225
226
227
228
/**
 * igc_phy_setup_autoneg - Configure PHY for auto-negotiation
 * @hw: pointer to the HW structure
 *
 * Reads the MII auto-neg advertisement register and/or the 1000T control
 * register, the multi-gig AN control register (I225 only), clears the
 * speed capability bits, then sets them up again from autoneg_advertised
 * and the current flow control mode before writing the registers back.
 */
static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u16 aneg_multigbt_an_ctrl = 0;
	u16 mii_1000t_ctrl_reg = 0;
	u16 mii_autoneg_adv_reg;
	s32 ret_val;

	phy->autoneg_advertised &= phy->autoneg_mask;

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
		/* Read the MII 1000Base-T Control Register (Address 9). */
		ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
					    &mii_1000t_ctrl_reg);
		if (ret_val)
			return ret_val;
	}

	if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
	    hw->phy.id == I225_I_PHY_ID) {
		/* Read the MULTI GBT AN Control Register - reg 7.32 */
		ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
					    MMD_DEVADDR_SHIFT) |
					    ANEG_MULTIGBT_AN_CTRL,
					    &aneg_multigbt_an_ctrl);

		if (ret_val)
			return ret_val;
	}

	/* Need to parse both autoneg_advertised and fc and set up
	 * the appropriate PHY registers.  First we will parse for
	 * autoneg_advertised software override.  Since we can advertise
	 * a plethora of combinations, we need to check each bit
	 * individually.
	 *
	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the 1000Base-T Control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
				 NWAY_AR_100TX_HD_CAPS |
				 NWAY_AR_10T_FD_CAPS |
				 NWAY_AR_10T_HD_CAPS);
	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);

	hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);

	/* Do we want to advertise 10 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
		hw_dbg("Advertise 10mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
	}

	/* Do we want to advertise 10 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
		hw_dbg("Advertise 10mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
	}

	/* Do we want to advertise 100 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
		hw_dbg("Advertise 100mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
	}

	/* Do we want to advertise 100 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
		hw_dbg("Advertise 100mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
	}

	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
		hw_dbg("Advertise 1000mb Half duplex request denied!\n");

	/* Do we want to advertise 1000 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
		hw_dbg("Advertise 1000mb Full duplex\n");
		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
	}

	/* We do not allow the Phy to advertise 2500 Mb Half Duplex */
	if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
		hw_dbg("Advertise 2500mb Half duplex request denied!\n");

	/* Do we want to advertise 2500 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
		hw_dbg("Advertise 2500mb Full duplex\n");
		aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
	} else {
		aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
	}

	/* Check for a software override of the flow control settings, and
	 * setup the PHY advertisement registers accordingly.  If
	 * auto-negotiation is enabled, then software will have to set the
	 * "PAUSE" bits to the correct value in the Auto-Negotiation
	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start
	 * auto-negotiation.
	 */
	switch (hw->fc.current_mode) {
	case igc_fc_none:
		/* Flow control (Rx & Tx) is completely disabled by a
		 * software over-ride.
		 */
		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case igc_fc_rx_pause:
		/* Rx Flow control is enabled, and Tx Flow control is
		 * disabled, by a software over-ride.  Since there really
		 * isn't a way to advertise that we are capable of Rx
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, the hw's
		 * ability to send PAUSE frames gets disabled.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case igc_fc_tx_pause:
		/* Tx Flow control is enabled, and Rx Flow control is
		 * disabled, by a software over-ride.
		 */
		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
		break;
	case igc_fc_full:
		/* Flow control (both Rx and Tx) is enabled by a software
		 * over-ride.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		return -IGC_ERR_CONFIG;
	}

	ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);

	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
		ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
					     mii_1000t_ctrl_reg);

	if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
	    hw->phy.id == I225_I_PHY_ID)
		ret_val = phy->ops.write_reg(hw,
					     (STANDARD_AN_REG_MASK <<
					     MMD_DEVADDR_SHIFT) |
					     ANEG_MULTIGBT_AN_CTRL,
					     aneg_multigbt_an_ctrl);

	return ret_val;
}
403
404
405
406
407
408
409
410
/**
 * igc_wait_autoneg - Wait for auto-neg completion
 * @hw: pointer to the HW structure
 *
 * Waits for auto-negotiation to complete or for the auto-negotiation time
 * limit to expire, whichever happens first.
 */
static s32 igc_wait_autoneg(struct igc_hw *hw)
{
	u16 i, phy_status;
	s32 ret_val = 0;

	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		/* Status is read twice because the AN-complete bit is
		 * latched; the second read reflects the current state.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msleep(100);
	}

	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
	 * has completed; the caller only learns about read failures here.
	 */
	return ret_val;
}
434
435
436
437
438
439
440
441
442
443
444static s32 igc_copper_link_autoneg(struct igc_hw *hw)
445{
446 struct igc_phy_info *phy = &hw->phy;
447 u16 phy_ctrl;
448 s32 ret_val;
449
450
451
452
453 phy->autoneg_advertised &= phy->autoneg_mask;
454
455
456
457
458 if (phy->autoneg_advertised == 0)
459 phy->autoneg_advertised = phy->autoneg_mask;
460
461 hw_dbg("Reconfiguring auto-neg advertisement params\n");
462 ret_val = igc_phy_setup_autoneg(hw);
463 if (ret_val) {
464 hw_dbg("Error Setting up Auto-Negotiation\n");
465 goto out;
466 }
467 hw_dbg("Restarting Auto-Neg\n");
468
469
470
471
472 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
473 if (ret_val)
474 goto out;
475
476 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
477 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
478 if (ret_val)
479 goto out;
480
481
482
483
484 if (phy->autoneg_wait_to_complete) {
485 ret_val = igc_wait_autoneg(hw);
486 if (ret_val) {
487 hw_dbg("Error while waiting for autoneg to complete\n");
488 goto out;
489 }
490 }
491
492 hw->mac.get_link_status = true;
493
494out:
495 return ret_val;
496}
497
498
499
500
501
502
503
504
505
506
507s32 igc_setup_copper_link(struct igc_hw *hw)
508{
509 s32 ret_val = 0;
510 bool link;
511
512 if (hw->mac.autoneg) {
513
514
515
516 ret_val = igc_copper_link_autoneg(hw);
517 if (ret_val)
518 goto out;
519 } else {
520
521
522
523 hw_dbg("Forcing Speed and Duplex\n");
524 ret_val = hw->phy.ops.force_speed_duplex(hw);
525 if (ret_val) {
526 hw_dbg("Error Forcing Speed and Duplex\n");
527 goto out;
528 }
529 }
530
531
532
533
534 ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
535 if (ret_val)
536 goto out;
537
538 if (link) {
539 hw_dbg("Valid link established!!!\n");
540 igc_config_collision_dist(hw);
541 ret_val = igc_config_fc_after_link_up(hw);
542 } else {
543 hw_dbg("Unable to establish link!!!\n");
544 }
545
546out:
547 return ret_val;
548}
549
550
551
552
553
554
555
556
557
558
559static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
560{
561 struct igc_phy_info *phy = &hw->phy;
562 u32 i, mdic = 0;
563 s32 ret_val = 0;
564
565 if (offset > MAX_PHY_REG_ADDRESS) {
566 hw_dbg("PHY Address %d is out of range\n", offset);
567 ret_val = -IGC_ERR_PARAM;
568 goto out;
569 }
570
571
572
573
574
575 mdic = ((offset << IGC_MDIC_REG_SHIFT) |
576 (phy->addr << IGC_MDIC_PHY_SHIFT) |
577 (IGC_MDIC_OP_READ));
578
579 wr32(IGC_MDIC, mdic);
580
581
582
583
584
585 for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
586 usleep_range(500, 1000);
587 mdic = rd32(IGC_MDIC);
588 if (mdic & IGC_MDIC_READY)
589 break;
590 }
591 if (!(mdic & IGC_MDIC_READY)) {
592 hw_dbg("MDI Read did not complete\n");
593 ret_val = -IGC_ERR_PHY;
594 goto out;
595 }
596 if (mdic & IGC_MDIC_ERROR) {
597 hw_dbg("MDI Error\n");
598 ret_val = -IGC_ERR_PHY;
599 goto out;
600 }
601 *data = (u16)mdic;
602
603out:
604 return ret_val;
605}
606
607
608
609
610
611
612
613
614
615static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
616{
617 struct igc_phy_info *phy = &hw->phy;
618 u32 i, mdic = 0;
619 s32 ret_val = 0;
620
621 if (offset > MAX_PHY_REG_ADDRESS) {
622 hw_dbg("PHY Address %d is out of range\n", offset);
623 ret_val = -IGC_ERR_PARAM;
624 goto out;
625 }
626
627
628
629
630
631 mdic = (((u32)data) |
632 (offset << IGC_MDIC_REG_SHIFT) |
633 (phy->addr << IGC_MDIC_PHY_SHIFT) |
634 (IGC_MDIC_OP_WRITE));
635
636 wr32(IGC_MDIC, mdic);
637
638
639
640
641
642 for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
643 usleep_range(500, 1000);
644 mdic = rd32(IGC_MDIC);
645 if (mdic & IGC_MDIC_READY)
646 break;
647 }
648 if (!(mdic & IGC_MDIC_READY)) {
649 hw_dbg("MDI Write did not complete\n");
650 ret_val = -IGC_ERR_PHY;
651 goto out;
652 }
653 if (mdic & IGC_MDIC_ERROR) {
654 hw_dbg("MDI Error\n");
655 ret_val = -IGC_ERR_PHY;
656 goto out;
657 }
658
659out:
660 return ret_val;
661}
662
663
664
665
666
667
668
669
670
671static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
672 u8 dev_addr, u16 *data, bool read)
673{
674 s32 ret_val;
675
676 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
677 if (ret_val)
678 return ret_val;
679
680 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
681 if (ret_val)
682 return ret_val;
683
684 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA |
685 dev_addr);
686 if (ret_val)
687 return ret_val;
688
689 if (read)
690 ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
691 else
692 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
693 if (ret_val)
694 return ret_val;
695
696
697 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
698 if (ret_val)
699 return ret_val;
700
701 return ret_val;
702}
703
704
705
706
707
708
709
710
711static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr,
712 u8 dev_addr, u16 *data)
713{
714 return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
715}
716
717
718
719
720
721
722
723
724static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
725 u8 dev_addr, u16 data)
726{
727 return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
728}
729
730
731
732
733
734
735
736
737
738
739s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
740{
741 u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
742 s32 ret_val;
743
744 offset = offset & GPY_REG_MASK;
745
746 if (!dev_addr) {
747 ret_val = hw->phy.ops.acquire(hw);
748 if (ret_val)
749 return ret_val;
750 ret_val = igc_write_phy_reg_mdic(hw, offset, data);
751 if (ret_val)
752 return ret_val;
753 hw->phy.ops.release(hw);
754 } else {
755 ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
756 data);
757 }
758
759 return ret_val;
760}
761
762
763
764
765
766
767
768
769
770
771
772s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
773{
774 u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
775 s32 ret_val;
776
777 offset = offset & GPY_REG_MASK;
778
779 if (!dev_addr) {
780 ret_val = hw->phy.ops.acquire(hw);
781 if (ret_val)
782 return ret_val;
783 ret_val = igc_read_phy_reg_mdic(hw, offset, data);
784 if (ret_val)
785 return ret_val;
786 hw->phy.ops.release(hw);
787 } else {
788 ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
789 data);
790 }
791
792 return ret_val;
793}
794