1
2
3
4#include "igc_phy.h"
5
6
7
8
9
10
11
12
13
14s32 igc_check_reset_block(struct igc_hw *hw)
15{
16 u32 manc;
17
18 manc = rd32(IGC_MANC);
19
20 return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ?
21 IGC_ERR_BLK_PHY_RESET : 0;
22}
23
24
25
26
27
28
29
30
31s32 igc_get_phy_id(struct igc_hw *hw)
32{
33 struct igc_phy_info *phy = &hw->phy;
34 s32 ret_val = 0;
35 u16 phy_id;
36
37 ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
38 if (ret_val)
39 goto out;
40
41 phy->id = (u32)(phy_id << 16);
42 usleep_range(200, 500);
43 ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
44 if (ret_val)
45 goto out;
46
47 phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
48 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
49
50out:
51 return ret_val;
52}
53
54
55
56
57
58
59
60
61
62
/**
 * igc_phy_has_link - Polls PHY for link
 * @hw: pointer to the HW structure
 * @iterations: number of times to poll for link
 * @usec_interval: delay between polls, in microseconds
 * @success: set to true when link is detected within @iterations polls
 *
 * Polls the PHY status register for link up to @iterations times,
 * sleeping @usec_interval between attempts.
 *
 * Return: 0, or the error from the failing status-register read.
 */
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
		     u32 usec_interval, bool *success)
{
	u16 i, phy_status;
	s32 ret_val = 0;

	for (i = 0; i < iterations; i++) {
		/* PHY_STATUS is deliberately read twice per iteration:
		 * the link bit can be sticky, so only the second read is
		 * trusted below.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val && usec_interval > 0) {
			/* First read failed - presumably another entity
			 * holds the PHY resources; wait one interval and
			 * retry the read below (TODO confirm against HW
			 * ownership semantics).
			 */
			if (usec_interval >= 1000)
				mdelay(usec_interval / 1000);
			else
				udelay(usec_interval);
		}
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_LINK_STATUS)
			break;
		/* No link yet - wait before the next poll */
		if (usec_interval >= 1000)
			mdelay(usec_interval / 1000);
		else
			udelay(usec_interval);
	}

	/* Link was found iff we broke out before exhausting iterations */
	*success = (i < iterations) ? true : false;

	return ret_val;
}
100
101
102
103
104
105
106
107
108void igc_power_up_phy_copper(struct igc_hw *hw)
109{
110 u16 mii_reg = 0;
111
112
113 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
114 mii_reg &= ~MII_CR_POWER_DOWN;
115 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
116}
117
118
119
120
121
122
123
124
/**
 * igc_power_down_phy_copper - Power down the copper PHY
 * @hw: pointer to the HW structure
 *
 * Reads PHY_CONTROL and sets the power-down bit in a local copy, then
 * sleeps 1-2ms.
 *
 * NOTE(review): the modified value is never written back to PHY_CONTROL,
 * so this function currently only delays - presumably an intentional
 * workaround (mirroring the power-up path would require the write).
 * Confirm against hardware errata before adding the write_reg call.
 */
void igc_power_down_phy_copper(struct igc_hw *hw)
{
	u16 mii_reg = 0;

	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;

	/* Give the PHY time to settle; see NOTE above about the missing
	 * write-back of mii_reg.
	 */
	usleep_range(1000, 2000);
}
139
140
141
142
143
144
145
146void igc_check_downshift(struct igc_hw *hw)
147{
148 struct igc_phy_info *phy = &hw->phy;
149
150
151 phy->speed_downgraded = false;
152}
153
154
155
156
157
158
159
160
161
162
/**
 * igc_phy_hw_reset - PHY hardware reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY by pulsing IGC_CTRL_PHY_RST in the device control
 * register, then polls IGC_I225_PHPM for reset completion.  If resets
 * are blocked (igc_check_reset_block), returns 0 without touching the
 * PHY.  A poll timeout is only logged, not returned as an error.
 *
 * Return: 0 on success or when reset is blocked; otherwise the error
 * from acquiring the PHY semaphore.
 */
s32 igc_phy_hw_reset(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u32 phpm = 0, timeout = 10000;
	s32 ret_val;
	u32 ctrl;

	/* If resets are blocked, skip the reset but report success */
	ret_val = igc_check_reset_block(hw);
	if (ret_val) {
		ret_val = 0;
		goto out;
	}

	/* Take ownership of the PHY before touching reset bits */
	ret_val = phy->ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Initial PHPM read; value is overwritten by the poll loop below -
	 * presumably the read itself matters to HW, verify against datasheet.
	 */
	phpm = rd32(IGC_I225_PHPM);

	/* Assert PHY reset, hold it for reset_delay_us, then deassert */
	ctrl = rd32(IGC_CTRL);
	wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
	wrfl();

	udelay(phy->reset_delay_us);

	wr32(IGC_CTRL, ctrl);
	wrfl();

	/* Wait before polling for reset completion */
	usleep_range(100, 150);
	do {
		phpm = rd32(IGC_I225_PHPM);
		timeout--;
		udelay(1);
	} while (!(phpm & IGC_PHY_RST_COMP) && timeout);

	if (!timeout)
		hw_dbg("Timeout is expired after a phy reset\n");

	/* Additional settle time after reset completion */
	usleep_range(100, 150);

	phy->ops.release(hw);

out:
	return ret_val;
}
209
210
211
212
213
214
215
216
217
218
/**
 * igc_phy_setup_autoneg - Configure PHY autoneg advertisement registers
 * @hw: pointer to the HW structure
 *
 * Reads the MII auto-neg advertisement register, the 1000T control
 * register (when 1000FD is in the autoneg mask) and the multi-gigabit
 * AN control register (when 2500FD is in the mask), rebuilds the
 * speed/duplex capability bits from phy->autoneg_advertised, applies
 * the flow-control pause bits from hw->fc.current_mode, and writes the
 * registers back.
 *
 * Return: 0 on success, a register-access error, or -IGC_ERR_CONFIG for
 * an invalid flow-control mode.
 */
static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u16 aneg_multigbt_an_ctrl = 0;
	u16 mii_1000t_ctrl_reg = 0;
	u16 mii_autoneg_adv_reg;
	s32 ret_val;

	/* Never advertise more than the PHY supports */
	phy->autoneg_advertised &= phy->autoneg_mask;

	/* Read the current 10/100 advertisement */
	ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
		/* 1000FD is supported: read the 1000T control register too */
		ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
					    &mii_1000t_ctrl_reg);
		if (ret_val)
			return ret_val;
	}

	if (phy->autoneg_mask & ADVERTISE_2500_FULL) {
		/* 2500FD is supported: read the multi-gigabit AN control
		 * register via its MMD-encoded address.
		 */
		ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
					    MMD_DEVADDR_SHIFT) |
					    ANEG_MULTIGBT_AN_CTRL,
					    &aneg_multigbt_an_ctrl);

		if (ret_val)
			return ret_val;
	}

	/* Clear all speed/duplex capability bits; the requested ones are
	 * re-set below from phy->autoneg_advertised.
	 */
	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
				 NWAY_AR_100TX_HD_CAPS |
				 NWAY_AR_10T_FD_CAPS |
				 NWAY_AR_10T_HD_CAPS);
	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);

	hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);

	/* 10 Mbps half duplex */
	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
		hw_dbg("Advertise 10mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
	}

	/* 10 Mbps full duplex */
	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
		hw_dbg("Advertise 10mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
	}

	/* 100 Mbps half duplex */
	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
		hw_dbg("Advertise 100mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
	}

	/* 100 Mbps full duplex */
	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
		hw_dbg("Advertise 100mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
	}

	/* 1000 Mbps half duplex is never advertised */
	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
		hw_dbg("Advertise 1000mb Half duplex request denied!\n");

	/* 1000 Mbps full duplex */
	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
		hw_dbg("Advertise 1000mb Full duplex\n");
		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
	}

	/* 2500 Mbps half duplex is never advertised */
	if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
		hw_dbg("Advertise 2500mb Half duplex request denied!\n");

	/* 2500 Mbps full duplex - explicitly cleared when not requested */
	if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
		hw_dbg("Advertise 2500mb Full duplex\n");
		aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
	} else {
		aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
	}

	/* Map the requested flow-control mode onto the PAUSE / ASM_DIR
	 * advertisement bits.
	 */
	switch (hw->fc.current_mode) {
	case igc_fc_none:
		/* No flow control: advertise neither pause capability */
		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case igc_fc_rx_pause:
		/* RX-only pause: both bits are advertised - presumably so
		 * the standard pause resolution still yields RX pause when
		 * the partner asymmetrically requests TX (verify against
		 * 802.3 Annex 28B).
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case igc_fc_tx_pause:
		/* TX-only pause: asymmetric direction without PAUSE */
		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
		break;
	case igc_fc_full:
		/* Full flow control: advertise both bits */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		return -IGC_ERR_CONFIG;
	}

	ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);

	/* Write back the 1000T and multi-gigabit registers only when the
	 * corresponding speeds are in the autoneg mask (mirrors the reads).
	 */
	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
		ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
					     mii_1000t_ctrl_reg);

	if (phy->autoneg_mask & ADVERTISE_2500_FULL)
		ret_val = phy->ops.write_reg(hw,
					     (STANDARD_AN_REG_MASK <<
					     MMD_DEVADDR_SHIFT) |
					     ANEG_MULTIGBT_AN_CTRL,
					     aneg_multigbt_an_ctrl);

	return ret_val;
}
391
392
393
394
395
396
397
398
/**
 * igc_wait_autoneg - Wait for auto-negotiation to complete
 * @hw: pointer to the HW structure
 *
 * Polls PHY_STATUS up to PHY_AUTO_NEG_LIMIT times, sleeping 100ms
 * between attempts, until MII_SR_AUTONEG_COMPLETE is set.  Running out
 * of attempts is NOT reported as an error - only read failures are.
 *
 * Return: 0, or the error from a failing status-register read.
 */
static s32 igc_wait_autoneg(struct igc_hw *hw)
{
	u16 i, phy_status;
	s32 ret_val = 0;

	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		/* PHY_STATUS is deliberately read twice; only the second
		 * read is checked for the completion bit (sticky-bit
		 * behavior - TODO confirm against PHY datasheet).
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msleep(100);
	}

	return ret_val;
}
422
423
424
425
426
427
428
429
430
431
432static s32 igc_copper_link_autoneg(struct igc_hw *hw)
433{
434 struct igc_phy_info *phy = &hw->phy;
435 u16 phy_ctrl;
436 s32 ret_val;
437
438
439
440
441 phy->autoneg_advertised &= phy->autoneg_mask;
442
443
444
445
446 if (phy->autoneg_advertised == 0)
447 phy->autoneg_advertised = phy->autoneg_mask;
448
449 hw_dbg("Reconfiguring auto-neg advertisement params\n");
450 ret_val = igc_phy_setup_autoneg(hw);
451 if (ret_val) {
452 hw_dbg("Error Setting up Auto-Negotiation\n");
453 goto out;
454 }
455 hw_dbg("Restarting Auto-Neg\n");
456
457
458
459
460 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
461 if (ret_val)
462 goto out;
463
464 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
465 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
466 if (ret_val)
467 goto out;
468
469
470
471
472 if (phy->autoneg_wait_to_complete) {
473 ret_val = igc_wait_autoneg(hw);
474 if (ret_val) {
475 hw_dbg("Error while waiting for autoneg to complete\n");
476 goto out;
477 }
478 }
479
480 hw->mac.get_link_status = true;
481
482out:
483 return ret_val;
484}
485
486
487
488
489
490
491
492
493
494
495s32 igc_setup_copper_link(struct igc_hw *hw)
496{
497 s32 ret_val = 0;
498 bool link;
499
500 if (hw->mac.autoneg) {
501
502
503
504 ret_val = igc_copper_link_autoneg(hw);
505 if (ret_val)
506 goto out;
507 } else {
508
509
510
511 hw_dbg("Forcing Speed and Duplex\n");
512 ret_val = hw->phy.ops.force_speed_duplex(hw);
513 if (ret_val) {
514 hw_dbg("Error Forcing Speed and Duplex\n");
515 goto out;
516 }
517 }
518
519
520
521
522 ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
523 if (ret_val)
524 goto out;
525
526 if (link) {
527 hw_dbg("Valid link established!!!\n");
528 igc_config_collision_dist(hw);
529 ret_val = igc_config_fc_after_link_up(hw);
530 } else {
531 hw_dbg("Unable to establish link!!!\n");
532 }
533
534out:
535 return ret_val;
536}
537
538
539
540
541
542
543
544
545
546
547static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
548{
549 struct igc_phy_info *phy = &hw->phy;
550 u32 i, mdic = 0;
551 s32 ret_val = 0;
552
553 if (offset > MAX_PHY_REG_ADDRESS) {
554 hw_dbg("PHY Address %d is out of range\n", offset);
555 ret_val = -IGC_ERR_PARAM;
556 goto out;
557 }
558
559
560
561
562
563 mdic = ((offset << IGC_MDIC_REG_SHIFT) |
564 (phy->addr << IGC_MDIC_PHY_SHIFT) |
565 (IGC_MDIC_OP_READ));
566
567 wr32(IGC_MDIC, mdic);
568
569
570
571
572
573 for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
574 udelay(50);
575 mdic = rd32(IGC_MDIC);
576 if (mdic & IGC_MDIC_READY)
577 break;
578 }
579 if (!(mdic & IGC_MDIC_READY)) {
580 hw_dbg("MDI Read did not complete\n");
581 ret_val = -IGC_ERR_PHY;
582 goto out;
583 }
584 if (mdic & IGC_MDIC_ERROR) {
585 hw_dbg("MDI Error\n");
586 ret_val = -IGC_ERR_PHY;
587 goto out;
588 }
589 *data = (u16)mdic;
590
591out:
592 return ret_val;
593}
594
595
596
597
598
599
600
601
602
603static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
604{
605 struct igc_phy_info *phy = &hw->phy;
606 u32 i, mdic = 0;
607 s32 ret_val = 0;
608
609 if (offset > MAX_PHY_REG_ADDRESS) {
610 hw_dbg("PHY Address %d is out of range\n", offset);
611 ret_val = -IGC_ERR_PARAM;
612 goto out;
613 }
614
615
616
617
618
619 mdic = (((u32)data) |
620 (offset << IGC_MDIC_REG_SHIFT) |
621 (phy->addr << IGC_MDIC_PHY_SHIFT) |
622 (IGC_MDIC_OP_WRITE));
623
624 wr32(IGC_MDIC, mdic);
625
626
627
628
629
630 for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
631 udelay(50);
632 mdic = rd32(IGC_MDIC);
633 if (mdic & IGC_MDIC_READY)
634 break;
635 }
636 if (!(mdic & IGC_MDIC_READY)) {
637 hw_dbg("MDI Write did not complete\n");
638 ret_val = -IGC_ERR_PHY;
639 goto out;
640 }
641 if (mdic & IGC_MDIC_ERROR) {
642 hw_dbg("MDI Error\n");
643 ret_val = -IGC_ERR_PHY;
644 goto out;
645 }
646
647out:
648 return ret_val;
649}
650
651
652
653
654
655
656
657
658
659static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
660 u8 dev_addr, u16 *data, bool read)
661{
662 s32 ret_val;
663
664 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
665 if (ret_val)
666 return ret_val;
667
668 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
669 if (ret_val)
670 return ret_val;
671
672 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA |
673 dev_addr);
674 if (ret_val)
675 return ret_val;
676
677 if (read)
678 ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
679 else
680 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
681 if (ret_val)
682 return ret_val;
683
684
685 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
686 if (ret_val)
687 return ret_val;
688
689 return ret_val;
690}
691
692
693
694
695
696
697
698
699static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr,
700 u8 dev_addr, u16 *data)
701{
702 return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
703}
704
705
706
707
708
709
710
711
712static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
713 u8 dev_addr, u16 data)
714{
715 return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
716}
717
718
719
720
721
722
723
724
725
726
727s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
728{
729 u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
730 s32 ret_val;
731
732 offset = offset & GPY_REG_MASK;
733
734 if (!dev_addr) {
735 ret_val = hw->phy.ops.acquire(hw);
736 if (ret_val)
737 return ret_val;
738 ret_val = igc_write_phy_reg_mdic(hw, offset, data);
739 hw->phy.ops.release(hw);
740 } else {
741 ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
742 data);
743 }
744
745 return ret_val;
746}
747
748
749
750
751
752
753
754
755
756
757
758s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
759{
760 u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
761 s32 ret_val;
762
763 offset = offset & GPY_REG_MASK;
764
765 if (!dev_addr) {
766 ret_val = hw->phy.ops.acquire(hw);
767 if (ret_val)
768 return ret_val;
769 ret_val = igc_read_phy_reg_mdic(hw, offset, data);
770 hw->phy.ops.release(hw);
771 } else {
772 ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
773 data);
774 }
775
776 return ret_val;
777}
778
779
780
781
782
783u16 igc_read_phy_fw_version(struct igc_hw *hw)
784{
785 struct igc_phy_info *phy = &hw->phy;
786 u16 gphy_version = 0;
787 u16 ret_val;
788
789
790 ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version);
791 if (ret_val)
792 hw_dbg("igc_phy: read wrong gphy version\n");
793
794 return gphy_version;
795}
796