29#include <linux/pci.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32
33#include "ixgbe.h"
34#include "ixgbe_phy.h"
35
36static void ixgbe_i2c_start(struct ixgbe_hw *hw);
37static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
38static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
39static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
40static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
41static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
42static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
43static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
44static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
45static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
46static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
47static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
48static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
49static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
50
51
52
53
54
55
56
57
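/**
 * ixgbe_out_i2c_byte_ack - Send I2C byte and wait for ACK
 * @hw: pointer to hardware structure
 * @byte: byte to clock out on the I2C bus
 *
 * Clocks out one byte and returns the result of the slave ACK check.
 **/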
58static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
59{
60 s32 status;
61
62 status = ixgbe_clock_out_i2c_byte(hw, byte);
63 if (status)
64 return status;
65 return ixgbe_get_i2c_ack(hw);
66}
67
68
69
70
71
72
73
74
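/**
 * ixgbe_in_i2c_byte_ack - Clock in I2C byte and send ACK
 * @hw: pointer to hardware structure
 * @byte: pointer to the byte received from the I2C bus
 *
 * Clocks in one byte and acknowledges it by clocking out a 0 bit.
 **/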
75static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
76{
77 s32 status;
78
79 status = ixgbe_clock_in_i2c_byte(hw, byte);
80 if (status)
81 return status;
82
83 return ixgbe_clock_out_i2c_bit(hw, false);
84}
85
86
87
88
89
90
91
92
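/**
 * ixgbe_ones_comp_byte_add - Perform one's complement addition
 * @add1: addend 1
 * @add2: addend 2
 *
 * Returns the 8-bit one's complement sum, folding the carry back in.
 **/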
93static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
94{
95 u16 sum = add1 + add2;
96
97 sum = (sum & 0xFF) + (sum >> 8);
98 return sum & 0xFF;
99}
100
101
102
103
104
105
106
107
108
109
110
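/**
 * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to read from
 * @reg: I2C device register to read from
 * @val: pointer to location to receive read value
 * @lock: true if to take and release semaphore
 *
 * Returns an error code on error.
 **/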
111s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
112 u16 reg, u16 *val, bool lock)
113{
114 u32 swfw_mask = hw->phy.phy_semaphore_mask;
115 int max_retry = 3;
116 int retry = 0;
117 u8 csum_byte;
118 u8 high_bits;
119 u8 low_bits;
120 u8 reg_high;
121 u8 csum;
122
123 reg_high = ((reg >> 7) & 0xFE) | 1;
124 csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
125 csum = ~csum;
126 do {
127 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
128 return IXGBE_ERR_SWFW_SYNC;
129 ixgbe_i2c_start(hw);
130
131 if (ixgbe_out_i2c_byte_ack(hw, addr))
132 goto fail;
133
134 if (ixgbe_out_i2c_byte_ack(hw, reg_high))
135 goto fail;
136
137 if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
138 goto fail;
139
140 if (ixgbe_out_i2c_byte_ack(hw, csum))
141 goto fail;
142
143 ixgbe_i2c_start(hw);
144
145 if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
146 goto fail;
147
148 if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
149 goto fail;
150
151 if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
152 goto fail;
153
154 if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
155 goto fail;
156
157 if (ixgbe_clock_out_i2c_bit(hw, false))
158 goto fail;
159 ixgbe_i2c_stop(hw);
160 if (lock)
161 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
162 *val = (high_bits << 8) | low_bits;
163 return 0;
164
165fail:
166 ixgbe_i2c_bus_clear(hw);
167 if (lock)
168 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
169 retry++;
170 if (retry < max_retry)
171 hw_dbg(hw, "I2C byte read combined error - Retry.\n");
172 else
173 hw_dbg(hw, "I2C byte read combined error.\n");
174 } while (retry < max_retry);
175
176 return IXGBE_ERR_I2C;
177}
178
179
180
181
182
183
184
185
186
187
188
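/**
 * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to write to
 * @reg: I2C device register to write to
 * @val: value to write
 * @lock: true if to take and release semaphore
 *
 * Returns an error code on error.
 **/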
189s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
190 u16 reg, u16 val, bool lock)
191{
192 u32 swfw_mask = hw->phy.phy_semaphore_mask;
193 int max_retry = 1;
194 int retry = 0;
195 u8 reg_high;
196 u8 csum;
197
198 reg_high = (reg >> 7) & 0xFE;
199 csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
200 csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
201 csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
202 csum = ~csum;
203 do {
204 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
205 return IXGBE_ERR_SWFW_SYNC;
206 ixgbe_i2c_start(hw);
207
208 if (ixgbe_out_i2c_byte_ack(hw, addr))
209 goto fail;
210
211 if (ixgbe_out_i2c_byte_ack(hw, reg_high))
212 goto fail;
213
214 if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
215 goto fail;
216
217 if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
218 goto fail;
219
220 if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
221 goto fail;
222
223 if (ixgbe_out_i2c_byte_ack(hw, csum))
224 goto fail;
225 ixgbe_i2c_stop(hw);
226 if (lock)
227 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
228 return 0;
229
230fail:
231 ixgbe_i2c_bus_clear(hw);
232 if (lock)
233 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
234 retry++;
235 if (retry < max_retry)
236 hw_dbg(hw, "I2C byte write combined error - Retry.\n");
237 else
238 hw_dbg(hw, "I2C byte write combined error.\n");
239 } while (retry < max_retry);
240
241 return IXGBE_ERR_I2C;
242}
243
244
245
246
247
248
249
250
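/**
 * ixgbe_probe_phy - Probe a single PHY address for a PHY
 * @hw: pointer to hardware structure
 * @phy_addr: PHY address to probe
 *
 * Returns true if a PHY is found at the address and its type was determined.
 **/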
251static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
252{
253 u16 ext_ability = 0;
254
255 hw->phy.mdio.prtad = phy_addr;
256 if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0)
257 return false;
258
259 if (ixgbe_get_phy_id(hw))
260 return false;
261
262 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
263
264 if (hw->phy.type == ixgbe_phy_unknown) {
265 hw->phy.ops.read_reg(hw,
266 MDIO_PMA_EXTABLE,
267 MDIO_MMD_PMAPMD,
268 &ext_ability);
269 if (ext_ability &
270 (MDIO_PMA_EXTABLE_10GBT |
271 MDIO_PMA_EXTABLE_1000BT))
272 hw->phy.type = ixgbe_phy_cu_unknown;
273 else
274 hw->phy.type = ixgbe_phy_generic;
275 }
276
277 return true;
278}
279
280
281
282
283
284
285
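/**
 * ixgbe_identify_phy_generic - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 **/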
286s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
287{
288 u32 phy_addr;
289 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
290
291 if (!hw->phy.phy_semaphore_mask) {
292 if (hw->bus.lan_id)
293 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
294 else
295 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
296 }
297
298 if (hw->phy.type != ixgbe_phy_unknown)
299 return 0;
300
301 if (hw->phy.nw_mng_if_sel) {
302 phy_addr = (hw->phy.nw_mng_if_sel &
303 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
304 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
305 if (ixgbe_probe_phy(hw, phy_addr))
306 return 0;
307 else
308 return IXGBE_ERR_PHY_ADDR_INVALID;
309 }
310
311 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
312 if (ixgbe_probe_phy(hw, phy_addr)) {
313 status = 0;
314 break;
315 }
316 }
317
318
319
320
321
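 /* No PHY was found at any address; clear the stored MDIO address
  * so it is not used for later accesses.
  */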
322 if (status)
323 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
324
325 return status;
326}
327
328
329
330
331
332
333
334
335
336
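/**
 * ixgbe_check_reset_blocked - Check if reset is blocked by MNG FW
 * @hw: pointer to the hardware structure
 *
 * Returns true if the MNG_VETO bit is set in MMNGC, meaning the firmware
 * is using the PHY and a PHY reset must not be performed.
 **/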
337bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
338{
339 u32 mmngc;
340
341
342 if (hw->mac.type == ixgbe_mac_82598EB)
343 return false;
344
345 mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
346 if (mmngc & IXGBE_MMNGC_MNG_VETO) {
347 hw_dbg(hw, "MNG_VETO bit detected.\n");
348 return true;
349 }
350
351 return false;
352}
353
354
355
356
357
358
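/**
 * ixgbe_get_phy_id - Reads the PHY identification registers
 * @hw: pointer to hardware structure
 *
 * Builds hw->phy.id and hw->phy.revision from MDIO_DEVID1/MDIO_DEVID2.
 **/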
359static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
360{
361 s32 status;
362 u16 phy_id_high = 0;
363 u16 phy_id_low = 0;
364
365 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
366 &phy_id_high);
367
368 if (!status) {
369 hw->phy.id = (u32)(phy_id_high << 16);
370 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
371 &phy_id_low);
372 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
373 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
374 }
375 return status;
376}
377
378
379
380
381
382
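/**
 * ixgbe_get_phy_type_from_id - Map a PHY ID to an ixgbe PHY type
 * @phy_id: PHY ID read from the device
 *
 * Returns the PHY type for known IDs, or ixgbe_phy_unknown.
 **/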
383static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
384{
385 enum ixgbe_phy_type phy_type;
386
387 switch (phy_id) {
388 case TN1010_PHY_ID:
389 phy_type = ixgbe_phy_tn;
390 break;
391 case X550_PHY_ID2:
392 case X550_PHY_ID3:
393 case X540_PHY_ID:
394 phy_type = ixgbe_phy_aq;
395 break;
396 case QT2022_PHY_ID:
397 phy_type = ixgbe_phy_qt;
398 break;
399 case ATH_PHY_ID:
400 phy_type = ixgbe_phy_nl;
401 break;
402 case X557_PHY_ID:
403 case X557_PHY_ID2:
404 phy_type = ixgbe_phy_x550em_ext_t;
405 break;
406 default:
407 phy_type = ixgbe_phy_unknown;
408 break;
409 }
410
411 return phy_type;
412}
413
414
415
416
417
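/**
 * ixgbe_reset_phy_generic - Performs a PHY reset
 * @hw: pointer to hardware structure
 **/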
418s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
419{
420 u32 i;
421 u16 ctrl = 0;
422 s32 status = 0;
423
424 if (hw->phy.type == ixgbe_phy_unknown)
425 status = ixgbe_identify_phy_generic(hw);
426
427 if (status != 0 || hw->phy.type == ixgbe_phy_none)
428 return status;
429
430
431 if (!hw->phy.reset_if_overtemp &&
432 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
433 return 0;
434
435
436 if (ixgbe_check_reset_blocked(hw))
437 return 0;
438
439
440
441
442
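 /* Perform a soft PHY reset through the PHY_XS Control register. */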
443 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
444 MDIO_MMD_PHYXS,
445 MDIO_CTRL1_RESET);
446
447
448
449
450
451
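 /* Poll for the reset bit to self-clear indicating reset is complete;
  * x550em_ext_t PHYs report completion through a vendor alarm register
  * instead.
  */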
452 for (i = 0; i < 30; i++) {
453 msleep(100);
454 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
455 status = hw->phy.ops.read_reg(hw,
456 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
457 MDIO_MMD_PMAPMD, &ctrl);
458 if (status)
459 return status;
460
461 if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
462 udelay(2);
463 break;
464 }
465 } else {
466 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
467 MDIO_MMD_PHYXS, &ctrl);
468 if (status)
469 return status;
470
471 if (!(ctrl & MDIO_CTRL1_RESET)) {
472 udelay(2);
473 break;
474 }
475 }
476 }
477
478 if (ctrl & MDIO_CTRL1_RESET) {
479 hw_dbg(hw, "PHY reset polling failed to complete.\n");
480 return IXGBE_ERR_RESET_FAILED;
481 }
482
483 return 0;
484}
485
486
487
488
489
490
491
492
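/**
 * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register
 * without SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to read
 * @device_type: 5 bit device type
 * @phy_data: Pointer to read data from PHY register
 **/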
493s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
494 u16 *phy_data)
495{
496 u32 i, data, command;
497
498
499 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
500 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
501 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
502 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
503
504 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
505
506
507
508
509
510 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
511 udelay(10);
512
513 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
514 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
515 break;
516 }
517
518
519 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
520 hw_dbg(hw, "PHY address command did not complete.\n");
521 return IXGBE_ERR_PHY;
522 }
523
524
525
526
527 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
528 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
529 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
530 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
531
532 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
533
534
535
536
537
538 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
539 udelay(10);
540
541 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
542 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
543 break;
544 }
545
546 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
547 hw_dbg(hw, "PHY read command didn't complete\n");
548 return IXGBE_ERR_PHY;
549 }
550
551
552
553
554 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
555 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
556 *phy_data = (u16)(data);
557
558 return 0;
559}
560
561
562
563
564
565
566
567
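/**
 * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
 * using the SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to read
 * @device_type: 5 bit device type
 * @phy_data: Pointer to read data from PHY register
 **/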
568s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
569 u32 device_type, u16 *phy_data)
570{
571 s32 status;
572 u32 gssr = hw->phy.phy_semaphore_mask;
573
574 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
575 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
576 phy_data);
577 hw->mac.ops.release_swfw_sync(hw, gssr);
578 } else {
579 return IXGBE_ERR_SWFW_SYNC;
580 }
581
582 return status;
583}
584
585
586
587
588
589
590
591
592
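/**
 * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
 * without SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to write
 * @device_type: 5 bit device type
 * @phy_data: Data to write to the PHY register
 **/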
593s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
594 u32 device_type, u16 phy_data)
595{
596 u32 i, command;
597
598
599 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
600
601
602 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
603 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
604 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
605 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
606
607 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
608
609
610
611
612
613
614 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
615 udelay(10);
616
617 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
618 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
619 break;
620 }
621
622 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
623 hw_dbg(hw, "PHY address cmd didn't complete\n");
624 return IXGBE_ERR_PHY;
625 }
626
627
628
629
630
631 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
632 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
633 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
634 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
635
636 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
637
638
639
640
641
642 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
643 udelay(10);
644
645 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
646 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
647 break;
648 }
649
650 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
651 hw_dbg(hw, "PHY write cmd didn't complete\n");
652 return IXGBE_ERR_PHY;
653 }
654
655 return 0;
656}
657
658
659
660
661
662
663
664
665
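/**
 * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
 * using SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to write
 * @device_type: 5 bit device type
 * @phy_data: Data to write to the PHY register
 **/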
666s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
667 u32 device_type, u16 phy_data)
668{
669 s32 status;
670 u32 gssr = hw->phy.phy_semaphore_mask;
671
672 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
673 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
674 phy_data);
675 hw->mac.ops.release_swfw_sync(hw, gssr);
676 } else {
677 return IXGBE_ERR_SWFW_SYNC;
678 }
679
680 return status;
681}
682
683
684
685
686
687
688
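/**
 * ixgbe_setup_phy_link_generic - Set and restart autoneg
 * @hw: pointer to hardware structure
 *
 * Configures the advertised link speeds and restarts PHY autonegotiation.
 **/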
689s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
690{
691 s32 status = 0;
692 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
693 bool autoneg = false;
694 ixgbe_link_speed speed;
695
696 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
697
698
699 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg);
700
701 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
702 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
703 (speed & IXGBE_LINK_SPEED_10GB_FULL))
704 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
705
706 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg);
707
708 hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
709 MDIO_MMD_AN, &autoneg_reg);
710
711 if (hw->mac.type == ixgbe_mac_X550) {
712
713 autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
714 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
715 (speed & IXGBE_LINK_SPEED_5GB_FULL))
716 autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
717
718
719 autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
720 if ((hw->phy.autoneg_advertised &
721 IXGBE_LINK_SPEED_2_5GB_FULL) &&
722 (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
723 autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
724 }
725
726
727 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
728 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
729 (speed & IXGBE_LINK_SPEED_1GB_FULL))
730 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
731
732 hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
733 MDIO_MMD_AN, autoneg_reg);
734
735
736 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);
737
738 autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF);
739 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
740 (speed & IXGBE_LINK_SPEED_100_FULL))
741 autoneg_reg |= ADVERTISE_100FULL;
742
743 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);
744
745
746 if (ixgbe_check_reset_blocked(hw))
747 return 0;
748
749
750 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
751 MDIO_MMD_AN, &autoneg_reg);
752
753 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
754
755 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
756 MDIO_MMD_AN, autoneg_reg);
757
758 return status;
759}
760
761
762
763
764
765
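/**
 * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: unused
 **/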
766s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
767 ixgbe_link_speed speed,
768 bool autoneg_wait_to_complete)
769{
770
771
772
773 hw->phy.autoneg_advertised = 0;
774
775 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
776 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
777
778 if (speed & IXGBE_LINK_SPEED_5GB_FULL)
779 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
780
781 if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
782 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
783
784 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
785 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
786
787 if (speed & IXGBE_LINK_SPEED_100_FULL)
788 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
789
790 if (speed & IXGBE_LINK_SPEED_10_FULL)
791 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
792
793
794 if (hw->phy.ops.setup_link)
795 hw->phy.ops.setup_link(hw);
796
797 return 0;
798}
799
800
801
802
803
804
805
806
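/**
 * ixgbe_get_copper_speeds_supported - Get copper link speed capabilities
 * @hw: pointer to hardware structure
 *
 * Determines the supported link capabilities by reading the PHY speed
 * ability register.
 **/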
807static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
808{
809 u16 speed_ability;
810 s32 status;
811
812 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
813 &speed_ability);
814 if (status)
815 return status;
816
817 if (speed_ability & MDIO_SPEED_10G)
818 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
819 if (speed_ability & MDIO_PMA_SPEED_1000)
820 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
821 if (speed_ability & MDIO_PMA_SPEED_100)
822 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
823
824 switch (hw->mac.type) {
825 case ixgbe_mac_X550:
826 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
827 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
828 break;
829 case ixgbe_mac_X550EM_x:
830 case ixgbe_mac_x550em_a:
831 hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
832 break;
833 default:
834 break;
835 }
836
837 return 0;
838}
839
840
841
842
843
844
845
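/**
 * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: boolean auto-negotiation value
 **/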
846s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
847 ixgbe_link_speed *speed,
848 bool *autoneg)
849{
850 s32 status = 0;
851
852 *autoneg = true;
853 if (!hw->phy.speeds_supported)
854 status = ixgbe_get_copper_speeds_supported(hw);
855
856 *speed = hw->phy.speeds_supported;
857 return status;
858}
859
860
861
862
863
864
865
866
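/**
 * ixgbe_check_phy_link_tnx - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: pointer to a boolean that reports whether link is up
 *
 * Polls the vendor-specific status register to determine if link is up and
 * at which speed.
 **/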
867s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
868 bool *link_up)
869{
870 s32 status;
871 u32 time_out;
872 u32 max_time_out = 10;
873 u16 phy_link = 0;
874 u16 phy_speed = 0;
875 u16 phy_data = 0;
876
877
878 *link_up = false;
879 *speed = IXGBE_LINK_SPEED_10GB_FULL;
880
881
882
883
884
885
886 for (time_out = 0; time_out < max_time_out; time_out++) {
887 udelay(10);
888 status = hw->phy.ops.read_reg(hw,
889 MDIO_STAT1,
890 MDIO_MMD_VEND1,
891 &phy_data);
892 phy_link = phy_data &
893 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
894 phy_speed = phy_data &
895 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
896 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
897 *link_up = true;
898 if (phy_speed ==
899 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
900 *speed = IXGBE_LINK_SPEED_1GB_FULL;
901 break;
902 }
903 }
904
905 return status;
906}
907
908
909
910
911
912
913
914
915
916
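/**
 * ixgbe_setup_phy_link_tnx - Set and restart autoneg
 * @hw: pointer to hardware structure
 *
 * Configures the advertised speeds for the TNX PHY and restarts
 * autonegotiation.
 **/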
917s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
918{
919 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
920 bool autoneg = false;
921 ixgbe_link_speed speed;
922
923 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
924
925 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
926
927 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
928 MDIO_MMD_AN,
929 &autoneg_reg);
930
931 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
932 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
933 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
934
935 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
936 MDIO_MMD_AN,
937 autoneg_reg);
938 }
939
940 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
941
942 hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
943 MDIO_MMD_AN,
944 &autoneg_reg);
945
946 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
947 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
948 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
949
950 hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
951 MDIO_MMD_AN,
952 autoneg_reg);
953 }
954
955 if (speed & IXGBE_LINK_SPEED_100_FULL) {
956
957 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
958 MDIO_MMD_AN,
959 &autoneg_reg);
960
961 autoneg_reg &= ~(ADVERTISE_100FULL |
962 ADVERTISE_100HALF);
963 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
964 autoneg_reg |= ADVERTISE_100FULL;
965
966 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
967 MDIO_MMD_AN,
968 autoneg_reg);
969 }
970
971
972 if (ixgbe_check_reset_blocked(hw))
973 return 0;
974
975
976 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
977 MDIO_MMD_AN, &autoneg_reg);
978
979 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
980
981 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
982 MDIO_MMD_AN, autoneg_reg);
983 return 0;
984}
985
986
987
988
989
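/**
 * ixgbe_reset_phy_nl - Performs a PHY reset
 * @hw: pointer to hardware structure
 *
 * Resets the PHY and then applies the SFP+ module init sequence stored in
 * the NVM, if one is present.
 **/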
990s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
991{
992 u16 phy_offset, control, eword, edata, block_crc;
993 bool end_data = false;
994 u16 list_offset, data_offset;
995 u16 phy_data = 0;
996 s32 ret_val;
997 u32 i;
998
999
1000 if (ixgbe_check_reset_blocked(hw))
1001 return 0;
1002
1003 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
1004
1005
1006 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
1007 (phy_data | MDIO_CTRL1_RESET));
1008
1009 for (i = 0; i < 100; i++) {
1010 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
1011 &phy_data);
1012 if ((phy_data & MDIO_CTRL1_RESET) == 0)
1013 break;
1014 usleep_range(10000, 20000);
1015 }
1016
1017 if ((phy_data & MDIO_CTRL1_RESET) != 0) {
1018 hw_dbg(hw, "PHY reset did not complete.\n");
1019 return IXGBE_ERR_PHY;
1020 }
1021
1022
1023 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
1024 &data_offset);
1025 if (ret_val)
1026 return ret_val;
1027
1028 ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
1029 data_offset++;
1030 while (!end_data) {
1031
1032
1033
1034 ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
1035 if (ret_val)
1036 goto err_eeprom;
1037 control = (eword & IXGBE_CONTROL_MASK_NL) >>
1038 IXGBE_CONTROL_SHIFT_NL;
1039 edata = eword & IXGBE_DATA_MASK_NL;
1040 switch (control) {
1041 case IXGBE_DELAY_NL:
1042 data_offset++;
1043 hw_dbg(hw, "DELAY: %d MS\n", edata);
1044 usleep_range(edata * 1000, edata * 2000);
1045 break;
1046 case IXGBE_DATA_NL:
1047 hw_dbg(hw, "DATA:\n");
1048 data_offset++;
1049 ret_val = hw->eeprom.ops.read(hw, data_offset++,
1050 &phy_offset);
1051 if (ret_val)
1052 goto err_eeprom;
1053 for (i = 0; i < edata; i++) {
1054 ret_val = hw->eeprom.ops.read(hw, data_offset,
1055 &eword);
1056 if (ret_val)
1057 goto err_eeprom;
1058 hw->phy.ops.write_reg(hw, phy_offset,
1059 MDIO_MMD_PMAPMD, eword);
1060 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
1061 phy_offset);
1062 data_offset++;
1063 phy_offset++;
1064 }
1065 break;
1066 case IXGBE_CONTROL_NL:
1067 data_offset++;
1068 hw_dbg(hw, "CONTROL:\n");
1069 if (edata == IXGBE_CONTROL_EOL_NL) {
1070 hw_dbg(hw, "EOL\n");
1071 end_data = true;
1072 } else if (edata == IXGBE_CONTROL_SOL_NL) {
1073 hw_dbg(hw, "SOL\n");
1074 } else {
1075 hw_dbg(hw, "Bad control value\n");
1076 return IXGBE_ERR_PHY;
1077 }
1078 break;
1079 default:
1080 hw_dbg(hw, "Bad control type\n");
1081 return IXGBE_ERR_PHY;
1082 }
1083 }
1084
1085 return ret_val;
1086
1087err_eeprom:
1088 hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
1089 return IXGBE_ERR_PHY;
1090}
1091
1092
1093
1094
1095
1096
1097
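/**
 * ixgbe_identify_module_generic - Identifies module type
 * @hw: pointer to hardware structure
 *
 * Determines HW type and calls appropriate function.
 **/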
1098s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
1099{
1100 switch (hw->mac.ops.get_media_type(hw)) {
1101 case ixgbe_media_type_fiber:
1102 return ixgbe_identify_sfp_module_generic(hw);
1103 case ixgbe_media_type_fiber_qsfp:
1104 return ixgbe_identify_qsfp_module_generic(hw);
1105 default:
1106 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1107 return IXGBE_ERR_SFP_NOT_PRESENT;
1108 }
1109
1110 return IXGBE_ERR_SFP_NOT_PRESENT;
1111}
1112
1113
1114
1115
1116
1117
1118
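/**
 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
 * @hw: pointer to hardware structure
 *
 * Searches for and identifies the SFP module and assigns appropriate PHY type.
 **/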
1119s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1120{
1121 struct ixgbe_adapter *adapter = hw->back;
1122 s32 status;
1123 u32 vendor_oui = 0;
1124 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1125 u8 identifier = 0;
1126 u8 comp_codes_1g = 0;
1127 u8 comp_codes_10g = 0;
1128 u8 oui_bytes[3] = {0, 0, 0};
1129 u8 cable_tech = 0;
1130 u8 cable_spec = 0;
1131 u16 enforce_sfp = 0;
1132
1133 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
1134 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1135 return IXGBE_ERR_SFP_NOT_PRESENT;
1136 }
1137
1138
1139 hw->mac.ops.set_lan_id(hw);
1140
1141 status = hw->phy.ops.read_i2c_eeprom(hw,
1142 IXGBE_SFF_IDENTIFIER,
1143 &identifier);
1144
1145 if (status)
1146 goto err_read_i2c_eeprom;
1147
1148 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
1149 hw->phy.type = ixgbe_phy_sfp_unsupported;
1150 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1151 }
1152 status = hw->phy.ops.read_i2c_eeprom(hw,
1153 IXGBE_SFF_1GBE_COMP_CODES,
1154 &comp_codes_1g);
1155
1156 if (status)
1157 goto err_read_i2c_eeprom;
1158
1159 status = hw->phy.ops.read_i2c_eeprom(hw,
1160 IXGBE_SFF_10GBE_COMP_CODES,
1161 &comp_codes_10g);
1162
1163 if (status)
1164 goto err_read_i2c_eeprom;
1165 status = hw->phy.ops.read_i2c_eeprom(hw,
1166 IXGBE_SFF_CABLE_TECHNOLOGY,
1167 &cable_tech);
1168
1169 if (status)
1170 goto err_read_i2c_eeprom;
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
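 /* Determine the SFP module type from the cable technology byte and the
  * 1G/10G compliance codes; 82599 and later parts track the type per LAN
  * port (core0/core1).
  */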
1188 if (hw->mac.type == ixgbe_mac_82598EB) {
1189 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1190 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
1191 else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
1192 hw->phy.sfp_type = ixgbe_sfp_type_sr;
1193 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
1194 hw->phy.sfp_type = ixgbe_sfp_type_lr;
1195 else
1196 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1197 } else {
1198 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
1199 if (hw->bus.lan_id == 0)
1200 hw->phy.sfp_type =
1201 ixgbe_sfp_type_da_cu_core0;
1202 else
1203 hw->phy.sfp_type =
1204 ixgbe_sfp_type_da_cu_core1;
1205 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
1206 hw->phy.ops.read_i2c_eeprom(
1207 hw, IXGBE_SFF_CABLE_SPEC_COMP,
1208 &cable_spec);
1209 if (cable_spec &
1210 IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
1211 if (hw->bus.lan_id == 0)
1212 hw->phy.sfp_type =
1213 ixgbe_sfp_type_da_act_lmt_core0;
1214 else
1215 hw->phy.sfp_type =
1216 ixgbe_sfp_type_da_act_lmt_core1;
1217 } else {
1218 hw->phy.sfp_type =
1219 ixgbe_sfp_type_unknown;
1220 }
1221 } else if (comp_codes_10g &
1222 (IXGBE_SFF_10GBASESR_CAPABLE |
1223 IXGBE_SFF_10GBASELR_CAPABLE)) {
1224 if (hw->bus.lan_id == 0)
1225 hw->phy.sfp_type =
1226 ixgbe_sfp_type_srlr_core0;
1227 else
1228 hw->phy.sfp_type =
1229 ixgbe_sfp_type_srlr_core1;
1230 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
1231 if (hw->bus.lan_id == 0)
1232 hw->phy.sfp_type =
1233 ixgbe_sfp_type_1g_cu_core0;
1234 else
1235 hw->phy.sfp_type =
1236 ixgbe_sfp_type_1g_cu_core1;
1237 } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
1238 if (hw->bus.lan_id == 0)
1239 hw->phy.sfp_type =
1240 ixgbe_sfp_type_1g_sx_core0;
1241 else
1242 hw->phy.sfp_type =
1243 ixgbe_sfp_type_1g_sx_core1;
1244 } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
1245 if (hw->bus.lan_id == 0)
1246 hw->phy.sfp_type =
1247 ixgbe_sfp_type_1g_lx_core0;
1248 else
1249 hw->phy.sfp_type =
1250 ixgbe_sfp_type_1g_lx_core1;
1251 } else {
1252 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1253 }
1254 }
1255
1256 if (hw->phy.sfp_type != stored_sfp_type)
1257 hw->phy.sfp_setup_needed = true;
1258
1259
1260 hw->phy.multispeed_fiber = false;
1261 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1262 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1263 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1264 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1265 hw->phy.multispeed_fiber = true;
1266
1267
1268 if (hw->phy.type != ixgbe_phy_nl) {
1269 hw->phy.id = identifier;
1270 status = hw->phy.ops.read_i2c_eeprom(hw,
1271 IXGBE_SFF_VENDOR_OUI_BYTE0,
1272 &oui_bytes[0]);
1273
1274 if (status != 0)
1275 goto err_read_i2c_eeprom;
1276
1277 status = hw->phy.ops.read_i2c_eeprom(hw,
1278 IXGBE_SFF_VENDOR_OUI_BYTE1,
1279 &oui_bytes[1]);
1280
1281 if (status != 0)
1282 goto err_read_i2c_eeprom;
1283
1284 status = hw->phy.ops.read_i2c_eeprom(hw,
1285 IXGBE_SFF_VENDOR_OUI_BYTE2,
1286 &oui_bytes[2]);
1287
1288 if (status != 0)
1289 goto err_read_i2c_eeprom;
1290
1291 vendor_oui =
1292 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1293 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1294 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1295
1296 switch (vendor_oui) {
1297 case IXGBE_SFF_VENDOR_OUI_TYCO:
1298 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1299 hw->phy.type =
1300 ixgbe_phy_sfp_passive_tyco;
1301 break;
1302 case IXGBE_SFF_VENDOR_OUI_FTL:
1303 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1304 hw->phy.type = ixgbe_phy_sfp_ftl_active;
1305 else
1306 hw->phy.type = ixgbe_phy_sfp_ftl;
1307 break;
1308 case IXGBE_SFF_VENDOR_OUI_AVAGO:
1309 hw->phy.type = ixgbe_phy_sfp_avago;
1310 break;
1311 case IXGBE_SFF_VENDOR_OUI_INTEL:
1312 hw->phy.type = ixgbe_phy_sfp_intel;
1313 break;
1314 default:
1315 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1316 hw->phy.type =
1317 ixgbe_phy_sfp_passive_unknown;
1318 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1319 hw->phy.type =
1320 ixgbe_phy_sfp_active_unknown;
1321 else
1322 hw->phy.type = ixgbe_phy_sfp_unknown;
1323 break;
1324 }
1325 }
1326
1327
1328 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
1329 IXGBE_SFF_DA_ACTIVE_CABLE))
1330 return 0;
1331
1332
1333 if (comp_codes_10g == 0 &&
1334 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1335 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1336 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1337 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1338 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1339 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1340 hw->phy.type = ixgbe_phy_sfp_unsupported;
1341 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1342 }
1343
1344
1345 if (hw->mac.type == ixgbe_mac_82598EB)
1346 return 0;
1347
1348 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1349 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
1350 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1351 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1352 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1353 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1354 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1355 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1356
1357 if (hw->phy.type == ixgbe_phy_sfp_intel)
1358 return 0;
1359 if (hw->allow_unsupported_sfp) {
1360 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1361 return 0;
1362 }
1363 hw_dbg(hw, "SFP+ module not supported\n");
1364 hw->phy.type = ixgbe_phy_sfp_unsupported;
1365 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1366 }
1367 return 0;
1368
1369err_read_i2c_eeprom:
1370 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1371 if (hw->phy.type != ixgbe_phy_nl) {
1372 hw->phy.id = 0;
1373 hw->phy.type = ixgbe_phy_unknown;
1374 }
1375 return IXGBE_ERR_SFP_NOT_PRESENT;
1376}
1377
1378
1379
1380
1381
1382
1383
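/**
 * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
 * @hw: pointer to hardware structure
 *
 * Searches for and identifies the QSFP module and assigns appropriate PHY type.
 **/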
1384s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1385{
1386 struct ixgbe_adapter *adapter = hw->back;
1387 s32 status;
1388 u32 vendor_oui = 0;
1389 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1390 u8 identifier = 0;
1391 u8 comp_codes_1g = 0;
1392 u8 comp_codes_10g = 0;
1393 u8 oui_bytes[3] = {0, 0, 0};
1394 u16 enforce_sfp = 0;
1395 u8 connector = 0;
1396 u8 cable_length = 0;
1397 u8 device_tech = 0;
1398 bool active_cable = false;
1399
1400 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
1401 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1402 return IXGBE_ERR_SFP_NOT_PRESENT;
1403 }
1404
1405
1406 hw->mac.ops.set_lan_id(hw);
1407
1408 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
1409 &identifier);
1410
1411 if (status != 0)
1412 goto err_read_i2c_eeprom;
1413
1414 if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
1415 hw->phy.type = ixgbe_phy_sfp_unsupported;
1416 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1417 }
1418
1419 hw->phy.id = identifier;
1420
1421 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
1422 &comp_codes_10g);
1423
1424 if (status != 0)
1425 goto err_read_i2c_eeprom;
1426
1427 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
1428 &comp_codes_1g);
1429
1430 if (status != 0)
1431 goto err_read_i2c_eeprom;
1432
1433 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
1434 hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
1435 if (hw->bus.lan_id == 0)
1436 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
1437 else
1438 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
1439 } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1440 IXGBE_SFF_10GBASELR_CAPABLE)) {
1441 if (hw->bus.lan_id == 0)
1442 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
1443 else
1444 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
1445 } else {
1446 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
1447 active_cable = true;
1448
1449 if (!active_cable) {
1450
1451
1452
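 /* Some active DA cables do not set the active-cable compliance bit,
  * so treat a non-separable connector with a non-zero cable length and
  * an 850nm VCSEL transmitter as an active cable.
  */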
1453 hw->phy.ops.read_i2c_eeprom(hw,
1454 IXGBE_SFF_QSFP_CONNECTOR,
1455 &connector);
1456
1457 hw->phy.ops.read_i2c_eeprom(hw,
1458 IXGBE_SFF_QSFP_CABLE_LENGTH,
1459 &cable_length);
1460
1461 hw->phy.ops.read_i2c_eeprom(hw,
1462 IXGBE_SFF_QSFP_DEVICE_TECH,
1463 &device_tech);
1464
1465 if ((connector ==
1466 IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
1467 (cable_length > 0) &&
1468 ((device_tech >> 4) ==
1469 IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
1470 active_cable = true;
1471 }
1472
1473 if (active_cable) {
1474 hw->phy.type = ixgbe_phy_qsfp_active_unknown;
1475 if (hw->bus.lan_id == 0)
1476 hw->phy.sfp_type =
1477 ixgbe_sfp_type_da_act_lmt_core0;
1478 else
1479 hw->phy.sfp_type =
1480 ixgbe_sfp_type_da_act_lmt_core1;
1481 } else {
1482
1483 hw->phy.type = ixgbe_phy_sfp_unsupported;
1484 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1485 }
1486 }
1487
1488 if (hw->phy.sfp_type != stored_sfp_type)
1489 hw->phy.sfp_setup_needed = true;
1490
1491
1492 hw->phy.multispeed_fiber = false;
1493 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1494 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1495 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1496 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1497 hw->phy.multispeed_fiber = true;
1498
1499
1500 if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1501 IXGBE_SFF_10GBASELR_CAPABLE)) {
1502 status = hw->phy.ops.read_i2c_eeprom(hw,
1503 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
1504 &oui_bytes[0]);
1505
1506 if (status != 0)
1507 goto err_read_i2c_eeprom;
1508
1509 status = hw->phy.ops.read_i2c_eeprom(hw,
1510 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
1511 &oui_bytes[1]);
1512
1513 if (status != 0)
1514 goto err_read_i2c_eeprom;
1515
1516 status = hw->phy.ops.read_i2c_eeprom(hw,
1517 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
1518 &oui_bytes[2]);
1519
1520 if (status != 0)
1521 goto err_read_i2c_eeprom;
1522
1523 vendor_oui =
1524 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1525 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1526 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1527
1528 if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
1529 hw->phy.type = ixgbe_phy_qsfp_intel;
1530 else
1531 hw->phy.type = ixgbe_phy_qsfp_unknown;
1532
1533 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1534 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
1535
1536 if (hw->phy.type == ixgbe_phy_qsfp_intel)
1537 return 0;
1538 if (hw->allow_unsupported_sfp) {
1539 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1540 return 0;
1541 }
1542 hw_dbg(hw, "QSFP module not supported\n");
1543 hw->phy.type = ixgbe_phy_sfp_unsupported;
1544 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1545 }
1546 return 0;
1547 }
1548 return 0;
1549
1550err_read_i2c_eeprom:
1551 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1552 hw->phy.id = 0;
1553 hw->phy.type = ixgbe_phy_unknown;
1554
1555 return IXGBE_ERR_SFP_NOT_PRESENT;
1556}
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
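/**
 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
 * @hw: pointer to hardware structure
 * @list_offset: offset to the SFP ID list
 * @data_offset: offset to the SFP data block
 *
 * Checks the MAC's EEPROM to see if it supports a given SFP+ module type,
 * and if so it returns the offsets to the phy init sequence block.
 **/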
1567s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1568 u16 *list_offset,
1569 u16 *data_offset)
1570{
1571 u16 sfp_id;
1572 u16 sfp_type = hw->phy.sfp_type;
1573
1574 if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
1575 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1576
1577 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1578 return IXGBE_ERR_SFP_NOT_PRESENT;
1579
1580 if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
1581 (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
1582 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1583
1584
1585
1586
1587
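 /* Limiting active cables and 1G modules use the same init sequence as
  * SR/LR modules, so map them to the matching srlr core type before
  * searching the list.
  */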
1588 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
1589 sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1590 sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1591 sfp_type == ixgbe_sfp_type_1g_sx_core0)
1592 sfp_type = ixgbe_sfp_type_srlr_core0;
1593 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
1594 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1595 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1596 sfp_type == ixgbe_sfp_type_1g_sx_core1)
1597 sfp_type = ixgbe_sfp_type_srlr_core1;
1598
1599
1600 if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
1601 hw_err(hw, "eeprom read at %d failed\n",
1602 IXGBE_PHY_INIT_OFFSET_NL);
1603 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
1604 }
1605
1606 if ((!*list_offset) || (*list_offset == 0xFFFF))
1607 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
1608
1609
1610 (*list_offset)++;
1611
1612
1613
1614
1615
1616 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1617 goto err_phy;
1618
1619 while (sfp_id != IXGBE_PHY_INIT_END_NL) {
1620 if (sfp_id == sfp_type) {
1621 (*list_offset)++;
1622 if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
1623 goto err_phy;
1624 if ((!*data_offset) || (*data_offset == 0xFFFF)) {
1625 hw_dbg(hw, "SFP+ module not supported\n");
1626 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1627 } else {
1628 break;
1629 }
1630 } else {
1631 (*list_offset) += 2;
1632 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1633 goto err_phy;
1634 }
1635 }
1636
1637 if (sfp_id == IXGBE_PHY_INIT_END_NL) {
1638 hw_dbg(hw, "No matching SFP+ module found\n");
1639 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1640 }
1641
1642 return 0;
1643
1644err_phy:
1645 hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
1646 return IXGBE_ERR_PHY;
1647}
1648
1649
1650
1651
1652
1653
1654
1655
1656
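/**
 * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs byte read operation to SFP module's EEPROM over I2C interface.
 **/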
1657s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1658 u8 *eeprom_data)
1659{
1660 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1661 IXGBE_I2C_EEPROM_DEV_ADDR,
1662 eeprom_data);
1663}
1664
1665
1666
1667
1668
1669
1670
1671
1672
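/**
 * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset at the SFF-8472 diagnostics address
 * @sff8472_data: value read
 *
 * Performs byte read operation from the SFP module's SFF-8472 data over I2C.
 **/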
1673s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1674 u8 *sff8472_data)
1675{
1676 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1677 IXGBE_I2C_EEPROM_DEV_ADDR2,
1678 sff8472_data);
1679}
1680
1681
1682
1683
1684
1685
1686
1687
1688
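/**
 * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to write
 * @eeprom_data: value to write
 *
 * Performs byte write operation to SFP module's EEPROM over I2C interface.
 **/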
1689s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1690 u8 eeprom_data)
1691{
1692 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1693 IXGBE_I2C_EEPROM_DEV_ADDR,
1694 eeprom_data);
1695}
1696
1697
1698
1699
1700
1701
1702
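/**
 * ixgbe_is_sfp_probe - Returns true if an SFP is being detected
 * @hw: pointer to hardware structure
 * @offset: eeprom offset to be read
 * @addr: I2C address to be read
 *
 * True when the identifier byte of a module currently marked not present
 * is being read, i.e. an SFP detection probe.
 **/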
1703static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
1704{
1705 if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
1706 offset == IXGBE_SFF_IDENTIFIER &&
1707 hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1708 return true;
1709 return false;
1710}
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
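/**
 * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address
 * @data: value read
 * @lock: true if to take and release semaphore
 *
 * Performs byte read operation to SFP module's EEPROM over I2C interface at
 * a specified device address.
 **/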
1722static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
1723 u8 dev_addr, u8 *data, bool lock)
1724{
1725 s32 status;
1726 u32 max_retry = 10;
1727 u32 retry = 0;
1728 u32 swfw_mask = hw->phy.phy_semaphore_mask;
1729 bool nack = true;
1730
1731 if (hw->mac.type >= ixgbe_mac_X550)
1732 max_retry = 3;
1733 if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
1734 max_retry = IXGBE_SFP_DETECT_RETRIES;
1735
1736 *data = 0;
1737
1738 do {
1739 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
1740 return IXGBE_ERR_SWFW_SYNC;
1741
1742 ixgbe_i2c_start(hw);
1743
1744
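 /* Device address and write indication */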
1745 status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
1746 if (status != 0)
1747 goto fail;
1748
1749 status = ixgbe_get_i2c_ack(hw);
1750 if (status != 0)
1751 goto fail;
1752
1753 status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
1754 if (status != 0)
1755 goto fail;
1756
1757 status = ixgbe_get_i2c_ack(hw);
1758 if (status != 0)
1759 goto fail;
1760
1761 ixgbe_i2c_start(hw);
1762
1763
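 /* Device address and read indication */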
1764 status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
1765 if (status != 0)
1766 goto fail;
1767
1768 status = ixgbe_get_i2c_ack(hw);
1769 if (status != 0)
1770 goto fail;
1771
1772 status = ixgbe_clock_in_i2c_byte(hw, data);
1773 if (status != 0)
1774 goto fail;
1775
1776 status = ixgbe_clock_out_i2c_bit(hw, nack);
1777 if (status != 0)
1778 goto fail;
1779
1780 ixgbe_i2c_stop(hw);
1781 if (lock)
1782 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1783 return 0;
1784
1785fail:
1786 ixgbe_i2c_bus_clear(hw);
1787 if (lock) {
1788 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1789 msleep(100);
1790 }
1791 retry++;
1792 if (retry < max_retry)
1793 hw_dbg(hw, "I2C byte read error - Retrying.\n");
1794 else
1795 hw_dbg(hw, "I2C byte read error.\n");
1796
1797 } while (retry < max_retry);
1798
1799 return status;
1800}
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
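/**
 * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address
 * @data: value read
 *
 * Performs byte read operation to SFP module's EEPROM over I2C interface at
 * a specified device address, taking the SWFW semaphore.
 **/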
1811s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1812 u8 dev_addr, u8 *data)
1813{
1814 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1815 data, true);
1816}
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
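/**
 * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address
 * @data: value read
 *
 * Performs byte read operation without taking the SWFW semaphore.
 **/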
1827s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
1828 u8 dev_addr, u8 *data)
1829{
1830 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1831 data, false);
1832}
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
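/**
 * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address
 * @data: value to write
 * @lock: true if to take and release semaphore
 *
 * Performs byte write operation to SFP module's EEPROM over I2C interface at
 * a specified device address.
 **/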
1844static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
1845 u8 dev_addr, u8 data, bool lock)
1846{
1847 s32 status;
1848 u32 max_retry = 1;
1849 u32 retry = 0;
1850 u32 swfw_mask = hw->phy.phy_semaphore_mask;
1851
1852 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
1853 return IXGBE_ERR_SWFW_SYNC;
1854
1855 do {
1856 ixgbe_i2c_start(hw);
1857
1858 status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
1859 if (status != 0)
1860 goto fail;
1861
1862 status = ixgbe_get_i2c_ack(hw);
1863 if (status != 0)
1864 goto fail;
1865
1866 status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
1867 if (status != 0)
1868 goto fail;
1869
1870 status = ixgbe_get_i2c_ack(hw);
1871 if (status != 0)
1872 goto fail;
1873
1874 status = ixgbe_clock_out_i2c_byte(hw, data);
1875 if (status != 0)
1876 goto fail;
1877
1878 status = ixgbe_get_i2c_ack(hw);
1879 if (status != 0)
1880 goto fail;
1881
1882 ixgbe_i2c_stop(hw);
1883 if (lock)
1884 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1885 return 0;
1886
1887fail:
1888 ixgbe_i2c_bus_clear(hw);
1889 retry++;
1890 if (retry < max_retry)
1891 hw_dbg(hw, "I2C byte write error - Retrying.\n");
1892 else
1893 hw_dbg(hw, "I2C byte write error.\n");
1894 } while (retry < max_retry);
1895
1896 if (lock)
1897 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1898
1899 return status;
1900}
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
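/**
 * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address
 * @data: value to write
 *
 * Performs byte write operation to SFP module's EEPROM over I2C interface at
 * a specified device address, taking the SWFW semaphore.
 **/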
1911s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1912 u8 dev_addr, u8 data)
1913{
1914 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1915 data, true);
1916}
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
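/**
 * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address
 * @data: value to write
 *
 * Performs byte write operation without taking the SWFW semaphore.
 **/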
1927s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
1928 u8 dev_addr, u8 data)
1929{
1930 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1931 data, false);
1932}
1933
1934
1935
1936
1937
1938
1939
1940
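/**
 * ixgbe_i2c_start - Sets I2C start condition
 * @hw: pointer to hardware structure
 *
 * Sets I2C start condition (High -> Low on SDA while SCL is High).
 **/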
1941static void ixgbe_i2c_start(struct ixgbe_hw *hw)
1942{
1943 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
1944
1945 i2cctl |= IXGBE_I2C_BB_EN(hw);
1946
1947
1948 ixgbe_set_i2c_data(hw, &i2cctl, 1);
1949 ixgbe_raise_i2c_clk(hw, &i2cctl);
1950
1951
1952 udelay(IXGBE_I2C_T_SU_STA);
1953
1954 ixgbe_set_i2c_data(hw, &i2cctl, 0);
1955
1956
1957 udelay(IXGBE_I2C_T_HD_STA);
1958
1959 ixgbe_lower_i2c_clk(hw, &i2cctl);
1960
1961
1962 udelay(IXGBE_I2C_T_LOW);
1963
1964}
1965
1966
1967
1968
1969
1970
1971
1972
1973
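/**
 * ixgbe_i2c_stop - Sets I2C stop condition
 * @hw: pointer to hardware structure
 *
 * Sets I2C stop condition (Low -> High on SDA while SCL is High).
 **/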
1974static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
1975{
1976 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
1977 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
1978 u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
1979 u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);
1980
1981
1982 ixgbe_set_i2c_data(hw, &i2cctl, 0);
1983 ixgbe_raise_i2c_clk(hw, &i2cctl);
1984
1985
1986 udelay(IXGBE_I2C_T_SU_STO);
1987
1988 ixgbe_set_i2c_data(hw, &i2cctl, 1);
1989
1990
1991 udelay(IXGBE_I2C_T_BUF);
1992
1993 if (bb_en_bit || data_oe_bit || clk_oe_bit) {
1994 i2cctl &= ~bb_en_bit;
1995 i2cctl |= data_oe_bit | clk_oe_bit;
1996 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
1997 IXGBE_WRITE_FLUSH(hw);
1998 }
1999}
2000
2001
2002
2003
2004
2005
2006
2007
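/**
 * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
 * @hw: pointer to hardware structure
 * @data: data byte to clock in
 **/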
2008static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
2009{
2010 s32 i;
2011 bool bit = false;
2012
2013 *data = 0;
2014 for (i = 7; i >= 0; i--) {
2015 ixgbe_clock_in_i2c_bit(hw, &bit);
2016 *data |= bit << i;
2017 }
2018
2019 return 0;
2020}
2021
2022
2023
2024
2025
2026
2027
2028
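/**
 * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
 * @hw: pointer to hardware structure
 * @data: data byte clocked out
 **/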
2029static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
2030{
2031 s32 status;
2032 s32 i;
2033 u32 i2cctl;
2034 bool bit = false;
2035
2036 for (i = 7; i >= 0; i--) {
2037 bit = (data >> i) & 0x1;
2038 status = ixgbe_clock_out_i2c_bit(hw, bit);
2039
2040 if (status != 0)
2041 break;
2042 }
2043
2044
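 /* Release SDA so the slave can drive it for the ACK bit. */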
2045 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2046 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2047 i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
2048 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2049 IXGBE_WRITE_FLUSH(hw);
2050
2051 return status;
2052}
2053
2054
2055
2056
2057
2058
2059
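/**
 * ixgbe_get_i2c_ack - Polls for I2C ACK
 * @hw: pointer to hardware structure
 *
 * Releases the data line and polls for the slave ACK (SDA pulled low).
 **/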
2060static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
2061{
2062 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2063 s32 status = 0;
2064 u32 i = 0;
2065 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2066 u32 timeout = 10;
2067 bool ack = true;
2068
2069 if (data_oe_bit) {
2070 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2071 i2cctl |= data_oe_bit;
2072 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2073 IXGBE_WRITE_FLUSH(hw);
2074 }
2075 ixgbe_raise_i2c_clk(hw, &i2cctl);
2076
2077
2078 udelay(IXGBE_I2C_T_HIGH);
2079
2080
2081
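 /* Poll for ACK: the slave acknowledges by pulling SDA low. */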
2082 for (i = 0; i < timeout; i++) {
2083 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2084 ack = ixgbe_get_i2c_data(hw, &i2cctl);
2085
2086 udelay(1);
2087 if (ack == 0)
2088 break;
2089 }
2090
2091 if (ack == 1) {
2092 hw_dbg(hw, "I2C ack was not received.\n");
2093 status = IXGBE_ERR_I2C;
2094 }
2095
2096 ixgbe_lower_i2c_clk(hw, &i2cctl);
2097
2098
2099 udelay(IXGBE_I2C_T_LOW);
2100
2101 return status;
2102}
2103
2104
2105
2106
2107
2108
2109
2110
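/**
 * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
 * @hw: pointer to hardware structure
 * @data: read data value
 **/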
2111static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
2112{
2113 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2114 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2115
2116 if (data_oe_bit) {
2117 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2118 i2cctl |= data_oe_bit;
2119 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2120 IXGBE_WRITE_FLUSH(hw);
2121 }
2122 ixgbe_raise_i2c_clk(hw, &i2cctl);
2123
2124
2125 udelay(IXGBE_I2C_T_HIGH);
2126
2127 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2128 *data = ixgbe_get_i2c_data(hw, &i2cctl);
2129
2130 ixgbe_lower_i2c_clk(hw, &i2cctl);
2131
2132
2133 udelay(IXGBE_I2C_T_LOW);
2134
2135 return 0;
2136}
2137
2138
2139
2140
2141
2142
2143
2144
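/**
 * ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
 * @hw: pointer to hardware structure
 * @data: data value to write
 **/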
2145static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
2146{
2147 s32 status;
2148 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2149
2150 status = ixgbe_set_i2c_data(hw, &i2cctl, data);
2151 if (status == 0) {
2152 ixgbe_raise_i2c_clk(hw, &i2cctl);
2153
2154
2155 udelay(IXGBE_I2C_T_HIGH);
2156
2157 ixgbe_lower_i2c_clk(hw, &i2cctl);
2158
2159
2160
2161
2162 udelay(IXGBE_I2C_T_LOW);
2163 } else {
2164 hw_dbg(hw, "I2C data was not set to %X\n", data);
2165 return IXGBE_ERR_I2C;
2166 }
2167
2168 return 0;
2169}
2170
2171
2172
2173
2174
2175
2176
2177
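/**
 * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Raises the clock line and waits for it to actually go high to allow for
 * clock stretching by the slave.
 **/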
2178static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
2179{
2180 u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
2181 u32 i = 0;
2182 u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
2183 u32 i2cctl_r = 0;
2184
2185 if (clk_oe_bit) {
2186 *i2cctl |= clk_oe_bit;
2187 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2188 }
2189
2190 for (i = 0; i < timeout; i++) {
2191 *i2cctl |= IXGBE_I2C_CLK_OUT(hw);
2192 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2193 IXGBE_WRITE_FLUSH(hw);
2194
2195 udelay(IXGBE_I2C_T_RISE);
2196
2197 i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2198 if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
2199 break;
2200 }
2201}
2202
2203
2204
2205
2206
2207
2208
2209
2210
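/**
 * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 **/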
2211static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
2212{
2213
2214 *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
2215 *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);
2216
2217 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2218 IXGBE_WRITE_FLUSH(hw);
2219
2220
2221 udelay(IXGBE_I2C_T_FALL);
2222}
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
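/**
 * ixgbe_set_i2c_data - Sets the I2C data bit
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 * @data: I2C data value (0 or 1) to set
 *
 * Sets the SDA line and, when releasing it high, verifies the value read
 * back from the bus.
 **/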
2233static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
2234{
2235 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2236
2237 if (data)
2238 *i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2239 else
2240 *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
2241 *i2cctl &= ~data_oe_bit;
2242
2243 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2244 IXGBE_WRITE_FLUSH(hw);
2245
2246
2247 udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
2248
2249 if (!data)
2250 return 0;
2251 if (data_oe_bit) {
2252 *i2cctl |= data_oe_bit;
2253 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2254 IXGBE_WRITE_FLUSH(hw);
2255 }
2256
2257
2258 *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2259 if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
2260 hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
2261 return IXGBE_ERR_I2C;
2262 }
2263
2264 return 0;
2265}
2266
2267
2268
2269
2270
2271
2272
2273
2274
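/**
 * ixgbe_get_i2c_data - Reads the I2C SDA data bit
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Returns the I2C data bit value.
 **/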
2275static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
2276{
2277 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2278
2279 if (data_oe_bit) {
2280 *i2cctl |= data_oe_bit;
2281 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2282 IXGBE_WRITE_FLUSH(hw);
2283 udelay(IXGBE_I2C_T_FALL);
2284 }
2285
2286 if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
2287 return true;
2288 return false;
2289}
2290
2291
2292
2293
2294
2295
2296
2297
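/**
 * ixgbe_i2c_bus_clear - Clears the I2C bus
 * @hw: pointer to hardware structure
 *
 * Clears the I2C bus by sending nine clock pulses.
 * Used when data line is stuck low.
 **/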
2298static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
2299{
2300 u32 i2cctl;
2301 u32 i;
2302
2303 ixgbe_i2c_start(hw);
2304 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2305
2306 ixgbe_set_i2c_data(hw, &i2cctl, 1);
2307
2308 for (i = 0; i < 9; i++) {
2309 ixgbe_raise_i2c_clk(hw, &i2cctl);
2310
2311
2312 udelay(IXGBE_I2C_T_HIGH);
2313
2314 ixgbe_lower_i2c_clk(hw, &i2cctl);
2315
2316
2317 udelay(IXGBE_I2C_T_LOW);
2318 }
2319
2320 ixgbe_i2c_start(hw);
2321
2322
2323 ixgbe_i2c_stop(hw);
2324}
2325
2326
2327
2328
2329
2330
2331
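/**
 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred
 * @hw: pointer to hardware structure
 *
 * Checks if the LASI temp alarm status was triggered due to overtemp.
 **/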
2332s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2333{
2334 u16 phy_data = 0;
2335
2336 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
2337 return 0;
2338
2339
2340 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2341 MDIO_MMD_PMAPMD, &phy_data);
2342
2343 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2344 return 0;
2345
2346 return IXGBE_ERR_OVERTEMP;
2347}
2348
2349
2350
2351
2352
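/**
 * ixgbe_set_copper_phy_power - Control power for copper phy
 * @hw: pointer to hardware structure
 * @on: true for on, false for off
 **/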
2353s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
2354{
2355 u32 status;
2356 u16 reg;
2357
2358
2359 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2360 return 0;
2361
2362 if (!on && ixgbe_mng_present(hw))
2363 return 0;
2364
 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, &reg);
2366 if (status)
2367 return status;
2368
2369 if (on) {
2370 reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2371 } else {
2372 if (ixgbe_check_reset_blocked(hw))
2373 return 0;
2374 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2375 }
2376
2377 status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg);
2378 return status;
2379}
2380