1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/pci.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32
33#include "ixgbe.h"
34#include "ixgbe_phy.h"
35
36static void ixgbe_i2c_start(struct ixgbe_hw *hw);
37static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
38static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
39static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
40static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
41static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
42static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
43static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
44static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
45static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
46static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
47static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
48static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
49static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
50static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
51
52
53
54
55
56
57
58
59static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
60{
61 s32 status;
62
63 status = ixgbe_clock_out_i2c_byte(hw, byte);
64 if (status)
65 return status;
66 return ixgbe_get_i2c_ack(hw);
67}
68
69
70
71
72
73
74
75
76static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
77{
78 s32 status;
79
80 status = ixgbe_clock_in_i2c_byte(hw, byte);
81 if (status)
82 return status;
83
84 return ixgbe_clock_out_i2c_bit(hw, false);
85}
86
87
88
89
90
91
92
93
94static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
95{
96 u16 sum = add1 + add2;
97
98 sum = (sum & 0xFF) + (sum >> 8);
99 return sum & 0xFF;
100}
101
102
103
104
105
106
107
108
109
110
111
/**
 * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to read from
 * @reg: I2C device register to read from
 * @val: pointer to location to receive read value
 * @lock: true if SW/FW semaphore should be taken and released per attempt
 *
 * Reads a 16-bit register in a single combined transaction: the register
 * address (two bytes plus a one's complement checksum) is written, then a
 * repeated START switches the device to read mode for the data high/low
 * bytes and a checksum byte.  NOTE: the received checksum byte is clocked
 * in but not verified here.
 *
 * Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, or IXGBE_ERR_I2C after all (3) attempts fail.
 */
s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					u16 reg, u16 *val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 3;
	int retry = 0;
	u8 csum_byte;
	u8 high_bits;
	u8 low_bits;
	u8 reg_high;
	u8 csum;

	/* High address byte: reg bits 14:8 in bits 7:1; bit 0 set to
	 * indicate a combined read.
	 */
	reg_high = ((reg >> 7) & 0xFE) | 1;
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ~csum;	/* transmit the one's complement of the sum */
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device address with write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Register address bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Register address bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Address checksum */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		/* Repeated START to switch direction */
		ixgbe_i2c_start(hw);
		/* Device address with read indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
			goto fail;
		/* Data high byte */
		if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
			goto fail;
		/* Data low byte */
		if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
			goto fail;
		/* Checksum byte (read but not validated) */
		if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
			goto fail;
		/* Clock out a 0 bit to acknowledge the final byte */
		if (ixgbe_clock_out_i2c_bit(hw, false))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		*val = (high_bits << 8) | low_bits;
		return 0;

fail:
		/* A failed transfer can leave the bus mid-transaction;
		 * clock it clear before retrying.
		 */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte read combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte read combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}
179
180
181
182
183
184
185
186
187
188
189
/**
 * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to write to
 * @reg: I2C device register to write to
 * @val: value to write
 * @lock: true if SW/FW semaphore should be taken and released per attempt
 *
 * Writes a 16-bit register in a single combined transaction: register
 * address, data high/low bytes and a one's complement checksum covering
 * address and data.  Note max_retry is 1 here, i.e. a single attempt
 * with no retry (unlike the 3-attempt combined read in this file).
 *
 * Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, or IXGBE_ERR_I2C on bus failure.
 */
s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					 u16 reg, u16 val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 1;
	int retry = 0;
	u8 reg_high;
	u8 csum;

	/* High address byte: reg bits 14:8 in bits 7:1; bit 0 clear to
	 * indicate a combined write.
	 */
	reg_high = (reg >> 7) & 0xFE;
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
	csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
	csum = ~csum;	/* transmit the one's complement of the sum */
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device address with write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Register address bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Register address bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Data high byte */
		if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
			goto fail;
		/* Data low byte */
		if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
			goto fail;
		/* Checksum */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* Clear a possibly wedged bus before any retry */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte write combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte write combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}
244
245
246
247
248
249
250
251
252static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
253{
254 u16 ext_ability = 0;
255
256 hw->phy.mdio.prtad = phy_addr;
257 if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0)
258 return false;
259
260 if (ixgbe_get_phy_id(hw))
261 return false;
262
263 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
264
265 if (hw->phy.type == ixgbe_phy_unknown) {
266 hw->phy.ops.read_reg(hw,
267 MDIO_PMA_EXTABLE,
268 MDIO_MMD_PMAPMD,
269 &ext_ability);
270 if (ext_ability &
271 (MDIO_PMA_EXTABLE_10GBT |
272 MDIO_PMA_EXTABLE_1000BT))
273 hw->phy.type = ixgbe_phy_cu_unknown;
274 else
275 hw->phy.type = ixgbe_phy_generic;
276 }
277
278 return true;
279}
280
281
282
283
284
285
286
287s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
288{
289 u32 phy_addr;
290 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
291
292 if (!hw->phy.phy_semaphore_mask) {
293 if (hw->bus.lan_id)
294 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
295 else
296 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
297 }
298
299 if (hw->phy.type != ixgbe_phy_unknown)
300 return 0;
301
302 if (hw->phy.nw_mng_if_sel) {
303 phy_addr = (hw->phy.nw_mng_if_sel &
304 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
305 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
306 if (ixgbe_probe_phy(hw, phy_addr))
307 return 0;
308 else
309 return IXGBE_ERR_PHY_ADDR_INVALID;
310 }
311
312 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
313 if (ixgbe_probe_phy(hw, phy_addr)) {
314 status = 0;
315 break;
316 }
317 }
318
319
320
321
322
323 if (status)
324 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
325
326 return status;
327}
328
329
330
331
332
333
334
335
336
337
338bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
339{
340 u32 mmngc;
341
342
343 if (hw->mac.type == ixgbe_mac_82598EB)
344 return false;
345
346 mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
347 if (mmngc & IXGBE_MMNGC_MNG_VETO) {
348 hw_dbg(hw, "MNG_VETO bit detected.\n");
349 return true;
350 }
351
352 return false;
353}
354
355
356
357
358
359
360static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
361{
362 s32 status;
363 u16 phy_id_high = 0;
364 u16 phy_id_low = 0;
365
366 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
367 &phy_id_high);
368
369 if (!status) {
370 hw->phy.id = (u32)(phy_id_high << 16);
371 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
372 &phy_id_low);
373 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
374 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
375 }
376 return status;
377}
378
379
380
381
382
383
384static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
385{
386 enum ixgbe_phy_type phy_type;
387
388 switch (phy_id) {
389 case TN1010_PHY_ID:
390 phy_type = ixgbe_phy_tn;
391 break;
392 case X550_PHY_ID2:
393 case X550_PHY_ID3:
394 case X540_PHY_ID:
395 phy_type = ixgbe_phy_aq;
396 break;
397 case QT2022_PHY_ID:
398 phy_type = ixgbe_phy_qt;
399 break;
400 case ATH_PHY_ID:
401 phy_type = ixgbe_phy_nl;
402 break;
403 case X557_PHY_ID:
404 case X557_PHY_ID2:
405 phy_type = ixgbe_phy_x550em_ext_t;
406 break;
407 default:
408 phy_type = ixgbe_phy_unknown;
409 break;
410 }
411
412 return phy_type;
413}
414
415
416
417
418
/**
 * ixgbe_reset_phy_generic - Performs a PHY reset
 * @hw: pointer to hardware structure
 *
 * Identifies the PHY if not already done, skips the reset when the part
 * is shut down due to overtemperature or when manageability firmware has
 * vetoed resets, then issues a PHY_XS soft reset and polls up to 3
 * seconds (30 * 100 ms) for completion.
 *
 * Returns 0 on success, IXGBE_ERR_RESET_FAILED when the reset never
 * completes, or a propagated read error.
 */
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u16 ctrl = 0;
	s32 status = 0;

	if (hw->phy.type == ixgbe_phy_unknown)
		status = ixgbe_identify_phy_generic(hw);

	if (status != 0 || hw->phy.type == ixgbe_phy_none)
		return status;

	/* Don't reset PHY if it's shut down due to overtemp. */
	if (!hw->phy.reset_if_overtemp &&
	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
		return 0;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Perform soft PHY reset to the PHY_XS.
	 * This will cause a soft reset to the PHY.
	 */
	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_PHYXS,
			      MDIO_CTRL1_RESET);

	/* Poll for reset completion.  Some PHYs can take up to 3 seconds;
	 * a small delay is inserted once completion is seen.
	 */
	for (i = 0; i < 30; i++) {
		msleep(100);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
			/* x550em_ext_t flags completion via a vendor
			 * alarm bit rather than the reset bit clearing.
			 */
			status = hw->phy.ops.read_reg(hw,
						IXGBE_MDIO_TX_VENDOR_ALARMS_3,
						MDIO_MMD_PMAPMD, &ctrl);
			if (status)
				return status;

			if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
				udelay(2);
				break;
			}
		} else {
			status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
						      MDIO_MMD_PHYXS, &ctrl);
			if (status)
				return status;

			/* Reset bit self-clears when done */
			if (!(ctrl & MDIO_CTRL1_RESET)) {
				udelay(2);
				break;
			}
		}
	}

	if (ctrl & MDIO_CTRL1_RESET) {
		hw_dbg(hw, "PHY reset polling failed to complete.\n");
		return IXGBE_ERR_RESET_FAILED;
	}

	return 0;
}
486
487
488
489
490
491
492
493
/**
 * ixgbe_read_phy_reg_mdi - read PHY register without taking the SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to read
 * @device_type: 5 bit device type (MMD)
 * @phy_data: pointer to receive the register value
 *
 * Clause 45 access via the MSCA/MSRWD registers: an address cycle first
 * latches @reg_addr, then a read cycle returns the data.  The caller is
 * expected to already hold the firmware semaphore (see
 * ixgbe_read_phy_reg_generic for the locked variant).
 *
 * Returns 0 on success or IXGBE_ERR_PHY if either MDI cycle times out.
 */
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
			   u16 *phy_data)
{
	u32 i, data, command;

	/* Setup and write the address cycle command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the address cycle completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	/* Command bit still set after the timeout: device did not respond */
	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY address command did not complete.\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete, setup and write the read command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the read cycle completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY read command didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	/* Read operation is complete.  Get the data from MSRWD; read data
	 * sits in the upper half of the register.
	 */
	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
	*phy_data = (u16)(data);

	return 0;
}
561
562
563
564
565
566
567
568
569s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
570 u32 device_type, u16 *phy_data)
571{
572 s32 status;
573 u32 gssr = hw->phy.phy_semaphore_mask;
574
575 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
576 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
577 phy_data);
578 hw->mac.ops.release_swfw_sync(hw, gssr);
579 } else {
580 return IXGBE_ERR_SWFW_SYNC;
581 }
582
583 return status;
584}
585
586
587
588
589
590
591
592
593
/**
 * ixgbe_write_phy_reg_mdi - write to a PHY register without the SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to write
 * @device_type: 5 bit device type (MMD)
 * @phy_data: data to write to register
 *
 * Clause 45 write via MSCA/MSRWD: the data is staged in MSRWD, an address
 * cycle latches @reg_addr, then a write cycle commits the data.  Caller
 * is expected to already hold the firmware semaphore.
 *
 * Returns 0 on success or IXGBE_ERR_PHY if either MDI cycle times out.
 */
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
			    u32 device_type, u16 phy_data)
{
	u32 i, command;

	/* Put the data in the MDI single read and write data register */
	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);

	/* Setup and write the address cycle command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the address cycle completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY address cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete, setup and write the write command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the write cycle completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY write cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	return 0;
}
658
659
660
661
662
663
664
665
666
667s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
668 u32 device_type, u16 phy_data)
669{
670 s32 status;
671 u32 gssr = hw->phy.phy_semaphore_mask;
672
673 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
674 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
675 phy_data);
676 hw->mac.ops.release_swfw_sync(hw, gssr);
677 } else {
678 return IXGBE_ERR_SWFW_SYNC;
679 }
680
681 return status;
682}
683
684
685
686
687
688
689
/**
 * ixgbe_setup_phy_link_generic - Set and restart autoneg
 * @hw: pointer to hardware structure
 *
 * Programs the autonegotiation advertisement registers (10G, and on X550
 * also 5G/2.5G, plus 1G and 100M) to the intersection of what the user
 * requested (hw->phy.autoneg_advertised) and what the PHY supports, then
 * restarts autonegotiation unless manageability firmware blocks it.
 *
 * Always returns 0 (status is never set to an error in this path).
 */
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	/* Set or unset auto-negotiation 10G advertisement */
	hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_10GB_FULL))
		autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

	hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg);

	hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			     MDIO_MMD_AN, &autoneg_reg);

	if (hw->mac.type == ixgbe_mac_X550) {
		/* Set or unset auto-negotiation 5G advertisement */
		autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_5GB_FULL))
			autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;

		/* Set or unset auto-negotiation 2.5G advertisement */
		autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised &
		     IXGBE_LINK_SPEED_2_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
			autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
	}

	/* Set or unset auto-negotiation 1G advertisement */
	autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_1GB_FULL))
		autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;

	hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			      MDIO_MMD_AN, autoneg_reg);

	/* Set or unset auto-negotiation 100M advertisement */
	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF);
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
	    (speed & IXGBE_LINK_SPEED_100_FULL))
		autoneg_reg |= ADVERTISE_100FULL;

	hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);

	/* Blocked by MNG FW so don't reset PHY */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Restart PHY auto-negotiation. */
	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
			     MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg |= MDIO_AN_CTRL1_RESTART;

	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_AN, autoneg_reg);

	return status;
}
761
762
763
764
765
766
767s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
768 ixgbe_link_speed speed,
769 bool autoneg_wait_to_complete)
770{
771
772
773
774 hw->phy.autoneg_advertised = 0;
775
776 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
777 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
778
779 if (speed & IXGBE_LINK_SPEED_5GB_FULL)
780 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
781
782 if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
783 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
784
785 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
786 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
787
788 if (speed & IXGBE_LINK_SPEED_100_FULL)
789 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
790
791 if (speed & IXGBE_LINK_SPEED_10_FULL)
792 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
793
794
795 if (hw->phy.ops.setup_link)
796 hw->phy.ops.setup_link(hw);
797
798 return 0;
799}
800
801
802
803
804
805
806
807
808static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
809{
810 u16 speed_ability;
811 s32 status;
812
813 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
814 &speed_ability);
815 if (status)
816 return status;
817
818 if (speed_ability & MDIO_SPEED_10G)
819 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
820 if (speed_ability & MDIO_PMA_SPEED_1000)
821 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
822 if (speed_ability & MDIO_PMA_SPEED_100)
823 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
824
825 switch (hw->mac.type) {
826 case ixgbe_mac_X550:
827 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
828 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
829 break;
830 case ixgbe_mac_X550EM_x:
831 case ixgbe_mac_x550em_a:
832 hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
833 break;
834 default:
835 break;
836 }
837
838 return 0;
839}
840
841
842
843
844
845
846
847s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
848 ixgbe_link_speed *speed,
849 bool *autoneg)
850{
851 s32 status = 0;
852
853 *autoneg = true;
854 if (!hw->phy.speeds_supported)
855 status = ixgbe_get_copper_speeds_supported(hw);
856
857 *speed = hw->phy.speeds_supported;
858 return status;
859}
860
861
862
863
864
865
866
867
/**
 * ixgbe_check_phy_link_tnx - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to receive the detected link speed
 * @link_up: pointer to receive the link state
 *
 * Polls the TNX PHY's vendor-specific status register up to 10 times at
 * 10 usec intervals.  Defaults to "link down at 10G"; on link-up, speed
 * is downgraded to 1G when the vendor speed-status bit indicates it.
 *
 * Returns the status of the last register read.
 */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
			     bool *link_up)
{
	s32 status;
	u32 time_out;
	u32 max_time_out = 10;
	u16 phy_link = 0;
	u16 phy_speed = 0;
	u16 phy_data = 0;

	/* Initialize speed and link to default case */
	*link_up = false;
	*speed = IXGBE_LINK_SPEED_10GB_FULL;

	/* Check current speed and link status of the PHY register.
	 * This is a vendor specific register and may have to
	 * be changed for other copper PHYs.
	 */
	for (time_out = 0; time_out < max_time_out; time_out++) {
		udelay(10);
		status = hw->phy.ops.read_reg(hw,
					      MDIO_STAT1,
					      MDIO_MMD_VEND1,
					      &phy_data);
		phy_link = phy_data &
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
		phy_speed = phy_data &
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
			*link_up = true;
			if (phy_speed ==
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
				*speed = IXGBE_LINK_SPEED_1GB_FULL;
			break;
		}
	}

	return status;
}
908
909
910
911
912
913
914
915
916
917
/**
 * ixgbe_setup_phy_link_tnx - Set and restart autoneg
 * @hw: pointer to hardware structure
 *
 * Programs the TNX PHY's autonegotiation advertisement registers for
 * each supported speed (10G, 1G via the XNP TX register, 100M), gated by
 * hw->phy.autoneg_advertised, then restarts autonegotiation unless
 * manageability firmware blocks resets.  Always returns 0.
 */
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		/* Set or unset auto-negotiation 10G advertisement */
		hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

		hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		/* Set or unset auto-negotiation 1G advertisement */
		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;

		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_100_FULL) {
		/* Set or unset auto-negotiation 100M advertisement */
		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~(ADVERTISE_100FULL |
				 ADVERTISE_100HALF);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			autoneg_reg |= ADVERTISE_100FULL;

		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	/* Blocked by MNG FW so don't reset PHY */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Restart PHY auto-negotiation. */
	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
			     MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg |= MDIO_AN_CTRL1_RESTART;

	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_AN, autoneg_reg);
	return 0;
}
986
987
988
989
990
/**
 * ixgbe_reset_phy_nl - Performs a PHY reset
 * @hw: pointer to hardware structure
 *
 * Soft-resets the NL PHY (unless manageability firmware vetoes it), then
 * replays the SFP init sequence stored in EEPROM.  The sequence is a list
 * of control words: DELAY (sleep), DATA (a phy register address followed
 * by words to write), and CONTROL (SOL marks start, EOL ends the list).
 *
 * Returns 0 on success, IXGBE_ERR_PHY on reset timeout or a malformed
 * sequence, or the EEPROM layer's error code.
 */
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
{
	u16 phy_offset, control, eword, edata, block_crc;
	bool end_data = false;
	u16 list_offset, data_offset;
	u16 phy_data = 0;
	s32 ret_val;
	u32 i;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);

	/* reset the PHY and poll for completion */
	hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
			      (phy_data | MDIO_CTRL1_RESET));

	/* Wait up to ~100 * 10-20 ms for the reset bit to self-clear */
	for (i = 0; i < 100; i++) {
		hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
				     &phy_data);
		if ((phy_data & MDIO_CTRL1_RESET) == 0)
			break;
		usleep_range(10000, 20000);
	}

	if ((phy_data & MDIO_CTRL1_RESET) != 0) {
		hw_dbg(hw, "PHY reset did not complete.\n");
		return IXGBE_ERR_PHY;
	}

	/* Get init offsets */
	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
						      &data_offset);
	if (ret_val)
		return ret_val;

	ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
	data_offset++;
	while (!end_data) {
		/* Read control word from PHY init contents offset */
		ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
		if (ret_val)
			goto err_eeprom;
		control = (eword & IXGBE_CONTROL_MASK_NL) >>
			   IXGBE_CONTROL_SHIFT_NL;
		edata = eword & IXGBE_DATA_MASK_NL;
		switch (control) {
		case IXGBE_DELAY_NL:
			data_offset++;
			hw_dbg(hw, "DELAY: %d MS\n", edata);
			usleep_range(edata * 1000, edata * 2000);
			break;
		case IXGBE_DATA_NL:
			hw_dbg(hw, "DATA:\n");
			data_offset++;
			/* Next word is the starting PHY register offset */
			ret_val = hw->eeprom.ops.read(hw, data_offset++,
						      &phy_offset);
			if (ret_val)
				goto err_eeprom;
			/* edata = number of data words to write */
			for (i = 0; i < edata; i++) {
				ret_val = hw->eeprom.ops.read(hw, data_offset,
							      &eword);
				if (ret_val)
					goto err_eeprom;
				hw->phy.ops.write_reg(hw, phy_offset,
						      MDIO_MMD_PMAPMD, eword);
				hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
				       phy_offset);
				data_offset++;
				phy_offset++;
			}
			break;
		case IXGBE_CONTROL_NL:
			data_offset++;
			hw_dbg(hw, "CONTROL:\n");
			if (edata == IXGBE_CONTROL_EOL_NL) {
				hw_dbg(hw, "EOL\n");
				end_data = true;
			} else if (edata == IXGBE_CONTROL_SOL_NL) {
				hw_dbg(hw, "SOL\n");
			} else {
				hw_dbg(hw, "Bad control value\n");
				return IXGBE_ERR_PHY;
			}
			break;
		default:
			hw_dbg(hw, "Bad control type\n");
			return IXGBE_ERR_PHY;
		}
	}

	return ret_val;

err_eeprom:
	hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
	return IXGBE_ERR_PHY;
}
1092
1093
1094
1095
1096
1097
1098
1099s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
1100{
1101 switch (hw->mac.ops.get_media_type(hw)) {
1102 case ixgbe_media_type_fiber:
1103 return ixgbe_identify_sfp_module_generic(hw);
1104 case ixgbe_media_type_fiber_qsfp:
1105 return ixgbe_identify_qsfp_module_generic(hw);
1106 default:
1107 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1108 return IXGBE_ERR_SFP_NOT_PRESENT;
1109 }
1110
1111 return IXGBE_ERR_SFP_NOT_PRESENT;
1112}
1113
1114
1115
1116
1117
1118
1119
/**
 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
 * @hw: pointer to hardware structure
 *
 * Reads the module's SFF ID EEPROM (identifier, 1G/10G compliance codes,
 * cable technology and vendor OUI) and derives hw->phy.sfp_type and
 * hw->phy.type from them.  Direct-attach cables are always accepted;
 * optical modules are accepted when Intel-branded, when the device caps
 * allow any SFP, when they are 1G-only types, or when the user set
 * allow_unsupported_sfp (with a warning).
 *
 * Returns 0 on success, IXGBE_ERR_SFP_NOT_PRESENT when no module is
 * readable, or IXGBE_ERR_SFP_NOT_SUPPORTED for rejected modules.
 */
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;	/* needed by e_warn() */
	s32 status;
	u32 vendor_oui = 0;
	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
	u8 identifier = 0;
	u8 comp_codes_1g = 0;
	u8 comp_codes_10g = 0;
	u8 oui_bytes[3] = {0, 0, 0};
	u8 cable_tech = 0;
	u8 cable_spec = 0;
	u16 enforce_sfp = 0;

	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return IXGBE_ERR_SFP_NOT_PRESENT;
	}

	/* LAN ID is needed for sfp_type determination */
	hw->mac.ops.set_lan_id(hw);

	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_IDENTIFIER,
					     &identifier);

	if (status)
		goto err_read_i2c_eeprom;

	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_1GBE_COMP_CODES,
					     &comp_codes_1g);

	if (status)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_10GBE_COMP_CODES,
					     &comp_codes_10g);

	if (status)
		goto err_read_i2c_eeprom;
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_CABLE_TECHNOLOGY,
					     &cable_tech);

	if (status)
		goto err_read_i2c_eeprom;

	/* Determine sfp_type from the compliance codes and cable
	 * technology byte.  82598 uses a single set of types; later MACs
	 * split each type per LAN port (core0/core1), covering direct
	 * attach (passive and active-limiting), SR/LR optics, and
	 * 1G-only copper/SX/LX modules.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
		else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_sr;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_lr;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	} else {
		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_cu_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_cu_core1;
		} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
			/* Active DA: check for the active-limiting spec bit */
			hw->phy.ops.read_i2c_eeprom(
				hw, IXGBE_SFF_CABLE_SPEC_COMP,
				&cable_spec);
			if (cable_spec &
			    IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
				if (hw->bus.lan_id == 0)
					hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core0;
				else
					hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core1;
			} else {
				hw->phy.sfp_type =
						ixgbe_sfp_type_unknown;
			}
		} else if (comp_codes_10g &
			   (IXGBE_SFF_10GBASESR_CAPABLE |
			    IXGBE_SFF_10GBASELR_CAPABLE)) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
						ixgbe_sfp_type_srlr_core0;
			else
				hw->phy.sfp_type =
						ixgbe_sfp_type_srlr_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_cu_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_cu_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_sx_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_sx_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_lx_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_lx_core1;
		} else {
			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
		}
	}

	/* A changed module type requires link re-setup later */
	if (hw->phy.sfp_type != stored_sfp_type)
		hw->phy.sfp_setup_needed = true;

	/* Determine if the SFP+ PHY is dual speed (1G + 10G on the
	 * matching wavelength).
	 */
	hw->phy.multispeed_fiber = false;
	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
		hw->phy.multispeed_fiber = true;

	/* Determine PHY vendor (skip for NL PHYs) */
	if (hw->phy.type != ixgbe_phy_nl) {
		hw->phy.id = identifier;
		status = hw->phy.ops.read_i2c_eeprom(hw,
					    IXGBE_SFF_VENDOR_OUI_BYTE0,
					    &oui_bytes[0]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					    IXGBE_SFF_VENDOR_OUI_BYTE1,
					    &oui_bytes[1]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					    IXGBE_SFF_VENDOR_OUI_BYTE2,
					    &oui_bytes[2]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		vendor_oui =
		  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
		   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
		   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));

		switch (vendor_oui) {
		case IXGBE_SFF_VENDOR_OUI_TYCO:
			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
				hw->phy.type =
					    ixgbe_phy_sfp_passive_tyco;
			break;
		case IXGBE_SFF_VENDOR_OUI_FTL:
			if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
				hw->phy.type = ixgbe_phy_sfp_ftl_active;
			else
				hw->phy.type = ixgbe_phy_sfp_ftl;
			break;
		case IXGBE_SFF_VENDOR_OUI_AVAGO:
			hw->phy.type = ixgbe_phy_sfp_avago;
			break;
		case IXGBE_SFF_VENDOR_OUI_INTEL:
			hw->phy.type = ixgbe_phy_sfp_intel;
			break;
		default:
			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
				hw->phy.type =
					 ixgbe_phy_sfp_passive_unknown;
			else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
				hw->phy.type =
					ixgbe_phy_sfp_active_unknown;
			else
				hw->phy.type = ixgbe_phy_sfp_unknown;
			break;
		}
	}

	/* Allow any DA cable vendor */
	if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
	    IXGBE_SFF_DA_ACTIVE_CABLE))
		return 0;

	/* Verify supported 1G SFP modules: no 10G compliance and not one
	 * of the recognized 1G-only types means unsupported.
	 */
	if (comp_codes_10g == 0 &&
	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	/* Anything else 82598-based is supported */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return 0;

	hw->mac.ops.get_device_caps(hw, &enforce_sfp);
	if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
		/* Make sure we're a supported PHY type */
		if (hw->phy.type == ixgbe_phy_sfp_intel)
			return 0;
		if (hw->allow_unsupported_sfp) {
			e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
			return 0;
		}
		hw_dbg(hw, "SFP+ module not supported\n");
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}
	return 0;

err_read_i2c_eeprom:
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	if (hw->phy.type != ixgbe_phy_nl) {
		hw->phy.id = 0;
		hw->phy.type = ixgbe_phy_unknown;
	}
	return IXGBE_ERR_SFP_NOT_PRESENT;
}
1378
1379
1380
1381
1382
1383
1384
/**
 * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
 * @hw: pointer to hardware structure
 *
 * Reads the module identifier, compliance codes and (for optical modules)
 * the vendor OUI over I2C, then sets hw->phy.type and hw->phy.sfp_type to
 * describe the inserted QSFP+ module.
 *
 * Return: 0 on success, IXGBE_ERR_SFP_NOT_PRESENT when the media type is
 * not QSFP fiber or the module EEPROM does not respond, and
 * IXGBE_ERR_SFP_NOT_SUPPORTED when the module is identified but rejected.
 **/
static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;	/* needed by e_warn() below */
	s32 status;
	u32 vendor_oui = 0;
	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
	u8 identifier = 0;
	u8 comp_codes_1g = 0;
	u8 comp_codes_10g = 0;
	u8 oui_bytes[3] = {0, 0, 0};
	u16 enforce_sfp = 0;
	u8 connector = 0;
	u8 cable_length = 0;
	u8 device_tech = 0;
	bool active_cable = false;

	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return IXGBE_ERR_SFP_NOT_PRESENT;
	}

	/* LAN ID is needed for the sfp_type core0/core1 determination below */
	hw->mac.ops.set_lan_id(hw);

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
					     &identifier);

	if (status != 0)
		goto err_read_i2c_eeprom;

	/* Anything other than a QSFP+ identifier byte is rejected outright */
	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	hw->phy.id = identifier;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
					     &comp_codes_10g);

	if (status != 0)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
					     &comp_codes_1g);

	if (status != 0)
		goto err_read_i2c_eeprom;

	if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
		/* Passive direct-attach copper cable */
		hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
	} else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
				     IXGBE_SFF_10GBASELR_CAPABLE)) {
		/* Optical SR/LR module */
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
	} else {
		if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
			active_cable = true;

		if (!active_cable) {
			/* Check connector/length/tech bytes to detect active
			 * DA cables that fail to set the active-cable
			 * compliance bit.
			 */
			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CONNECTOR,
					&connector);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CABLE_LENGTH,
					&cable_length);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_DEVICE_TECH,
					&device_tech);

			if ((connector ==
				     IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
			    (cable_length > 0) &&
			    ((device_tech >> 4) ==
				     IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
				active_cable = true;
		}

		if (active_cable) {
			hw->phy.type = ixgbe_phy_qsfp_active_unknown;
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core1;
		} else {
			/* Unsupported module type */
			hw->phy.type = ixgbe_phy_sfp_unsupported;
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
		}
	}

	/* A changed module type requires the SFP setup sequence to rerun */
	if (hw->phy.sfp_type != stored_sfp_type)
		hw->phy.sfp_setup_needed = true;

	/* Determine if the QSFP+ PHY is dual speed or not. */
	hw->phy.multispeed_fiber = false;
	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
		hw->phy.multispeed_fiber = true;

	/* Determine PHY vendor for optical modules */
	if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
			      IXGBE_SFF_10GBASELR_CAPABLE)) {
		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
					&oui_bytes[0]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
					&oui_bytes[1]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
					&oui_bytes[2]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		vendor_oui =
			((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
			 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
			 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));

		if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
			hw->phy.type = ixgbe_phy_qsfp_intel;
		else
			hw->phy.type = ixgbe_phy_qsfp_unknown;

		/* Enforce the Intel-only module policy unless the EEPROM
		 * device caps allow any SFP, or the user opted in via
		 * allow_unsupported_sfp.
		 */
		hw->mac.ops.get_device_caps(hw, &enforce_sfp);
		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
			/* Make sure we're a supported PHY type */
			if (hw->phy.type == ixgbe_phy_qsfp_intel)
				return 0;
			if (hw->allow_unsupported_sfp) {
				e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
				return 0;
			}
			hw_dbg(hw, "QSFP module not supported\n");
			hw->phy.type = ixgbe_phy_sfp_unsupported;
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
		}
		return 0;
	}
	return 0;

err_read_i2c_eeprom:
	/* EEPROM unreadable: treat as module not present */
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	hw->phy.id = 0;
	hw->phy.type = ixgbe_phy_unknown;

	return IXGBE_ERR_SFP_NOT_PRESENT;
}
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
/**
 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
 * @hw: pointer to hardware structure
 * @list_offset: offset to the SFP ID list
 * @data_offset: offset to the SFP data block
 *
 * Walks the SFP module ID list in NVM (starting at IXGBE_PHY_INIT_OFFSET_NL)
 * looking for an entry matching the current hw->phy.sfp_type, and returns
 * the offsets of the list entry and of its init-sequence data block.
 *
 * Return: 0 on success; IXGBE_ERR_SFP_NOT_SUPPORTED, IXGBE_ERR_SFP_NOT_PRESENT,
 * IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT or IXGBE_ERR_PHY on failure.
 **/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
					u16 *list_offset,
					u16 *data_offset)
{
	u16 sfp_id;
	u16 sfp_type = hw->phy.sfp_type;

	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		return IXGBE_ERR_SFP_NOT_PRESENT;

	/* This device does not support DA copper modules */
	if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	/* Limiting active cables and 1G Cu/LX/SX modules must be initialized
	 * as SR/LR modules, so map them onto the srlr entry for the lookup.
	 */
	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_sx_core0)
		sfp_type = ixgbe_sfp_type_srlr_core0;
	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_sx_core1)
		sfp_type = ixgbe_sfp_type_srlr_core1;

	/* Read offset to the PHY init sequence contents */
	if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
		hw_err(hw, "eeprom read at %d failed\n",
		       IXGBE_PHY_INIT_OFFSET_NL);
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
	}

	/* 0 and 0xFFFF both mean no init sequence is present in NVM */
	if ((!*list_offset) || (*list_offset == 0xFFFF))
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;

	/* Shift offset to first ID word */
	(*list_offset)++;

	/* Find the matching SFP ID in the EEPROM.  Each list entry is a pair
	 * of words: the SFP type ID followed by the data offset.
	 */
	if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
		goto err_phy;

	while (sfp_id != IXGBE_PHY_INIT_END_NL) {
		if (sfp_id == sfp_type) {
			(*list_offset)++;
			if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
				goto err_phy;
			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
				hw_dbg(hw, "SFP+ module not supported\n");
				return IXGBE_ERR_SFP_NOT_SUPPORTED;
			} else {
				break;
			}
		} else {
			/* Skip this entry's data-offset word to the next ID */
			(*list_offset) += 2;
			if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
				goto err_phy;
		}
	}

	if (sfp_id == IXGBE_PHY_INIT_END_NL) {
		hw_dbg(hw, "No matching SFP+ module found\n");
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	return 0;

err_phy:
	hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
	return IXGBE_ERR_PHY;
}
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1659 u8 *eeprom_data)
1660{
1661 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1662 IXGBE_I2C_EEPROM_DEV_ADDR,
1663 eeprom_data);
1664}
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1675 u8 *sff8472_data)
1676{
1677 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1678 IXGBE_I2C_EEPROM_DEV_ADDR2,
1679 sff8472_data);
1680}
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1691 u8 eeprom_data)
1692{
1693 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1694 IXGBE_I2C_EEPROM_DEV_ADDR,
1695 eeprom_data);
1696}
1697
1698
1699
1700
1701
1702
1703
1704static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
1705{
1706 if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
1707 offset == IXGBE_SFF_IDENTIFIER &&
1708 hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1709 return true;
1710 return false;
1711}
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
/**
 * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: I2C device address
 * @data: value read
 * @lock: true if the SW/FW semaphore should be taken around the transaction
 *
 * Performs a bit-banged I2C byte read: START, device address (write),
 * register offset, repeated START, device address (read), data byte, NACK,
 * STOP.  Retries the whole transaction on any protocol error.
 *
 * Return: 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, or the last protocol error after all retries are exhausted.
 **/
static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					   u8 dev_addr, u8 *data, bool lock)
{
	s32 status;
	u32 max_retry = 10;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	bool nack = true;	/* master NACKs the single data byte read */

	if (hw->mac.type >= ixgbe_mac_X550)
		max_retry = 3;
	/* More retries while probing for a module that may still be seating */
	if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
		max_retry = IXGBE_SFP_DETECT_RETRIES;

	*data = 0;

	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;

		ixgbe_i2c_start(hw);

		/* Device address with write indication (R/W bit clear) */
		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		/* Repeated START to switch to read direction */
		ixgbe_i2c_start(hw);

		/* Device address with read indication (R/W bit set) */
		status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_in_i2c_byte(hw, data);
		if (status != 0)
			goto fail;

		/* NACK the byte to end the read */
		status = ixgbe_clock_out_i2c_bit(hw, nack);
		if (status != 0)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* Recover the bus, drop the semaphore so FW can use the bus
		 * during the back-off, then retry.
		 */
		ixgbe_i2c_bus_clear(hw);
		if (lock) {
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
			msleep(100);
		}
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte read error - Retrying.\n");
		else
			hw_dbg(hw, "I2C byte read error.\n");

	} while (retry < max_retry);

	return status;
}
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1813 u8 dev_addr, u8 *data)
1814{
1815 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1816 data, true);
1817}
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
1829 u8 dev_addr, u8 *data)
1830{
1831 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1832 data, false);
1833}
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
/**
 * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: I2C device address
 * @data: value to write
 * @lock: true if the SW/FW semaphore should be taken around the transaction
 *
 * Performs a bit-banged I2C byte write: START, device address, register
 * offset, data byte, STOP, with an ACK check after each byte.  Unlike the
 * read path, the semaphore is held across all retries.
 *
 * Return: 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, or the last protocol error after all retries are exhausted.
 **/
static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					    u8 dev_addr, u8 data, bool lock)
{
	s32 status;
	u32 max_retry = 1;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;

	if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return IXGBE_ERR_SWFW_SYNC;

	do {
		ixgbe_i2c_start(hw);

		/* Device address with write indication (R/W bit clear) */
		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, data);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* Recover the bus and retry the whole transaction */
		ixgbe_i2c_bus_clear(hw);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte write error - Retrying.\n");
		else
			hw_dbg(hw, "I2C byte write error.\n");
	} while (retry < max_retry);

	if (lock)
		hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	return status;
}
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1913 u8 dev_addr, u8 data)
1914{
1915 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1916 data, true);
1917}
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
1929 u8 dev_addr, u8 data)
1930{
1931 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1932 data, false);
1933}
1934
1935
1936
1937
1938
1939
1940
1941
/**
 * ixgbe_i2c_start - Sets I2C start condition
 * @hw: pointer to hardware structure
 *
 * Sets the I2C start condition: a high-to-low transition on SDA while SCL
 * is high, with setup/hold delays between each edge.  Also enables
 * bit-bang mode via the BB_EN bit where the hardware has one.
 **/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));

	/* Enable software bit-bang control of the bus (no-op mask on
	 * hardware generations without the BB_EN bit).
	 */
	i2cctl |= IXGBE_I2C_BB_EN(hw);

	/* Start condition must begin with data and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for start condition */
	udelay(IXGBE_I2C_T_SU_STA);

	/* SDA falls while SCL is high: the START edge */
	ixgbe_set_i2c_data(hw, &i2cctl, 0);

	/* Hold time for start condition */
	udelay(IXGBE_I2C_T_HD_STA);

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock before the first data bit */
	udelay(IXGBE_I2C_T_LOW);

}
1966
1967
1968
1969
1970
1971
1972
1973
1974
/**
 * ixgbe_i2c_stop - Sets I2C stop condition
 * @hw: pointer to hardware structure
 *
 * Sets the I2C stop condition: a low-to-high transition on SDA while SCL
 * is high, then returns the pins to the idle state (bit-bang disabled,
 * output-enables tri-stated) on hardware that has those control bits.
 **/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
	u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);

	/* Stop condition must begin with data low and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 0);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for stop condition */
	udelay(IXGBE_I2C_T_SU_STO);

	/* SDA rises while SCL is high: the STOP edge */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);

	/* Bus free time between stop and start */
	udelay(IXGBE_I2C_T_BUF);

	/* Release the bus: disable bit-bang and tri-state both lines.
	 * Only done on hardware generations where these masks are nonzero.
	 */
	if (bb_en_bit || data_oe_bit || clk_oe_bit) {
		i2cctl &= ~bb_en_bit;
		i2cctl |= data_oe_bit | clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
}
2001
2002
2003
2004
2005
2006
2007
2008
2009static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
2010{
2011 s32 i;
2012 bool bit = false;
2013
2014 *data = 0;
2015 for (i = 7; i >= 0; i--) {
2016 ixgbe_clock_in_i2c_bit(hw, &bit);
2017 *data |= bit << i;
2018 }
2019
2020 return 0;
2021}
2022
2023
2024
2025
2026
2027
2028
2029
/**
 * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
 * @hw: pointer to hardware structure
 * @data: data byte clocked out
 *
 * Clocks out one byte onto the I2C data line, most significant bit first,
 * stopping early on the first bit that fails to set.  Afterwards releases
 * SDA (drives it high / tri-states it) so the slave can drive the ACK bit.
 *
 * Return: 0 on success, or the error from the failing bit.
 **/
static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
{
	s32 status;
	s32 i;
	u32 i2cctl;
	bool bit = false;

	for (i = 7; i >= 0; i--) {
		bit = (data >> i) & 0x1;
		status = ixgbe_clock_out_i2c_bit(hw, bit);

		if (status != 0)
			break;
	}

	/* Release SDA line (set high, tri-state output-enable) so the
	 * slave can pull it low for the ACK.
	 */
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	i2cctl |= IXGBE_I2C_DATA_OUT(hw);
	i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	return status;
}
2054
2055
2056
2057
2058
2059
2060
/**
 * ixgbe_get_i2c_ack - Polls for I2C ACK
 * @hw: pointer to hardware structure
 *
 * Releases SDA, raises SCL, and polls the data line for the slave's ACK
 * (SDA pulled low).  Lowers the clock again before returning.
 *
 * Return: 0 if an ACK was seen, IXGBE_ERR_I2C otherwise.
 **/
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
	s32 status = 0;
	u32 i = 0;
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 timeout = 10;	/* poll iterations, ~1us apart */
	bool ack = true;

	/* On hardware with an SDA output-enable bit, tri-state SDA so the
	 * slave can drive it.
	 */
	if (data_oe_bit) {
		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock */
	udelay(IXGBE_I2C_T_HIGH);

	/* Poll for ACK: the slave acknowledges by holding SDA low */
	for (i = 0; i < timeout; i++) {
		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
		ack = ixgbe_get_i2c_data(hw, &i2cctl);

		udelay(1);
		if (ack == 0)
			break;
	}

	if (ack == 1) {
		hw_dbg(hw, "I2C ack was not received.\n");
		status = IXGBE_ERR_I2C;
	}

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	udelay(IXGBE_I2C_T_LOW);

	return status;
}
2104
2105
2106
2107
2108
2109
2110
2111
/**
 * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
 * @hw: pointer to hardware structure
 * @data: read data value
 *
 * Releases SDA (on hardware with an output-enable bit), raises SCL,
 * samples SDA while the clock is high, then lowers SCL again.
 * Always returns 0.
 **/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);

	/* Tri-state SDA so the slave can drive the bit */
	if (data_oe_bit) {
		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock */
	udelay(IXGBE_I2C_T_HIGH);

	/* Sample the data line while SCL is high */
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	*data = ixgbe_get_i2c_data(hw, &i2cctl);

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	udelay(IXGBE_I2C_T_LOW);

	return 0;
}
2138
2139
2140
2141
2142
2143
2144
2145
2146static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
2147{
2148 s32 status;
2149 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2150
2151 status = ixgbe_set_i2c_data(hw, &i2cctl, data);
2152 if (status == 0) {
2153 ixgbe_raise_i2c_clk(hw, &i2cctl);
2154
2155
2156 udelay(IXGBE_I2C_T_HIGH);
2157
2158 ixgbe_lower_i2c_clk(hw, &i2cctl);
2159
2160
2161
2162
2163 udelay(IXGBE_I2C_T_LOW);
2164 } else {
2165 hw_dbg(hw, "I2C data was not set to %X\n", data);
2166 return IXGBE_ERR_I2C;
2167 }
2168
2169 return 0;
2170}
2171
2172
2173
2174
2175
2176
2177
2178
/**
 * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Drives SCL high, then reads it back and retries while it stays low to
 * tolerate slaves that stretch the clock.  Gives up silently after the
 * clock-stretching timeout.
 **/
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
	u32 i = 0;
	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
	u32 i2cctl_r = 0;

	/* Tri-state the clock output-enable on hardware that has one, so
	 * a stretching slave can hold SCL low.
	 */
	if (clk_oe_bit) {
		*i2cctl |= clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	}

	for (i = 0; i < timeout; i++) {
		*i2cctl |= IXGBE_I2C_CLK_OUT(hw);
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);
		/* SCL rise time */
		udelay(IXGBE_I2C_T_RISE);

		/* Done once the line actually reads high */
		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
		if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
			break;
	}
}
2203
2204
2205
2206
2207
2208
2209
2210
2211
/**
 * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Drives SCL low (and asserts the clock output-enable on hardware that
 * has one), then waits out the clock fall time.
 **/
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
	/* Clear the clock-out bit; clearing the OE_N bit enables the driver
	 * on hardware generations that gate the pin with an output-enable.
	 */
	*i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
	*i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);

	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	/* SCL fall time */
	udelay(IXGBE_I2C_T_FALL);
}
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
/**
 * ixgbe_set_i2c_data - Sets the I2C data bit
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 * @data: I2C data value (0 or 1) to set
 *
 * Drives SDA to @data and, when driving high, verifies the line actually
 * reads back high (an external device pulling SDA low would indicate a
 * bus problem).
 *
 * Return: 0 on success, IXGBE_ERR_I2C if a driven-high bit reads back low.
 **/
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);

	if (data)
		*i2cctl |= IXGBE_I2C_DATA_OUT(hw);
	else
		*i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
	/* Enable the SDA driver (OE_N is active low) */
	*i2cctl &= ~data_oe_bit;

	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	/* Data rise/fall plus setup time before the line is sampled */
	udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);

	/* A driven-low bit cannot be contradicted by the open-drain bus,
	 * so only verify when driving high.
	 */
	if (!data)
		return 0;
	if (data_oe_bit) {
		/* Tri-state SDA so the read-back reflects the bus state */
		*i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}

	/* Verify data was set correctly */
	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
		hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
		return IXGBE_ERR_I2C;
	}

	return 0;
}
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
2277{
2278 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2279
2280 if (data_oe_bit) {
2281 *i2cctl |= data_oe_bit;
2282 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2283 IXGBE_WRITE_FLUSH(hw);
2284 udelay(IXGBE_I2C_T_FALL);
2285 }
2286
2287 if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
2288 return true;
2289 return false;
2290}
2291
2292
2293
2294
2295
2296
2297
2298
/**
 * ixgbe_i2c_bus_clear - Clears the I2C bus
 * @hw: pointer to hardware structure
 *
 * Recovers a hung I2C bus: with SDA released high, pulses SCL nine times
 * so any slave stuck mid-byte can finish clocking out its data, then
 * issues a start followed by a stop to leave the bus idle.
 **/
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
	u32 i2cctl;
	u32 i;

	ixgbe_i2c_start(hw);
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));

	/* Release SDA so a stuck slave can drive it */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);

	/* Nine clock pulses: enough for a slave to flush a partial byte
	 * plus its ACK slot.
	 */
	for (i = 0; i < 9; i++) {
		ixgbe_raise_i2c_clk(hw, &i2cctl);

		/* Minimum high period of clock */
		udelay(IXGBE_I2C_T_HIGH);

		ixgbe_lower_i2c_clk(hw, &i2cctl);

		/* Minimum low period of clock */
		udelay(IXGBE_I2C_T_LOW);
	}

	ixgbe_i2c_start(hw);

	/* Put the i2c bus back to default state */
	ixgbe_i2c_stop(hw);
}
2326
2327
2328
2329
2330
2331
2332
2333s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2334{
2335 u16 phy_data = 0;
2336
2337 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
2338 return 0;
2339
2340
2341 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2342 MDIO_MMD_PMAPMD, &phy_data);
2343
2344 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2345 return 0;
2346
2347 return IXGBE_ERR_OVERTEMP;
2348}
2349
2350
2351
2352
2353
2354s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
2355{
2356 u32 status;
2357 u16 reg;
2358
2359
2360 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2361 return 0;
2362
2363 if (!on && ixgbe_mng_present(hw))
2364 return 0;
2365
2366 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®);
2367 if (status)
2368 return status;
2369
2370 if (on) {
2371 reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2372 } else {
2373 if (ixgbe_check_reset_blocked(hw))
2374 return 0;
2375 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2376 }
2377
2378 status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg);
2379 return status;
2380}
2381