// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

4#include <linux/pci.h>
5#include <linux/delay.h>
6#include <linux/sched.h>
7
8#include "ixgbe.h"
9#include "ixgbe_phy.h"
10
11static void ixgbe_i2c_start(struct ixgbe_hw *hw);
12static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
13static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
14static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
15static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
16static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
17static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
18static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
19static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
20static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
21static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
22static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
23static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
24static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
25static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
26
/**
 *  ixgbe_out_i2c_byte_ack - Send I2C byte and wait for ack
 *  @hw: pointer to the hardware structure
 *  @byte: byte to send
 *
 *  Returns an error code on error.
 **/
34static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
35{
36 s32 status;
37
38 status = ixgbe_clock_out_i2c_byte(hw, byte);
39 if (status)
40 return status;
41 return ixgbe_get_i2c_ack(hw);
42}
43
/**
 *  ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
 *  @hw: pointer to the hardware structure
 *  @byte: pointer to a byte to receive the data
 *
 *  Returns an error code on error.
 **/
51static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
52{
53 s32 status;
54
55 status = ixgbe_clock_in_i2c_byte(hw, byte);
56 if (status)
57 return status;
58
59 return ixgbe_clock_out_i2c_bit(hw, false);
60}
61
/**
 *  ixgbe_ones_comp_byte_add - Perform one's complement addition
 *  @add1: addend 1
 *  @add2: addend 2
 *
 *  Returns the one's complement 8-bit sum.
 **/
69static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
70{
71 u16 sum = add1 + add2;
72
73 sum = (sum & 0xFF) + (sum >> 8);
74 return sum & 0xFF;
75}
76
/**
 *  ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
 *  @hw: pointer to the hardware structure
 *  @addr: I2C bus address to read from
 *  @reg: I2C device register to read from
 *  @val: pointer to location to receive read value
 *  @lock: true if the SWFW semaphore should be taken and released
 *
 *  Returns an error code on error.
 **/
87s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
88 u16 reg, u16 *val, bool lock)
89{
90 u32 swfw_mask = hw->phy.phy_semaphore_mask;
91 int max_retry = 3;
92 int retry = 0;
93 u8 csum_byte;
94 u8 high_bits;
95 u8 low_bits;
96 u8 reg_high;
97 u8 csum;
98
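 /* Combined-format read: the high register byte carries the upper
  * register address bits with bit 0 set to flag a read, and the
  * checksum is the inverted one's-complement sum of the bytes sent.
  */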
99 reg_high = ((reg >> 7) & 0xFE) | 1;
100 csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
101 csum = ~csum;
102 do {
103 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
104 return IXGBE_ERR_SWFW_SYNC;
105 ixgbe_i2c_start(hw);
106
107 if (ixgbe_out_i2c_byte_ack(hw, addr))
108 goto fail;
109
110 if (ixgbe_out_i2c_byte_ack(hw, reg_high))
111 goto fail;
112
113 if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
114 goto fail;
115
116 if (ixgbe_out_i2c_byte_ack(hw, csum))
117 goto fail;
118
119 ixgbe_i2c_start(hw);
120
121 if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
122 goto fail;
123
124 if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
125 goto fail;
126
127 if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
128 goto fail;
129
130 if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
131 goto fail;
132
133 if (ixgbe_clock_out_i2c_bit(hw, false))
134 goto fail;
135 ixgbe_i2c_stop(hw);
136 if (lock)
137 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
138 *val = (high_bits << 8) | low_bits;
139 return 0;
140
141fail:
142 ixgbe_i2c_bus_clear(hw);
143 if (lock)
144 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
145 retry++;
146 if (retry < max_retry)
147 hw_dbg(hw, "I2C byte read combined error - Retry.\n");
148 else
149 hw_dbg(hw, "I2C byte read combined error.\n");
150 } while (retry < max_retry);
151
152 return IXGBE_ERR_I2C;
153}
154
/**
 *  ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
 *  @hw: pointer to the hardware structure
 *  @addr: I2C bus address to write to
 *  @reg: I2C device register to write to
 *  @val: value to write
 *  @lock: true if the SWFW semaphore should be taken and released
 *
 *  Returns an error code on error.
 **/
165s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
166 u16 reg, u16 val, bool lock)
167{
168 u32 swfw_mask = hw->phy.phy_semaphore_mask;
169 int max_retry = 1;
170 int retry = 0;
171 u8 reg_high;
172 u8 csum;
173
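 /* Combined-format write: bit 0 of the high register byte stays clear
  * and the checksum covers the register address and both data bytes.
  */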
174 reg_high = (reg >> 7) & 0xFE;
175 csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
176 csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
177 csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
178 csum = ~csum;
179 do {
180 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
181 return IXGBE_ERR_SWFW_SYNC;
182 ixgbe_i2c_start(hw);
183
184 if (ixgbe_out_i2c_byte_ack(hw, addr))
185 goto fail;
186
187 if (ixgbe_out_i2c_byte_ack(hw, reg_high))
188 goto fail;
189
190 if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
191 goto fail;
192
193 if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
194 goto fail;
195
196 if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
197 goto fail;
198
199 if (ixgbe_out_i2c_byte_ack(hw, csum))
200 goto fail;
201 ixgbe_i2c_stop(hw);
202 if (lock)
203 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
204 return 0;
205
206fail:
207 ixgbe_i2c_bus_clear(hw);
208 if (lock)
209 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
210 retry++;
211 if (retry < max_retry)
212 hw_dbg(hw, "I2C byte write combined error - Retry.\n");
213 else
214 hw_dbg(hw, "I2C byte write combined error.\n");
215 } while (retry < max_retry);
216
217 return IXGBE_ERR_I2C;
218}
219
/**
 *  ixgbe_probe_phy - Probe a single address for a PHY
 *  @hw: pointer to hardware structure
 *  @phy_addr: PHY address to probe
 *
 *  Returns true if a PHY was found.
 **/
227static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
228{
229 u16 ext_ability = 0;
230
231 hw->phy.mdio.prtad = phy_addr;
232 if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0)
233 return false;
234
235 if (ixgbe_get_phy_id(hw))
236 return false;
237
238 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
239
240 if (hw->phy.type == ixgbe_phy_unknown) {
241 hw->phy.ops.read_reg(hw,
242 MDIO_PMA_EXTABLE,
243 MDIO_MMD_PMAPMD,
244 &ext_ability);
245 if (ext_ability &
246 (MDIO_PMA_EXTABLE_10GBT |
247 MDIO_PMA_EXTABLE_1000BT))
248 hw->phy.type = ixgbe_phy_cu_unknown;
249 else
250 hw->phy.type = ixgbe_phy_generic;
251 }
252
253 return true;
254}
255
/**
 *  ixgbe_identify_phy_generic - Get physical layer module
 *  @hw: pointer to hardware structure
 *
 *  Determines the physical layer module found on the current adapter.
 **/
262s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
263{
264 u32 phy_addr;
265 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
266
267 if (!hw->phy.phy_semaphore_mask) {
268 if (hw->bus.lan_id)
269 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
270 else
271 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
272 }
273
274 if (hw->phy.type != ixgbe_phy_unknown)
275 return 0;
276
277 if (hw->phy.nw_mng_if_sel) {
278 phy_addr = (hw->phy.nw_mng_if_sel &
279 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
280 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
281 if (ixgbe_probe_phy(hw, phy_addr))
282 return 0;
283 else
284 return IXGBE_ERR_PHY_ADDR_INVALID;
285 }
286
287 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
288 if (ixgbe_probe_phy(hw, phy_addr)) {
289 status = 0;
290 break;
291 }
292 }
293
 /* Certain media types do not have a phy so an address will not be
  * found and the code will take this path.  Caller has to decide if
  * it is an error or not.
  */
298 if (status)
299 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
300
301 return status;
302}
303
/**
 *  ixgbe_check_reset_blocked - check status of MNG FW veto bit
 *  @hw: pointer to the hardware structure
 *
 *  This function checks the MMNGC.MNG_VETO bit to see if there are
 *  any constraints on link from manageability.  For MACs that don't
 *  have this bit just return false since the link can not be blocked
 *  via this method.
 **/
313bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
314{
315 u32 mmngc;
316
317
318 if (hw->mac.type == ixgbe_mac_82598EB)
319 return false;
320
321 mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
322 if (mmngc & IXGBE_MMNGC_MNG_VETO) {
323 hw_dbg(hw, "MNG_VETO bit detected.\n");
324 return true;
325 }
326
327 return false;
328}
329
/**
 *  ixgbe_get_phy_id - Read the PHY identifier registers
 *  @hw: pointer to hardware structure
 *
 *  Stores the PHY id and revision read from the PMA/PMD device ID registers.
 **/
335static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
336{
337 s32 status;
338 u16 phy_id_high = 0;
339 u16 phy_id_low = 0;
340
341 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
342 &phy_id_high);
343
344 if (!status) {
345 hw->phy.id = (u32)(phy_id_high << 16);
346 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
347 &phy_id_low);
348 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
349 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
350 }
351 return status;
352}
353
/**
 *  ixgbe_get_phy_type_from_id - Map a PHY id to a phy type
 *  @phy_id: hardware phy id
 **/
359static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
360{
361 enum ixgbe_phy_type phy_type;
362
363 switch (phy_id) {
364 case TN1010_PHY_ID:
365 phy_type = ixgbe_phy_tn;
366 break;
367 case X550_PHY_ID2:
368 case X550_PHY_ID3:
369 case X540_PHY_ID:
370 phy_type = ixgbe_phy_aq;
371 break;
372 case QT2022_PHY_ID:
373 phy_type = ixgbe_phy_qt;
374 break;
375 case ATH_PHY_ID:
376 phy_type = ixgbe_phy_nl;
377 break;
378 case X557_PHY_ID:
379 case X557_PHY_ID2:
380 phy_type = ixgbe_phy_x550em_ext_t;
381 break;
382 default:
383 phy_type = ixgbe_phy_unknown;
384 break;
385 }
386
387 return phy_type;
388}
389
/**
 *  ixgbe_reset_phy_generic - Performs a PHY reset
 *  @hw: pointer to hardware structure
 **/
394s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
395{
396 u32 i;
397 u16 ctrl = 0;
398 s32 status = 0;
399
400 if (hw->phy.type == ixgbe_phy_unknown)
401 status = ixgbe_identify_phy_generic(hw);
402
403 if (status != 0 || hw->phy.type == ixgbe_phy_none)
404 return status;
405
406
407 if (!hw->phy.reset_if_overtemp &&
408 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
409 return 0;
410
411
412 if (ixgbe_check_reset_blocked(hw))
413 return 0;
414
415
416
417
418
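 /* Perform a soft PHY reset through the PHY_XS MMD; the reset bit
  * self-clears when the reset completes.
  */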
419 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
420 MDIO_MMD_PHYXS,
421 MDIO_CTRL1_RESET);
422
423
424
425
426
427
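 /* Poll for the reset to complete; some PHYs can take up to
  * 3 seconds (30 x 100 ms) to finish.
  */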
428 for (i = 0; i < 30; i++) {
429 msleep(100);
430 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
431 status = hw->phy.ops.read_reg(hw,
432 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
433 MDIO_MMD_PMAPMD, &ctrl);
434 if (status)
435 return status;
436
437 if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
438 udelay(2);
439 break;
440 }
441 } else {
442 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
443 MDIO_MMD_PHYXS, &ctrl);
444 if (status)
445 return status;
446
447 if (!(ctrl & MDIO_CTRL1_RESET)) {
448 udelay(2);
449 break;
450 }
451 }
452 }
453
454 if (ctrl & MDIO_CTRL1_RESET) {
455 hw_dbg(hw, "PHY reset polling failed to complete.\n");
456 return IXGBE_ERR_RESET_FAILED;
457 }
458
459 return 0;
460}
461
/**
 *  ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register
 *  @hw: pointer to hardware structure
 *  @reg_addr: 32 bit address of PHY register to read
 *  @device_type: 5 bit device type
 *  @phy_data: Pointer to read data from PHY register
 *
 *  Reads a value from a specified PHY register without the SWFW lock.
 **/
470s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
471 u16 *phy_data)
472{
473 u32 i, data, command;
474
475
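 /* Set up and send the MDI address cycle command */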
476 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
477 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
478 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
479 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
480
481 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
482
483
484
485
486
487 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
488 udelay(10);
489
490 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
491 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
492 break;
493 }
494
495
496 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
497 hw_dbg(hw, "PHY address command did not complete.\n");
498 return IXGBE_ERR_PHY;
499 }
500
501
502
503
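 /* Address cycle complete, set up and send the read command */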
504 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
505 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
506 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
507 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
508
509 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
510
511
512
513
514
515 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
516 udelay(10);
517
518 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
519 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
520 break;
521 }
522
523 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
524 hw_dbg(hw, "PHY read command didn't complete\n");
525 return IXGBE_ERR_PHY;
526 }
527
528
529
530
531 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
532 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
533 *phy_data = (u16)(data);
534
535 return 0;
536}
537
/**
 *  ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
 *  @hw: pointer to hardware structure
 *  @reg_addr: 32 bit address of PHY register to read
 *  @device_type: 5 bit device type
 *  @phy_data: Pointer to read data from PHY register
 *
 *  Acquires the SWFW semaphore before reading the register over MDI.
 **/
546s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
547 u32 device_type, u16 *phy_data)
548{
549 s32 status;
550 u32 gssr = hw->phy.phy_semaphore_mask;
551
552 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
553 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
554 phy_data);
555 hw->mac.ops.release_swfw_sync(hw, gssr);
556 } else {
557 return IXGBE_ERR_SWFW_SYNC;
558 }
559
560 return status;
561}
562
/**
 *  ixgbe_write_phy_reg_mdi - Writes a value to a specified PHY register
 *  @hw: pointer to hardware structure
 *  @reg_addr: 32 bit PHY register to write
 *  @device_type: 5 bit device type
 *  @phy_data: Data to write to the PHY register
 *
 *  Writes a value to the specified PHY register without the SWFW lock.
 **/
571s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
572 u32 device_type, u16 phy_data)
573{
574 u32 i, command;
575
576
577 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
578
579
580 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
581 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
582 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
583 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
584
585 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
586
587
588
589
590
591
592 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
593 udelay(10);
594
595 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
596 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
597 break;
598 }
599
600 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
601 hw_dbg(hw, "PHY address cmd didn't complete\n");
602 return IXGBE_ERR_PHY;
603 }
604
605
606
607
608
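 /* Address cycle complete, set up and send the write command */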
609 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
610 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
611 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
612 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
613
614 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
615
616
617
618
619
620 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
621 udelay(10);
622
623 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
624 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
625 break;
626 }
627
628 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
629 hw_dbg(hw, "PHY write cmd didn't complete\n");
630 return IXGBE_ERR_PHY;
631 }
632
633 return 0;
634}
635
/**
 *  ixgbe_write_phy_reg_generic - Writes a value to a specified PHY register
 *  @hw: pointer to hardware structure
 *  @reg_addr: 32 bit PHY register to write
 *  @device_type: 5 bit device type
 *  @phy_data: Data to write to the PHY register
 *
 *  Acquires the SWFW semaphore before writing the register over MDI.
 **/
644s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
645 u32 device_type, u16 phy_data)
646{
647 s32 status;
648 u32 gssr = hw->phy.phy_semaphore_mask;
649
650 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
651 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
652 phy_data);
653 hw->mac.ops.release_swfw_sync(hw, gssr);
654 } else {
655 return IXGBE_ERR_SWFW_SYNC;
656 }
657
658 return status;
659}
660
/**
 *  ixgbe_setup_phy_link_generic - Set and restart autoneg
 *  @hw: pointer to hardware structure
 *
 *  Sets the advertised speeds in the AN MMD registers and restarts
 *  autonegotiation on the PHY.
 **/
667s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
668{
669 s32 status = 0;
670 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
671 bool autoneg = false;
672 ixgbe_link_speed speed;
673
674 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
675
676
677 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg);
678
679 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
680 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
681 (speed & IXGBE_LINK_SPEED_10GB_FULL))
682 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
683
684 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg);
685
686 hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
687 MDIO_MMD_AN, &autoneg_reg);
688
689 if (hw->mac.type == ixgbe_mac_X550) {
690
691 autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
692 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
693 (speed & IXGBE_LINK_SPEED_5GB_FULL))
694 autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
695
696
697 autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
698 if ((hw->phy.autoneg_advertised &
699 IXGBE_LINK_SPEED_2_5GB_FULL) &&
700 (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
701 autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
702 }
703
704
705 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
706 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
707 (speed & IXGBE_LINK_SPEED_1GB_FULL))
708 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
709
710 hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
711 MDIO_MMD_AN, autoneg_reg);
712
713
714 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);
715
716 autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF);
717 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
718 (speed & IXGBE_LINK_SPEED_100_FULL))
719 autoneg_reg |= ADVERTISE_100FULL;
720
721 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);
722
723
724 if (ixgbe_check_reset_blocked(hw))
725 return 0;
726
727
728 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
729 MDIO_MMD_AN, &autoneg_reg);
730
731 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
732
733 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
734 MDIO_MMD_AN, autoneg_reg);
735
736 return status;
737}
738
/**
 *  ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: unused
 **/
745s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
746 ixgbe_link_speed speed,
747 bool autoneg_wait_to_complete)
748{
749
750
751
752 hw->phy.autoneg_advertised = 0;
753
754 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
755 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
756
757 if (speed & IXGBE_LINK_SPEED_5GB_FULL)
758 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
759
760 if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
761 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
762
763 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
764 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
765
766 if (speed & IXGBE_LINK_SPEED_100_FULL)
767 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
768
769 if (speed & IXGBE_LINK_SPEED_10_FULL)
770 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
771
772
773 if (hw->phy.ops.setup_link)
774 hw->phy.ops.setup_link(hw);
775
776 return 0;
777}
778
/**
 *  ixgbe_get_copper_speeds_supported - Get copper link speed capabilities
 *  @hw: pointer to hardware structure
 *
 *  Determines the supported link capabilities by reading the PHY speed
 *  ability register.
 **/
786static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
787{
788 u16 speed_ability;
789 s32 status;
790
791 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
792 &speed_ability);
793 if (status)
794 return status;
795
796 if (speed_ability & MDIO_SPEED_10G)
797 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
798 if (speed_ability & MDIO_PMA_SPEED_1000)
799 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
800 if (speed_ability & MDIO_PMA_SPEED_100)
801 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
802
803 switch (hw->mac.type) {
804 case ixgbe_mac_X550:
805 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
806 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
807 break;
808 case ixgbe_mac_X550EM_x:
809 case ixgbe_mac_x550em_a:
810 hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
811 break;
812 default:
813 break;
814 }
815
816 return 0;
817}
818
/**
 *  ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: boolean auto-negotiation value
 **/
825s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
826 ixgbe_link_speed *speed,
827 bool *autoneg)
828{
829 s32 status = 0;
830
831 *autoneg = true;
832 if (!hw->phy.speeds_supported)
833 status = ixgbe_get_copper_speeds_supported(hw);
834
835 *speed = hw->phy.speeds_supported;
836 return status;
837}
838
/**
 *  ixgbe_check_phy_link_tnx - Determine link and speed status
 *  @hw: pointer to hardware structure
 *  @speed: link speed
 *  @link_up: status of link
 *
 *  Reads the vendor-specific status register to determine if link is up
 *  and the current speed for the PHY.
 **/
848s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
849 bool *link_up)
850{
851 s32 status;
852 u32 time_out;
853 u32 max_time_out = 10;
854 u16 phy_link = 0;
855 u16 phy_speed = 0;
856 u16 phy_data = 0;
857
858
859 *link_up = false;
860 *speed = IXGBE_LINK_SPEED_10GB_FULL;
861
862
863
864
865
866
867 for (time_out = 0; time_out < max_time_out; time_out++) {
868 udelay(10);
869 status = hw->phy.ops.read_reg(hw,
870 MDIO_STAT1,
871 MDIO_MMD_VEND1,
872 &phy_data);
873 phy_link = phy_data &
874 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
875 phy_speed = phy_data &
876 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
877 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
878 *link_up = true;
879 if (phy_speed ==
880 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
881 *speed = IXGBE_LINK_SPEED_1GB_FULL;
882 break;
883 }
884 }
885
886 return status;
887}
888
/**
 *  ixgbe_setup_phy_link_tnx - Set and restart autoneg
 *  @hw: pointer to hardware structure
 *
 *  Sets the advertised speeds and restarts autonegotiation on the TNX PHY.
 **/
898s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
899{
900 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
901 bool autoneg = false;
902 ixgbe_link_speed speed;
903
904 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
905
906 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
907
908 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
909 MDIO_MMD_AN,
910 &autoneg_reg);
911
912 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
913 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
914 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
915
916 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
917 MDIO_MMD_AN,
918 autoneg_reg);
919 }
920
921 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
922
923 hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
924 MDIO_MMD_AN,
925 &autoneg_reg);
926
927 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
928 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
929 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
930
931 hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
932 MDIO_MMD_AN,
933 autoneg_reg);
934 }
935
936 if (speed & IXGBE_LINK_SPEED_100_FULL) {
937
938 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
939 MDIO_MMD_AN,
940 &autoneg_reg);
941
942 autoneg_reg &= ~(ADVERTISE_100FULL |
943 ADVERTISE_100HALF);
944 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
945 autoneg_reg |= ADVERTISE_100FULL;
946
947 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
948 MDIO_MMD_AN,
949 autoneg_reg);
950 }
951
952
953 if (ixgbe_check_reset_blocked(hw))
954 return 0;
955
956
957 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
958 MDIO_MMD_AN, &autoneg_reg);
959
960 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
961
962 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
963 MDIO_MMD_AN, autoneg_reg);
964 return 0;
965}
966
/**
 *  ixgbe_reset_phy_nl - Performs a PHY reset
 *  @hw: pointer to hardware structure
 **/
971s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
972{
973 u16 phy_offset, control, eword, edata, block_crc;
974 bool end_data = false;
975 u16 list_offset, data_offset;
976 u16 phy_data = 0;
977 s32 ret_val;
978 u32 i;
979
980
981 if (ixgbe_check_reset_blocked(hw))
982 return 0;
983
984 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
985
986
987 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
988 (phy_data | MDIO_CTRL1_RESET));
989
990 for (i = 0; i < 100; i++) {
991 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
992 &phy_data);
993 if ((phy_data & MDIO_CTRL1_RESET) == 0)
994 break;
995 usleep_range(10000, 20000);
996 }
997
998 if ((phy_data & MDIO_CTRL1_RESET) != 0) {
999 hw_dbg(hw, "PHY reset did not complete.\n");
1000 return IXGBE_ERR_PHY;
1001 }
1002
1003
1004 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
1005 &data_offset);
1006 if (ret_val)
1007 return ret_val;
1008
1009 ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
1010 data_offset++;
1011 while (!end_data) {
1012
1013
1014
1015 ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
1016 if (ret_val)
1017 goto err_eeprom;
1018 control = (eword & IXGBE_CONTROL_MASK_NL) >>
1019 IXGBE_CONTROL_SHIFT_NL;
1020 edata = eword & IXGBE_DATA_MASK_NL;
1021 switch (control) {
1022 case IXGBE_DELAY_NL:
1023 data_offset++;
1024 hw_dbg(hw, "DELAY: %d MS\n", edata);
1025 usleep_range(edata * 1000, edata * 2000);
1026 break;
1027 case IXGBE_DATA_NL:
1028 hw_dbg(hw, "DATA:\n");
1029 data_offset++;
1030 ret_val = hw->eeprom.ops.read(hw, data_offset++,
1031 &phy_offset);
1032 if (ret_val)
1033 goto err_eeprom;
1034 for (i = 0; i < edata; i++) {
1035 ret_val = hw->eeprom.ops.read(hw, data_offset,
1036 &eword);
1037 if (ret_val)
1038 goto err_eeprom;
1039 hw->phy.ops.write_reg(hw, phy_offset,
1040 MDIO_MMD_PMAPMD, eword);
1041 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
1042 phy_offset);
1043 data_offset++;
1044 phy_offset++;
1045 }
1046 break;
1047 case IXGBE_CONTROL_NL:
1048 data_offset++;
1049 hw_dbg(hw, "CONTROL:\n");
1050 if (edata == IXGBE_CONTROL_EOL_NL) {
1051 hw_dbg(hw, "EOL\n");
1052 end_data = true;
1053 } else if (edata == IXGBE_CONTROL_SOL_NL) {
1054 hw_dbg(hw, "SOL\n");
1055 } else {
1056 hw_dbg(hw, "Bad control value\n");
1057 return IXGBE_ERR_PHY;
1058 }
1059 break;
1060 default:
1061 hw_dbg(hw, "Bad control type\n");
1062 return IXGBE_ERR_PHY;
1063 }
1064 }
1065
1066 return ret_val;
1067
1068err_eeprom:
1069 hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
1070 return IXGBE_ERR_PHY;
1071}
1072
/**
 *  ixgbe_identify_module_generic - Identifies module type
 *  @hw: pointer to hardware structure
 *
 *  Determines the media type and calls the appropriate identify function.
 **/
1079s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
1080{
1081 switch (hw->mac.ops.get_media_type(hw)) {
1082 case ixgbe_media_type_fiber:
1083 return ixgbe_identify_sfp_module_generic(hw);
1084 case ixgbe_media_type_fiber_qsfp:
1085 return ixgbe_identify_qsfp_module_generic(hw);
1086 default:
1087 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1088 return IXGBE_ERR_SFP_NOT_PRESENT;
1089 }
1090
1091 return IXGBE_ERR_SFP_NOT_PRESENT;
1092}
1093
/**
 *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
 *  @hw: pointer to hardware structure
 *
 *  Searches for and identifies the SFP module and assigns the appropriate
 *  PHY type.
 **/
1100s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1101{
1102 struct ixgbe_adapter *adapter = hw->back;
1103 s32 status;
1104 u32 vendor_oui = 0;
1105 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1106 u8 identifier = 0;
1107 u8 comp_codes_1g = 0;
1108 u8 comp_codes_10g = 0;
1109 u8 oui_bytes[3] = {0, 0, 0};
1110 u8 cable_tech = 0;
1111 u8 cable_spec = 0;
1112 u16 enforce_sfp = 0;
1113
1114 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
1115 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1116 return IXGBE_ERR_SFP_NOT_PRESENT;
1117 }
1118
1119
1120 hw->mac.ops.set_lan_id(hw);
1121
1122 status = hw->phy.ops.read_i2c_eeprom(hw,
1123 IXGBE_SFF_IDENTIFIER,
1124 &identifier);
1125
1126 if (status)
1127 goto err_read_i2c_eeprom;
1128
1129 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
1130 hw->phy.type = ixgbe_phy_sfp_unsupported;
1131 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1132 }
1133 status = hw->phy.ops.read_i2c_eeprom(hw,
1134 IXGBE_SFF_1GBE_COMP_CODES,
1135 &comp_codes_1g);
1136
1137 if (status)
1138 goto err_read_i2c_eeprom;
1139
1140 status = hw->phy.ops.read_i2c_eeprom(hw,
1141 IXGBE_SFF_10GBE_COMP_CODES,
1142 &comp_codes_10g);
1143
1144 if (status)
1145 goto err_read_i2c_eeprom;
1146 status = hw->phy.ops.read_i2c_eeprom(hw,
1147 IXGBE_SFF_CABLE_TECHNOLOGY,
1148 &cable_tech);
1149
1150 if (status)
1151 goto err_read_i2c_eeprom;
1152
 /* ID   Module
  * =============
  * 0    SFP_DA_CU
  * 1    SFP_SR
  * 2    SFP_LR
  * 3    SFP_DA_CORE0 - 82599-specific
  * 4    SFP_DA_CORE1 - 82599-specific
  * 5    SFP_SR_CORE0 - 82599-specific
  * 6    SFP_SR_CORE1 - 82599-specific
  * 7    SFP_LR_CORE0 - 82599-specific
  * 8    SFP_LR_CORE1 - 82599-specific
  * 9    SFP_1g_cu_CORE0 - 82599-specific
  * 10   SFP_1g_cu_CORE1 - 82599-specific
  * 11   SFP_1g_sx_CORE0 - 82599-specific
  * 12   SFP_1g_sx_CORE1 - 82599-specific
  */
1169 if (hw->mac.type == ixgbe_mac_82598EB) {
1170 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1171 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
1172 else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
1173 hw->phy.sfp_type = ixgbe_sfp_type_sr;
1174 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
1175 hw->phy.sfp_type = ixgbe_sfp_type_lr;
1176 else
1177 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1178 } else {
1179 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
1180 if (hw->bus.lan_id == 0)
1181 hw->phy.sfp_type =
1182 ixgbe_sfp_type_da_cu_core0;
1183 else
1184 hw->phy.sfp_type =
1185 ixgbe_sfp_type_da_cu_core1;
1186 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
1187 hw->phy.ops.read_i2c_eeprom(
1188 hw, IXGBE_SFF_CABLE_SPEC_COMP,
1189 &cable_spec);
1190 if (cable_spec &
1191 IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
1192 if (hw->bus.lan_id == 0)
1193 hw->phy.sfp_type =
1194 ixgbe_sfp_type_da_act_lmt_core0;
1195 else
1196 hw->phy.sfp_type =
1197 ixgbe_sfp_type_da_act_lmt_core1;
1198 } else {
1199 hw->phy.sfp_type =
1200 ixgbe_sfp_type_unknown;
1201 }
1202 } else if (comp_codes_10g &
1203 (IXGBE_SFF_10GBASESR_CAPABLE |
1204 IXGBE_SFF_10GBASELR_CAPABLE)) {
1205 if (hw->bus.lan_id == 0)
1206 hw->phy.sfp_type =
1207 ixgbe_sfp_type_srlr_core0;
1208 else
1209 hw->phy.sfp_type =
1210 ixgbe_sfp_type_srlr_core1;
1211 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
1212 if (hw->bus.lan_id == 0)
1213 hw->phy.sfp_type =
1214 ixgbe_sfp_type_1g_cu_core0;
1215 else
1216 hw->phy.sfp_type =
1217 ixgbe_sfp_type_1g_cu_core1;
1218 } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
1219 if (hw->bus.lan_id == 0)
1220 hw->phy.sfp_type =
1221 ixgbe_sfp_type_1g_sx_core0;
1222 else
1223 hw->phy.sfp_type =
1224 ixgbe_sfp_type_1g_sx_core1;
1225 } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
1226 if (hw->bus.lan_id == 0)
1227 hw->phy.sfp_type =
1228 ixgbe_sfp_type_1g_lx_core0;
1229 else
1230 hw->phy.sfp_type =
1231 ixgbe_sfp_type_1g_lx_core1;
1232 } else {
1233 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1234 }
1235 }
1236
1237 if (hw->phy.sfp_type != stored_sfp_type)
1238 hw->phy.sfp_setup_needed = true;
1239
1240
1241 hw->phy.multispeed_fiber = false;
1242 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1243 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1244 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1245 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1246 hw->phy.multispeed_fiber = true;
1247
1248
1249 if (hw->phy.type != ixgbe_phy_nl) {
1250 hw->phy.id = identifier;
1251 status = hw->phy.ops.read_i2c_eeprom(hw,
1252 IXGBE_SFF_VENDOR_OUI_BYTE0,
1253 &oui_bytes[0]);
1254
1255 if (status != 0)
1256 goto err_read_i2c_eeprom;
1257
1258 status = hw->phy.ops.read_i2c_eeprom(hw,
1259 IXGBE_SFF_VENDOR_OUI_BYTE1,
1260 &oui_bytes[1]);
1261
1262 if (status != 0)
1263 goto err_read_i2c_eeprom;
1264
1265 status = hw->phy.ops.read_i2c_eeprom(hw,
1266 IXGBE_SFF_VENDOR_OUI_BYTE2,
1267 &oui_bytes[2]);
1268
1269 if (status != 0)
1270 goto err_read_i2c_eeprom;
1271
1272 vendor_oui =
1273 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1274 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1275 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1276
1277 switch (vendor_oui) {
1278 case IXGBE_SFF_VENDOR_OUI_TYCO:
1279 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1280 hw->phy.type =
1281 ixgbe_phy_sfp_passive_tyco;
1282 break;
1283 case IXGBE_SFF_VENDOR_OUI_FTL:
1284 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1285 hw->phy.type = ixgbe_phy_sfp_ftl_active;
1286 else
1287 hw->phy.type = ixgbe_phy_sfp_ftl;
1288 break;
1289 case IXGBE_SFF_VENDOR_OUI_AVAGO:
1290 hw->phy.type = ixgbe_phy_sfp_avago;
1291 break;
1292 case IXGBE_SFF_VENDOR_OUI_INTEL:
1293 hw->phy.type = ixgbe_phy_sfp_intel;
1294 break;
1295 default:
1296 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1297 hw->phy.type =
1298 ixgbe_phy_sfp_passive_unknown;
1299 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1300 hw->phy.type =
1301 ixgbe_phy_sfp_active_unknown;
1302 else
1303 hw->phy.type = ixgbe_phy_sfp_unknown;
1304 break;
1305 }
1306 }
1307
1308
1309 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
1310 IXGBE_SFF_DA_ACTIVE_CABLE))
1311 return 0;
1312
1313
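 /* Modules with no 10G capability are only supported if they are one
  * of the known 1G copper/SX/LX types.
  */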
1314 if (comp_codes_10g == 0 &&
1315 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1316 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1317 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1318 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1319 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1320 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1321 hw->phy.type = ixgbe_phy_sfp_unsupported;
1322 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1323 }
1324
1325
1326 if (hw->mac.type == ixgbe_mac_82598EB)
1327 return 0;
1328
1329 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1330 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
1331 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1332 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1333 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1334 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1335 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1336 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1337
1338 if (hw->phy.type == ixgbe_phy_sfp_intel)
1339 return 0;
1340 if (hw->allow_unsupported_sfp) {
1341 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1342 return 0;
1343 }
1344 hw_dbg(hw, "SFP+ module not supported\n");
1345 hw->phy.type = ixgbe_phy_sfp_unsupported;
1346 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1347 }
1348 return 0;
1349
1350err_read_i2c_eeprom:
1351 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1352 if (hw->phy.type != ixgbe_phy_nl) {
1353 hw->phy.id = 0;
1354 hw->phy.type = ixgbe_phy_unknown;
1355 }
1356 return IXGBE_ERR_SFP_NOT_PRESENT;
1357}
1358
/**
 *  ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
 *  @hw: pointer to hardware structure
 *
 *  Searches for and identifies the QSFP module and assigns the appropriate
 *  PHY type.
 **/
1365static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1366{
1367 struct ixgbe_adapter *adapter = hw->back;
1368 s32 status;
1369 u32 vendor_oui = 0;
1370 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1371 u8 identifier = 0;
1372 u8 comp_codes_1g = 0;
1373 u8 comp_codes_10g = 0;
1374 u8 oui_bytes[3] = {0, 0, 0};
1375 u16 enforce_sfp = 0;
1376 u8 connector = 0;
1377 u8 cable_length = 0;
1378 u8 device_tech = 0;
1379 bool active_cable = false;
1380
1381 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
1382 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1383 return IXGBE_ERR_SFP_NOT_PRESENT;
1384 }
1385
1386
1387 hw->mac.ops.set_lan_id(hw);
1388
1389 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
1390 &identifier);
1391
1392 if (status != 0)
1393 goto err_read_i2c_eeprom;
1394
1395 if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
1396 hw->phy.type = ixgbe_phy_sfp_unsupported;
1397 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1398 }
1399
1400 hw->phy.id = identifier;
1401
1402 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
1403 &comp_codes_10g);
1404
1405 if (status != 0)
1406 goto err_read_i2c_eeprom;
1407
1408 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
1409 &comp_codes_1g);
1410
1411 if (status != 0)
1412 goto err_read_i2c_eeprom;
1413
1414 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
1415 hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
1416 if (hw->bus.lan_id == 0)
1417 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
1418 else
1419 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
1420 } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1421 IXGBE_SFF_10GBASELR_CAPABLE)) {
1422 if (hw->bus.lan_id == 0)
1423 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
1424 else
1425 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
1426 } else {
1427 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
1428 active_cable = true;
1429
1430 if (!active_cable) {
1431
1432
1433
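   /* A module that does not advertise the DA-active bit may still be
    * an active DA cable; check connector type, cable length and
    * transmitter technology to detect it.
    */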
1434 hw->phy.ops.read_i2c_eeprom(hw,
1435 IXGBE_SFF_QSFP_CONNECTOR,
1436 &connector);
1437
1438 hw->phy.ops.read_i2c_eeprom(hw,
1439 IXGBE_SFF_QSFP_CABLE_LENGTH,
1440 &cable_length);
1441
1442 hw->phy.ops.read_i2c_eeprom(hw,
1443 IXGBE_SFF_QSFP_DEVICE_TECH,
1444 &device_tech);
1445
1446 if ((connector ==
1447 IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
1448 (cable_length > 0) &&
1449 ((device_tech >> 4) ==
1450 IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
1451 active_cable = true;
1452 }
1453
1454 if (active_cable) {
1455 hw->phy.type = ixgbe_phy_qsfp_active_unknown;
1456 if (hw->bus.lan_id == 0)
1457 hw->phy.sfp_type =
1458 ixgbe_sfp_type_da_act_lmt_core0;
1459 else
1460 hw->phy.sfp_type =
1461 ixgbe_sfp_type_da_act_lmt_core1;
1462 } else {
1463
1464 hw->phy.type = ixgbe_phy_sfp_unsupported;
1465 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1466 }
1467 }
1468
1469 if (hw->phy.sfp_type != stored_sfp_type)
1470 hw->phy.sfp_setup_needed = true;
1471
1472
1473 hw->phy.multispeed_fiber = false;
1474 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1475 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1476 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1477 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1478 hw->phy.multispeed_fiber = true;
1479
1480
1481 if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1482 IXGBE_SFF_10GBASELR_CAPABLE)) {
1483 status = hw->phy.ops.read_i2c_eeprom(hw,
1484 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
1485 &oui_bytes[0]);
1486
1487 if (status != 0)
1488 goto err_read_i2c_eeprom;
1489
1490 status = hw->phy.ops.read_i2c_eeprom(hw,
1491 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
1492 &oui_bytes[1]);
1493
1494 if (status != 0)
1495 goto err_read_i2c_eeprom;
1496
1497 status = hw->phy.ops.read_i2c_eeprom(hw,
1498 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
1499 &oui_bytes[2]);
1500
1501 if (status != 0)
1502 goto err_read_i2c_eeprom;
1503
1504 vendor_oui =
1505 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1506 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1507 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1508
1509 if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
1510 hw->phy.type = ixgbe_phy_qsfp_intel;
1511 else
1512 hw->phy.type = ixgbe_phy_qsfp_unknown;
1513
1514 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1515 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
1516
1517 if (hw->phy.type == ixgbe_phy_qsfp_intel)
1518 return 0;
1519 if (hw->allow_unsupported_sfp) {
1520 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1521 return 0;
1522 }
1523 hw_dbg(hw, "QSFP module not supported\n");
1524 hw->phy.type = ixgbe_phy_sfp_unsupported;
1525 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1526 }
1527 return 0;
1528 }
1529 return 0;
1530
1531err_read_i2c_eeprom:
1532 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1533 hw->phy.id = 0;
1534 hw->phy.type = ixgbe_phy_unknown;
1535
1536 return IXGBE_ERR_SFP_NOT_PRESENT;
1537}
1538
/**
 *  ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
 *  @hw: pointer to hardware structure
 *  @list_offset: offset to the SFP ID list
 *  @data_offset: offset to the SFP data block
 *
 *  Checks the MAC's EEPROM to see if it supports a given SFP+ module type,
 *  and if so returns the offsets to the PHY init sequence block.
 **/
1548s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1549 u16 *list_offset,
1550 u16 *data_offset)
1551{
1552 u16 sfp_id;
1553 u16 sfp_type = hw->phy.sfp_type;
1554
1555 if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
1556 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1557
1558 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1559 return IXGBE_ERR_SFP_NOT_PRESENT;
1560
1561 if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
1562 (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
1563 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1564
1565
1566
1567
1568
1569 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
1570 sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1571 sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1572 sfp_type == ixgbe_sfp_type_1g_sx_core0)
1573 sfp_type = ixgbe_sfp_type_srlr_core0;
1574 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
1575 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1576 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1577 sfp_type == ixgbe_sfp_type_1g_sx_core1)
1578 sfp_type = ixgbe_sfp_type_srlr_core1;
1579
1580
1581 if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
1582 hw_err(hw, "eeprom read at %d failed\n",
1583 IXGBE_PHY_INIT_OFFSET_NL);
1584 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
1585 }
1586
1587 if ((!*list_offset) || (*list_offset == 0xFFFF))
1588 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
1589
1590
1591 (*list_offset)++;
1592
1593
1594
1595
1596
1597 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1598 goto err_phy;
1599
1600 while (sfp_id != IXGBE_PHY_INIT_END_NL) {
1601 if (sfp_id == sfp_type) {
1602 (*list_offset)++;
1603 if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
1604 goto err_phy;
1605 if ((!*data_offset) || (*data_offset == 0xFFFF)) {
1606 hw_dbg(hw, "SFP+ module not supported\n");
1607 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1608 } else {
1609 break;
1610 }
1611 } else {
1612 (*list_offset) += 2;
1613 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1614 goto err_phy;
1615 }
1616 }
1617
1618 if (sfp_id == IXGBE_PHY_INIT_END_NL) {
1619 hw_dbg(hw, "No matching SFP+ module found\n");
1620 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1621 }
1622
1623 return 0;
1624
1625err_phy:
1626 hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
1627 return IXGBE_ERR_PHY;
1628}
1629
/**
 *  ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs a byte read of the SFP module's EEPROM over the I2C interface.
 **/
1638s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1639 u8 *eeprom_data)
1640{
1641 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1642 IXGBE_I2C_EEPROM_DEV_ADDR,
1643 eeprom_data);
1644}
1645
/**
 *  ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset at the SFF-8472 diagnostic address
 *  @sff8472_data: value read
 *
 *  Performs a byte read of the SFP module's SFF-8472 diagnostic data over
 *  the I2C interface.
 **/
1654s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1655 u8 *sff8472_data)
1656{
1657 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1658 IXGBE_I2C_EEPROM_DEV_ADDR2,
1659 sff8472_data);
1660}
1661
/**
 *  ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to write
 *  @eeprom_data: value to write
 *
 *  Performs a byte write to the SFP module's EEPROM over the I2C interface.
 **/
1670s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1671 u8 eeprom_data)
1672{
1673 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1674 IXGBE_I2C_EEPROM_DEV_ADDR,
1675 eeprom_data);
1676}
1677
/**
 *  ixgbe_is_sfp_probe - Returns true if an SFP probe is in progress
 *  @hw: pointer to hardware structure
 *  @offset: EEPROM offset to be read
 *  @addr: I2C address to be read
 **/
1684static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
1685{
1686 if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
1687 offset == IXGBE_SFF_IDENTIFIER &&
1688 hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1689 return true;
1690 return false;
1691}
1692
/**
 *  ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: I2C device address
 *  @data: value read
 *  @lock: true if the SWFW semaphore should be taken and released
 *
 *  Performs a byte read of the SFP module's EEPROM over the bit-banged I2C
 *  interface at the specified device address.
 **/
1704static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
1705 u8 dev_addr, u8 *data, bool lock)
1706{
1707 s32 status;
1708 u32 max_retry = 10;
1709 u32 retry = 0;
1710 u32 swfw_mask = hw->phy.phy_semaphore_mask;
1711 bool nack = true;
1712
1713 if (hw->mac.type >= ixgbe_mac_X550)
1714 max_retry = 3;
1715 if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
1716 max_retry = IXGBE_SFP_DETECT_RETRIES;
1717
1718 *data = 0;
1719
1720 do {
1721 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
1722 return IXGBE_ERR_SWFW_SYNC;
1723
1724 ixgbe_i2c_start(hw);
1725
1726
1727 status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
1728 if (status != 0)
1729 goto fail;
1730
1731 status = ixgbe_get_i2c_ack(hw);
1732 if (status != 0)
1733 goto fail;
1734
1735 status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
1736 if (status != 0)
1737 goto fail;
1738
1739 status = ixgbe_get_i2c_ack(hw);
1740 if (status != 0)
1741 goto fail;
1742
1743 ixgbe_i2c_start(hw);
1744
1745
1746 status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
1747 if (status != 0)
1748 goto fail;
1749
1750 status = ixgbe_get_i2c_ack(hw);
1751 if (status != 0)
1752 goto fail;
1753
1754 status = ixgbe_clock_in_i2c_byte(hw, data);
1755 if (status != 0)
1756 goto fail;
1757
1758 status = ixgbe_clock_out_i2c_bit(hw, nack);
1759 if (status != 0)
1760 goto fail;
1761
1762 ixgbe_i2c_stop(hw);
1763 if (lock)
1764 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1765 return 0;
1766
1767fail:
1768 ixgbe_i2c_bus_clear(hw);
1769 if (lock) {
1770 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1771 msleep(100);
1772 }
1773 retry++;
1774 if (retry < max_retry)
1775 hw_dbg(hw, "I2C byte read error - Retrying.\n");
1776 else
1777 hw_dbg(hw, "I2C byte read error.\n");
1778
1779 } while (retry < max_retry);
1780
1781 return status;
1782}
1783
/**
 *  ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: I2C device address
 *  @data: value read
 *
 *  Performs a byte read over I2C, taking the SWFW semaphore.
 **/
1794s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1795 u8 dev_addr, u8 *data)
1796{
1797 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1798 data, true);
1799}
1800
/**
 *  ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: I2C device address
 *  @data: value read
 *
 *  Performs a byte read over I2C without taking the SWFW semaphore.
 **/
1811s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
1812 u8 dev_addr, u8 *data)
1813{
1814 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1815 data, false);
1816}
1817
/**
 *  ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: I2C device address
 *  @data: value to write
 *  @lock: true if the SWFW semaphore should be taken and released
 *
 *  Performs a byte write to the SFP module's EEPROM over the bit-banged I2C
 *  interface at the specified device address.
 **/
1829static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
1830 u8 dev_addr, u8 data, bool lock)
1831{
1832 s32 status;
1833 u32 max_retry = 1;
1834 u32 retry = 0;
1835 u32 swfw_mask = hw->phy.phy_semaphore_mask;
1836
1837 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
1838 return IXGBE_ERR_SWFW_SYNC;
1839
1840 do {
1841 ixgbe_i2c_start(hw);
1842
1843 status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
1844 if (status != 0)
1845 goto fail;
1846
1847 status = ixgbe_get_i2c_ack(hw);
1848 if (status != 0)
1849 goto fail;
1850
1851 status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
1852 if (status != 0)
1853 goto fail;
1854
1855 status = ixgbe_get_i2c_ack(hw);
1856 if (status != 0)
1857 goto fail;
1858
1859 status = ixgbe_clock_out_i2c_byte(hw, data);
1860 if (status != 0)
1861 goto fail;
1862
1863 status = ixgbe_get_i2c_ack(hw);
1864 if (status != 0)
1865 goto fail;
1866
1867 ixgbe_i2c_stop(hw);
1868 if (lock)
1869 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1870 return 0;
1871
1872fail:
1873 ixgbe_i2c_bus_clear(hw);
1874 retry++;
1875 if (retry < max_retry)
1876 hw_dbg(hw, "I2C byte write error - Retrying.\n");
1877 else
1878 hw_dbg(hw, "I2C byte write error.\n");
1879 } while (retry < max_retry);
1880
1881 if (lock)
1882 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1883
1884 return status;
1885}
1886
/**
 *  ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: I2C device address
 *  @data: value to write
 *
 *  Performs a byte write over I2C, taking the SWFW semaphore.
 **/
1897s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1898 u8 dev_addr, u8 data)
1899{
1900 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1901 data, true);
1902}
1903
/**
 *  ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: I2C device address
 *  @data: value to write
 *
 *  Performs a byte write over I2C without taking the SWFW semaphore.
 **/
1914s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
1915 u8 dev_addr, u8 data)
1916{
1917 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
1918 data, false);
1919}
1920
/**
 *  ixgbe_i2c_start - Sets I2C start condition
 *  @hw: pointer to hardware structure
 *
 *  Sets the I2C start condition (high -> low on SDA while SCL is high)
 *  and enables bit-bang mode where the hardware supports it.
 **/
1928static void ixgbe_i2c_start(struct ixgbe_hw *hw)
1929{
1930 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
1931
1932 i2cctl |= IXGBE_I2C_BB_EN(hw);
1933
1934
1935 ixgbe_set_i2c_data(hw, &i2cctl, 1);
1936 ixgbe_raise_i2c_clk(hw, &i2cctl);
1937
1938
1939 udelay(IXGBE_I2C_T_SU_STA);
1940
1941 ixgbe_set_i2c_data(hw, &i2cctl, 0);
1942
1943
1944 udelay(IXGBE_I2C_T_HD_STA);
1945
1946 ixgbe_lower_i2c_clk(hw, &i2cctl);
1947
1948
1949 udelay(IXGBE_I2C_T_LOW);
1950
1951}
1952
/**
 *  ixgbe_i2c_stop - Sets I2C stop condition
 *  @hw: pointer to hardware structure
 *
 *  Sets the I2C stop condition (low -> high on SDA while SCL is high)
 *  and disables bit-bang mode and the output enables where present.
 **/
1961static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
1962{
1963 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
1964 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
1965 u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
1966 u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);
1967
1968
1969 ixgbe_set_i2c_data(hw, &i2cctl, 0);
1970 ixgbe_raise_i2c_clk(hw, &i2cctl);
1971
1972
1973 udelay(IXGBE_I2C_T_SU_STO);
1974
1975 ixgbe_set_i2c_data(hw, &i2cctl, 1);
1976
1977
1978 udelay(IXGBE_I2C_T_BUF);
1979
1980 if (bb_en_bit || data_oe_bit || clk_oe_bit) {
1981 i2cctl &= ~bb_en_bit;
1982 i2cctl |= data_oe_bit | clk_oe_bit;
1983 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
1984 IXGBE_WRITE_FLUSH(hw);
1985 }
1986}
1987
/**
 *  ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
 *  @hw: pointer to hardware structure
 *  @data: data byte to clock in
 *
 *  Clocks in one byte via the I2C data/clock lines, MSB first.
 **/
1995static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
1996{
1997 s32 i;
1998 bool bit = false;
1999
2000 *data = 0;
2001 for (i = 7; i >= 0; i--) {
2002 ixgbe_clock_in_i2c_bit(hw, &bit);
2003 *data |= bit << i;
2004 }
2005
2006 return 0;
2007}
2008
/**
 *  ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
 *  @hw: pointer to hardware structure
 *  @data: data byte to clock out
 *
 *  Clocks out one byte via the I2C data/clock lines, then releases SDA.
 **/
2016static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
2017{
2018 s32 status;
2019 s32 i;
2020 u32 i2cctl;
2021 bool bit = false;
2022
2023 for (i = 7; i >= 0; i--) {
2024 bit = (data >> i) & 0x1;
2025 status = ixgbe_clock_out_i2c_bit(hw, bit);
2026
2027 if (status != 0)
2028 break;
2029 }
2030
2031
2032 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2033 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2034 i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
2035 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2036 IXGBE_WRITE_FLUSH(hw);
2037
2038 return status;
2039}
2040
/**
 *  ixgbe_get_i2c_ack - Polls for I2C ACK
 *  @hw: pointer to hardware structure
 *
 *  Releases SDA and polls for the slave's ACK after a byte has been
 *  clocked out.
 **/
2047static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
2048{
2049 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2050 s32 status = 0;
2051 u32 i = 0;
2052 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2053 u32 timeout = 10;
2054 bool ack = true;
2055
2056 if (data_oe_bit) {
2057 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2058 i2cctl |= data_oe_bit;
2059 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2060 IXGBE_WRITE_FLUSH(hw);
2061 }
2062 ixgbe_raise_i2c_clk(hw, &i2cctl);
2063
2064
2065 udelay(IXGBE_I2C_T_HIGH);
2066
2067
2068
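 /* Poll for the slave to pull SDA low (ACK) */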
2069 for (i = 0; i < timeout; i++) {
2070 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2071 ack = ixgbe_get_i2c_data(hw, &i2cctl);
2072
2073 udelay(1);
2074 if (ack == 0)
2075 break;
2076 }
2077
2078 if (ack == 1) {
2079 hw_dbg(hw, "I2C ack was not received.\n");
2080 status = IXGBE_ERR_I2C;
2081 }
2082
2083 ixgbe_lower_i2c_clk(hw, &i2cctl);
2084
2085
2086 udelay(IXGBE_I2C_T_LOW);
2087
2088 return status;
2089}
2090
/**
 *  ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
 *  @hw: pointer to hardware structure
 *  @data: read data value
 *
 *  Clocks in one bit via the I2C data/clock lines.
 **/
2098static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
2099{
2100 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2101 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2102
2103 if (data_oe_bit) {
2104 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2105 i2cctl |= data_oe_bit;
2106 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2107 IXGBE_WRITE_FLUSH(hw);
2108 }
2109 ixgbe_raise_i2c_clk(hw, &i2cctl);
2110
2111
2112 udelay(IXGBE_I2C_T_HIGH);
2113
2114 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2115 *data = ixgbe_get_i2c_data(hw, &i2cctl);
2116
2117 ixgbe_lower_i2c_clk(hw, &i2cctl);
2118
2119
2120 udelay(IXGBE_I2C_T_LOW);
2121
2122 return 0;
2123}
2124
/**
 *  ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
 *  @hw: pointer to hardware structure
 *  @data: data value to write
 *
 *  Clocks out one bit via the I2C data/clock lines.
 **/
2132static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
2133{
2134 s32 status;
2135 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2136
2137 status = ixgbe_set_i2c_data(hw, &i2cctl, data);
2138 if (status == 0) {
2139 ixgbe_raise_i2c_clk(hw, &i2cctl);
2140
2141
2142 udelay(IXGBE_I2C_T_HIGH);
2143
2144 ixgbe_lower_i2c_clk(hw, &i2cctl);
2145
2146
2147
2148
2149 udelay(IXGBE_I2C_T_LOW);
2150 } else {
2151 hw_dbg(hw, "I2C data was not set to %X\n", data);
2152 return IXGBE_ERR_I2C;
2153 }
2154
2155 return 0;
2156}
2157
/**
 *  ixgbe_raise_i2c_clk - Raises the I2C SCL clock
 *  @hw: pointer to hardware structure
 *  @i2cctl: Current value of I2CCTL register
 *
 *  Raises the I2C clock line '0' -> '1', honoring clock stretching by
 *  waiting for SCL to actually read back high.
 **/
2165static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
2166{
2167 u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
2168 u32 i = 0;
2169 u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
2170 u32 i2cctl_r = 0;
2171
2172 if (clk_oe_bit) {
2173 *i2cctl |= clk_oe_bit;
2174 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2175 }
2176
2177 for (i = 0; i < timeout; i++) {
2178 *i2cctl |= IXGBE_I2C_CLK_OUT(hw);
2179 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2180 IXGBE_WRITE_FLUSH(hw);
2181
2182 udelay(IXGBE_I2C_T_RISE);
2183
2184 i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2185 if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
2186 break;
2187 }
2188}
2189
/**
 *  ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
 *  @hw: pointer to hardware structure
 *  @i2cctl: Current value of I2CCTL register
 *
 *  Lowers the I2C clock line '1' -> '0'.
 **/
2198static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
2199{
2200
2201 *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
2202 *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);
2203
2204 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2205 IXGBE_WRITE_FLUSH(hw);
2206
2207
2208 udelay(IXGBE_I2C_T_FALL);
2209}
2210
/**
 *  ixgbe_set_i2c_data - Sets the I2C data bit
 *  @hw: pointer to hardware structure
 *  @i2cctl: Current value of I2CCTL register
 *  @data: I2C data value (0 or 1) to set
 *
 *  Drives SDA to the requested value and, when driving high, verifies
 *  that the line actually reads back high.
 **/
2220static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
2221{
2222 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2223
2224 if (data)
2225 *i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2226 else
2227 *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
2228 *i2cctl &= ~data_oe_bit;
2229
2230 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2231 IXGBE_WRITE_FLUSH(hw);
2232
2233
2234 udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
2235
2236 if (!data)
2237 return 0;
2238 if (data_oe_bit) {
2239 *i2cctl |= data_oe_bit;
2240 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2241 IXGBE_WRITE_FLUSH(hw);
2242 }
2243
2244
2245 *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2246 if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
2247 hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
2248 return IXGBE_ERR_I2C;
2249 }
2250
2251 return 0;
2252}
2253
/**
 *  ixgbe_get_i2c_data - Reads the I2C SDA data bit
 *  @hw: pointer to hardware structure
 *  @i2cctl: Current value of I2CCTL register
 *
 *  Returns the I2C data bit value.
 **/
2262static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
2263{
2264 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2265
2266 if (data_oe_bit) {
2267 *i2cctl |= data_oe_bit;
2268 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2269 IXGBE_WRITE_FLUSH(hw);
2270 udelay(IXGBE_I2C_T_FALL);
2271 }
2272
2273 if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
2274 return true;
2275 return false;
2276}
2277
/**
 *  ixgbe_i2c_bus_clear - Clears the I2C bus
 *  @hw: pointer to hardware structure
 *
 *  Clears the I2C bus by sending nine clock pulses.
 *  Used when the data line is stuck low.
 **/
2285static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
2286{
2287 u32 i2cctl;
2288 u32 i;
2289
2290 ixgbe_i2c_start(hw);
2291 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2292
2293 ixgbe_set_i2c_data(hw, &i2cctl, 1);
2294
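 /* Clock out nine pulses so any slave stuck mid-byte releases SDA */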
2295 for (i = 0; i < 9; i++) {
2296 ixgbe_raise_i2c_clk(hw, &i2cctl);
2297
2298
2299 udelay(IXGBE_I2C_T_HIGH);
2300
2301 ixgbe_lower_i2c_clk(hw, &i2cctl);
2302
2303
2304 udelay(IXGBE_I2C_T_LOW);
2305 }
2306
2307 ixgbe_i2c_start(hw);
2308
2309
2310 ixgbe_i2c_stop(hw);
2311}
2312
/**
 *  ixgbe_tn_check_overtemp - Checks if an overtemp event occurred
 *  @hw: pointer to hardware structure
 *
 *  Checks if the LASI temp alarm status was triggered due to overtemp.
 **/
2319s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2320{
2321 u16 phy_data = 0;
2322
2323 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
2324 return 0;
2325
2326
2327 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2328 MDIO_MMD_PMAPMD, &phy_data);
2329
2330 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2331 return 0;
2332
2333 return IXGBE_ERR_OVERTEMP;
2334}
2335
/**
 *  ixgbe_set_copper_phy_power - Control power for copper phy
 *  @hw: pointer to hardware structure
 *  @on: true for on, false for off
 **/
2340s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
2341{
2342 u32 status;
2343 u16 reg;
2344
2345
2346 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2347 return 0;
2348
2349 if (!on && ixgbe_mng_present(hw))
2350 return 0;
2351
 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, &reg);
2353 if (status)
2354 return status;
2355
2356 if (on) {
2357 reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2358 } else {
2359 if (ixgbe_check_reset_blocked(hw))
2360 return 0;
2361 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2362 }
2363
2364 status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg);
2365 return status;
2366}
2367