1
2
3
4
5#include "ixgbe_api.h"
6#include "ixgbe_common.h"
7#include "ixgbe_phy.h"
8
/* Forward declarations of the file-local bit-banged I2C primitives and the
 * SFF-8472 read helper implemented later in this file. */
STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw);
STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw);
STATIC void ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
STATIC void ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
					  u8 *sff8472_data);
22
23
24
25
26
27
28
29
30STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
31{
32 s32 status;
33
34 status = ixgbe_clock_out_i2c_byte(hw, byte);
35 if (status)
36 return status;
37 return ixgbe_get_i2c_ack(hw);
38}
39
40
41
42
43
44
45
46
47STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
48{
49 ixgbe_clock_in_i2c_byte(hw, byte);
50
51 return ixgbe_clock_out_i2c_bit(hw, false);
52}
53
54
55
56
57
58
59
60
61STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
62{
63 u16 sum = add1 + add2;
64
65 sum = (sum & 0xFF) + (sum >> 8);
66 return sum & 0xFF;
67}
68
69
70
71
72
73
74
75
76
77
78
/**
 * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to read from
 * @reg: I2C device register to read from
 * @val: pointer to location to receive the 16-bit value read
 * @lock: true to take and release the SWFW semaphore around the transfer
 *
 * Bit-bangs a write of the register address (with checksum), then a
 * repeated-start and a read of the 16-bit value. The whole transaction is
 * retried up to max_retry times on any failure. Returns 0 on success,
 * IXGBE_ERR_SWFW_SYNC if the semaphore cannot be taken, IXGBE_ERR_I2C if
 * all retries fail.
 */
s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
					u16 *val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 3;
	int retry = 0;
	u8 csum_byte;
	u8 high_bits;
	u8 low_bits;
	u8 reg_high;
	u8 csum;

	/* Bit 0 set in the high register byte indicates a read request. */
	reg_high = ((reg >> 7) & 0xFE) | 1;
	/* Inverted ones' complement checksum over the two register bytes. */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device address with write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Register bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Register bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Checksum byte */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		/* Repeated start switches the transfer to the read phase. */
		ixgbe_i2c_start(hw);
		/* Device address with read indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
			goto fail;
		/* Data high byte */
		if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
			goto fail;
		/* Data low byte */
		if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
			goto fail;
		/* NOTE(review): the checksum byte is clocked in but never
		 * verified against the received payload. */
		ixgbe_clock_in_i2c_byte(hw, &csum_byte);
		/* Clock out the final (low) bit to complete the transfer. */
		if (ixgbe_clock_out_i2c_bit(hw, false))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		*val = (high_bits << 8) | low_bits;
		return 0;

fail:
		/* Release the bus, then retry the entire transaction. */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		if (retry < max_retry)
			DEBUGOUT("I2C byte read combined error - Retrying.\n");
		else
			DEBUGOUT("I2C byte read combined error.\n");
		retry++;
	} while (retry <= max_retry);

	return IXGBE_ERR_I2C;
}
145
146
147
148
149
150
151
152
153
154
155
/**
 * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to write to
 * @reg: I2C device register to write to
 * @val: 16-bit value to write
 * @lock: true to take and release the SWFW semaphore around the transfer
 *
 * Bit-bangs the register address, the 16-bit value, and a checksum byte in
 * one write transaction, retrying up to max_retry times on failure.
 * Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, IXGBE_ERR_I2C if all retries fail.
 */
s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
					 u16 val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 1;
	int retry = 0;
	u8 reg_high;
	u8 csum;

	/* Bit 0 clear in the high register byte indicates a write request. */
	reg_high = (reg >> 7) & 0xFE;
	/* Inverted ones' complement checksum over register and data bytes. */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
	csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device address with write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Register bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Register bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Data bits 15:8 */
		if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
			goto fail;
		/* Data bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
			goto fail;
		/* Checksum byte */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* Release the bus, then retry the entire transaction. */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		if (retry < max_retry)
			DEBUGOUT("I2C byte write combined error - Retrying.\n");
		else
			DEBUGOUT("I2C byte write combined error.\n");
		retry++;
	} while (retry <= max_retry);

	return IXGBE_ERR_I2C;
}
210
211
212
213
214
215
216
217s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
218{
219 struct ixgbe_phy_info *phy = &hw->phy;
220
221 DEBUGFUNC("ixgbe_init_phy_ops_generic");
222
223
224 phy->ops.identify = ixgbe_identify_phy_generic;
225 phy->ops.reset = ixgbe_reset_phy_generic;
226 phy->ops.read_reg = ixgbe_read_phy_reg_generic;
227 phy->ops.write_reg = ixgbe_write_phy_reg_generic;
228 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi;
229 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi;
230 phy->ops.setup_link = ixgbe_setup_phy_link_generic;
231 phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic;
232 phy->ops.check_link = NULL;
233 phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
234 phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic;
235 phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic;
236 phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic;
237 phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic;
238 phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic;
239 phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear;
240 phy->ops.identify_sfp = ixgbe_identify_module_generic;
241 phy->sfp_type = ixgbe_sfp_type_unknown;
242 phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked;
243 phy->ops.write_i2c_byte_unlocked =
244 ixgbe_write_i2c_byte_generic_unlocked;
245 phy->ops.check_overtemp = ixgbe_tn_check_overtemp;
246 return IXGBE_SUCCESS;
247}
248
249
250
251
252
253
254
255
256static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
257{
258 u16 ext_ability = 0;
259
260 if (!ixgbe_validate_phy_addr(hw, phy_addr)) {
261 DEBUGOUT1("Unable to validate PHY address 0x%04X\n",
262 phy_addr);
263 return false;
264 }
265
266 if (ixgbe_get_phy_id(hw))
267 return false;
268
269 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
270
271 if (hw->phy.type == ixgbe_phy_unknown) {
272 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
273 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
274 if (ext_ability &
275 (IXGBE_MDIO_PHY_10GBASET_ABILITY |
276 IXGBE_MDIO_PHY_1000BASET_ABILITY))
277 hw->phy.type = ixgbe_phy_cu_unknown;
278 else
279 hw->phy.type = ixgbe_phy_generic;
280 }
281
282 return true;
283}
284
285
286
287
288
289
290
/**
 * ixgbe_identify_phy_generic - Identify the PHY on the current adapter
 * @hw: pointer to the hardware structure
 *
 * Sets up the per-port PHY semaphore mask and probes for the PHY, either at
 * the firmware-supplied MDIO address or by scanning all addresses. Returns
 * IXGBE_SUCCESS when a PHY is (or was already) identified, otherwise
 * IXGBE_ERR_PHY_ADDR_INVALID.
 */
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u16 phy_addr;

	DEBUGFUNC("ixgbe_identify_phy_generic");

	/* Pick the per-LAN-port semaphore if not already configured. */
	if (!hw->phy.phy_semaphore_mask) {
		if (hw->bus.lan_id)
			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
		else
			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
	}

	/* Already identified: nothing more to do. */
	if (hw->phy.type != ixgbe_phy_unknown)
		return IXGBE_SUCCESS;

	/* Firmware supplied the MDIO address: probe only that one. */
	if (hw->phy.nw_mng_if_sel) {
		phy_addr = (hw->phy.nw_mng_if_sel &
			    IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
			   IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
		if (ixgbe_probe_phy(hw, phy_addr))
			return IXGBE_SUCCESS;
		else
			return IXGBE_ERR_PHY_ADDR_INVALID;
	}

	/* Otherwise scan every possible MDIO address. */
	for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
		if (ixgbe_probe_phy(hw, phy_addr)) {
			status = IXGBE_SUCCESS;
			break;
		}
	}

	/* Clear the stored address when no PHY was found. */
	if (status != IXGBE_SUCCESS)
		hw->phy.addr = 0;

	return status;
}
334
335
336
337
338
339
340
341
342
343
344s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
345{
346 u32 mmngc;
347
348 DEBUGFUNC("ixgbe_check_reset_blocked");
349
350
351 if (hw->mac.type == ixgbe_mac_82598EB)
352 return false;
353
354 mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
355 if (mmngc & IXGBE_MMNGC_MNG_VETO) {
356 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
357 "MNG_VETO bit detected.\n");
358 return true;
359 }
360
361 return false;
362}
363
364
365
366
367
368
369
370bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
371{
372 u16 phy_id = 0;
373 bool valid = false;
374
375 DEBUGFUNC("ixgbe_validate_phy_addr");
376
377 hw->phy.addr = phy_addr;
378 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
379 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
380
381 if (phy_id != 0xFFFF && phy_id != 0x0)
382 valid = true;
383
384 DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id);
385
386 return valid;
387}
388
389
390
391
392
393
394s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
395{
396 u32 status;
397 u16 phy_id_high = 0;
398 u16 phy_id_low = 0;
399
400 DEBUGFUNC("ixgbe_get_phy_id");
401
402 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
403 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
404 &phy_id_high);
405
406 if (status == IXGBE_SUCCESS) {
407 hw->phy.id = (u32)(phy_id_high << 16);
408 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
409 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
410 &phy_id_low);
411 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
412 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
413 }
414 DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n",
415 phy_id_high, phy_id_low);
416
417 return status;
418}
419
420
421
422
423
424
425enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
426{
427 enum ixgbe_phy_type phy_type;
428
429 DEBUGFUNC("ixgbe_get_phy_type_from_id");
430
431 switch (phy_id) {
432 case TN1010_PHY_ID:
433 phy_type = ixgbe_phy_tn;
434 break;
435 case X550_PHY_ID2:
436 case X550_PHY_ID3:
437 case X540_PHY_ID:
438 phy_type = ixgbe_phy_aq;
439 break;
440 case QT2022_PHY_ID:
441 phy_type = ixgbe_phy_qt;
442 break;
443 case ATH_PHY_ID:
444 phy_type = ixgbe_phy_nl;
445 break;
446 case X557_PHY_ID:
447 case X557_PHY_ID2:
448 phy_type = ixgbe_phy_x550em_ext_t;
449 break;
450 case IXGBE_M88E1500_E_PHY_ID:
451 case IXGBE_M88E1543_E_PHY_ID:
452 phy_type = ixgbe_phy_ext_1g_t;
453 break;
454 default:
455 phy_type = ixgbe_phy_unknown;
456 break;
457 }
458 return phy_type;
459}
460
461
462
463
464
/**
 * ixgbe_reset_phy_generic - Perform a soft PHY reset
 * @hw: pointer to the hardware structure
 *
 * Identifies the PHY if needed, skips the reset on overtemp (unless
 * reset_if_overtemp is set) or when manageability firmware blocks it, then
 * issues a PHY_XS soft reset and polls for completion. Returns
 * IXGBE_ERR_RESET_FAILED if the reset bit never self-clears.
 */
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u16 ctrl = 0;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_reset_phy_generic");

	if (hw->phy.type == ixgbe_phy_unknown)
		status = ixgbe_identify_phy_generic(hw);

	if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none)
		goto out;

	/* Don't reset the PHY if it was shut down due to overtemp,
	 * unless the caller explicitly asked for it. */
	if (!hw->phy.reset_if_overtemp &&
	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
		goto out;

	/* Blocked by manageability firmware - bail. */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* Issue the soft reset through the PHY_XS control register. */
	hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
			      IXGBE_MDIO_PHY_XS_DEV_TYPE,
			      IXGBE_MDIO_PHY_XS_RESET);

	/* Poll for completion: up to 30 iterations of 100 ms each. */
	for (i = 0; i < 30; i++) {
		msec_delay(100);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
			/* x550em_ext_t signals completion through a vendor
			 * alarm register rather than the reset bit. */
			status = hw->phy.ops.read_reg(hw,
						IXGBE_MDIO_TX_VENDOR_ALARMS_3,
						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
						&ctrl);
			if (status != IXGBE_SUCCESS)
				return status;

			if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
				usec_delay(2);
				break;
			}
		} else {
			status = hw->phy.ops.read_reg(hw,
						IXGBE_MDIO_PHY_XS_CONTROL,
						IXGBE_MDIO_PHY_XS_DEV_TYPE,
						&ctrl);
			if (status != IXGBE_SUCCESS)
				return status;

			/* Reset bit self-clears when complete. */
			if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
				usec_delay(2);
				break;
			}
		}
	}

	/* NOTE(review): for the x550em_ext_t path, ctrl holds the alarm
	 * register here, so this check is primarily meaningful for the
	 * PHY_XS poll path. */
	if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
		status = IXGBE_ERR_RESET_FAILED;
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			      "PHY reset polling failed to complete.\n");
	}

out:
	return status;
}
539
540
541
542
543
544void ixgbe_restart_auto_neg(struct ixgbe_hw *hw)
545{
546 u16 autoneg_reg;
547
548
549 if (ixgbe_check_reset_blocked(hw))
550 return;
551
552
553 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
554 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
555 autoneg_reg |= IXGBE_MII_RESTART;
556 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
557 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
558}
559
560
561
562
563
564
565
566
567
/**
 * ixgbe_read_phy_reg_mdi - Read a PHY register without taking the SWFW lock
 * @hw: pointer to the hardware structure
 * @reg_addr: address of the PHY register to read
 * @device_type: MDIO device type field for the MSCA command
 * @phy_data: where to store the 16-bit value read
 *
 * Issues a clause-45 style address cycle followed by a read cycle through
 * the MSCA command register, polling each for completion, then extracts the
 * data from MSRWD. Returns IXGBE_ERR_PHY on poll timeout.
 */
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
			   u16 *phy_data)
{
	u32 i, data, command;

	/* Setup and write the address cycle command. */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec whether the address cycle completed;
	 * the MDI Command bit clears when the operation is done. */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		usec_delay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
		DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete: setup and write the read command. */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Poll the same way for the read cycle to complete. */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		usec_delay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
		DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n");
		return IXGBE_ERR_PHY;
	}

	/* Read completed: extract the data from the MSRWD register. */
	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
	*phy_data = (u16)(data);

	return IXGBE_SUCCESS;
}
641
642
643
644
645
646
647
648
649
650s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
651 u32 device_type, u16 *phy_data)
652{
653 s32 status;
654 u32 gssr = hw->phy.phy_semaphore_mask;
655
656 DEBUGFUNC("ixgbe_read_phy_reg_generic");
657
658 if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
659 return IXGBE_ERR_SWFW_SYNC;
660
661 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
662
663 hw->mac.ops.release_swfw_sync(hw, gssr);
664
665 return status;
666}
667
668
669
670
671
672
673
674
675
/**
 * ixgbe_write_phy_reg_mdi - Write a PHY register without taking the SWFW lock
 * @hw: pointer to the hardware structure
 * @reg_addr: address of the PHY register to write
 * @device_type: MDIO device type field for the MSCA command
 * @phy_data: 16-bit value to write
 *
 * Loads the data into MSRWD, then issues an address cycle followed by a
 * write cycle through the MSCA command register, polling each for
 * completion. Returns IXGBE_ERR_PHY on poll timeout.
 */
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
			    u32 device_type, u16 phy_data)
{
	u32 i, command;

	/* Put the data in the MDI single read and write register first. */
	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);

	/* Setup and write the address cycle command. */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec whether the address cycle completed;
	 * the MDI Command bit clears when the operation is done. */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		usec_delay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete: setup and write the write command. */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Poll the same way for the write cycle to complete. */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		usec_delay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	return IXGBE_SUCCESS;
}
741
742
743
744
745
746
747
748
749
750s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
751 u32 device_type, u16 phy_data)
752{
753 s32 status;
754 u32 gssr = hw->phy.phy_semaphore_mask;
755
756 DEBUGFUNC("ixgbe_write_phy_reg_generic");
757
758 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
759 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
760 phy_data);
761 hw->mac.ops.release_swfw_sync(hw, gssr);
762 } else {
763 status = IXGBE_ERR_SWFW_SYNC;
764 }
765
766 return status;
767}
768
769
770
771
772
773
774
/**
 * ixgbe_setup_phy_link_generic - Program advertisement and restart auto-neg
 * @hw: pointer to the hardware structure
 *
 * Programs the PHY auto-negotiation advertisement registers from the
 * intersection of hw->phy.autoneg_advertised and the PHY's supported
 * speeds, then restarts auto-negotiation.
 */
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	DEBUGFUNC("ixgbe_setup_phy_link_generic");

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	/* 10G advertisement */
	hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &autoneg_reg);

	autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_10GB_FULL))
		autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;

	hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			      autoneg_reg);

	hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &autoneg_reg);

	if (hw->mac.type == ixgbe_mac_X550) {
		/* 5G advertisement (X550 only) */
		autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_5GB_FULL))
			autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;

		/* 2.5G advertisement (X550 only) */
		autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised &
		     IXGBE_LINK_SPEED_2_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
			autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
	}

	/* 1G advertisement (shares the vendor provisioning register) */
	autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_1GB_FULL))
		autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;

	hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			      autoneg_reg);

	/* 100M advertisement: full duplex only; half duplex always cleared */
	hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &autoneg_reg);

	autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
			 IXGBE_MII_100BASE_T_ADVERTISE_HALF);
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
	    (speed & IXGBE_LINK_SPEED_100_FULL))
		autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;

	hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			      autoneg_reg);

	ixgbe_restart_auto_neg(hw);
	return status;
}
847
848
849
850
851
852
853
854s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
855 ixgbe_link_speed speed,
856 bool autoneg_wait_to_complete)
857{
858 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
859
860 DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
861
862
863
864
865
866 hw->phy.autoneg_advertised = 0;
867
868 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
869 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
870
871 if (speed & IXGBE_LINK_SPEED_5GB_FULL)
872 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
873
874 if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
875 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
876
877 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
878 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
879
880 if (speed & IXGBE_LINK_SPEED_100_FULL)
881 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
882
883 if (speed & IXGBE_LINK_SPEED_10_FULL)
884 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
885
886
887 ixgbe_setup_phy_link(hw);
888
889 return IXGBE_SUCCESS;
890}
891
892
893
894
895
896
897
898
899static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
900{
901 s32 status;
902 u16 speed_ability;
903
904 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
905 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
906 &speed_ability);
907 if (status)
908 return status;
909
910 if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
911 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
912 if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
913 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
914 if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
915 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
916
917 switch (hw->mac.type) {
918 case ixgbe_mac_X550EM_x:
919 case ixgbe_mac_X550EM_a:
920 hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
921 break;
922 default:
923 break;
924 }
925
926 return status;
927}
928
929
930
931
932
933
934
935s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
936 ixgbe_link_speed *speed,
937 bool *autoneg)
938{
939 s32 status = IXGBE_SUCCESS;
940
941 DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
942
943 *autoneg = true;
944 if (!hw->phy.speeds_supported)
945 status = ixgbe_get_copper_speeds_supported(hw);
946
947 *speed = hw->phy.speeds_supported;
948 return status;
949}
950
951
952
953
954
955
956
957
958
959
/**
 * ixgbe_check_phy_link_tnx - Determine link and speed status for TNX PHY
 * @hw: pointer to the hardware structure
 * @speed: receives the detected link speed
 * @link_up: receives the link status
 *
 * Polls the vendor-specific status register (up to max_time_out reads with
 * a 10 usec delay) for link. Speed defaults to 10G; the speed-status bit
 * downgrades it to 1G. Returns the status of the last register read.
 */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
			     bool *link_up)
{
	s32 status = IXGBE_SUCCESS;
	u32 time_out;
	u32 max_time_out = 10;
	u16 phy_link = 0;
	u16 phy_speed = 0;
	u16 phy_data = 0;

	DEBUGFUNC("ixgbe_check_phy_link_tnx");

	/* Default: link down, 10G. */
	*link_up = false;
	*speed = IXGBE_LINK_SPEED_10GB_FULL;

	/* Check current speed and link status of the PHY register.
	 * This is a vendor-specific register and may have to be changed
	 * for other copper PHYs. */
	for (time_out = 0; time_out < max_time_out; time_out++) {
		usec_delay(10);
		status = hw->phy.ops.read_reg(hw,
					IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
					IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
					&phy_data);
		phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
		phy_speed = phy_data &
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
			*link_up = true;
			/* Speed-status bit set means 1G; otherwise the 10G
			 * default stands. */
			if (phy_speed ==
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
				*speed = IXGBE_LINK_SPEED_1GB_FULL;
			break;
		}
	}

	return status;
}
1001
1002
1003
1004
1005
1006
1007
/**
 * ixgbe_setup_phy_link_tnx - Set advertisement and restart auto-neg (TNX)
 * @hw: pointer to the hardware structure
 *
 * For each speed the PHY supports, programs the corresponding TNX
 * advertisement register from hw->phy.autoneg_advertised, then restarts
 * auto-negotiation. Always returns IXGBE_SUCCESS.
 */
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	DEBUGFUNC("ixgbe_setup_phy_link_tnx");

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		/* 10G: read-modify-write the 10GBASE-T control register. */
		hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
				     &autoneg_reg);

		autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;

		hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		/* 1G: advertised through the XNP transmit register. */
		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
				     &autoneg_reg);

		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;

		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_100_FULL) {
		/* 100M: standard auto-neg advertisement register. */
		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
				     &autoneg_reg);

		autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;

		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
				      autoneg_reg);
	}

	ixgbe_restart_auto_neg(hw);
	return status;
}
1067
1068
1069
1070
1071
1072
1073s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
1074 u16 *firmware_version)
1075{
1076 s32 status;
1077
1078 DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx");
1079
1080 status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
1081 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1082 firmware_version);
1083
1084 return status;
1085}
1086
1087
1088
1089
1090
1091
1092s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
1093 u16 *firmware_version)
1094{
1095 s32 status;
1096
1097 DEBUGFUNC("ixgbe_get_phy_firmware_version_generic");
1098
1099 status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
1100 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1101 firmware_version);
1102
1103 return status;
1104}
1105
1106
1107
1108
1109
/**
 * ixgbe_reset_phy_nl - Reset the PHY and run the EEPROM init sequence (NL)
 * @hw: pointer to the hardware structure
 *
 * Issues a PHY_XS soft reset and polls for completion, then interprets the
 * SFP init sequence stored in the EEPROM: a stream of words whose control
 * field selects DELAY (sleep), DATA (a run of register writes), or CONTROL
 * (SOL/EOL markers). Returns IXGBE_ERR_PHY on reset timeout, bad script
 * content, or any EEPROM read failure.
 */
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
{
	u16 phy_offset, control, eword, edata, block_crc;
	bool end_data = false;
	u16 list_offset, data_offset;
	u16 phy_data = 0;
	s32 ret_val = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_reset_phy_nl");

	/* Blocked by manageability firmware - bail. */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
			     IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);

	/* Set the reset bit on top of the current control value. */
	hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
			      IXGBE_MDIO_PHY_XS_DEV_TYPE,
			      (phy_data | IXGBE_MDIO_PHY_XS_RESET));

	/* Poll for the reset bit to self-clear: up to 100 x 10 ms. */
	for (i = 0; i < 100; i++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
				     IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
		if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
			break;
		msec_delay(10);
	}

	if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
		DEBUGOUT("PHY reset did not complete.\n");
		ret_val = IXGBE_ERR_PHY;
		goto out;
	}

	/* Locate the init sequence for this SFP module in the EEPROM. */
	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
						      &data_offset);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* NOTE(review): block_crc is read but not validated here. */
	ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
	data_offset++;
	while (!end_data) {
		/* Each word: control field in the high bits, data in the
		 * low bits. */
		ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
		if (ret_val)
			goto err_eeprom;
		control = (eword & IXGBE_CONTROL_MASK_NL) >>
			  IXGBE_CONTROL_SHIFT_NL;
		edata = eword & IXGBE_DATA_MASK_NL;
		switch (control) {
		case IXGBE_DELAY_NL:
			data_offset++;
			DEBUGOUT1("DELAY: %d MS\n", edata);
			msec_delay(edata);
			break;
		case IXGBE_DATA_NL:
			/* edata = number of words to write; the next word
			 * is the starting PHY register offset. */
			DEBUGOUT("DATA:\n");
			data_offset++;
			ret_val = hw->eeprom.ops.read(hw, data_offset,
						      &phy_offset);
			if (ret_val)
				goto err_eeprom;
			data_offset++;
			for (i = 0; i < edata; i++) {
				ret_val = hw->eeprom.ops.read(hw, data_offset,
							      &eword);
				if (ret_val)
					goto err_eeprom;
				hw->phy.ops.write_reg(hw, phy_offset,
						      IXGBE_TWINAX_DEV, eword);
				DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword,
					  phy_offset);
				data_offset++;
				phy_offset++;
			}
			break;
		case IXGBE_CONTROL_NL:
			data_offset++;
			DEBUGOUT("CONTROL:\n");
			if (edata == IXGBE_CONTROL_EOL_NL) {
				/* End-of-list terminates the script. */
				DEBUGOUT("EOL\n");
				end_data = true;
			} else if (edata == IXGBE_CONTROL_SOL_NL) {
				DEBUGOUT("SOL\n");
			} else {
				DEBUGOUT("Bad control value\n");
				ret_val = IXGBE_ERR_PHY;
				goto out;
			}
			break;
		default:
			DEBUGOUT("Bad control type\n");
			ret_val = IXGBE_ERR_PHY;
			goto out;
		}
	}

out:
	return ret_val;

err_eeprom:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}
1221
1222
1223
1224
1225
1226
1227
1228s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
1229{
1230 s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
1231
1232 DEBUGFUNC("ixgbe_identify_module_generic");
1233
1234 switch (hw->mac.ops.get_media_type(hw)) {
1235 case ixgbe_media_type_fiber:
1236 status = ixgbe_identify_sfp_module_generic(hw);
1237 break;
1238
1239 case ixgbe_media_type_fiber_qsfp:
1240 status = ixgbe_identify_qsfp_module_generic(hw);
1241 break;
1242
1243 default:
1244 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1245 status = IXGBE_ERR_SFP_NOT_PRESENT;
1246 break;
1247 }
1248
1249 return status;
1250}
1251
1252
1253
1254
1255
1256
1257
1258s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1259{
1260 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1261 u32 vendor_oui = 0;
1262 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1263 u8 identifier = 0;
1264 u8 comp_codes_1g = 0;
1265 u8 comp_codes_10g = 0;
1266 u8 oui_bytes[3] = {0, 0, 0};
1267 u8 cable_tech = 0;
1268 u8 cable_spec = 0;
1269 u16 enforce_sfp = 0;
1270
1271 DEBUGFUNC("ixgbe_identify_sfp_module_generic");
1272
1273 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
1274 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1275 status = IXGBE_ERR_SFP_NOT_PRESENT;
1276 goto out;
1277 }
1278
1279
1280 hw->mac.ops.set_lan_id(hw);
1281
1282 status = hw->phy.ops.read_i2c_eeprom(hw,
1283 IXGBE_SFF_IDENTIFIER,
1284 &identifier);
1285
1286 if (status != IXGBE_SUCCESS)
1287 goto err_read_i2c_eeprom;
1288
1289 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
1290 hw->phy.type = ixgbe_phy_sfp_unsupported;
1291 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1292 } else {
1293 status = hw->phy.ops.read_i2c_eeprom(hw,
1294 IXGBE_SFF_1GBE_COMP_CODES,
1295 &comp_codes_1g);
1296
1297 if (status != IXGBE_SUCCESS)
1298 goto err_read_i2c_eeprom;
1299
1300 status = hw->phy.ops.read_i2c_eeprom(hw,
1301 IXGBE_SFF_10GBE_COMP_CODES,
1302 &comp_codes_10g);
1303
1304 if (status != IXGBE_SUCCESS)
1305 goto err_read_i2c_eeprom;
1306 status = hw->phy.ops.read_i2c_eeprom(hw,
1307 IXGBE_SFF_CABLE_TECHNOLOGY,
1308 &cable_tech);
1309
1310 if (status != IXGBE_SUCCESS)
1311 goto err_read_i2c_eeprom;
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329 if (hw->mac.type == ixgbe_mac_82598EB) {
1330 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1331 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
1332 else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
1333 hw->phy.sfp_type = ixgbe_sfp_type_sr;
1334 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
1335 hw->phy.sfp_type = ixgbe_sfp_type_lr;
1336 else
1337 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1338 } else {
1339 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
1340 if (hw->bus.lan_id == 0)
1341 hw->phy.sfp_type =
1342 ixgbe_sfp_type_da_cu_core0;
1343 else
1344 hw->phy.sfp_type =
1345 ixgbe_sfp_type_da_cu_core1;
1346 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
1347 hw->phy.ops.read_i2c_eeprom(
1348 hw, IXGBE_SFF_CABLE_SPEC_COMP,
1349 &cable_spec);
1350 if (cable_spec &
1351 IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
1352 if (hw->bus.lan_id == 0)
1353 hw->phy.sfp_type =
1354 ixgbe_sfp_type_da_act_lmt_core0;
1355 else
1356 hw->phy.sfp_type =
1357 ixgbe_sfp_type_da_act_lmt_core1;
1358 } else {
1359 hw->phy.sfp_type =
1360 ixgbe_sfp_type_unknown;
1361 }
1362 } else if (comp_codes_10g &
1363 (IXGBE_SFF_10GBASESR_CAPABLE |
1364 IXGBE_SFF_10GBASELR_CAPABLE)) {
1365 if (hw->bus.lan_id == 0)
1366 hw->phy.sfp_type =
1367 ixgbe_sfp_type_srlr_core0;
1368 else
1369 hw->phy.sfp_type =
1370 ixgbe_sfp_type_srlr_core1;
1371 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
1372 if (hw->bus.lan_id == 0)
1373 hw->phy.sfp_type =
1374 ixgbe_sfp_type_1g_cu_core0;
1375 else
1376 hw->phy.sfp_type =
1377 ixgbe_sfp_type_1g_cu_core1;
1378 } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
1379 if (hw->bus.lan_id == 0)
1380 hw->phy.sfp_type =
1381 ixgbe_sfp_type_1g_sx_core0;
1382 else
1383 hw->phy.sfp_type =
1384 ixgbe_sfp_type_1g_sx_core1;
1385 } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
1386 if (hw->bus.lan_id == 0)
1387 hw->phy.sfp_type =
1388 ixgbe_sfp_type_1g_lx_core0;
1389 else
1390 hw->phy.sfp_type =
1391 ixgbe_sfp_type_1g_lx_core1;
1392 } else if (comp_codes_1g & IXGBE_SFF_1GBASELHA_CAPABLE) {
1393 if (hw->bus.lan_id == 0)
1394 hw->phy.sfp_type =
1395 ixgbe_sfp_type_1g_lha_core0;
1396 else
1397 hw->phy.sfp_type =
1398 ixgbe_sfp_type_1g_lha_core1;
1399 } else {
1400 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1401 }
1402 }
1403
1404 if (hw->phy.sfp_type != stored_sfp_type)
1405 hw->phy.sfp_setup_needed = true;
1406
1407
1408 hw->phy.multispeed_fiber = false;
1409 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1410 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1411 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1412 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1413 hw->phy.multispeed_fiber = true;
1414
1415
1416 if (hw->phy.type != ixgbe_phy_nl) {
1417 hw->phy.id = identifier;
1418 status = hw->phy.ops.read_i2c_eeprom(hw,
1419 IXGBE_SFF_VENDOR_OUI_BYTE0,
1420 &oui_bytes[0]);
1421
1422 if (status != IXGBE_SUCCESS)
1423 goto err_read_i2c_eeprom;
1424
1425 status = hw->phy.ops.read_i2c_eeprom(hw,
1426 IXGBE_SFF_VENDOR_OUI_BYTE1,
1427 &oui_bytes[1]);
1428
1429 if (status != IXGBE_SUCCESS)
1430 goto err_read_i2c_eeprom;
1431
1432 status = hw->phy.ops.read_i2c_eeprom(hw,
1433 IXGBE_SFF_VENDOR_OUI_BYTE2,
1434 &oui_bytes[2]);
1435
1436 if (status != IXGBE_SUCCESS)
1437 goto err_read_i2c_eeprom;
1438
1439 vendor_oui =
1440 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1441 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1442 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1443
1444 switch (vendor_oui) {
1445 case IXGBE_SFF_VENDOR_OUI_TYCO:
1446 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1447 hw->phy.type =
1448 ixgbe_phy_sfp_passive_tyco;
1449 break;
1450 case IXGBE_SFF_VENDOR_OUI_FTL:
1451 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1452 hw->phy.type = ixgbe_phy_sfp_ftl_active;
1453 else
1454 hw->phy.type = ixgbe_phy_sfp_ftl;
1455 break;
1456 case IXGBE_SFF_VENDOR_OUI_AVAGO:
1457 hw->phy.type = ixgbe_phy_sfp_avago;
1458 break;
1459 case IXGBE_SFF_VENDOR_OUI_INTEL:
1460 hw->phy.type = ixgbe_phy_sfp_intel;
1461 break;
1462 default:
1463 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1464 hw->phy.type =
1465 ixgbe_phy_sfp_passive_unknown;
1466 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1467 hw->phy.type =
1468 ixgbe_phy_sfp_active_unknown;
1469 else
1470 hw->phy.type = ixgbe_phy_sfp_unknown;
1471 break;
1472 }
1473 }
1474
1475
1476 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
1477 IXGBE_SFF_DA_ACTIVE_CABLE)) {
1478 status = IXGBE_SUCCESS;
1479 goto out;
1480 }
1481
1482
1483 if (comp_codes_10g == 0 &&
1484 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1485 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1486 hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core0 ||
1487 hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core1 ||
1488 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1489 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1490 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1491 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1492 hw->phy.type = ixgbe_phy_sfp_unsupported;
1493 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1494 goto out;
1495 }
1496
1497
1498 if (hw->mac.type == ixgbe_mac_82598EB) {
1499 status = IXGBE_SUCCESS;
1500 goto out;
1501 }
1502
1503 ixgbe_get_device_caps(hw, &enforce_sfp);
1504 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
1505 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1506 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1507 hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core0 ||
1508 hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core1 ||
1509 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1510 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1511 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1512 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1513
1514 if (hw->phy.type == ixgbe_phy_sfp_intel) {
1515 status = IXGBE_SUCCESS;
1516 } else {
1517 if (hw->allow_unsupported_sfp == true) {
1518 EWARN(hw,
1519 "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
1520 "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
1521 "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1522 status = IXGBE_SUCCESS;
1523 } else {
1524 DEBUGOUT("SFP+ module not supported\n");
1525 hw->phy.type =
1526 ixgbe_phy_sfp_unsupported;
1527 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1528 }
1529 }
1530 } else {
1531 status = IXGBE_SUCCESS;
1532 }
1533 }
1534
1535out:
1536 return status;
1537
1538err_read_i2c_eeprom:
1539 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1540 if (hw->phy.type != ixgbe_phy_nl) {
1541 hw->phy.id = 0;
1542 hw->phy.type = ixgbe_phy_unknown;
1543 }
1544 return IXGBE_ERR_SFP_NOT_PRESENT;
1545}
1546
1547
1548
1549
1550
1551
1552
/**
 * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current SFP/QSFP.
 * Re-runs module identification first; returns IXGBE_PHYSICAL_LAYER_UNKNOWN
 * when no module is present or the PHY type is not one of the handled cases.
 */
u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
{
	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u8 comp_codes_10g = 0;
	u8 comp_codes_1g = 0;

	DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic");

	hw->phy.ops.identify_sfp(hw);
	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		return physical_layer;

	switch (hw->phy.type) {
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case ixgbe_phy_sfp_ftl_active:
	case ixgbe_phy_sfp_active_unknown:
	case ixgbe_phy_qsfp_active_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
		break;
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		/* Optical SFP: read the SFF compliance code bytes and test
		 * 10G SR, 10G LR, 1000BASE-T, 1000BASE-SX in that priority
		 * order. Read status is not checked here; the zero-initialized
		 * comp codes simply leave the layer unknown on failure.
		 */
		hw->phy.ops.read_i2c_eeprom(hw,
			IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
		hw->phy.ops.read_i2c_eeprom(hw,
			IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
		else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
		break;
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* QSFP uses its own 10G compliance byte; only SR/LR handled */
		hw->phy.ops.read_i2c_eeprom(hw,
			IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

	return physical_layer;
}
1608
1609
1610
1611
1612
1613
1614
/**
 * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
 * @hw: pointer to hardware structure
 *
 * Searches for and identifies the QSFP module, and assigns the
 * appropriate PHY type (hw->phy.type) and SFP type (hw->phy.sfp_type).
 *
 * Returns IXGBE_SUCCESS on a recognized module,
 * IXGBE_ERR_SFP_NOT_PRESENT when the media type is not QSFP or an EEPROM
 * read fails, and IXGBE_ERR_SFP_NOT_SUPPORTED for unrecognized modules.
 */
s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u32 vendor_oui = 0;
	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
	u8 identifier = 0;
	u8 comp_codes_1g = 0;
	u8 comp_codes_10g = 0;
	u8 oui_bytes[3] = {0, 0, 0};
	u16 enforce_sfp = 0;
	u8 connector = 0;
	u8 cable_length = 0;
	u8 device_tech = 0;
	bool active_cable = false;

	DEBUGFUNC("ixgbe_identify_qsfp_module_generic");

	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		status = IXGBE_ERR_SFP_NOT_PRESENT;
		goto out;
	}

	/* LAN ID is needed below for the per-port (core0/core1) sfp_type */
	hw->mac.ops.set_lan_id(hw);

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
					     &identifier);

	if (status != IXGBE_SUCCESS)
		goto err_read_i2c_eeprom;

	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
		goto out;
	}

	hw->phy.id = identifier;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
					     &comp_codes_10g);

	if (status != IXGBE_SUCCESS)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
					     &comp_codes_1g);

	if (status != IXGBE_SUCCESS)
		goto err_read_i2c_eeprom;

	if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
		/* Passive direct-attach copper */
		hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
	} else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
				     IXGBE_SFF_10GBASELR_CAPABLE)) {
		/* Optical SR/LR module */
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
	} else {
		if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
			active_cable = true;

		if (!active_cable) {
			/* Some active cables do not set the active-cable
			 * compliance bit; infer one from a non-separable
			 * connector with a non-zero length and an 850nm
			 * VCSEL transmitter technology.
			 */
			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CONNECTOR,
					&connector);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CABLE_LENGTH,
					&cable_length);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_DEVICE_TECH,
					&device_tech);

			if ((connector ==
				     IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
			    (cable_length > 0) &&
			    ((device_tech >> 4) ==
				     IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
				active_cable = true;
		}

		if (active_cable) {
			hw->phy.type = ixgbe_phy_qsfp_active_unknown;
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core1;
		} else {
			/* Unsupported module type */
			hw->phy.type = ixgbe_phy_sfp_unsupported;
			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
	}

	if (hw->phy.sfp_type != stored_sfp_type)
		hw->phy.sfp_setup_needed = true;

	/* Determine if the QSFP+ PHY is dual speed (1G capable alongside
	 * the matching 10G code) */
	hw->phy.multispeed_fiber = false;
	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
		hw->phy.multispeed_fiber = true;

	/* Determine PHY vendor for optical modules only */
	if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
			      IXGBE_SFF_10GBASELR_CAPABLE)) {
		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
					&oui_bytes[0]);

		if (status != IXGBE_SUCCESS)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
					&oui_bytes[1]);

		if (status != IXGBE_SUCCESS)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
					&oui_bytes[2]);

		if (status != IXGBE_SUCCESS)
			goto err_read_i2c_eeprom;

		vendor_oui =
		  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
		   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
		   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));

		if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
			hw->phy.type = ixgbe_phy_qsfp_intel;
		else
			hw->phy.type = ixgbe_phy_qsfp_unknown;

		/* Non-Intel optics are rejected unless the device caps allow
		 * any SFP, or the user opted in via allow_unsupported_sfp. */
		ixgbe_get_device_caps(hw, &enforce_sfp);
		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
			/* Make sure we're a supported PHY type */
			if (hw->phy.type == ixgbe_phy_qsfp_intel) {
				status = IXGBE_SUCCESS;
			} else {
				if (hw->allow_unsupported_sfp == true) {
					EWARN(hw,
						"WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
						"Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
						"Intel Corporation is not responsible for any harm caused by using untested modules.\n");
					status = IXGBE_SUCCESS;
				} else {
					DEBUGOUT("QSFP module not supported\n");
					hw->phy.type =
						ixgbe_phy_sfp_unsupported;
					status = IXGBE_ERR_SFP_NOT_SUPPORTED;
				}
			}
		} else {
			status = IXGBE_SUCCESS;
		}
	}

out:
	return status;

err_read_i2c_eeprom:
	/* EEPROM unreadable: treat the module as absent */
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	hw->phy.id = 0;
	hw->phy.type = ixgbe_phy_unknown;

	return IXGBE_ERR_SFP_NOT_PRESENT;
}
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
/**
 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
 * @hw: pointer to hardware structure
 * @list_offset: offset to the SFP ID list
 * @data_offset: offset to the SFP data block
 *
 * Walks the SFP module ID list in the EEPROM (anchored at
 * IXGBE_PHY_INIT_OFFSET_NL) looking for an entry matching this module's
 * sfp_type, and returns the offsets of the matching entry and its init
 * sequence data. Returns IXGBE_ERR_SFP_NOT_SUPPORTED /
 * IXGBE_ERR_SFP_NOT_PRESENT / IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT /
 * IXGBE_ERR_PHY for the respective failure modes.
 */
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
					u16 *list_offset,
					u16 *data_offset)
{
	u16 sfp_id;
	u16 sfp_type = hw->phy.sfp_type;

	DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets");

	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		return IXGBE_ERR_SFP_NOT_PRESENT;

	if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	/* Limiting active cables and 1G copper/fiber modules share the
	 * SR/LR init sequence, so fold those types onto the srlr entry
	 * for the corresponding core before searching the list.
	 */
	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_lha_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_sx_core0)
		sfp_type = ixgbe_sfp_type_srlr_core0;
	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_lha_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_sx_core1)
		sfp_type = ixgbe_sfp_type_srlr_core1;

	/* Read offset to the PHY init contents */
	if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed",
			      IXGBE_PHY_INIT_OFFSET_NL);
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
	}

	if ((!*list_offset) || (*list_offset == 0xFFFF))
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;

	/* Shift offset to first ID word */
	(*list_offset)++;

	/* Walk the list: each entry is an (ID word, data-offset word) pair,
	 * terminated by IXGBE_PHY_INIT_END_NL.
	 */
	if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
		goto err_phy;

	while (sfp_id != IXGBE_PHY_INIT_END_NL) {
		if (sfp_id == sfp_type) {
			(*list_offset)++;
			if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
				goto err_phy;
			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
				DEBUGOUT("SFP+ module not supported\n");
				return IXGBE_ERR_SFP_NOT_SUPPORTED;
			} else {
				break;
			}
		} else {
			/* Skip this entry's data-offset word to the next ID */
			(*list_offset) += 2;
			if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
				goto err_phy;
		}
	}

	if (sfp_id == IXGBE_PHY_INIT_END_NL) {
		DEBUGOUT("No matching SFP+ module found\n");
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	return IXGBE_SUCCESS;

err_phy:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", *list_offset);
	return IXGBE_ERR_PHY;
}
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1908 u8 *eeprom_data)
1909{
1910 DEBUGFUNC("ixgbe_read_i2c_eeprom_generic");
1911
1912 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1913 IXGBE_I2C_EEPROM_DEV_ADDR,
1914 eeprom_data);
1915}
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1926 u8 *sff8472_data)
1927{
1928 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1929 IXGBE_I2C_EEPROM_DEV_ADDR2,
1930 sff8472_data);
1931}
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1942 u8 eeprom_data)
1943{
1944 DEBUGFUNC("ixgbe_write_i2c_eeprom_generic");
1945
1946 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1947 IXGBE_I2C_EEPROM_DEV_ADDR,
1948 eeprom_data);
1949}
1950
1951
1952
1953
1954
1955
1956
1957STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
1958{
1959 if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
1960 offset == IXGBE_SFF_IDENTIFIER &&
1961 hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1962 return true;
1963 return false;
1964}
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
/**
 * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: I2C device address (write form; the read phase ORs in bit 0)
 * @data: value read
 * @lock: true to take the SWFW semaphore (phy_semaphore_mask) per attempt
 *
 * Performs a byte read over the bit-banged I2C interface: START, device
 * address (write), register offset, repeated START, device address (read),
 * one data byte, NACK, STOP. The whole transaction is retried on failure.
 */
STATIC s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					   u8 dev_addr, u8 *data, bool lock)
{
	s32 status;
	u32 max_retry = 10;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	bool nack = 1;	/* master NACKs the single (last) data byte */
	*data = 0;

	DEBUGFUNC("ixgbe_read_i2c_byte_generic");

	if (hw->mac.type >= ixgbe_mac_X550)
		max_retry = 3;
	/* An SFP presence probe gets its own retry budget */
	if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
		max_retry = IXGBE_SFP_DETECT_RETRIES;

	do {
		/* Semaphore is taken and released per attempt */
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;

		ixgbe_i2c_start(hw);

		/* Device address with write indication */
		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != IXGBE_SUCCESS)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != IXGBE_SUCCESS)
			goto fail;

		/* Register offset within the device */
		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != IXGBE_SUCCESS)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != IXGBE_SUCCESS)
			goto fail;

		/* Repeated START to switch direction */
		ixgbe_i2c_start(hw);

		/* Device address with read indication (R/W bit set) */
		status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
		if (status != IXGBE_SUCCESS)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != IXGBE_SUCCESS)
			goto fail;

		ixgbe_clock_in_i2c_byte(hw, data);

		status = ixgbe_clock_out_i2c_bit(hw, nack);
		if (status != IXGBE_SUCCESS)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return IXGBE_SUCCESS;

fail:
		/* Attempt to recover a wedged bus, drop the semaphore,
		 * wait, then retry the whole transaction.
		 */
		ixgbe_i2c_bus_clear(hw);
		if (lock) {
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
			msec_delay(100);
		}
		if (retry < max_retry)
			DEBUGOUT("I2C byte read error - Retrying.\n");
		else
			DEBUGOUT("I2C byte read error.\n");
		retry++;
	} while (retry <= max_retry);

	return status;
}
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2066 u8 dev_addr, u8 *data)
2067{
2068 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2069 data, true);
2070}
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2083 u8 dev_addr, u8 *data)
2084{
2085 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2086 data, false);
2087}
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
/**
 * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: I2C device address
 * @data: value to write
 * @lock: true to take the SWFW semaphore (phy_semaphore_mask)
 *
 * Performs a byte write over the bit-banged I2C interface: START, device
 * address, register offset, data byte, each followed by an ACK check.
 * Unlike the read path, the semaphore is acquired once before the retry
 * loop and released after it (or on the success path).
 */
STATIC s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					    u8 dev_addr, u8 data, bool lock)
{
	s32 status;
	u32 max_retry = 1;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;

	DEBUGFUNC("ixgbe_write_i2c_byte_generic");

	if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) !=
	    IXGBE_SUCCESS)
		return IXGBE_ERR_SWFW_SYNC;

	do {
		ixgbe_i2c_start(hw);

		/* Device address with write indication */
		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != IXGBE_SUCCESS)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != IXGBE_SUCCESS)
			goto fail;

		/* Register offset within the device */
		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != IXGBE_SUCCESS)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != IXGBE_SUCCESS)
			goto fail;

		/* The data byte itself */
		status = ixgbe_clock_out_i2c_byte(hw, data);
		if (status != IXGBE_SUCCESS)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != IXGBE_SUCCESS)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return IXGBE_SUCCESS;

fail:
		/* Attempt to recover a wedged bus before retrying */
		ixgbe_i2c_bus_clear(hw);
		if (retry < max_retry)
			DEBUGOUT("I2C byte write error - Retrying.\n");
		else
			DEBUGOUT("I2C byte write error.\n");
		retry++;
	} while (retry <= max_retry);

	if (lock)
		hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	return status;
}
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2172 u8 dev_addr, u8 data)
2173{
2174 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2175 data, true);
2176}
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2189 u8 dev_addr, u8 data)
2190{
2191 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2192 data, false);
2193}
2194
2195
2196
2197
2198
2199
2200
2201
/**
 * ixgbe_i2c_start - Sets I2C start condition
 * @hw: pointer to hardware structure
 *
 * Sets an I2C start condition: a high->low transition on SDA while SCL is
 * high. Also enables the bit-bang mode bit where the MAC defines one.
 */
STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));

	DEBUGFUNC("ixgbe_i2c_start");

	i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw);

	/* Start condition must begin with data and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for start condition */
	usec_delay(IXGBE_I2C_T_SU_STA);

	/* SDA falls while SCL is high: the START */
	ixgbe_set_i2c_data(hw, &i2cctl, 0);

	/* Hold time for start condition */
	usec_delay(IXGBE_I2C_T_HD_STA);

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	usec_delay(IXGBE_I2C_T_LOW);

}
2228
2229
2230
2231
2232
2233
2234
2235
2236
/**
 * ixgbe_i2c_stop - Sets I2C stop condition
 * @hw: pointer to hardware structure
 *
 * Sets an I2C stop condition: a low->high transition on SDA while SCL is
 * high. Afterwards, where the MAC defines the relevant bits, bit-bang mode
 * is disabled and both data and clock output drivers are released (OE_N
 * bits set).
 */
STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
	u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw);

	DEBUGFUNC("ixgbe_i2c_stop");

	/* Stop condition must begin with data low and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 0);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for stop condition */
	usec_delay(IXGBE_I2C_T_SU_STO);

	/* SDA rises while SCL is high: the STOP */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);

	/* Bus free time between stop and start */
	usec_delay(IXGBE_I2C_T_BUF);

	if (bb_en_bit || data_oe_bit || clk_oe_bit) {
		i2cctl &= ~bb_en_bit;
		i2cctl |= data_oe_bit | clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
}
2265
2266
2267
2268
2269
2270
2271
2272
2273STATIC void ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
2274{
2275 s32 i;
2276 bool bit = 0;
2277
2278 DEBUGFUNC("ixgbe_clock_in_i2c_byte");
2279
2280 *data = 0;
2281 for (i = 7; i >= 0; i--) {
2282 ixgbe_clock_in_i2c_bit(hw, &bit);
2283 *data |= bit << i;
2284 }
2285}
2286
2287
2288
2289
2290
2291
2292
2293
2294STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
2295{
2296 s32 status = IXGBE_SUCCESS;
2297 s32 i;
2298 u32 i2cctl;
2299 bool bit;
2300
2301 DEBUGFUNC("ixgbe_clock_out_i2c_byte");
2302
2303 for (i = 7; i >= 0; i--) {
2304 bit = (data >> i) & 0x1;
2305 status = ixgbe_clock_out_i2c_bit(hw, bit);
2306
2307 if (status != IXGBE_SUCCESS)
2308 break;
2309 }
2310
2311
2312 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
2313 i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
2314 i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
2315 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
2316 IXGBE_WRITE_FLUSH(hw);
2317
2318 return status;
2319}
2320
2321
2322
2323
2324
2325
2326
/**
 * ixgbe_get_i2c_ack - Polls for the device ACK
 * @hw: pointer to hardware structure
 *
 * Clocks in the ACK bit: releases SDA (where an OE_N bit exists), raises
 * SCL, then polls SDA up to 10 times at 1 us intervals. SDA low means the
 * device acknowledged; if SDA stays high, returns IXGBE_ERR_I2C.
 */
STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
	s32 status = IXGBE_SUCCESS;
	u32 i = 0;
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
	u32 timeout = 10;
	bool ack = 1;

	DEBUGFUNC("ixgbe_get_i2c_ack");

	if (data_oe_bit) {
		/* Tri-state SDA so the device can pull it low */
		i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock */
	usec_delay(IXGBE_I2C_T_HIGH);

	/* Poll for ACK: a low SDA during the clock-high period means the
	 * device acknowledged.
	 */
	for (i = 0; i < timeout; i++) {
		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
		ack = ixgbe_get_i2c_data(hw, &i2cctl);

		usec_delay(1);
		if (!ack)
			break;
	}

	if (ack) {
		DEBUGOUT("I2C ack was not received.\n");
		status = IXGBE_ERR_I2C;
	}

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	usec_delay(IXGBE_I2C_T_LOW);

	return status;
}
2372
2373
2374
2375
2376
2377
2378
2379
/**
 * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
 * @hw: pointer to hardware structure
 * @data: read data value
 *
 * Releases SDA (where an OE_N bit exists), raises SCL, and samples SDA
 * during the clock-high period.
 */
STATIC void ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);

	DEBUGFUNC("ixgbe_clock_in_i2c_bit");

	if (data_oe_bit) {
		/* Tri-state SDA so the device can drive it */
		i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock */
	usec_delay(IXGBE_I2C_T_HIGH);

	/* Sample SDA while SCL is high */
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
	*data = ixgbe_get_i2c_data(hw, &i2cctl);

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	usec_delay(IXGBE_I2C_T_LOW);
}
2406
2407
2408
2409
2410
2411
2412
2413
/**
 * ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
 * @hw: pointer to hardware structure
 * @data: data value to write
 *
 * Drives SDA to @data, then pulses SCL high for one bit time.
 * Returns IXGBE_ERR_I2C if the data line could not be set.
 */
STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
	s32 status;
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));

	DEBUGFUNC("ixgbe_clock_out_i2c_bit");

	status = ixgbe_set_i2c_data(hw, &i2cctl, data);
	if (status == IXGBE_SUCCESS) {
		ixgbe_raise_i2c_clk(hw, &i2cctl);

		/* Minimum high period of clock */
		usec_delay(IXGBE_I2C_T_HIGH);

		ixgbe_lower_i2c_clk(hw, &i2cctl);

		/* Minimum low period of clock; data setup for the next bit
		 * is handled inside ixgbe_set_i2c_data().
		 */
		usec_delay(IXGBE_I2C_T_LOW);
	} else {
		/* NOTE(review): ixgbe_set_i2c_data() already returned a
		 * non-success code here, so this reassignment is redundant
		 * but harmless.
		 */
		status = IXGBE_ERR_I2C;
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			     "I2C data was not set to %X\n", data);
	}

	return status;
}
2442
2443
2444
2445
2446
2447
2448
2449
2450
/**
 * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Drives SCL high and waits until the clock pin actually reads back high,
 * which allows for device clock stretching, bounded by
 * IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT attempts.
 */
STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
	u32 i = 0;
	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
	u32 i2cctl_r = 0;

	DEBUGFUNC("ixgbe_raise_i2c_clk");

	if (clk_oe_bit) {
		/* Set the clock output-enable (OE_N) bit first */
		*i2cctl |= clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
	}

	for (i = 0; i < timeout; i++) {
		*i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);

		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);

		/* SCL rise time */
		usec_delay(IXGBE_I2C_T_RISE);

		/* Done once the pin reads back high (device not stretching) */
		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
		if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
			break;
	}
}
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
2488{
2489 DEBUGFUNC("ixgbe_lower_i2c_clk");
2490
2491 *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw));
2492 *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
2493
2494 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
2495 IXGBE_WRITE_FLUSH(hw);
2496
2497
2498 usec_delay(IXGBE_I2C_T_FALL);
2499}
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
/**
 * ixgbe_set_i2c_data - Sets the I2C data bit
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 * @data: I2C data value (0 or 1) to set
 *
 * Sets the SDA output. When writing a 1 the line is read back and
 * verified (returning IXGBE_ERR_I2C on mismatch); a 0 write returns
 * without verification.
 */
STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_set_i2c_data");

	if (data)
		*i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
	else
		*i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw));
	/* Clear the data output-enable (OE_N) bit */
	*i2cctl &= ~data_oe_bit;

	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	/* Data rise/fall and set-up time */
	usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);

	/* A 0 write is not read back for verification */
	if (!data)
		return IXGBE_SUCCESS;
	if (data_oe_bit) {
		/* Set OE_N again before sampling the pin */
		*i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}

	/* Verify the data line reads back as set */
	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
		status = IXGBE_ERR_I2C;
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			     "Error - I2C data was not set to %X.\n",
			     data);
	}

	return status;
}
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
2558{
2559 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
2560 bool data;
2561
2562 DEBUGFUNC("ixgbe_get_i2c_data");
2563
2564 if (data_oe_bit) {
2565 *i2cctl |= data_oe_bit;
2566 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
2567 IXGBE_WRITE_FLUSH(hw);
2568 usec_delay(IXGBE_I2C_T_FALL);
2569 }
2570
2571 if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
2572 data = 1;
2573 else
2574 data = 0;
2575
2576 return data;
2577}
2578
2579
2580
2581
2582
2583
2584
2585
/**
 * ixgbe_i2c_bus_clear - Clears the I2C bus
 * @hw: pointer to hardware structure
 *
 * Recovers a wedged bus: issues a START, releases SDA, then toggles SCL
 * nine times (the standard 9-clock recovery, letting a device stuck
 * mid-transfer finish shifting out), and finally issues START + STOP to
 * return the bus to an idle state.
 */
void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
	u32 i2cctl;
	u32 i;

	DEBUGFUNC("ixgbe_i2c_bus_clear");

	ixgbe_i2c_start(hw);
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));

	/* Release SDA so a device can drive it during the clock pulses */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);

	for (i = 0; i < 9; i++) {
		ixgbe_raise_i2c_clk(hw, &i2cctl);

		/* Minimum high period of clock */
		usec_delay(IXGBE_I2C_T_HIGH);

		ixgbe_lower_i2c_clk(hw, &i2cctl);

		/* Minimum low period of clock */
		usec_delay(IXGBE_I2C_T_LOW);
	}

	ixgbe_i2c_start(hw);

	/* Put the I2C bus back to its default/idle state */
	ixgbe_i2c_stop(hw);
}
2615
2616
2617
2618
2619
2620
2621
2622s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2623{
2624 s32 status = IXGBE_SUCCESS;
2625 u16 phy_data = 0;
2626
2627 DEBUGFUNC("ixgbe_tn_check_overtemp");
2628
2629 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
2630 goto out;
2631
2632
2633 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2634 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
2635
2636 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2637 goto out;
2638
2639 status = IXGBE_ERR_OVERTEMP;
2640 ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature");
2641out:
2642 return status;
2643}
2644
2645
2646
2647
2648
2649
2650s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
2651{
2652 u32 status;
2653 u16 reg;
2654
2655 if (!on && ixgbe_mng_present(hw))
2656 return 0;
2657
2658 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
2659 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2660 ®);
2661 if (status)
2662 return status;
2663
2664 if (on) {
2665 reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2666 } else {
2667 if (ixgbe_check_reset_blocked(hw))
2668 return 0;
2669 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2670 }
2671
2672 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
2673 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2674 reg);
2675 return status;
2676}
2677