1
2
3
4#include <linux/pci.h>
5#include <linux/delay.h>
6#include <linux/iopoll.h>
7#include <linux/sched.h>
8
9#include "ixgbe.h"
10#include "ixgbe_phy.h"
11
/* Forward declarations for the bit-banged I2C primitives and the PHY
 * identification helpers defined later in this file.
 */
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
27
28
29
30
31
32
33
34
35static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
36{
37 s32 status;
38
39 status = ixgbe_clock_out_i2c_byte(hw, byte);
40 if (status)
41 return status;
42 return ixgbe_get_i2c_ack(hw);
43}
44
45
46
47
48
49
50
51
52static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
53{
54 s32 status;
55
56 status = ixgbe_clock_in_i2c_byte(hw, byte);
57 if (status)
58 return status;
59
60 return ixgbe_clock_out_i2c_bit(hw, false);
61}
62
63
64
65
66
67
68
69
/**
 *  ixgbe_ones_comp_byte_add - Perform one's complement addition
 *  @add1: addend 1
 *  @add2: addend 2
 *
 *  Adds two bytes with end-around carry (one's complement arithmetic),
 *  as used for the combined-operation checksum.  Returns the result.
 */
static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
{
	u16 total = (u16)add1 + (u16)add2;

	/* Fold the carry back into the low byte (end-around carry). */
	total = (total >> 8) + (total & 0xFF);
	return (u8)total;
}
77
78
79
80
81
82
83
84
85
86
87
/**
 *  ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
 *  @hw: pointer to the hardware structure
 *  @addr: I2C bus address to read from
 *  @reg: I2C device register to read from
 *  @val: pointer to location to receive read value
 *  @lock: true if to take and release the SWFW semaphore
 *
 *  Bit-bangs a combined write-then-read transaction: the register address
 *  (with checksum) is written, then a repeated start switches to a read
 *  to fetch the 16-bit value.  Retries up to 3 times on failure.
 *  Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 *  taken, or IXGBE_ERR_I2C after exhausting retries.
 */
s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					u16 reg, u16 *val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 3;
	int retry = 0;
	u8 csum_byte;
	u8 high_bits;
	u8 low_bits;
	u8 reg_high;
	u8 csum;

	/* High register-address byte with bit 0 set to flag a read */
	reg_high = ((reg >> 7) & 0xFE) | 1;
	/* One's-complement checksum over the register address bytes */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device address with write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Register address high byte */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Register address low byte */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Checksum of the address bytes */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		/* Repeated start, switching direction to read */
		ixgbe_i2c_start(hw);
		/* Device address with read indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
			goto fail;
		/* Clock in the value, high byte first */
		if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
			goto fail;
		if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
			goto fail;
		/* NOTE(review): the received checksum is clocked in but
		 * never verified against the data.
		 */
		if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
			goto fail;
		/* ACK the final byte, then stop */
		if (ixgbe_clock_out_i2c_bit(hw, false))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		*val = (high_bits << 8) | low_bits;
		return 0;

fail:
		/* A failed transfer may leave the bus hung - clear it */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte read combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte read combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}
155
156
157
158
159
160
161
162
163
164
165
/**
 *  ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
 *  @hw: pointer to the hardware structure
 *  @addr: I2C bus address to write to
 *  @reg: I2C device register to write to
 *  @val: value to write
 *  @lock: true if to take and release the SWFW semaphore
 *
 *  Bit-bangs a combined write transaction: register address, 16-bit value
 *  and a one's-complement checksum.  Note max_retry is 1, so a failed
 *  write is not actually retried (the "- Retry" message is unreachable).
 *  Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 *  taken, or IXGBE_ERR_I2C on failure.
 */
s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					 u16 reg, u16 val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 1;
	int retry = 0;
	u8 reg_high;
	u8 csum;

	/* High register-address byte; bit 0 clear indicates a write */
	reg_high = (reg >> 7) & 0xFE;
	/* Checksum covers the address bytes and both data bytes */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
	csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device address with write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Register address, high then low byte */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Data value, high then low byte */
		if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
			goto fail;
		if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
			goto fail;
		/* Trailing checksum, then stop */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* A failed transfer may leave the bus hung - clear it */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte write combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte write combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}
220
221
222
223
224
225
226
227
228static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
229{
230 u16 ext_ability = 0;
231
232 hw->phy.mdio.prtad = phy_addr;
233 if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0)
234 return false;
235
236 if (ixgbe_get_phy_id(hw))
237 return false;
238
239 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
240
241 if (hw->phy.type == ixgbe_phy_unknown) {
242 hw->phy.ops.read_reg(hw,
243 MDIO_PMA_EXTABLE,
244 MDIO_MMD_PMAPMD,
245 &ext_ability);
246 if (ext_ability &
247 (MDIO_PMA_EXTABLE_10GBT |
248 MDIO_PMA_EXTABLE_1000BT))
249 hw->phy.type = ixgbe_phy_cu_unknown;
250 else
251 hw->phy.type = ixgbe_phy_generic;
252 }
253
254 return true;
255}
256
257
258
259
260
261
262
263s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
264{
265 u32 phy_addr;
266 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
267
268 if (!hw->phy.phy_semaphore_mask) {
269 if (hw->bus.lan_id)
270 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
271 else
272 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
273 }
274
275 if (hw->phy.type != ixgbe_phy_unknown)
276 return 0;
277
278 if (hw->phy.nw_mng_if_sel) {
279 phy_addr = (hw->phy.nw_mng_if_sel &
280 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
281 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
282 if (ixgbe_probe_phy(hw, phy_addr))
283 return 0;
284 else
285 return IXGBE_ERR_PHY_ADDR_INVALID;
286 }
287
288 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
289 if (ixgbe_probe_phy(hw, phy_addr)) {
290 status = 0;
291 break;
292 }
293 }
294
295
296
297
298
299 if (status)
300 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
301
302 return status;
303}
304
305
306
307
308
309
310
311
312
313
314bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
315{
316 u32 mmngc;
317
318
319 if (hw->mac.type == ixgbe_mac_82598EB)
320 return false;
321
322 mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
323 if (mmngc & IXGBE_MMNGC_MNG_VETO) {
324 hw_dbg(hw, "MNG_VETO bit detected.\n");
325 return true;
326 }
327
328 return false;
329}
330
331
332
333
334
335
336static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
337{
338 s32 status;
339 u16 phy_id_high = 0;
340 u16 phy_id_low = 0;
341
342 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
343 &phy_id_high);
344
345 if (!status) {
346 hw->phy.id = (u32)(phy_id_high << 16);
347 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
348 &phy_id_low);
349 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
350 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
351 }
352 return status;
353}
354
355
356
357
358
359
360static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
361{
362 enum ixgbe_phy_type phy_type;
363
364 switch (phy_id) {
365 case TN1010_PHY_ID:
366 phy_type = ixgbe_phy_tn;
367 break;
368 case X550_PHY_ID2:
369 case X550_PHY_ID3:
370 case X540_PHY_ID:
371 phy_type = ixgbe_phy_aq;
372 break;
373 case QT2022_PHY_ID:
374 phy_type = ixgbe_phy_qt;
375 break;
376 case ATH_PHY_ID:
377 phy_type = ixgbe_phy_nl;
378 break;
379 case X557_PHY_ID:
380 case X557_PHY_ID2:
381 phy_type = ixgbe_phy_x550em_ext_t;
382 break;
383 case BCM54616S_E_PHY_ID:
384 phy_type = ixgbe_phy_ext_1g_t;
385 break;
386 default:
387 phy_type = ixgbe_phy_unknown;
388 break;
389 }
390
391 return phy_type;
392}
393
394
395
396
397
/**
 *  ixgbe_reset_phy_generic - Performs a PHY reset
 *  @hw: pointer to hardware structure
 *
 *  Issues a soft reset through the PHY_XS control register and polls
 *  for completion.  Skips the reset when the PHY is shut down for
 *  overtemperature or when management firmware has vetoed resets.
 *  Returns 0 on success, a read error code, or IXGBE_ERR_RESET_FAILED
 *  if the reset never completes.
 */
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u16 ctrl = 0;
	s32 status = 0;

	if (hw->phy.type == ixgbe_phy_unknown)
		status = ixgbe_identify_phy_generic(hw);

	if (status != 0 || hw->phy.type == ixgbe_phy_none)
		return status;

	/* Don't reset the PHY if it was shut down due to overtemp,
	 * unless a reset was explicitly requested in that state.
	 */
	if (!hw->phy.reset_if_overtemp &&
	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
		return 0;

	/* Blocked by MNG FW, so bail */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Soft reset via the PHY_XS control register */
	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_PHYXS,
			      MDIO_CTRL1_RESET);

	/* Poll up to 3 seconds (30 * 100 ms) for the reset to complete,
	 * then allow a short settle delay after completion.
	 */
	for (i = 0; i < 30; i++) {
		msleep(100);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
			/* x550em ext_t signals completion via a vendor
			 * alarm bit rather than MDIO_CTRL1_RESET clearing.
			 */
			status = hw->phy.ops.read_reg(hw,
						  IXGBE_MDIO_TX_VENDOR_ALARMS_3,
						  MDIO_MMD_PMAPMD, &ctrl);
			if (status)
				return status;

			if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
				udelay(2);
				break;
			}
		} else {
			status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
						      MDIO_MMD_PHYXS, &ctrl);
			if (status)
				return status;

			/* Reset bit self-clears when done */
			if (!(ctrl & MDIO_CTRL1_RESET)) {
				udelay(2);
				break;
			}
		}
	}

	if (ctrl & MDIO_CTRL1_RESET) {
		hw_dbg(hw, "PHY reset polling failed to complete.\n");
		return IXGBE_ERR_RESET_FAILED;
	}

	return 0;
}
465
466
467
468
469
470
471
472
473
474
/**
 *  ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register
 *  @hw: pointer to hardware structure
 *  @reg_addr: 32 bit address of PHY register to read
 *  @device_type: 5 bit device type
 *  @phy_data: Pointer to read data from PHY register
 *
 *  Performs a clause-45 style access on the MSCA/MSRWD interface without
 *  taking the SWFW lock: an address cycle followed by a read cycle, each
 *  polled for completion.  Returns 0 on success or IXGBE_ERR_PHY if
 *  either cycle times out.
 */
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
			   u16 *phy_data)
{
	u32 i, data, command;

	/* Setup and write the address cycle command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the address cycle completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY address command did not complete.\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete, setup and write the read command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Poll for the read cycle to complete, same as above */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY read command didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	/* Read operation is complete.  Get the data from MSRWD */
	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
	*phy_data = (u16)(data);

	return 0;
}
542
543
544
545
546
547
548
549
550
551s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
552 u32 device_type, u16 *phy_data)
553{
554 s32 status;
555 u32 gssr = hw->phy.phy_semaphore_mask;
556
557 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
558 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
559 phy_data);
560 hw->mac.ops.release_swfw_sync(hw, gssr);
561 } else {
562 return IXGBE_ERR_SWFW_SYNC;
563 }
564
565 return status;
566}
567
568
569
570
571
572
573
574
575
/**
 *  ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
 *  @hw: pointer to hardware structure
 *  @reg_addr: 32 bit PHY register to write
 *  @device_type: 5 bit device type
 *  @phy_data: Data to write to the PHY register
 *
 *  Performs a clause-45 style write on the MSCA/MSRWD interface without
 *  taking the SWFW lock: the data is staged in MSRWD, then an address
 *  cycle and a write cycle are issued, each polled for completion.
 *  Returns 0 on success or IXGBE_ERR_PHY if either cycle times out.
 */
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
			    u32 device_type, u16 phy_data)
{
	u32 i, command;

	/* Put the data in the MDI single read and write data register */
	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);

	/* Setup and write the address cycle command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the address cycle completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY address cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete, setup and write the write command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Poll for the write cycle to complete, same as above */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY write cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	return 0;
}
640
641
642
643
644
645
646
647
648
649s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
650 u32 device_type, u16 phy_data)
651{
652 s32 status;
653 u32 gssr = hw->phy.phy_semaphore_mask;
654
655 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
656 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
657 phy_data);
658 hw->mac.ops.release_swfw_sync(hw, gssr);
659 } else {
660 return IXGBE_ERR_SWFW_SYNC;
661 }
662
663 return status;
664}
665
/* Adapt IXGBE_READ_REG() to the single-argument read op that
 * readx_poll_timeout() expects ('hw' is captured from the caller's scope).
 */
#define IXGBE_HW_READ_REG(addr) IXGBE_READ_REG(hw, addr)

/**
 *  ixgbe_msca_cmd - Write the command register and poll for completion
 *  @hw: pointer to hardware structure
 *  @cmd: command register value to write
 *
 *  Writes @cmd to MSCA then polls every 10 usec until the MDI_COMMAND
 *  bit clears.  Returns 0 on completion or a negative errno on timeout
 *  (readx_poll_timeout() semantics).
 */
static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
{
	IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);

	return readx_poll_timeout(IXGBE_HW_READ_REG, IXGBE_MSCA, cmd,
				  !(cmd & IXGBE_MSCA_MDI_COMMAND), 10,
				  10 * IXGBE_MDIO_COMMAND_TIMEOUT);
}
681
682
683
684
685
686
687
688
689static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
690 int regnum, u32 gssr)
691{
692 u32 hwaddr, cmd;
693 s32 data;
694
695 if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
696 return -EBUSY;
697
698 hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
699 if (regnum & MII_ADDR_C45) {
700 hwaddr |= regnum & GENMASK(21, 0);
701 cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
702 } else {
703 hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
704 cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
705 IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
706 }
707
708 data = ixgbe_msca_cmd(hw, cmd);
709 if (data < 0)
710 goto mii_bus_read_done;
711
712
713
714
715 if (!(regnum & MII_ADDR_C45))
716 goto do_mii_bus_read;
717
718 cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
719 data = ixgbe_msca_cmd(hw, cmd);
720 if (data < 0)
721 goto mii_bus_read_done;
722
723do_mii_bus_read:
724 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
725 data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
726
727mii_bus_read_done:
728 hw->mac.ops.release_swfw_sync(hw, gssr);
729 return data;
730}
731
732
733
734
735
736
737
738
739
/**
 *  ixgbe_mii_bus_write_generic - Write a clause 22/45 register with lock
 *  @hw: pointer to hardware structure
 *  @addr: address
 *  @regnum: register number (MII_ADDR_C45 flag selects clause 45)
 *  @val: value to write
 *  @gssr: semaphore flags to acquire
 *
 *  Stages @val in MSRWD, then issues the MSCA command(s): a single write
 *  for clause 22, or an address cycle followed by a write for clause 45.
 *  Returns 0 on success, negative errno on semaphore failure or timeout.
 */
static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
				       int regnum, u16 val, u32 gssr)
{
	u32 hwaddr, cmd;
	s32 err;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
		return -EBUSY;

	/* Stage the data to be written */
	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);

	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
	if (regnum & MII_ADDR_C45) {
		/* Clause 45: issue the address cycle first */
		hwaddr |= regnum & GENMASK(21, 0);
		cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
	} else {
		/* Clause 22: single write command */
		hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
		cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
			IXGBE_MSCA_MDI_COMMAND;
	}

	/* For clause 22 this command performed the write; for clause 45
	 * it was only the address cycle and the write follows below.
	 */
	err = ixgbe_msca_cmd(hw, cmd);
	if (err < 0 || !(regnum & MII_ADDR_C45))
		goto mii_bus_write_done;

	cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
	err = ixgbe_msca_cmd(hw, cmd);

mii_bus_write_done:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return err;
}
775
776
777
778
779
780
781
782static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
783{
784 struct ixgbe_adapter *adapter = bus->priv;
785 struct ixgbe_hw *hw = &adapter->hw;
786 u32 gssr = hw->phy.phy_semaphore_mask;
787
788 return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
789}
790
791
792
793
794
795
796
797
798static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
799 u16 val)
800{
801 struct ixgbe_adapter *adapter = bus->priv;
802 struct ixgbe_hw *hw = &adapter->hw;
803 u32 gssr = hw->phy.phy_semaphore_mask;
804
805 return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
806}
807
808
809
810
811
812
813
814static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
815 int regnum)
816{
817 struct ixgbe_adapter *adapter = bus->priv;
818 struct ixgbe_hw *hw = &adapter->hw;
819 u32 gssr = hw->phy.phy_semaphore_mask;
820
821 gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
822 return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
823}
824
825
826
827
828
829
830
831
832static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr,
833 int regnum, u16 val)
834{
835 struct ixgbe_adapter *adapter = bus->priv;
836 struct ixgbe_hw *hw = &adapter->hw;
837 u32 gssr = hw->phy.phy_semaphore_mask;
838
839 gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
840 return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
841}
842
843
844
845
846
847
848
849
850static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
851{
852 struct pci_dev *rp_pdev;
853 int bus;
854
855 rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
856 if (rp_pdev && rp_pdev->subordinate) {
857 bus = rp_pdev->subordinate->number;
858 return pci_get_domain_bus_and_slot(0, bus, 0);
859 }
860
861 return NULL;
862}
863
864
865
866
867
868
869
870
871
872static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
873{
874 struct ixgbe_adapter *adapter = hw->back;
875 struct pci_dev *pdev = adapter->pdev;
876 struct pci_dev *func0_pdev;
877
878
879
880
881
882
883
884 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
885 if (func0_pdev) {
886 if (func0_pdev == pdev)
887 return true;
888 else
889 return false;
890 }
891 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
892 if (func0_pdev == pdev)
893 return true;
894
895 return false;
896}
897
898
899
900
901
902
903
904
905
/**
 *  ixgbe_mii_bus_init - mii_bus structure setup
 *  @hw: pointer to hardware structure
 *
 *  Allocates and registers an MDIO bus for this adapter, selecting
 *  accessors by device id.  Returns 0 on success (or when this port
 *  does not own the shared x550em_a bus), -ENOMEM on allocation
 *  failure, or the mdiobus_register() error.
 */
s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
	s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
	s32 (*read)(struct mii_bus *bus, int addr, int regnum);
	struct ixgbe_adapter *adapter = hw->back;
	struct pci_dev *pdev = adapter->pdev;
	struct device *dev = &adapter->netdev->dev;
	struct mii_bus *bus;

	switch (hw->device_id) {
	/* x550em_a devices share one MDIO bus: only the owning port
	 * registers it, and accesses need the token/PHY0 semaphores.
	 */
	case IXGBE_DEV_ID_X550EM_A_KR:
	case IXGBE_DEV_ID_X550EM_A_KR_L:
	case IXGBE_DEV_ID_X550EM_A_SFP_N:
	case IXGBE_DEV_ID_X550EM_A_SGMII:
	case IXGBE_DEV_ID_X550EM_A_SGMII_L:
	case IXGBE_DEV_ID_X550EM_A_10G_T:
	case IXGBE_DEV_ID_X550EM_A_SFP:
	case IXGBE_DEV_ID_X550EM_A_1G_T:
	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
		if (!ixgbe_x550em_a_has_mii(hw))
			return 0;
		read = &ixgbe_x550em_a_mii_bus_read;
		write = &ixgbe_x550em_a_mii_bus_write;
		break;
	default:
		read = &ixgbe_mii_bus_read;
		write = &ixgbe_mii_bus_write;
		break;
	}

	bus = devm_mdiobus_alloc(dev);
	if (!bus)
		return -ENOMEM;

	bus->read = read;
	bus->write = write;

	/* Use the PCI device name to make the bus id unique per adapter */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
		 pci_name(pdev));

	bus->name = "ixgbe-mdio";
	bus->priv = adapter;
	bus->parent = dev;
	/* All mask bits set: no PHY address is auto-probed */
	bus->phy_mask = GENMASK(31, 0);

	/* Advertise both clause 22 and clause 45 support on the
	 * driver's own mdio interface.
	 */
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;

	adapter->mii_bus = bus;
	return mdiobus_register(bus);
}
962
963
964
965
966
967
968
/**
 *  ixgbe_setup_phy_link_generic - Set and restart autoneg
 *  @hw: pointer to hardware structure
 *
 *  Programs the autonegotiation advertisement registers to offer only
 *  the speeds that are both supported by the PHY and enabled in
 *  hw->phy.autoneg_advertised, then restarts autonegotiation (without
 *  waiting for it to complete).  Returns 0.
 */
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	/* Set or unset auto-negotiation 10G advertisement */
	hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_10GB_FULL))
		autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

	hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg);

	hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			     MDIO_MMD_AN, &autoneg_reg);

	if (hw->mac.type == ixgbe_mac_X550) {
		/* Set or unset auto-negotiation 5G advertisement */
		autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_5GB_FULL))
			autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;

		/* Set or unset auto-negotiation 2.5G advertisement */
		autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised &
		     IXGBE_LINK_SPEED_2_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
			autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
	}

	/* Set or unset auto-negotiation 1G advertisement */
	autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_1GB_FULL))
		autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;

	hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			      MDIO_MMD_AN, autoneg_reg);

	/* Set or unset auto-negotiation 100M advertisement */
	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF);
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
	    (speed & IXGBE_LINK_SPEED_100_FULL))
		autoneg_reg |= ADVERTISE_100FULL;

	hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);

	/* Blocked by MNG FW so don't reset PHY */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Restart PHY autonegotiation and wait for completion */
	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
			     MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg |= MDIO_AN_CTRL1_RESTART;

	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_AN, autoneg_reg);

	return status;
}
1040
1041
1042
1043
1044
1045
1046
1047s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
1048 ixgbe_link_speed speed,
1049 bool autoneg_wait_to_complete)
1050{
1051
1052
1053
1054 hw->phy.autoneg_advertised = 0;
1055
1056 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
1057 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
1058
1059 if (speed & IXGBE_LINK_SPEED_5GB_FULL)
1060 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
1061
1062 if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
1063 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
1064
1065 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1066 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
1067
1068 if (speed & IXGBE_LINK_SPEED_100_FULL)
1069 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
1070
1071 if (speed & IXGBE_LINK_SPEED_10_FULL)
1072 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
1073
1074
1075 if (hw->phy.ops.setup_link)
1076 hw->phy.ops.setup_link(hw);
1077
1078 return 0;
1079}
1080
1081
1082
1083
1084
1085
1086
1087
/**
 *  ixgbe_get_copper_speeds_supported - get copper link speed capabilities
 *  @hw: pointer to hardware structure
 *
 *  Reads the PMA/PMD speed-ability register and caches the supported
 *  speeds in hw->phy.speeds_supported, with per-MAC adjustments.
 *  Returns 0 on success or the register-read error code.
 */
static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
	u16 speed_ability;
	s32 status;

	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
				      &speed_ability);
	if (status)
		return status;

	if (speed_ability & MDIO_SPEED_10G)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
	if (speed_ability & MDIO_PMA_SPEED_1000)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
	if (speed_ability & MDIO_PMA_SPEED_100)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
		/* X550 additionally supports 2.5G and 5G */
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		/* These MACs do not support 100M */
		hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
		break;
	default:
		break;
	}

	return 0;
}
1120
1121
1122
1123
1124
1125
1126
1127s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
1128 ixgbe_link_speed *speed,
1129 bool *autoneg)
1130{
1131 s32 status = 0;
1132
1133 *autoneg = true;
1134 if (!hw->phy.speeds_supported)
1135 status = ixgbe_get_copper_speeds_supported(hw);
1136
1137 *speed = hw->phy.speeds_supported;
1138 return status;
1139}
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
/**
 *  ixgbe_check_phy_link_tnx - Determine link and speed status
 *  @hw: pointer to hardware structure
 *  @speed: link speed
 *  @link_up: status of link
 *
 *  Reads the vendor-specific status register (VEND1) to determine if
 *  link is up and at what speed, polling up to 10 times with a 10 usec
 *  delay.  Defaults to 10G/not-up until the register says otherwise.
 *  Returns the status of the last register read.
 */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
			     bool *link_up)
{
	s32 status;
	u32 time_out;
	u32 max_time_out = 10;
	u16 phy_link = 0;
	u16 phy_speed = 0;
	u16 phy_data = 0;

	/* Initialize speed and link to default case */
	*link_up = false;
	*speed = IXGBE_LINK_SPEED_10GB_FULL;

	/* Check current speed and link status of the PHY register.
	 * This is a vendor specific register and may have to
	 * be changed for other copper PHYs.
	 */
	for (time_out = 0; time_out < max_time_out; time_out++) {
		udelay(10);
		status = hw->phy.ops.read_reg(hw,
					      MDIO_STAT1,
					      MDIO_MMD_VEND1,
					      &phy_data);
		phy_link = phy_data &
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
		phy_speed = phy_data &
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
			*link_up = true;
			/* Speed-status bit set indicates 1G, else 10G */
			if (phy_speed ==
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
				*speed = IXGBE_LINK_SPEED_1GB_FULL;
			break;
		}
	}

	return status;
}
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
/**
 *  ixgbe_setup_phy_link_tnx - Set and restart autoneg
 *  @hw: pointer to hardware structure
 *
 *  Programs the TNX PHY's autonegotiation advertisement registers for
 *  10G, 1G (via the XNP transmit register) and 100M according to
 *  hw->phy.autoneg_advertised, then restarts autonegotiation unless
 *  management firmware has vetoed PHY resets.  Returns 0.
 */
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		/* Set or unset auto-negotiation 10G advertisement */
		hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

		hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		/* Set or unset auto-negotiation 1G advertisement
		 * (TNX uses the XNP transmit register for this)
		 */
		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;

		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_100_FULL) {
		/* Set or unset auto-negotiation 100M advertisement */
		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~(ADVERTISE_100FULL |
				 ADVERTISE_100HALF);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			autoneg_reg |= ADVERTISE_100FULL;

		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	/* Blocked by MNG FW so don't reset PHY */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Restart PHY autonegotiation */
	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
			     MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg |= MDIO_AN_CTRL1_RESTART;

	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_AN, autoneg_reg);
	return 0;
}
1268
1269
1270
1271
1272
/**
 *  ixgbe_reset_phy_nl - Performs a PHY reset
 *  @hw: pointer to hardware structure
 *
 *  Soft-resets the PHY through the PHY_XS control register, then replays
 *  the SFP init sequence stored in the EEPROM: a stream of control words
 *  (DELAY / DATA / CONTROL with SOL/EOL markers) that program PHY
 *  registers.  Returns 0 on success or IXGBE_ERR_PHY on a reset timeout,
 *  EEPROM read failure, or malformed sequence.
 */
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
{
	u16 phy_offset, control, eword, edata, block_crc;
	bool end_data = false;
	u16 list_offset, data_offset;
	u16 phy_data = 0;
	s32 ret_val;
	u32 i;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);

	/* reset the PHY and poll for completion */
	hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
			      (phy_data | MDIO_CTRL1_RESET));

	/* up to ~1-2 s: 100 polls of 10-20 ms each */
	for (i = 0; i < 100; i++) {
		hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
				     &phy_data);
		if ((phy_data & MDIO_CTRL1_RESET) == 0)
			break;
		usleep_range(10000, 20000);
	}

	if ((phy_data & MDIO_CTRL1_RESET) != 0) {
		hw_dbg(hw, "PHY reset did not complete.\n");
		return IXGBE_ERR_PHY;
	}

	/* Get init offsets */
	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
						      &data_offset);
	if (ret_val)
		return ret_val;

	/* first word is the block CRC; it is read but not verified here */
	ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
	data_offset++;
	while (!end_data) {
		/* Read control word from PHY init contents offset.  The
		 * upper bits are the opcode, the lower bits its payload.
		 */
		ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
		if (ret_val)
			goto err_eeprom;
		control = (eword & IXGBE_CONTROL_MASK_NL) >>
			   IXGBE_CONTROL_SHIFT_NL;
		edata = eword & IXGBE_DATA_MASK_NL;
		switch (control) {
		case IXGBE_DELAY_NL:
			/* payload is a delay in milliseconds */
			data_offset++;
			hw_dbg(hw, "DELAY: %d MS\n", edata);
			usleep_range(edata * 1000, edata * 2000);
			break;
		case IXGBE_DATA_NL:
			/* payload is a count of register writes; the next
			 * word is the starting PHY register offset
			 */
			hw_dbg(hw, "DATA:\n");
			data_offset++;
			ret_val = hw->eeprom.ops.read(hw, data_offset++,
						      &phy_offset);
			if (ret_val)
				goto err_eeprom;
			for (i = 0; i < edata; i++) {
				ret_val = hw->eeprom.ops.read(hw, data_offset,
							      &eword);
				if (ret_val)
					goto err_eeprom;
				hw->phy.ops.write_reg(hw, phy_offset,
						      MDIO_MMD_PMAPMD, eword);
				hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
				       phy_offset);
				data_offset++;
				phy_offset++;
			}
			break;
		case IXGBE_CONTROL_NL:
			/* SOL starts a block, EOL terminates the sequence */
			data_offset++;
			hw_dbg(hw, "CONTROL:\n");
			if (edata == IXGBE_CONTROL_EOL_NL) {
				hw_dbg(hw, "EOL\n");
				end_data = true;
			} else if (edata == IXGBE_CONTROL_SOL_NL) {
				hw_dbg(hw, "SOL\n");
			} else {
				hw_dbg(hw, "Bad control value\n");
				return IXGBE_ERR_PHY;
			}
			break;
		default:
			hw_dbg(hw, "Bad control type\n");
			return IXGBE_ERR_PHY;
		}
	}

	return ret_val;

err_eeprom:
	hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
	return IXGBE_ERR_PHY;
}
1374
1375
1376
1377
1378
1379
1380
1381s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
1382{
1383 switch (hw->mac.ops.get_media_type(hw)) {
1384 case ixgbe_media_type_fiber:
1385 return ixgbe_identify_sfp_module_generic(hw);
1386 case ixgbe_media_type_fiber_qsfp:
1387 return ixgbe_identify_qsfp_module_generic(hw);
1388 default:
1389 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1390 return IXGBE_ERR_SFP_NOT_PRESENT;
1391 }
1392
1393 return IXGBE_ERR_SFP_NOT_PRESENT;
1394}
1395
1396
1397
1398
1399
1400
1401
/**
 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
 * @hw: pointer to hardware structure
 *
 * Searches for and identifies the SFP module.  Reads the identifier,
 * 1G/10G compatibility codes, cable technology and vendor OUI bytes from
 * the module EEPROM and assigns hw->phy.sfp_type and hw->phy.type.
 *
 * Returns 0 on success, IXGBE_ERR_SFP_NOT_PRESENT when no module can be
 * read, or IXGBE_ERR_SFP_NOT_SUPPORTED for unrecognized/disallowed modules.
 **/
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	s32 status;
	u32 vendor_oui = 0;
	/* Remember the previous type so we can flag that setup is needed. */
	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
	u8 identifier = 0;
	u8 comp_codes_1g = 0;
	u8 comp_codes_10g = 0;
	u8 oui_bytes[3] = {0, 0, 0};
	u8 cable_tech = 0;
	u8 cable_spec = 0;
	u16 enforce_sfp = 0;

	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return IXGBE_ERR_SFP_NOT_PRESENT;
	}

	/* LAN ID is needed for sfp_type determination */
	hw->mac.ops.set_lan_id(hw);

	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_IDENTIFIER,
					     &identifier);

	if (status)
		goto err_read_i2c_eeprom;

	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_1GBE_COMP_CODES,
					     &comp_codes_1g);

	if (status)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_10GBE_COMP_CODES,
					     &comp_codes_10g);

	if (status)
		goto err_read_i2c_eeprom;
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_CABLE_TECHNOLOGY,
					     &cable_tech);

	if (status)
		goto err_read_i2c_eeprom;

	/* Classify the module from its compatibility/cable bytes.
	 *
	 * 82598 only distinguishes DA copper, SR and LR.  Later MACs
	 * additionally track which LAN port (core0/core1) the module sits
	 * on and recognize active DA cables and 1G copper/SX/LX modules.
	 * For active DA cables the cable spec compliance byte decides
	 * whether the cable is "active limiting"; anything else active is
	 * treated as unknown.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
		else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_sr;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_lr;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	} else {
		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					     ixgbe_sfp_type_da_cu_core0;
			else
				hw->phy.sfp_type =
					     ixgbe_sfp_type_da_cu_core1;
		} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
			hw->phy.ops.read_i2c_eeprom(
					hw, IXGBE_SFF_CABLE_SPEC_COMP,
					&cable_spec);
			if (cable_spec &
			    IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
				if (hw->bus.lan_id == 0)
					hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core0;
				else
					hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core1;
			} else {
				hw->phy.sfp_type =
						ixgbe_sfp_type_unknown;
			}
		} else if (comp_codes_10g &
			   (IXGBE_SFF_10GBASESR_CAPABLE |
			    IXGBE_SFF_10GBASELR_CAPABLE)) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					      ixgbe_sfp_type_srlr_core0;
			else
				hw->phy.sfp_type =
					      ixgbe_sfp_type_srlr_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_cu_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_cu_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_sx_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_sx_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_lx_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_lx_core1;
		} else {
			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
		}
	}

	if (hw->phy.sfp_type != stored_sfp_type)
		hw->phy.sfp_setup_needed = true;

	/* Determine if the SFP+ module supports both 1G and 10G speeds */
	hw->phy.multispeed_fiber = false;
	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
		hw->phy.multispeed_fiber = true;

	/* Determine PHY vendor unless an NL PHY was already identified */
	if (hw->phy.type != ixgbe_phy_nl) {
		hw->phy.id = identifier;
		status = hw->phy.ops.read_i2c_eeprom(hw,
					    IXGBE_SFF_VENDOR_OUI_BYTE0,
					    &oui_bytes[0]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					    IXGBE_SFF_VENDOR_OUI_BYTE1,
					    &oui_bytes[1]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					    IXGBE_SFF_VENDOR_OUI_BYTE2,
					    &oui_bytes[2]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		/* Assemble the three OUI bytes into one word for matching */
		vendor_oui =
		  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
		   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
		   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));

		switch (vendor_oui) {
		case IXGBE_SFF_VENDOR_OUI_TYCO:
			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
				hw->phy.type =
					    ixgbe_phy_sfp_passive_tyco;
			break;
		case IXGBE_SFF_VENDOR_OUI_FTL:
			if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
				hw->phy.type = ixgbe_phy_sfp_ftl_active;
			else
				hw->phy.type = ixgbe_phy_sfp_ftl;
			break;
		case IXGBE_SFF_VENDOR_OUI_AVAGO:
			hw->phy.type = ixgbe_phy_sfp_avago;
			break;
		case IXGBE_SFF_VENDOR_OUI_INTEL:
			hw->phy.type = ixgbe_phy_sfp_intel;
			break;
		default:
			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
				hw->phy.type =
					 ixgbe_phy_sfp_passive_unknown;
			else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
				hw->phy.type =
					ixgbe_phy_sfp_active_unknown;
			else
				hw->phy.type = ixgbe_phy_sfp_unknown;
			break;
		}
	}

	/* All DA cables (passive or active) are supported as-is */
	if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
			  IXGBE_SFF_DA_ACTIVE_CABLE))
		return 0;

	/* Verify supported 1G SFP modules */
	if (comp_codes_10g == 0 &&
	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	/* Anything else 82598-based is supported */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return 0;

	/* Enforce the Intel-only module policy unless the EEPROM caps or
	 * the allow_unsupported_sfp override permit third-party modules.
	 */
	hw->mac.ops.get_device_caps(hw, &enforce_sfp);
	if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
		/* Make sure we're a supported PHY type */
		if (hw->phy.type == ixgbe_phy_sfp_intel)
			return 0;
		if (hw->allow_unsupported_sfp) {
			e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
			return 0;
		}
		hw_dbg(hw, "SFP+ module not supported\n");
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}
	return 0;

err_read_i2c_eeprom:
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	if (hw->phy.type != ixgbe_phy_nl) {
		hw->phy.id = 0;
		hw->phy.type = ixgbe_phy_unknown;
	}
	return IXGBE_ERR_SFP_NOT_PRESENT;
}
1660
1661
1662
1663
1664
1665
1666
/**
 * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
 * @hw: pointer to hardware structure
 *
 * Searches for and identifies the QSFP module.  Reads the identifier,
 * compatibility codes and (for optical modules) vendor OUI bytes from the
 * module EEPROM and assigns hw->phy.sfp_type and hw->phy.type.
 *
 * Returns 0 on success, IXGBE_ERR_SFP_NOT_PRESENT when no module can be
 * read, or IXGBE_ERR_SFP_NOT_SUPPORTED for unrecognized/disallowed modules.
 **/
static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	s32 status;
	u32 vendor_oui = 0;
	/* Remember the previous type so we can flag that setup is needed. */
	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
	u8 identifier = 0;
	u8 comp_codes_1g = 0;
	u8 comp_codes_10g = 0;
	u8 oui_bytes[3] = {0, 0, 0};
	u16 enforce_sfp = 0;
	u8 connector = 0;
	u8 cable_length = 0;
	u8 device_tech = 0;
	bool active_cable = false;

	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return IXGBE_ERR_SFP_NOT_PRESENT;
	}

	/* LAN ID is needed for sfp_type determination */
	hw->mac.ops.set_lan_id(hw);

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
					     &identifier);

	if (status != 0)
		goto err_read_i2c_eeprom;

	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	hw->phy.id = identifier;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
					     &comp_codes_10g);

	if (status != 0)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
					     &comp_codes_1g);

	if (status != 0)
		goto err_read_i2c_eeprom;

	if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
		hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
	} else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
				     IXGBE_SFF_10GBASELR_CAPABLE)) {
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
	} else {
		if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
			active_cable = true;

		if (!active_cable) {
			/* Check for active DA cables that pretend to be
			 * optical: a non-separable connector with a cable
			 * length and an 850nm VCSEL transmitter technology
			 * is treated as an active cable.
			 */
			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CONNECTOR,
					&connector);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CABLE_LENGTH,
					&cable_length);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_DEVICE_TECH,
					&device_tech);

			if ((connector ==
				     IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
			    (cable_length > 0) &&
			    ((device_tech >> 4) ==
				     IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
				active_cable = true;
		}

		if (active_cable) {
			hw->phy.type = ixgbe_phy_qsfp_active_unknown;
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core1;
		} else {
			/* unsupported module type */
			hw->phy.type = ixgbe_phy_sfp_unsupported;
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
		}
	}

	if (hw->phy.sfp_type != stored_sfp_type)
		hw->phy.sfp_setup_needed = true;

	/* Determine if the QSFP+ module supports both 1G and 10G speeds */
	hw->phy.multispeed_fiber = false;
	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
		hw->phy.multispeed_fiber = true;

	/* Determine PHY vendor for optical modules */
	if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
			      IXGBE_SFF_10GBASELR_CAPABLE)) {
		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
					&oui_bytes[0]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
					&oui_bytes[1]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
					&oui_bytes[2]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		/* Assemble the three OUI bytes into one word for matching */
		vendor_oui =
			((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
			 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
			 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));

		if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
			hw->phy.type = ixgbe_phy_qsfp_intel;
		else
			hw->phy.type = ixgbe_phy_qsfp_unknown;

		/* Enforce the Intel-only module policy unless the EEPROM
		 * caps or the allow_unsupported_sfp override permit
		 * third-party modules.
		 */
		hw->mac.ops.get_device_caps(hw, &enforce_sfp);
		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
			/* Make sure we're a supported PHY type */
			if (hw->phy.type == ixgbe_phy_qsfp_intel)
				return 0;
			if (hw->allow_unsupported_sfp) {
				e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
				return 0;
			}
			hw_dbg(hw, "QSFP module not supported\n");
			hw->phy.type = ixgbe_phy_sfp_unsupported;
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
		}
		return 0;
	}
	return 0;

err_read_i2c_eeprom:
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	hw->phy.id = 0;
	hw->phy.type = ixgbe_phy_unknown;

	return IXGBE_ERR_SFP_NOT_PRESENT;
}
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
/**
 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
 * @hw: pointer to hardware structure
 * @list_offset: offset to the SFP ID list
 * @data_offset: offset to the SFP data block
 *
 * Searches the NVM's SFP init-sequence list for an entry matching the
 * currently identified sfp_type and returns, via @list_offset and
 * @data_offset, where the matching init sequence lives.
 *
 * Returns 0 on success, IXGBE_ERR_SFP_NOT_SUPPORTED /
 * IXGBE_ERR_SFP_NOT_PRESENT for bad module state,
 * IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT when the NVM has no list, or
 * IXGBE_ERR_PHY on EEPROM read failure.
 **/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
					u16 *list_offset,
					u16 *data_offset)
{
	u16 sfp_id;
	u16 sfp_type = hw->phy.sfp_type;

	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		return IXGBE_ERR_SFP_NOT_PRESENT;

	if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	/* Limiting active cables and 1G modules must use the SR/LR init
	 * sequence, so fold those types onto srlr for the list lookup.
	 */
	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_sx_core0)
		sfp_type = ixgbe_sfp_type_srlr_core0;
	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_sx_core1)
		sfp_type = ixgbe_sfp_type_srlr_core1;

	/* Read offset to PHY init contents */
	if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
		hw_err(hw, "eeprom read at %d failed\n",
		       IXGBE_PHY_INIT_OFFSET_NL);
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
	}

	if ((!*list_offset) || (*list_offset == 0xFFFF))
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;

	/* Shift offset to first ID word */
	(*list_offset)++;

	/* Walk the list of (sfp_id, data_offset) pairs until we find a
	 * matching ID or hit the end-of-list marker.
	 */
	if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
		goto err_phy;

	while (sfp_id != IXGBE_PHY_INIT_END_NL) {
		if (sfp_id == sfp_type) {
			(*list_offset)++;
			if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
				goto err_phy;
			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
				hw_dbg(hw, "SFP+ module not supported\n");
				return IXGBE_ERR_SFP_NOT_SUPPORTED;
			} else {
				break;
			}
		} else {
			(*list_offset) += 2;
			if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
				goto err_phy;
		}
	}

	if (sfp_id == IXGBE_PHY_INIT_END_NL) {
		hw_dbg(hw, "No matching SFP+ module found\n");
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	return 0;

err_phy:
	hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
	return IXGBE_ERR_PHY;
}
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1941 u8 *eeprom_data)
1942{
1943 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1944 IXGBE_I2C_EEPROM_DEV_ADDR,
1945 eeprom_data);
1946}
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1957 u8 *sff8472_data)
1958{
1959 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1960 IXGBE_I2C_EEPROM_DEV_ADDR2,
1961 sff8472_data);
1962}
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1973 u8 eeprom_data)
1974{
1975 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1976 IXGBE_I2C_EEPROM_DEV_ADDR,
1977 eeprom_data);
1978}
1979
1980
1981
1982
1983
1984
1985
1986static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
1987{
1988 if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
1989 offset == IXGBE_SFF_IDENTIFIER &&
1990 hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1991 return true;
1992 return false;
1993}
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
/**
 * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address
 * @data: value read
 * @lock: true if to take and release semaphore
 *
 * Performs a bit-banged byte read at a specified device address, with
 * retries.  When @lock is true the SWFW semaphore is taken for each
 * attempt and released before the inter-retry sleep.  On any failure the
 * bus is cleared before retrying.
 **/
static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					   u8 dev_addr, u8 *data, bool lock)
{
	s32 status;
	u32 max_retry = 10;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	bool nack = true;

	if (hw->mac.type >= ixgbe_mac_X550)
		max_retry = 3;
	/* SFP presence probes are expected to fail often; use a dedicated
	 * retry count for them.
	 */
	if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
		max_retry = IXGBE_SFP_DETECT_RETRIES;

	*data = 0;

	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;

		ixgbe_i2c_start(hw);

		/* Device Address and write indication */
		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		/* Repeated start, then device address with read indication */
		ixgbe_i2c_start(hw);

		status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_in_i2c_byte(hw, data);
		if (status != 0)
			goto fail;

		/* NACK the single byte we read, then stop */
		status = ixgbe_clock_out_i2c_bit(hw, nack);
		if (status != 0)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		ixgbe_i2c_bus_clear(hw);
		if (lock) {
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
			msleep(100);
		}
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte read error - Retrying.\n");
		else
			hw_dbg(hw, "I2C byte read error.\n");

	} while (retry < max_retry);

	return status;
}
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2097 u8 dev_addr, u8 *data)
2098{
2099 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2100 data, true);
2101}
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2114 u8 dev_addr, u8 *data)
2115{
2116 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2117 data, false);
2118}
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
/**
 * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address
 * @data: value to write
 * @lock: true if to take and release semaphore
 *
 * Performs a bit-banged byte write at a specified device address.  Unlike
 * the read path, the SWFW semaphore (when @lock) is held across all retry
 * attempts and released once at the end.
 **/
static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					    u8 dev_addr, u8 data, bool lock)
{
	s32 status;
	u32 max_retry = 1;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;

	if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return IXGBE_ERR_SWFW_SYNC;

	do {
		ixgbe_i2c_start(hw);

		/* Device Address and write indication */
		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, data);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* Recover the bus before retrying */
		ixgbe_i2c_bus_clear(hw);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte write error - Retrying.\n");
		else
			hw_dbg(hw, "I2C byte write error.\n");
	} while (retry < max_retry);

	if (lock)
		hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	return status;
}
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2200 u8 dev_addr, u8 data)
2201{
2202 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2203 data, true);
2204}
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2217 u8 dev_addr, u8 data)
2218{
2219 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2220 data, false);
2221}
2222
2223
2224
2225
2226
2227
2228
2229
/**
 * ixgbe_i2c_start - Sets I2C start condition
 * @hw: pointer to hardware structure
 *
 * Sets the I2C start condition (falling edge of SDA while SCL is high),
 * after enabling the bit-bang mode of the I2C control register.
 **/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));

	/* Enable software bit-bang control of the I2C pins */
	i2cctl |= IXGBE_I2C_BB_EN(hw);

	/* Start condition must begin with data and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for start condition */
	udelay(IXGBE_I2C_T_SU_STA);

	ixgbe_set_i2c_data(hw, &i2cctl, 0);

	/* Hold time for start condition */
	udelay(IXGBE_I2C_T_HD_STA);

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	udelay(IXGBE_I2C_T_LOW);

}
2254
2255
2256
2257
2258
2259
2260
2261
2262
/**
 * ixgbe_i2c_stop - Sets I2C stop condition
 * @hw: pointer to hardware structure
 *
 * Sets the I2C stop condition (rising edge of SDA while SCL is high),
 * then disables bit-bang mode and re-enables the output-enable (OE)
 * overrides on MACs that have them.
 **/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
	u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);

	/* Stop condition must begin with data low and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 0);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for stop condition */
	udelay(IXGBE_I2C_T_SU_STO);

	ixgbe_set_i2c_data(hw, &i2cctl, 1);

	/* Bus free time between stop and start */
	udelay(IXGBE_I2C_T_BUF);

	/* On MACs with these control bits, leave bit-bang disabled and the
	 * output drivers tri-stated (OE_N set).
	 */
	if (bb_en_bit || data_oe_bit || clk_oe_bit) {
		i2cctl &= ~bb_en_bit;
		i2cctl |= data_oe_bit | clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
}
2289
2290
2291
2292
2293
2294
2295
2296
2297static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
2298{
2299 s32 i;
2300 bool bit = false;
2301
2302 *data = 0;
2303 for (i = 7; i >= 0; i--) {
2304 ixgbe_clock_in_i2c_bit(hw, &bit);
2305 *data |= bit << i;
2306 }
2307
2308 return 0;
2309}
2310
2311
2312
2313
2314
2315
2316
2317
2318static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
2319{
2320 s32 status;
2321 s32 i;
2322 u32 i2cctl;
2323 bool bit = false;
2324
2325 for (i = 7; i >= 0; i--) {
2326 bit = (data >> i) & 0x1;
2327 status = ixgbe_clock_out_i2c_bit(hw, bit);
2328
2329 if (status != 0)
2330 break;
2331 }
2332
2333
2334 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2335 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2336 i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
2337 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2338 IXGBE_WRITE_FLUSH(hw);
2339
2340 return status;
2341}
2342
2343
2344
2345
2346
2347
2348
/**
 * ixgbe_get_i2c_ack - Polls for I2C ACK
 * @hw: pointer to hardware structure
 *
 * Releases SDA, raises SCL and polls for the slave to pull SDA low (ACK).
 * Returns 0 on ACK, IXGBE_ERR_I2C if no ACK was seen within the timeout.
 **/
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
	s32 status = 0;
	u32 i = 0;
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 timeout = 10;
	bool ack = true;

	/* On MACs with an OE bit, tri-state SDA so the slave can drive it */
	if (data_oe_bit) {
		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock */
	udelay(IXGBE_I2C_T_HIGH);

	/* Poll for ACK.  Note that ACK in I2C spec is the transition from
	 * 1 to 0: SDA low means the slave acknowledged.
	 */
	for (i = 0; i < timeout; i++) {
		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
		ack = ixgbe_get_i2c_data(hw, &i2cctl);

		udelay(1);
		if (ack == 0)
			break;
	}

	if (ack == 1) {
		hw_dbg(hw, "I2C ack was not received.\n");
		status = IXGBE_ERR_I2C;
	}

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	udelay(IXGBE_I2C_T_LOW);

	return status;
}
2392
2393
2394
2395
2396
2397
2398
2399
2400static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
2401{
2402 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2403 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2404
2405 if (data_oe_bit) {
2406 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2407 i2cctl |= data_oe_bit;
2408 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2409 IXGBE_WRITE_FLUSH(hw);
2410 }
2411 ixgbe_raise_i2c_clk(hw, &i2cctl);
2412
2413
2414 udelay(IXGBE_I2C_T_HIGH);
2415
2416 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2417 *data = ixgbe_get_i2c_data(hw, &i2cctl);
2418
2419 ixgbe_lower_i2c_clk(hw, &i2cctl);
2420
2421
2422 udelay(IXGBE_I2C_T_LOW);
2423
2424 return 0;
2425}
2426
2427
2428
2429
2430
2431
2432
2433
2434static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
2435{
2436 s32 status;
2437 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2438
2439 status = ixgbe_set_i2c_data(hw, &i2cctl, data);
2440 if (status == 0) {
2441 ixgbe_raise_i2c_clk(hw, &i2cctl);
2442
2443
2444 udelay(IXGBE_I2C_T_HIGH);
2445
2446 ixgbe_lower_i2c_clk(hw, &i2cctl);
2447
2448
2449
2450
2451 udelay(IXGBE_I2C_T_LOW);
2452 } else {
2453 hw_dbg(hw, "I2C data was not set to %X\n", data);
2454 return IXGBE_ERR_I2C;
2455 }
2456
2457 return 0;
2458}
2459
2460
2461
2462
2463
2464
2465
2466
/**
 * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: current value of I2CCTL register
 *
 * Drives SCL high, then waits for the line to actually read back high to
 * honor slave clock stretching, retrying up to the stretching timeout.
 **/
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
	u32 i = 0;
	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
	u32 i2cctl_r = 0;

	/* Tri-state the clock driver first on MACs with an OE bit */
	if (clk_oe_bit) {
		*i2cctl |= clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	}

	for (i = 0; i < timeout; i++) {
		*i2cctl |= IXGBE_I2C_CLK_OUT(hw);
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);
		/* SCL rise time */
		udelay(IXGBE_I2C_T_RISE);

		/* Done once the line reads back high (no more stretching) */
		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
		if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
			break;
	}
}
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
2501{
2502
2503 *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
2504 *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);
2505
2506 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2507 IXGBE_WRITE_FLUSH(hw);
2508
2509
2510 udelay(IXGBE_I2C_T_FALL);
2511}
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
/**
 * ixgbe_set_i2c_data - Sets the I2C SDA data bit
 * @hw: pointer to hardware structure
 * @i2cctl: current value of I2CCTL register
 * @data: I2C data value (0 or 1) to set
 *
 * Drives SDA to @data and, when setting high, verifies the line actually
 * reads back high (an external device may be holding it low).
 * Returns 0 on success, IXGBE_ERR_I2C if verification failed.
 **/
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);

	if (data)
		*i2cctl |= IXGBE_I2C_DATA_OUT(hw);
	else
		*i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
	/* Enable the SDA output driver while we actively drive the line */
	*i2cctl &= ~data_oe_bit;

	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	/* Data rise/fall plus data setup time */
	udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);

	/* A low value cannot be contended; only verify when setting high */
	if (!data)
		return 0;
	if (data_oe_bit) {
		/* Tri-state the driver again so the read reflects the bus */
		*i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}

	/* Verify data was set correctly */
	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
		hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
		return IXGBE_ERR_I2C;
	}

	return 0;
}
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
2565{
2566 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2567
2568 if (data_oe_bit) {
2569 *i2cctl |= data_oe_bit;
2570 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2571 IXGBE_WRITE_FLUSH(hw);
2572 udelay(IXGBE_I2C_T_FALL);
2573 }
2574
2575 if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
2576 return true;
2577 return false;
2578}
2579
2580
2581
2582
2583
2584
2585
2586
2587static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
2588{
2589 u32 i2cctl;
2590 u32 i;
2591
2592 ixgbe_i2c_start(hw);
2593 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2594
2595 ixgbe_set_i2c_data(hw, &i2cctl, 1);
2596
2597 for (i = 0; i < 9; i++) {
2598 ixgbe_raise_i2c_clk(hw, &i2cctl);
2599
2600
2601 udelay(IXGBE_I2C_T_HIGH);
2602
2603 ixgbe_lower_i2c_clk(hw, &i2cctl);
2604
2605
2606 udelay(IXGBE_I2C_T_LOW);
2607 }
2608
2609 ixgbe_i2c_start(hw);
2610
2611
2612 ixgbe_i2c_stop(hw);
2613}
2614
2615
2616
2617
2618
2619
2620
2621s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2622{
2623 u16 phy_data = 0;
2624
2625 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
2626 return 0;
2627
2628
2629 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2630 MDIO_MMD_PMAPMD, &phy_data);
2631
2632 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2633 return 0;
2634
2635 return IXGBE_ERR_OVERTEMP;
2636}
2637
2638
2639
2640
2641
2642s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
2643{
2644 u32 status;
2645 u16 reg;
2646
2647
2648 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2649 return 0;
2650
2651 if (!on && ixgbe_mng_present(hw))
2652 return 0;
2653
2654 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®);
2655 if (status)
2656 return status;
2657
2658 if (on) {
2659 reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2660 } else {
2661 if (ixgbe_check_reset_blocked(hw))
2662 return 0;
2663 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2664 }
2665
2666 status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg);
2667 return status;
2668}
2669