1
2
3
4#include <linux/pci.h>
5#include <linux/delay.h>
6#include <linux/iopoll.h>
7#include <linux/sched.h>
8
9#include "ixgbe.h"
10#include "ixgbe_phy.h"
11
12static void ixgbe_i2c_start(struct ixgbe_hw *hw);
13static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
14static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
15static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
16static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
17static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
18static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
19static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
20static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
21static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
22static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
23static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
24static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
25static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
26
27
28
29
30
31
32
33
34static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
35{
36 s32 status;
37
38 status = ixgbe_clock_out_i2c_byte(hw, byte);
39 if (status)
40 return status;
41 return ixgbe_get_i2c_ack(hw);
42}
43
44
45
46
47
48
49
50
51static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
52{
53 s32 status;
54
55 status = ixgbe_clock_in_i2c_byte(hw, byte);
56 if (status)
57 return status;
58
59 return ixgbe_clock_out_i2c_bit(hw, false);
60}
61
62
63
64
65
66
67
68
69static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
70{
71 u16 sum = add1 + add2;
72
73 sum = (sum & 0xFF) + (sum >> 8);
74 return sum & 0xFF;
75}
76
77
78
79
80
81
82
83
84
85
86
/**
 * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to read from
 * @reg: I2C device register to read from
 * @val: pointer to location to receive read value
 * @lock: true if to take and release semaphore
 *
 * Issues a write of the (checksummed) register address followed by a
 * repeated-start read of a 16-bit value plus checksum byte.  Retries up
 * to three times, clearing the bus between attempts.
 *
 * Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore could not
 * be taken, or IXGBE_ERR_I2C after all retries fail.
 */
s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					u16 reg, u16 *val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 3;
	int retry = 0;
	u8 csum_byte;
	u8 high_bits;
	u8 low_bits;
	u8 reg_high;
	u8 csum;

	reg_high = ((reg >> 7) & 0xFE) | 1;	/* Indicate read combined */
	/* Checksum is the one's complement of the one's-complement sum
	 * of the two address bytes.
	 */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device Address and write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Write bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Write bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Write csum */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		/* Re-start condition, then device address with read bit set */
		ixgbe_i2c_start(hw);

		if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
			goto fail;
		/* Get upper bits */
		if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
			goto fail;
		/* Get low bits */
		if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
			goto fail;
		/* Get csum (read but not verified here) */
		if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
			goto fail;
		/* NACK the final byte to end the transfer */
		if (ixgbe_clock_out_i2c_bit(hw, false))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		*val = (high_bits << 8) | low_bits;
		return 0;

fail:
		/* Recover the bus before the retry (or final failure) */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte read combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte read combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}
154
155
156
157
158
159
160
161
162
163
164
/**
 * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to write to
 * @reg: I2C device register to write to
 * @val: value to write
 * @lock: true if to take and release semaphore
 *
 * Writes the register address and 16-bit value in a single transaction,
 * followed by the one's-complement checksum of all four bytes.  Retries
 * once, clearing the bus between attempts.
 *
 * Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore could not
 * be taken, or IXGBE_ERR_I2C after the retry fails.
 */
s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					 u16 reg, u16 val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 1;
	int retry = 0;
	u8 reg_high;
	u8 csum;

	reg_high = (reg >> 7) & 0xFE;	/* Indicate write combined */
	/* Checksum covers both address bytes and both data bytes */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
	csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device Address and write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Write bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Write bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Write data 15:8 */
		if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
			goto fail;
		/* Write data 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
			goto fail;
		/* Write csum */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* Recover the bus before the retry (or final failure) */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte write combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte write combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}
219
220
221
222
223
224
225
226
/**
 * ixgbe_probe_phy - Probe a single PHY address for a responding device
 * @hw: pointer to hardware structure
 * @phy_addr: PHY address to probe
 *
 * Probes @phy_addr via mdio45, reads the PHY id, and resolves the PHY
 * type.  Returns true if a PHY was found and typed.  Note: hw->phy.mdio.prtad
 * is set before the probe and left pointing at @phy_addr on success.
 */
static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
{
	u16 ext_ability = 0;

	hw->phy.mdio.prtad = phy_addr;
	if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0)
		return false;

	if (ixgbe_get_phy_id(hw))
		return false;

	hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);

	if (hw->phy.type == ixgbe_phy_unknown) {
		/* Unrecognized id: use the PMA/PMD extended-ability register
		 * to distinguish copper PHYs from everything else.
		 */
		hw->phy.ops.read_reg(hw,
				     MDIO_PMA_EXTABLE,
				     MDIO_MMD_PMAPMD,
				     &ext_ability);
		if (ext_ability &
		    (MDIO_PMA_EXTABLE_10GBT |
		     MDIO_PMA_EXTABLE_1000BT))
			hw->phy.type = ixgbe_phy_cu_unknown;
		else
			hw->phy.type = ixgbe_phy_generic;
	}

	return true;
}
255
256
257
258
259
260
261
262s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
263{
264 u32 phy_addr;
265 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
266
267 if (!hw->phy.phy_semaphore_mask) {
268 if (hw->bus.lan_id)
269 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
270 else
271 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
272 }
273
274 if (hw->phy.type != ixgbe_phy_unknown)
275 return 0;
276
277 if (hw->phy.nw_mng_if_sel) {
278 phy_addr = (hw->phy.nw_mng_if_sel &
279 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
280 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
281 if (ixgbe_probe_phy(hw, phy_addr))
282 return 0;
283 else
284 return IXGBE_ERR_PHY_ADDR_INVALID;
285 }
286
287 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
288 if (ixgbe_probe_phy(hw, phy_addr)) {
289 status = 0;
290 break;
291 }
292 }
293
294
295
296
297
298 if (status)
299 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
300
301 return status;
302}
303
304
305
306
307
308
309
310
311
312
313bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
314{
315 u32 mmngc;
316
317
318 if (hw->mac.type == ixgbe_mac_82598EB)
319 return false;
320
321 mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
322 if (mmngc & IXGBE_MMNGC_MNG_VETO) {
323 hw_dbg(hw, "MNG_VETO bit detected.\n");
324 return true;
325 }
326
327 return false;
328}
329
330
331
332
333
334
/**
 * ixgbe_get_phy_id - Read the PHY identifier registers
 * @hw: pointer to hardware structure
 *
 * Reads MDIO_DEVID1/MDIO_DEVID2 from the PMA/PMD MMD and stores the
 * combined 32-bit identifier (revision bits masked off) in hw->phy.id
 * and the revision bits in hw->phy.revision.
 *
 * Returns the status of the last MDIO read performed.
 */
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
	s32 status;
	u16 phy_id_high = 0;
	u16 phy_id_low = 0;

	status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
				      &phy_id_high);

	if (!status) {
		/* High word in bits 31:16, low word masked into bits 15:0 */
		hw->phy.id = (u32)(phy_id_high << 16);
		status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
					      &phy_id_low);
		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
	}
	return status;
}
353
354
355
356
357
358
359static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
360{
361 enum ixgbe_phy_type phy_type;
362
363 switch (phy_id) {
364 case TN1010_PHY_ID:
365 phy_type = ixgbe_phy_tn;
366 break;
367 case X550_PHY_ID2:
368 case X550_PHY_ID3:
369 case X540_PHY_ID:
370 phy_type = ixgbe_phy_aq;
371 break;
372 case QT2022_PHY_ID:
373 phy_type = ixgbe_phy_qt;
374 break;
375 case ATH_PHY_ID:
376 phy_type = ixgbe_phy_nl;
377 break;
378 case X557_PHY_ID:
379 case X557_PHY_ID2:
380 phy_type = ixgbe_phy_x550em_ext_t;
381 break;
382 default:
383 phy_type = ixgbe_phy_unknown;
384 break;
385 }
386
387 return phy_type;
388}
389
390
391
392
393
/**
 * ixgbe_reset_phy_generic - Performs a PHY reset
 * @hw: pointer to hardware structure
 *
 * Soft-resets the PHY through MDIO_CTRL1 and polls for completion.
 * Skips the reset entirely when the PHY is shut down for overtemp or
 * when manageability firmware has vetoed resets.
 *
 * Returns 0 on success, an identify/read error code, or
 * IXGBE_ERR_RESET_FAILED if the reset bit never clears.
 */
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u16 ctrl = 0;
	s32 status = 0;

	if (hw->phy.type == ixgbe_phy_unknown)
		status = ixgbe_identify_phy_generic(hw);

	if (status != 0 || hw->phy.type == ixgbe_phy_none)
		return status;

	/* Don't reset PHY if it's shut down due to overtemp. */
	if (!hw->phy.reset_if_overtemp &&
	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
		return 0;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Perform soft PHY reset to the PHY_XS.
	 * This will cause a soft reset to the PHY.
	 */
	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_PHYXS,
			      MDIO_CTRL1_RESET);

	/* Poll for reset-complete, up to 30 * 100 ms.  X550EM ext-t PHYs
	 * signal completion via a vendor alarm bit instead of the CTRL1
	 * reset bit self-clearing.
	 */
	for (i = 0; i < 30; i++) {
		msleep(100);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
			status = hw->phy.ops.read_reg(hw,
						  IXGBE_MDIO_TX_VENDOR_ALARMS_3,
						  MDIO_MMD_PMAPMD, &ctrl);
			if (status)
				return status;

			if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
				udelay(2);
				break;
			}
		} else {
			status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
						      MDIO_MMD_PHYXS, &ctrl);
			if (status)
				return status;

			if (!(ctrl & MDIO_CTRL1_RESET)) {
				udelay(2);
				break;
			}
		}
	}

	/* NOTE(review): on the ext-t path ctrl holds the vendor-alarms
	 * register here, not CTRL1 — the check below presumably relies on
	 * MDIO_CTRL1_RESET not aliasing an alarm bit; confirm.
	 */
	if (ctrl & MDIO_CTRL1_RESET) {
		hw_dbg(hw, "PHY reset polling failed to complete.\n");
		return IXGBE_ERR_RESET_FAILED;
	}

	return 0;
}
461
462
463
464
465
466
467
468
469
/**
 * ixgbe_read_phy_reg_mdi - read PHY register
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to read
 * @device_type: 5 bit device type
 * @phy_data: Pointer to read data from PHY register
 *
 * Reads a value from a specified PHY register without the SWFW lock
 * (caller must hold it).  Issues an MDIO address cycle, then a read
 * cycle, polling MSCA for completion of each.
 *
 * Returns 0 on success or IXGBE_ERR_PHY on command timeout.
 */
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
			   u16 *phy_data)
{
	u32 i, data, command;

	/* Setup and write the address cycle command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the address cycle completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}


	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY address command did not complete.\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete, setup and write the read command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the read command completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY read command didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	/* Read operation is complete.  Get the data from MSRWD;
	 * read data lives in the upper 16 bits.
	 */
	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
	*phy_data = (u16)(data);

	return 0;
}
537
538
539
540
541
542
543
544
545
546s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
547 u32 device_type, u16 *phy_data)
548{
549 s32 status;
550 u32 gssr = hw->phy.phy_semaphore_mask;
551
552 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
553 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
554 phy_data);
555 hw->mac.ops.release_swfw_sync(hw, gssr);
556 } else {
557 return IXGBE_ERR_SWFW_SYNC;
558 }
559
560 return status;
561}
562
563
564
565
566
567
568
569
570
/**
 * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to write
 * @device_type: 5 bit device type
 * @phy_data: Data to write to the PHY register
 *
 * Writes a value to a specified PHY register without the SWFW lock
 * (caller must hold it).  Loads the data register, then issues an MDIO
 * address cycle followed by a write cycle, polling MSCA for completion
 * of each.
 *
 * Returns 0 on success or IXGBE_ERR_PHY on command timeout.
 */
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
			    u32 device_type, u16 phy_data)
{
	u32 i, command;

	/* Put the data in the MDI single read and write data register */
	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);

	/* Setup and write the address cycle command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the address cycle completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY address cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete, setup and write the write command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the write command completed.
	 * The MDI Command bit will clear when the operation is complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY write cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	return 0;
}
635
636
637
638
639
640
641
642
643
644s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
645 u32 device_type, u16 phy_data)
646{
647 s32 status;
648 u32 gssr = hw->phy.phy_semaphore_mask;
649
650 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
651 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
652 phy_data);
653 hw->mac.ops.release_swfw_sync(hw, gssr);
654 } else {
655 return IXGBE_ERR_SWFW_SYNC;
656 }
657
658 return status;
659}
660
661#define IXGBE_HW_READ_REG(addr) IXGBE_READ_REG(hw, addr)
662
663
664
665
666
667
/**
 * ixgbe_msca_cmd - Write the command register and poll for completion/timeout
 * @hw: pointer to hardware structure
 * @cmd: command register value to write
 *
 * Writes @cmd to MSCA, then polls (every 10 us, up to
 * 10 * IXGBE_MDIO_COMMAND_TIMEOUT us) for the MDI command bit to
 * self-clear.  Returns 0 on completion or the poll-timeout error.
 */
static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
{
	IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);

	/* cmd is reused by the macro as the poll variable */
	return readx_poll_timeout(IXGBE_HW_READ_REG, IXGBE_MSCA, cmd,
				  !(cmd & IXGBE_MSCA_MDI_COMMAND), 10,
				  10 * IXGBE_MDIO_COMMAND_TIMEOUT);
}
676
677
678
679
680
681
682
683
/**
 * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags
 * @hw: pointer to hardware structure
 * @addr: address
 * @regnum: register number
 * @gssr: semaphore flags to acquire
 *
 * Returns the 16-bit register value on success, -EBUSY if the semaphore
 * cannot be taken, or a negative status from the MSCA command.
 */
static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
				      int regnum, u32 gssr)
{
	u32 hwaddr, cmd;
	s32 data;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
		return -EBUSY;

	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
	if (regnum & MII_ADDR_C45) {
		/* Clause 45: devad+register in the low 22 bits; an address
		 * cycle must precede the actual read command.
		 */
		hwaddr |= regnum & GENMASK(21, 0);
		cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
	} else {
		hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
		cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
			IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
	}

	data = ixgbe_msca_cmd(hw, cmd);
	if (data < 0)
		goto mii_bus_read_done;

	/* For a clause 22 access the command above already fetched the
	 * data; for clause 45 a separate read command is issued now that
	 * the address cycle has completed.
	 */
	if (!(regnum & MII_ADDR_C45))
		goto do_mii_bus_read;

	cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
	data = ixgbe_msca_cmd(hw, cmd);
	if (data < 0)
		goto mii_bus_read_done;

do_mii_bus_read:
	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	/* NOTE(review): GENMASK(16, 0) keeps 17 bits; after the shift only
	 * bits 15:0 can be set, so this behaves as a 16-bit mask — confirm
	 * GENMASK(15, 0) was intended.
	 */
	data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);

mii_bus_read_done:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return data;
}
726
727
728
729
730
731
732
733
734
/**
 * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags
 * @hw: pointer to hardware structure
 * @addr: address
 * @regnum: register number
 * @val: value to write
 * @gssr: semaphore flags to acquire
 *
 * Returns 0 on success, -EBUSY if the semaphore cannot be taken, or a
 * negative status from the MSCA command.
 */
static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
				       int regnum, u16 val, u32 gssr)
{
	u32 hwaddr, cmd;
	s32 err;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
		return -EBUSY;

	/* Load the data register before issuing any command */
	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);

	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
	if (regnum & MII_ADDR_C45) {
		/* Clause 45: address cycle first, write command after */
		hwaddr |= regnum & GENMASK(21, 0);
		cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
	} else {
		hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
		cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
			IXGBE_MSCA_MDI_COMMAND;
	}

	/* For clause 22 this is the whole transaction; for clause 45 it is
	 * only the address cycle.
	 */
	err = ixgbe_msca_cmd(hw, cmd);
	if (err < 0 || !(regnum & MII_ADDR_C45))
		goto mii_bus_write_done;

	cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
	err = ixgbe_msca_cmd(hw, cmd);

mii_bus_write_done:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return err;
}
770
771
772
773
774
775
776
777static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
778{
779 struct ixgbe_adapter *adapter = bus->priv;
780 struct ixgbe_hw *hw = &adapter->hw;
781 u32 gssr = hw->phy.phy_semaphore_mask;
782
783 return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
784}
785
786
787
788
789
790
791
792
793static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
794 u16 val)
795{
796 struct ixgbe_adapter *adapter = bus->priv;
797 struct ixgbe_hw *hw = &adapter->hw;
798 u32 gssr = hw->phy.phy_semaphore_mask;
799
800 return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
801}
802
803
804
805
806
807
808
809static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
810 int regnum)
811{
812 struct ixgbe_adapter *adapter = bus->priv;
813 struct ixgbe_hw *hw = &adapter->hw;
814 u32 gssr = hw->phy.phy_semaphore_mask;
815
816 gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
817 return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
818}
819
820
821
822
823
824
825
826
827static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr,
828 int regnum, u16 val)
829{
830 struct ixgbe_adapter *adapter = bus->priv;
831 struct ixgbe_hw *hw = &adapter->hw;
832 u32 gssr = hw->phy.phy_semaphore_mask;
833
834 gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
835 return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
836}
837
838
839
840
841
842
843
844
845static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
846{
847 struct pci_dev *rp_pdev;
848 int bus;
849
850 rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
851 if (rp_pdev && rp_pdev->subordinate) {
852 bus = rp_pdev->subordinate->number;
853 return pci_get_domain_bus_and_slot(0, bus, 0);
854 }
855
856 return NULL;
857}
858
859
860
861
862
863
864
865
866
867static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
868{
869 struct ixgbe_adapter *adapter = hw->back;
870 struct pci_dev *pdev = adapter->pdev;
871 struct pci_dev *func0_pdev;
872
873
874
875
876
877
878
879 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
880 if (func0_pdev) {
881 if (func0_pdev == pdev)
882 return true;
883 else
884 return false;
885 }
886 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
887 if (func0_pdev == pdev)
888 return true;
889
890 return false;
891}
892
893
894
895
896
897
898
899
900
/**
 * ixgbe_mii_bus_init - mii_bus structure setup
 * @hw: pointer to hardware structure
 *
 * Allocates (devm) and registers an mii_bus for the adapter, choosing
 * x550em_a-specific read/write ops where the extra semaphores are
 * required.  Returns 0 on success, -ENOMEM on allocation failure,
 * -ENODEV when this x550em_a function does not own the MII bus, or the
 * mdiobus_register() error.
 */
s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	struct pci_dev *pdev = adapter->pdev;
	struct device *dev = &adapter->netdev->dev;
	struct mii_bus *bus;

	adapter->mii_bus = devm_mdiobus_alloc(dev);
	if (!adapter->mii_bus)
		return -ENOMEM;

	bus = adapter->mii_bus;

	switch (hw->device_id) {
	/* C3000 SoCs */
	case IXGBE_DEV_ID_X550EM_A_KR:
	case IXGBE_DEV_ID_X550EM_A_KR_L:
	case IXGBE_DEV_ID_X550EM_A_SFP_N:
	case IXGBE_DEV_ID_X550EM_A_SGMII:
	case IXGBE_DEV_ID_X550EM_A_SGMII_L:
	case IXGBE_DEV_ID_X550EM_A_10G_T:
	case IXGBE_DEV_ID_X550EM_A_SFP:
	case IXGBE_DEV_ID_X550EM_A_1G_T:
	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
		/* Only the first PCI function owns the shared MII bus */
		if (!ixgbe_x550em_a_has_mii(hw))
			goto ixgbe_no_mii_bus;
		bus->read = &ixgbe_x550em_a_mii_bus_read;
		bus->write = &ixgbe_x550em_a_mii_bus_write;
		break;
	default:
		bus->read = &ixgbe_mii_bus_read;
		bus->write = &ixgbe_mii_bus_write;
		break;
	}

	/* Use the position of the device in the PCI hierarchy as the id */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%x", ixgbe_driver_name,
		 (pdev->bus->number << 8) | pdev->devfn);

	bus->name = "ixgbe-mdio";
	bus->priv = adapter;
	bus->parent = dev;
	bus->phy_mask = GENMASK(31, 0);	/* no auto-probing of PHYs */

	/* Support clause 22/45 natively.  mdiobus_register() will attempt
	 * to probe for PHYs on the bus, which is why the mask above
	 * disables auto-probing.
	 */
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;

	return mdiobus_register(bus);

ixgbe_no_mii_bus:
	devm_mdiobus_free(dev, bus);
	adapter->mii_bus = NULL;
	return -ENODEV;
}
958
959
960
961
962
963
964
/**
 * ixgbe_setup_phy_link_generic - Set and restart autoneg
 * @hw: pointer to hardware structure
 *
 * Programs the autoneg advertisement registers from the intersection of
 * hw->phy.autoneg_advertised and the PHY-supported speeds, then restarts
 * autonegotiation (unless manageability firmware blocks it).
 */
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	/* Set or unset auto-negotiation 10G advertisement */
	hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_10GB_FULL))
		autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

	hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg);

	hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			     MDIO_MMD_AN, &autoneg_reg);

	if (hw->mac.type == ixgbe_mac_X550) {
		/* Set or unset auto-negotiation 5G advertisement */
		autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_5GB_FULL))
			autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;

		/* Set or unset auto-negotiation 2.5G advertisement */
		autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised &
		     IXGBE_LINK_SPEED_2_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
			autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
	}

	/* Set or unset auto-negotiation 1G advertisement */
	autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_1GB_FULL))
		autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;

	hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			      MDIO_MMD_AN, autoneg_reg);

	/* Set or unset auto-negotiation 100M advertisement */
	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF);
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
	    (speed & IXGBE_LINK_SPEED_100_FULL))
		autoneg_reg |= ADVERTISE_100FULL;

	hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);

	/* Blocked by MNG FW so don't reset PHY */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Restart PHY autonegotiation */
	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
			     MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg |= MDIO_AN_CTRL1_RESTART;

	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_AN, autoneg_reg);

	return status;
}
1036
1037
1038
1039
1040
1041
1042
1043s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
1044 ixgbe_link_speed speed,
1045 bool autoneg_wait_to_complete)
1046{
1047
1048
1049
1050 hw->phy.autoneg_advertised = 0;
1051
1052 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
1053 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
1054
1055 if (speed & IXGBE_LINK_SPEED_5GB_FULL)
1056 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
1057
1058 if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
1059 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
1060
1061 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1062 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
1063
1064 if (speed & IXGBE_LINK_SPEED_100_FULL)
1065 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
1066
1067 if (speed & IXGBE_LINK_SPEED_10_FULL)
1068 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
1069
1070
1071 if (hw->phy.ops.setup_link)
1072 hw->phy.ops.setup_link(hw);
1073
1074 return 0;
1075}
1076
1077
1078
1079
1080
1081
1082
1083
/**
 * ixgbe_get_copper_speeds_supported - get copper link speed capabilities
 * @hw: pointer to hardware structure
 *
 * Determines the supported link capabilities by reading the PHY speed
 * ability register, adjusts them for MAC-specific quirks, and caches
 * the result in hw->phy.speeds_supported.
 *
 * Returns 0 on success or the MDIO read error.
 */
static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
	u16 speed_ability;
	s32 status;

	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
				      &speed_ability);
	if (status)
		return status;

	if (speed_ability & MDIO_SPEED_10G)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
	if (speed_ability & MDIO_PMA_SPEED_1000)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
	if (speed_ability & MDIO_PMA_SPEED_100)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
		/* X550 additionally supports NBASE-T rates */
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		/* These MACs do not support 100M */
		hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
		break;
	default:
		break;
	}

	return 0;
}
1116
1117
1118
1119
1120
1121
1122
/**
 * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: boolean auto-negotiation value
 *
 * Returns the cached supported speeds, probing the PHY once via
 * ixgbe_get_copper_speeds_supported() when the cache is empty.
 * Copper PHYs always report autoneg as true.
 */
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
					       ixgbe_link_speed *speed,
					       bool *autoneg)
{
	s32 status = 0;

	*autoneg = true;
	/* Probe only once; subsequent calls use the cached value */
	if (!hw->phy.speeds_supported)
		status = ixgbe_get_copper_speeds_supported(hw);

	*speed = hw->phy.speeds_supported;
	return status;
}
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
/**
 * ixgbe_check_phy_link_tnx - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: link speed
 * @link_up: status of link
 *
 * Reads the vendor-specific status register of a TNX PHY to determine
 * whether link is up and at which speed.  Defaults to 10G/link-down when
 * the link bit never asserts within the poll window.
 *
 * Returns the status of the last MDIO read.
 */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
			     bool *link_up)
{
	s32 status;
	u32 time_out;
	u32 max_time_out = 10;
	u16 phy_link = 0;
	u16 phy_speed = 0;
	u16 phy_data = 0;

	/* Initialize speed and link to default case */
	*link_up = false;
	*speed = IXGBE_LINK_SPEED_10GB_FULL;

	/* Check current speed and link status of the PHY register.
	 * This is a vendor specific register and may have to
	 * be read by firmware revision's specific value.
	 */
	for (time_out = 0; time_out < max_time_out; time_out++) {
		udelay(10);
		status = hw->phy.ops.read_reg(hw,
					      MDIO_STAT1,
					      MDIO_MMD_VEND1,
					      &phy_data);
		phy_link = phy_data &
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
		phy_speed = phy_data &
			     IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
			*link_up = true;
			/* Speed bit set means 1G, otherwise stay at 10G */
			if (phy_speed ==
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
				*speed = IXGBE_LINK_SPEED_1GB_FULL;
			break;
		}
	}

	return status;
}
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
/**
 * ixgbe_setup_phy_link_tnx - Set and restart autoneg
 * @hw: pointer to hardware structure
 *
 * Programs the TNX PHY autoneg advertisement registers (10G/1G/100M)
 * from hw->phy.autoneg_advertised, restricted to PHY-supported speeds,
 * then restarts autonegotiation unless manageability firmware blocks
 * PHY resets.
 */
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		/* Set or unset auto-negotiation 10G advertisement */
		hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

		hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		/* Set or unset auto-negotiation 1G advertisement
		 * (TNX uses the XNP transmit register for this)
		 */
		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;

		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_100_FULL) {
		/* Set or unset auto-negotiation 100M advertisement */
		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~(ADVERTISE_100FULL |
				 ADVERTISE_100HALF);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			autoneg_reg |= ADVERTISE_100FULL;

		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	/* Blocked by MNG FW so don't reset PHY */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Restart PHY autonegotiation */
	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
			     MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg |= MDIO_AN_CTRL1_RESTART;

	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_AN, autoneg_reg);
	return 0;
}
1264
1265
1266
1267
1268
/**
 * ixgbe_reset_phy_nl - Performs a PHY reset
 * @hw: pointer to hardware structure
 *
 * Soft-resets the NL PHY and then replays the SFP init sequence stored
 * in the EEPROM — a stream of (control, data) words encoding delays,
 * register-write blocks, and start/end-of-list markers.
 *
 * Returns 0 on success or IXGBE_ERR_PHY on reset timeout, EEPROM read
 * failure, or a malformed init sequence.
 */
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
{
	u16 phy_offset, control, eword, edata, block_crc;
	bool end_data = false;
	u16 list_offset, data_offset;
	u16 phy_data = 0;
	s32 ret_val;
	u32 i;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);

	/* reset the PHY and poll for completion */
	hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
			      (phy_data | MDIO_CTRL1_RESET));

	for (i = 0; i < 100; i++) {
		hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
				     &phy_data);
		if ((phy_data & MDIO_CTRL1_RESET) == 0)
			break;
		usleep_range(10000, 20000);
	}

	if ((phy_data & MDIO_CTRL1_RESET) != 0) {
		hw_dbg(hw, "PHY reset did not complete.\n");
		return IXGBE_ERR_PHY;
	}

	/* Get init offsets */
	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
						      &data_offset);
	if (ret_val)
		return ret_val;

	ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
	data_offset++;
	while (!end_data) {
		/* Read control word from PHY init contents offset */
		ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
		if (ret_val)
			goto err_eeprom;
		control = (eword & IXGBE_CONTROL_MASK_NL) >>
			   IXGBE_CONTROL_SHIFT_NL;
		edata = eword & IXGBE_DATA_MASK_NL;
		switch (control) {
		case IXGBE_DELAY_NL:
			data_offset++;
			hw_dbg(hw, "DELAY: %d MS\n", edata);
			usleep_range(edata * 1000, edata * 2000);
			break;
		case IXGBE_DATA_NL:
			/* Data block: next word is the starting PHY
			 * register, followed by edata values to write to
			 * consecutive registers.
			 */
			hw_dbg(hw, "DATA:\n");
			data_offset++;
			ret_val = hw->eeprom.ops.read(hw, data_offset++,
						      &phy_offset);
			if (ret_val)
				goto err_eeprom;
			for (i = 0; i < edata; i++) {
				ret_val = hw->eeprom.ops.read(hw, data_offset,
							      &eword);
				if (ret_val)
					goto err_eeprom;
				hw->phy.ops.write_reg(hw, phy_offset,
						      MDIO_MMD_PMAPMD, eword);
				hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
				       phy_offset);
				data_offset++;
				phy_offset++;
			}
			break;
		case IXGBE_CONTROL_NL:
			data_offset++;
			hw_dbg(hw, "CONTROL:\n");
			if (edata == IXGBE_CONTROL_EOL_NL) {
				hw_dbg(hw, "EOL\n");
				end_data = true;
			} else if (edata == IXGBE_CONTROL_SOL_NL) {
				hw_dbg(hw, "SOL\n");
			} else {
				hw_dbg(hw, "Bad control value\n");
				return IXGBE_ERR_PHY;
			}
			break;
		default:
			hw_dbg(hw, "Bad control type\n");
			return IXGBE_ERR_PHY;
		}
	}

	return ret_val;

err_eeprom:
	hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
	return IXGBE_ERR_PHY;
}
1370
1371
1372
1373
1374
1375
1376
1377s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
1378{
1379 switch (hw->mac.ops.get_media_type(hw)) {
1380 case ixgbe_media_type_fiber:
1381 return ixgbe_identify_sfp_module_generic(hw);
1382 case ixgbe_media_type_fiber_qsfp:
1383 return ixgbe_identify_qsfp_module_generic(hw);
1384 default:
1385 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1386 return IXGBE_ERR_SFP_NOT_PRESENT;
1387 }
1388
1389 return IXGBE_ERR_SFP_NOT_PRESENT;
1390}
1391
1392
1393
1394
1395
1396
1397
/**
 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
 * @hw: pointer to hardware structure
 *
 * Searches for and identifies the SFP module, and assigns the appropriate
 * PHY type (hw->phy.type) and SFP type (hw->phy.sfp_type).
 *
 * Returns 0 on success, IXGBE_ERR_SFP_NOT_PRESENT when no module is
 * detected (or an EEPROM read fails), IXGBE_ERR_SFP_NOT_SUPPORTED when the
 * module is present but not usable with this MAC.
 **/
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
	/* NOTE(review): adapter appears to be consumed only by the e_warn()
	 * macro below — confirm against the macro definition.
	 */
	struct ixgbe_adapter *adapter = hw->back;
	s32 status;
	u32 vendor_oui = 0;
	/* Remember the previous type so we can flag a module swap. */
	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
	u8 identifier = 0;
	u8 comp_codes_1g = 0;
	u8 comp_codes_10g = 0;
	u8 oui_bytes[3] = {0, 0, 0};
	u8 cable_tech = 0;
	u8 cable_spec = 0;
	u16 enforce_sfp = 0;

	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return IXGBE_ERR_SFP_NOT_PRESENT;
	}

	/* LAN ID is needed for the per-port (core0/core1) sfp_type values */
	hw->mac.ops.set_lan_id(hw);

	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_IDENTIFIER,
					     &identifier);

	if (status)
		goto err_read_i2c_eeprom;

	/* Anything other than an SFP identifier byte is unsupported here */
	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_1GBE_COMP_CODES,
					     &comp_codes_1g);

	if (status)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_10GBE_COMP_CODES,
					     &comp_codes_10g);

	if (status)
		goto err_read_i2c_eeprom;
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_CABLE_TECHNOLOGY,
					     &cable_tech);

	if (status)
		goto err_read_i2c_eeprom;

	/* Derive hw->phy.sfp_type from the module's compliance/cable bytes.
	 *
	 * 82598EB uses port-agnostic sfp_type values; all other MACs track
	 * the LAN port (core0/core1) in the sfp_type as well, because the
	 * two ports may need different init sequences.
	 *
	 * Precedence on non-82598: DA passive cable, then DA active cable
	 * (limiting vs. non-limiting per the cable spec byte), then 10G
	 * SR/LR optics, then 1G copper/SX/LX, otherwise unknown.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
		else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_sr;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_lr;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	} else {
		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_cu_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_cu_core1;
		} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
			/* Check if the active cable is current-limiting;
			 * read failure leaves cable_spec at 0 -> unknown.
			 */
			hw->phy.ops.read_i2c_eeprom(
				hw, IXGBE_SFF_CABLE_SPEC_COMP,
				&cable_spec);
			if (cable_spec &
			    IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
				if (hw->bus.lan_id == 0)
					hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core0;
				else
					hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core1;
			} else {
				hw->phy.sfp_type =
						ixgbe_sfp_type_unknown;
			}
		} else if (comp_codes_10g &
			   (IXGBE_SFF_10GBASESR_CAPABLE |
			    IXGBE_SFF_10GBASELR_CAPABLE)) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					      ixgbe_sfp_type_srlr_core0;
			else
				hw->phy.sfp_type =
					      ixgbe_sfp_type_srlr_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_cu_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_cu_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_sx_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_sx_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_lx_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_lx_core1;
		} else {
			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
		}
	}

	/* A different module than last time means link setup must be redone */
	if (hw->phy.sfp_type != stored_sfp_type)
		hw->phy.sfp_setup_needed = true;

	/* Multispeed: module advertises both a 1G and the matching 10G rate */
	hw->phy.multispeed_fiber = false;
	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
		hw->phy.multispeed_fiber = true;

	/* Determine PHY vendor from the OUI, unless this is an NL PHY */
	if (hw->phy.type != ixgbe_phy_nl) {
		hw->phy.id = identifier;
		status = hw->phy.ops.read_i2c_eeprom(hw,
					    IXGBE_SFF_VENDOR_OUI_BYTE0,
					    &oui_bytes[0]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					    IXGBE_SFF_VENDOR_OUI_BYTE1,
					    &oui_bytes[1]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					    IXGBE_SFF_VENDOR_OUI_BYTE2,
					    &oui_bytes[2]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		vendor_oui =
		  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
		   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
		   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));

		switch (vendor_oui) {
		case IXGBE_SFF_VENDOR_OUI_TYCO:
			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
				hw->phy.type =
					    ixgbe_phy_sfp_passive_tyco;
			break;
		case IXGBE_SFF_VENDOR_OUI_FTL:
			if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
				hw->phy.type = ixgbe_phy_sfp_ftl_active;
			else
				hw->phy.type = ixgbe_phy_sfp_ftl;
			break;
		case IXGBE_SFF_VENDOR_OUI_AVAGO:
			hw->phy.type = ixgbe_phy_sfp_avago;
			break;
		case IXGBE_SFF_VENDOR_OUI_INTEL:
			hw->phy.type = ixgbe_phy_sfp_intel;
			break;
		default:
			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
				hw->phy.type =
					 ixgbe_phy_sfp_passive_unknown;
			else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
				hw->phy.type =
					ixgbe_phy_sfp_active_unknown;
			else
				hw->phy.type = ixgbe_phy_sfp_unknown;
			break;
		}
	}

	/* All direct-attach cables (passive or active) are allowed */
	if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
			  IXGBE_SFF_DA_ACTIVE_CABLE))
		return 0;

	/* Reject modules that advertise neither 10G nor a recognized 1G type */
	if (comp_codes_10g == 0 &&
	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	/* 82598 does not enforce the approved-module list */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return 0;

	/* Enforce supported-module policy from the device capabilities word */
	hw->mac.ops.get_device_caps(hw, &enforce_sfp);
	if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
		/* Intel modules are always allowed */
		if (hw->phy.type == ixgbe_phy_sfp_intel)
			return 0;
		/* Module-qualification override: warn but proceed */
		if (hw->allow_unsupported_sfp) {
			e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
			return 0;
		}
		hw_dbg(hw, "SFP+ module not supported\n");
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}
	return 0;

err_read_i2c_eeprom:
	/* Treat any I2C read failure as "no module present" */
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	if (hw->phy.type != ixgbe_phy_nl) {
		hw->phy.id = 0;
		hw->phy.type = ixgbe_phy_unknown;
	}
	return IXGBE_ERR_SFP_NOT_PRESENT;
}
1656
1657
1658
1659
1660
1661
1662
/**
 * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
 * @hw: pointer to hardware structure
 *
 * Searches for and identifies the QSFP module, and assigns the appropriate
 * PHY type (hw->phy.type) and SFP type (hw->phy.sfp_type).
 *
 * Returns 0 on success, IXGBE_ERR_SFP_NOT_PRESENT when no module is
 * detected (or an EEPROM read fails), IXGBE_ERR_SFP_NOT_SUPPORTED when the
 * module is present but not usable.
 **/
s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
	/* NOTE(review): adapter appears to be consumed only by the e_warn()
	 * macro below — confirm against the macro definition.
	 */
	struct ixgbe_adapter *adapter = hw->back;
	s32 status;
	u32 vendor_oui = 0;
	/* Remember the previous type so we can flag a module swap. */
	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
	u8 identifier = 0;
	u8 comp_codes_1g = 0;
	u8 comp_codes_10g = 0;
	u8 oui_bytes[3] = {0, 0, 0};
	u16 enforce_sfp = 0;
	u8 connector = 0;
	u8 cable_length = 0;
	u8 device_tech = 0;
	bool active_cable = false;

	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return IXGBE_ERR_SFP_NOT_PRESENT;
	}

	/* LAN ID is needed for the per-port (core0/core1) sfp_type values */
	hw->mac.ops.set_lan_id(hw);

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
					     &identifier);

	if (status != 0)
		goto err_read_i2c_eeprom;

	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	hw->phy.id = identifier;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
					     &comp_codes_10g);

	if (status != 0)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
					     &comp_codes_1g);

	if (status != 0)
		goto err_read_i2c_eeprom;

	/* Classify: DA passive, then SR/LR optics, then active-cable probes */
	if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
		hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
	} else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
				     IXGBE_SFF_10GBASELR_CAPABLE)) {
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
	} else {
		if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
			active_cable = true;

		if (!active_cable) {
			/* Some active cables don't set the active-cable
			 * compliance bit; infer one from a non-separable
			 * connector, a nonzero cable length and an 850nm
			 * VCSEL transmitter technology.
			 * (Read failures leave the probed bytes at 0, which
			 * simply fails the check below.)
			 */
			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CONNECTOR,
					&connector);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CABLE_LENGTH,
					&cable_length);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_DEVICE_TECH,
					&device_tech);

			if ((connector ==
			     IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
			    (cable_length > 0) &&
			    ((device_tech >> 4) ==
			     IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
				active_cable = true;
		}

		if (active_cable) {
			hw->phy.type = ixgbe_phy_qsfp_active_unknown;
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core1;
		} else {
			/* Unsupported module type */
			hw->phy.type = ixgbe_phy_sfp_unsupported;
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
		}
	}

	/* A different module than last time means link setup must be redone */
	if (hw->phy.sfp_type != stored_sfp_type)
		hw->phy.sfp_setup_needed = true;

	/* Multispeed: module advertises both a 1G and the matching 10G rate */
	hw->phy.multispeed_fiber = false;
	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
		hw->phy.multispeed_fiber = true;

	/* Optical modules: determine vendor from the OUI and enforce policy */
	if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
			      IXGBE_SFF_10GBASELR_CAPABLE)) {
		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
					&oui_bytes[0]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
					&oui_bytes[1]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
					&oui_bytes[2]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		vendor_oui =
			((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
			 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
			 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));

		if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
			hw->phy.type = ixgbe_phy_qsfp_intel;
		else
			hw->phy.type = ixgbe_phy_qsfp_unknown;

		hw->mac.ops.get_device_caps(hw, &enforce_sfp);
		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
			/* Intel modules are always allowed */
			if (hw->phy.type == ixgbe_phy_qsfp_intel)
				return 0;
			/* Module-qualification override: warn but proceed */
			if (hw->allow_unsupported_sfp) {
				e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
				return 0;
			}
			hw_dbg(hw, "QSFP module not supported\n");
			hw->phy.type = ixgbe_phy_sfp_unsupported;
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
		}
		return 0;
	}
	return 0;

err_read_i2c_eeprom:
	/* Treat any I2C read failure as "no module present" */
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	hw->phy.id = 0;
	hw->phy.type = ixgbe_phy_unknown;

	return IXGBE_ERR_SFP_NOT_PRESENT;
}
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
/**
 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
 * @hw: pointer to hardware structure
 * @list_offset: offset to the SFP ID list
 * @data_offset: offset to the SFP data block
 *
 * Walks the SFP module list stored in EEPROM (starting at
 * IXGBE_PHY_INIT_OFFSET_NL) looking for an entry matching the current
 * hw->phy.sfp_type, and returns the offsets of the matching init sequence.
 *
 * Returns 0 on a match; IXGBE_ERR_SFP_NOT_SUPPORTED /
 * IXGBE_ERR_SFP_NOT_PRESENT for unusable or absent modules;
 * IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT when the EEPROM holds no list;
 * IXGBE_ERR_PHY on EEPROM read failure.
 **/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
					u16 *list_offset,
					u16 *data_offset)
{
	u16 sfp_id;
	u16 sfp_type = hw->phy.sfp_type;

	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		return IXGBE_ERR_SFP_NOT_PRESENT;

	/* This device does not support DA copper modules */
	if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	/* Active-limiting DA and 1G modules are looked up under the SR/LR
	 * entry for their port — the EEPROM list only carries SR/LR IDs
	 * for these types.
	 */
	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_sx_core0)
		sfp_type = ixgbe_sfp_type_srlr_core0;
	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_sx_core1)
		sfp_type = ixgbe_sfp_type_srlr_core1;

	/* Read offset to the PHY init contents */
	if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
		hw_err(hw, "eeprom read at %d failed\n",
		       IXGBE_PHY_INIT_OFFSET_NL);
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
	}

	/* 0 and 0xFFFF both mean "no init sequence stored" */
	if ((!*list_offset) || (*list_offset == 0xFFFF))
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;

	/* Shift offset to first ID word */
	(*list_offset)++;

	/* Walk the list: each entry is an ID word followed by a data-offset
	 * word; the list is terminated by IXGBE_PHY_INIT_END_NL.
	 */
	if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
		goto err_phy;

	while (sfp_id != IXGBE_PHY_INIT_END_NL) {
		if (sfp_id == sfp_type) {
			(*list_offset)++;
			if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
				goto err_phy;
			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
				hw_dbg(hw, "SFP+ module not supported\n");
				return IXGBE_ERR_SFP_NOT_SUPPORTED;
			} else {
				break;
			}
		} else {
			/* Skip this entry's ID and data-offset words */
			(*list_offset) += 2;
			if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
				goto err_phy;
		}
	}

	if (sfp_id == IXGBE_PHY_INIT_END_NL) {
		hw_dbg(hw, "No matching SFP+ module found\n");
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	return 0;

err_phy:
	hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
	return IXGBE_ERR_PHY;
}
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1937 u8 *eeprom_data)
1938{
1939 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1940 IXGBE_I2C_EEPROM_DEV_ADDR,
1941 eeprom_data);
1942}
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1953 u8 *sff8472_data)
1954{
1955 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1956 IXGBE_I2C_EEPROM_DEV_ADDR2,
1957 sff8472_data);
1958}
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1969 u8 eeprom_data)
1970{
1971 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1972 IXGBE_I2C_EEPROM_DEV_ADDR,
1973 eeprom_data);
1974}
1975
1976
1977
1978
1979
1980
1981
1982static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
1983{
1984 if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
1985 offset == IXGBE_SFF_IDENTIFIER &&
1986 hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1987 return true;
1988 return false;
1989}
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
/**
 * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address
 * @data: value read
 * @lock: true if taking the SWFW semaphore around the transaction
 *
 * Performs a bit-banged I2C byte read: START, device address (write),
 * register offset, repeated START, device address (read), data byte, NACK,
 * STOP. Retries the whole transaction on failure.
 **/
static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					   u8 dev_addr, u8 *data, bool lock)
{
	s32 status;
	u32 max_retry = 10;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	/* NACK the data byte: a single-byte read ends the transfer */
	bool nack = true;

	if (hw->mac.type >= ixgbe_mac_X550)
		max_retry = 3;
	/* Presence probes are expected to fail; retry fewer times */
	if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
		max_retry = IXGBE_SFP_DETECT_RETRIES;

	*data = 0;

	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;

		ixgbe_i2c_start(hw);

		/* Device address with write indication (bit 0 clear) */
		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		/* Repeated START to switch to the read phase */
		ixgbe_i2c_start(hw);

		/* Device address with read indication (bit 0 set) */
		status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_in_i2c_byte(hw, data);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_bit(hw, nack);
		if (status != 0)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		ixgbe_i2c_bus_clear(hw);
		if (lock) {
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
			/* Give the module/firmware time to recover before
			 * re-acquiring the semaphore and retrying.
			 */
			msleep(100);
		}
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte read error - Retrying.\n");
		else
			hw_dbg(hw, "I2C byte read error.\n");

	} while (retry < max_retry);

	return status;
}
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2093 u8 dev_addr, u8 *data)
2094{
2095 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2096 data, true);
2097}
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2110 u8 dev_addr, u8 *data)
2111{
2112 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2113 data, false);
2114}
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
/**
 * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address
 * @data: value to write
 * @lock: true if taking the SWFW semaphore around the transaction
 *
 * Performs a bit-banged I2C byte write: START, device address, register
 * offset, data byte, STOP. Unlike the read path, the semaphore (when
 * requested) is held across all retries.
 **/
static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					    u8 dev_addr, u8 data, bool lock)
{
	s32 status;
	u32 max_retry = 1;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;

	if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return IXGBE_ERR_SWFW_SYNC;

	do {
		ixgbe_i2c_start(hw);

		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, data);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* Recover the bus before the next attempt */
		ixgbe_i2c_bus_clear(hw);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte write error - Retrying.\n");
		else
			hw_dbg(hw, "I2C byte write error.\n");
	} while (retry < max_retry);

	if (lock)
		hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	return status;
}
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2196 u8 dev_addr, u8 data)
2197{
2198 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2199 data, true);
2200}
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2213 u8 dev_addr, u8 data)
2214{
2215 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2216 data, false);
2217}
2218
2219
2220
2221
2222
2223
2224
2225
/**
 * ixgbe_i2c_start - Sets I2C start condition
 * @hw: pointer to hardware structure
 *
 * Sets the I2C start condition (SDA falling edge while SCL is high),
 * with bit-bang mode enabled where the hardware has such a control bit.
 **/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));

	/* Enable software bit-bang control of the I2C pins */
	i2cctl |= IXGBE_I2C_BB_EN(hw);

	/* Start condition must begin with data and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for start condition (4.7us) */
	udelay(IXGBE_I2C_T_SU_STA);

	/* SDA falls while SCL high -> START */
	ixgbe_set_i2c_data(hw, &i2cctl, 0);

	/* Hold time for start condition (4us) */
	udelay(IXGBE_I2C_T_HD_STA);

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	udelay(IXGBE_I2C_T_LOW);

}
2250
2251
2252
2253
2254
2255
2256
2257
2258
/**
 * ixgbe_i2c_stop - Sets I2C stop condition
 * @hw: pointer to hardware structure
 *
 * Sets the I2C stop condition (SDA rising edge while SCL is high), then
 * disables bit-bang mode and tristates the data/clock outputs on hardware
 * that has those control bits.
 **/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
	u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);

	/* Stop condition must begin with data low and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 0);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for stop condition (4us) */
	udelay(IXGBE_I2C_T_SU_STO);

	/* SDA rises while SCL high -> STOP */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);

	/* Bus free time between stop and start (4.7us) */
	udelay(IXGBE_I2C_T_BUF);

	/* Hand the pins back to hardware control where supported */
	if (bb_en_bit || data_oe_bit || clk_oe_bit) {
		i2cctl &= ~bb_en_bit;
		i2cctl |= data_oe_bit | clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
}
2285
2286
2287
2288
2289
2290
2291
2292
2293static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
2294{
2295 s32 i;
2296 bool bit = false;
2297
2298 *data = 0;
2299 for (i = 7; i >= 0; i--) {
2300 ixgbe_clock_in_i2c_bit(hw, &bit);
2301 *data |= bit << i;
2302 }
2303
2304 return 0;
2305}
2306
2307
2308
2309
2310
2311
2312
2313
2314static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
2315{
2316 s32 status;
2317 s32 i;
2318 u32 i2cctl;
2319 bool bit = false;
2320
2321 for (i = 7; i >= 0; i--) {
2322 bit = (data >> i) & 0x1;
2323 status = ixgbe_clock_out_i2c_bit(hw, bit);
2324
2325 if (status != 0)
2326 break;
2327 }
2328
2329
2330 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2331 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2332 i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
2333 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2334 IXGBE_WRITE_FLUSH(hw);
2335
2336 return status;
2337}
2338
2339
2340
2341
2342
2343
2344
/**
 * ixgbe_get_i2c_ack - Polls for I2C ACK
 * @hw: pointer to hardware structure
 *
 * Clocks in one bit via I2C data/clock and polls for the slave pulling
 * SDA low (ACK). Returns 0 on ACK, IXGBE_ERR_I2C when no ACK is seen
 * within the timeout.
 **/
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
	s32 status = 0;
	u32 i = 0;
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 timeout = 10;
	bool ack = true;

	/* On hardware with an output-enable bit, release SDA so the
	 * slave can drive it.
	 */
	if (data_oe_bit) {
		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock is 4us */
	udelay(IXGBE_I2C_T_HIGH);

	/* Poll for ACK: a valid ACK is SDA pulled low by the slave.
	 * Note: some revisions need time before SDA reads back low.
	 */
	for (i = 0; i < timeout; i++) {
		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
		ack = ixgbe_get_i2c_data(hw, &i2cctl);

		udelay(1);
		if (ack == 0)
			break;
	}

	if (ack == 1) {
		hw_dbg(hw, "I2C ack was not received.\n");
		status = IXGBE_ERR_I2C;
	}

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	udelay(IXGBE_I2C_T_LOW);

	return status;
}
2388
2389
2390
2391
2392
2393
2394
2395
/**
 * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
 * @hw: pointer to hardware structure
 * @data: read data value
 *
 * Clocks in one bit: raises SCL, samples SDA during the clock-high
 * period, then lowers SCL. Always returns 0.
 **/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);

	/* On hardware with an output-enable bit, release SDA so the
	 * slave can drive it.
	 */
	if (data_oe_bit) {
		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock is 4us */
	udelay(IXGBE_I2C_T_HIGH);

	/* Sample SDA while the clock is high */
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	*data = ixgbe_get_i2c_data(hw, &i2cctl);

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	udelay(IXGBE_I2C_T_LOW);

	return 0;
}
2422
2423
2424
2425
2426
2427
2428
2429
2430static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
2431{
2432 s32 status;
2433 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2434
2435 status = ixgbe_set_i2c_data(hw, &i2cctl, data);
2436 if (status == 0) {
2437 ixgbe_raise_i2c_clk(hw, &i2cctl);
2438
2439
2440 udelay(IXGBE_I2C_T_HIGH);
2441
2442 ixgbe_lower_i2c_clk(hw, &i2cctl);
2443
2444
2445
2446
2447 udelay(IXGBE_I2C_T_LOW);
2448 } else {
2449 hw_dbg(hw, "I2C data was not set to %X\n", data);
2450 return IXGBE_ERR_I2C;
2451 }
2452
2453 return 0;
2454}
2455
2456
2457
2458
2459
2460
2461
2462
/**
 * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Raises the clock line, re-reading the pin and re-driving it until it
 * actually reads high — this tolerates slaves that stretch the clock by
 * holding SCL low. Gives up silently after the stretching timeout.
 **/
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
	u32 i = 0;
	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
	u32 i2cctl_r = 0;

	/* On hardware with an output-enable bit, release the clock line
	 * before driving it high.
	 */
	if (clk_oe_bit) {
		*i2cctl |= clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	}

	for (i = 0; i < timeout; i++) {
		*i2cctl |= IXGBE_I2C_CLK_OUT(hw);
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);
		/* SCL rise time (1000ns) */
		udelay(IXGBE_I2C_T_RISE);

		/* Done once the pin reads back high (no clock stretching) */
		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
		if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
			break;
	}
}
2487
2488
2489
2490
2491
2492
2493
2494
2495
/**
 * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Lowers the clock line (and enables the clock output driver on hardware
 * with an output-enable bit), then waits out the SCL fall time.
 **/
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{

	*i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
	*i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);

	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	/* SCL fall time (300ns) */
	udelay(IXGBE_I2C_T_FALL);
}
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
/**
 * ixgbe_set_i2c_data - Sets the I2C data bit
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 * @data: I2C data value (0 or 1) to set
 *
 * Drives the SDA line to the requested level and verifies it (for a high
 * level only, since a slave could legitimately be holding the open-drain
 * line low). Returns IXGBE_ERR_I2C when the read-back does not match.
 **/
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);

	if (data)
		*i2cctl |= IXGBE_I2C_DATA_OUT(hw);
	else
		*i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
	/* Enable the output driver while we actively drive the line */
	*i2cctl &= ~data_oe_bit;

	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
	udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);

	/* Can't verify a driven-low line on an open-drain bus */
	if (!data)
		return 0;
	if (data_oe_bit) {
		/* Release the line so the pin reflects the bus state */
		*i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}

	/* Verify data was set correctly */
	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
		hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
		return IXGBE_ERR_I2C;
	}

	return 0;
}
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
2561{
2562 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2563
2564 if (data_oe_bit) {
2565 *i2cctl |= data_oe_bit;
2566 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2567 IXGBE_WRITE_FLUSH(hw);
2568 udelay(IXGBE_I2C_T_FALL);
2569 }
2570
2571 if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
2572 return true;
2573 return false;
2574}
2575
2576
2577
2578
2579
2580
2581
2582
/**
 * ixgbe_i2c_bus_clear - Clears the I2C bus
 * @hw: pointer to hardware structure
 *
 * Clears the I2C bus by sending nine clock pulses with the data line
 * released. Used when a transaction fails mid-transfer and a slave may
 * still be driving SDA.
 **/
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
	u32 i2cctl;
	u32 i;

	ixgbe_i2c_start(hw);
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));

	/* Release SDA so a stuck slave can finish shifting out its byte */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);

	for (i = 0; i < 9; i++) {
		ixgbe_raise_i2c_clk(hw, &i2cctl);

		/* Min high period of clock is 4us */
		udelay(IXGBE_I2C_T_HIGH);

		ixgbe_lower_i2c_clk(hw, &i2cctl);

		/* Min low period of clock is 4.7us */
		udelay(IXGBE_I2C_T_LOW);
	}

	ixgbe_i2c_start(hw);

	/* Put the I2C bus back to its default/idle state */
	ixgbe_i2c_stop(hw);
}
2610
2611
2612
2613
2614
2615
2616
2617s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2618{
2619 u16 phy_data = 0;
2620
2621 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
2622 return 0;
2623
2624
2625 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2626 MDIO_MMD_PMAPMD, &phy_data);
2627
2628 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2629 return 0;
2630
2631 return IXGBE_ERR_OVERTEMP;
2632}
2633
2634
2635
2636
2637
2638s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
2639{
2640 u32 status;
2641 u16 reg;
2642
2643
2644 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2645 return 0;
2646
2647 if (!on && ixgbe_mng_present(hw))
2648 return 0;
2649
2650 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®);
2651 if (status)
2652 return status;
2653
2654 if (on) {
2655 reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2656 } else {
2657 if (ixgbe_check_reset_blocked(hw))
2658 return 0;
2659 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2660 }
2661
2662 status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg);
2663 return status;
2664}
2665