1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include <linux/sched.h>
31
32#include "ixgbe.h"
33#include "ixgbe_phy.h"
34
35#define IXGBE_82598_MAX_TX_QUEUES 32
36#define IXGBE_82598_MAX_RX_QUEUES 64
37#define IXGBE_82598_RAR_ENTRIES 16
38#define IXGBE_82598_MC_TBL_SIZE 128
39#define IXGBE_82598_VFT_TBL_SIZE 128
40
41static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed,
43 bool *autoneg);
44static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed,
46 bool autoneg,
47 bool autoneg_wait_to_complete);
48static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
49 u8 *eeprom_data);
50
51
52
53
54
55
56
57
58
59
60
/**
 * ixgbe_set_pcie_completion_timeout - lengthen the PCIe completion timeout
 * @hw: pointer to the HW structure
 *
 * If GCR does not already have a completion timeout programmed, select a
 * longer one: the 10ms encoding in GCR for capability-version-1 parts, or
 * 16ms via the PCIe Device Control 2 config word for version-2 parts.
 * In every exit path the completion-timeout-resend bit is cleared before
 * GCR is written back.
 */
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* a timeout is already configured; only clear the resend bit below */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * Capability-version-1 devices only expose the timeout through GCR,
	 * so program the 10ms encoding directly there.
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * Capability-version-2 devices take the timeout from the PCIe
	 * Device Control 2 register in config space; request 16ms there.
	 */
	pci_read_config_word(adapter->pdev,
			     IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	pci_write_config_word(adapter->pdev,
			      IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
95
96
97
98
99
100
101
102
103static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
104{
105 struct ixgbe_adapter *adapter = hw->back;
106 u16 msix_count;
107 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
108 &msix_count);
109 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
110
111
112 msix_count++;
113
114 return msix_count;
115}
116
117
118
119static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
120{
121 struct ixgbe_mac_info *mac = &hw->mac;
122
123
124 ixgbe_identify_phy_generic(hw);
125
126 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
127 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
128 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
129 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
130 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
131 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
132
133 return 0;
134}
135
136
137
138
139
140
141
142
143
144
/**
 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Identifies the PHY, overrides the MAC link ops with the copper variants
 * when the media type is copper, and performs PHY-type-specific setup.
 * For the NL PHY the attached SFP module must be identified and supported
 * and its EEPROM init-sequence offsets readable; otherwise
 * IXGBE_ERR_SFP_NOT_SUPPORTED is returned.
 */
static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 list_offset, data_offset;

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Copper media needs PHY-driven link setup/capability routines */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
			&ixgbe_get_copper_link_capabilities_82598;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX PHYs have dedicated link-check/firmware helpers */
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Identify the SFP+ module; an unknown module is an error */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != 0)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* A module without init-sequence offsets is unsupported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != 0) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}
196
197
198
199
200
201
202
203
204static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
205{
206 s32 ret_val = 0;
207
208 ret_val = ixgbe_start_hw_generic(hw);
209
210
211 if (ret_val == 0)
212 ixgbe_set_pcie_completion_timeout(hw);
213
214 return ret_val;
215}
216
217
218
219
220
221
222
223
224
/**
 * ixgbe_get_link_capabilities_82598 - determine link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed (bitmask of IXGBE_LINK_SPEED_* values)
 * @autoneg: true when auto-negotiation is used in the current link mode
 *
 * Decodes the link-mode-select field of AUTOC into a supported-speed mask
 * and an autoneg flag.  Returns IXGBE_ERR_LINK_SETUP for an unrecognized
 * link mode.
 */
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	s32 status = 0;
	u32 autoc = 0;

	/*
	 * Prefer the stored original AUTOC value: reset_hw may have
	 * rewritten the live register, and the saved copy reflects the
	 * settings in effect before driver intervention.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		/* speeds follow the advertised KX4 (10G) / KX (1G) bits */
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}
275
276
277
278
279
280
281
282
283
284static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
285 ixgbe_link_speed *speed,
286 bool *autoneg)
287{
288 s32 status = IXGBE_ERR_LINK_SETUP;
289 u16 speed_ability;
290
291 *speed = 0;
292 *autoneg = true;
293
294 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
295 &speed_ability);
296
297 if (status == 0) {
298 if (speed_ability & MDIO_SPEED_10G)
299 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
300 if (speed_ability & MDIO_PMA_SPEED_1000)
301 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
302 }
303
304 return status;
305}
306
307
308
309
310
311
312
/**
 * ixgbe_get_media_type_82598 - determine media type
 * @hw: pointer to hardware structure
 *
 * Maps the PCI device ID to a media type: backplane, fiber, CX4, or
 * copper.  Unrecognized device IDs report ixgbe_media_type_unknown.
 */
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	/* media type is fixed per device ID on 82598 */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}

	return media_type;
}
346
347
348
349
350
351
352
353
/**
 * ixgbe_fc_enable_82598 - enable flow control
 * @hw: pointer to hardware structure
 * @packetbuf_num: packet buffer number (used to index FCRTL/FCRTH/FCTTV)
 *
 * Programs receive (FCTRL.RFCE) and transmit (RMCS.TFCE) flow control
 * according to hw->fc.current_mode, then writes the water marks and pause
 * timer for the given packet buffer.  PFC mode is handled elsewhere and
 * skips this function entirely.
 */
static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = 0;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;

#ifdef CONFIG_DCB
	/* priority flow control is configured by the DCB code instead */
	if (hw->fc.requested_mode == ixgbe_fc_pfc)
		goto out;

#endif

	/* negotiate fc parameters before applying them */
	ret_val = ixgbe_fc_autoneg(hw);
	if (ret_val)
		goto out;

	/* start from a clean slate: clear Rx FC and priority-FC enables */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	/* likewise clear both Tx FC enables (link-level and priority) */
	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or
		 * autoneg; nothing to enable.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx flow control only: honor received pause frames but
		 * never transmit any.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx flow control only: transmit 802.3x pause frames but
		 * ignore received ones.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* symmetric flow control: enable both directions */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
#ifdef CONFIG_DCB
	case ixgbe_fc_pfc:
		goto out;
		break;
#endif
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* discard pause frames once processed, then commit both registers */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* water marks only matter when we can transmit pause frames */
	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		if (hw->fc.send_xon) {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
					(hw->fc.low_water | IXGBE_FCRTL_XONE));
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
					hw->fc.low_water);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
				(hw->fc.high_water | IXGBE_FCRTH_FCEN));
	}

	/*
	 * Each FCTTV register holds two 16-bit pause-time fields; select
	 * the half matching this packet buffer's parity.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
	if ((packetbuf_num & 1) == 0)
		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
	else
		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);

	/* refresh threshold is half the pause time */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));

out:
	return ret_val;
}
465
466
467
468
469
470
471
472
/**
 * ixgbe_start_mac_link_82598 - configure MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: true when waiting is needed to complete
 *
 * Restarts auto-negotiation by setting AUTOC.AN_RESTART.  When asked to
 * wait, and the link mode is KX4-based, polls LINKS for autoneg
 * completion for up to IXGBE_AUTO_NEG_TIME iterations (100 ms apart) and
 * returns IXGBE_ERR_AUTONEG_NOT_COMPLETE on timeout.
 */
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = 0;

	/* restart link auto-negotiation */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* only wait for completion when a KX4 autoneg mode is selected */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* just in case autoneg times out */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msleep(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				hw_dbg(hw, "Autonegotiation did not complete.\n");
			}
		}
	}

	/* settle time after the AN restart */
	msleep(50);

	return status;
}
511
512
513
514
515
516
517
518
519
520
/**
 * ixgbe_check_mac_link_82598 - get link/speed status
 * @hw: pointer to hardware structure
 * @speed: resulting link speed
 * @link_up: true is link is up, false otherwise
 * @link_up_wait_to_complete: poll for link instead of a single read
 *
 * Reads the LINKS register to determine link state and speed.  Boards
 * with the NL PHY additionally verify link at the PHY level first; if
 * the PHY reports link down, the MAC check is skipped.  Always returns 0.
 */
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	/*
	 * NL PHY: check link at the PHY via vendor-specific MDIO registers
	 * (0xC79F link status, 0xC00C adaptation status).  The link
	 * register is read twice back-to-back -- presumably latch-low
	 * semantics so the second read reflects current state; TODO
	 * confirm against the PHY datasheet.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			/* link is up when bit 0 of 0xC79F is set and bit 0
			 * of 0xC00C is clear */
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = true;
					break;
				} else {
					*link_up = false;
				}
				msleep(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     MDIO_MMD_PMAPMD,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     MDIO_MMD_PMAPMD,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = true;
			else
				*link_up = false;
		}

		/* no point checking the MAC when the PHY has no link */
		if (*link_up == false)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		/* poll the MAC link bit, 100 ms per iteration */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	/* LINKS speed bit distinguishes 10G from 1G */
	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	/* with link down, reset the negotiated flow-control state */
	if (*link_up == false) {
		hw->fc.current_mode = ixgbe_fc_none;
		hw->fc.fc_was_autonegged = false;
	}
out:
	return 0;
}
600
601
602
603
604
605
606
607
608
609
610
/**
 * ixgbe_setup_mac_link_82598 - set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if auto-negotiation enabled (overwritten by the
 *           capability lookup below)
 * @autoneg_wait_to_complete: true when waiting is needed to complete
 *
 * Masks the requested speed against the device's link capabilities,
 * updates the KX/KX4 advertisement bits in AUTOC for autoneg link modes,
 * and restarts the link.  Returns IXGBE_ERR_LINK_SETUP when none of the
 * requested speeds is supported.
 */
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed, bool autoneg,
				      bool autoneg_wait_to_complete)
{
	s32 status = 0;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	/* only allow speeds the current link mode can actually provide */
	ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* for KX4 autoneg modes, re-advertise only the requested speeds */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		/* avoid a redundant register write */
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == 0) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AN_RESTART bit and, if
		 * requested, wait for autoneg to complete.
		 */
		status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
	}

	return status;
}
651
652
653
654
655
656
657
658
659
660
661
662static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
663 ixgbe_link_speed speed,
664 bool autoneg,
665 bool autoneg_wait_to_complete)
666{
667 s32 status;
668
669
670 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
671 autoneg_wait_to_complete);
672
673
674 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
675
676 return status;
677}
678
679
680
681
682
683
684
685
686
/**
 * ixgbe_reset_hw_82598 - perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by stopping the adapter, un-gating the Atlas analog
 * Tx lanes if they were powered down, resetting the PHY (unless disabled),
 * performing a global MAC reset, and then restoring link settings and
 * receive address registers.  A PHY init/reset failure is reported in
 * preference to the MAC reset status.
 */
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = 0;
	s32 phy_status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8 analog_val;

	/* quiesce Tx/Rx and interrupts before resetting */
	hw->mac.ops.stop_adapter(hw);

	/*
	 * If the Atlas analog Tx lanes are gated (PDN_TX_REG_EN set),
	 * clear the power-down/quiesce bits for the register, 10G, 1G,
	 * and AN lanes so transmit works after the reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == false) {
		/*
		 * PHY ops must be identified and initialized prior to
		 * reset.  An unsupported SFP aborts the whole reset; a
		 * missing SFP just skips the PHY reset.
		 */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto no_phy_reset;

		hw->phy.ops.reset(hw);
	}

no_phy_reset:
	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verifying there are no pending requests before reset.
	 */
	status = ixgbe_disable_pcie_master(hw);
	if (status != 0) {
		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
	}

	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used it might reset the MAC when mng is using
	 * it.
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/* clear GHECCR bits 21, 18, 9, 6 after reset -- hardware
	 * workaround; exact bit semantics not documented here */
	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value on first reset so it can be
	 * restored on subsequent resets (reset clobbers the register).
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/*
	 * Store MAC address from RAR0, clear receive address registers,
	 * and clear the multicast table.
	 */
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

reset_hw_out:
	/* a PHY failure takes precedence over the MAC reset status */
	if (phy_status)
		status = phy_status;

	return status;
}
813
814
815
816
817
818
819
820static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
821{
822 u32 rar_high;
823
824 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
825 rar_high &= ~IXGBE_RAH_VIND_MASK;
826 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
827 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
828 return 0;
829}
830
831
832
833
834
835
836
837static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
838{
839 u32 rar_high;
840 u32 rar_entries = hw->mac.num_rar_entries;
841
842 if (rar < rar_entries) {
843 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
844 if (rar_high & IXGBE_RAH_VIND_MASK) {
845 rar_high &= ~IXGBE_RAH_VIND_MASK;
846 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
847 }
848 } else {
849 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
850 }
851
852 return 0;
853}
854
855
856
857
858
859
860
861
862
863
/**
 * ixgbe_set_vfta_82598 - set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter (0-4095)
 * @vind: VMDq output index that maps queue to VLAN id in VFTA (4-bit value)
 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 * Writes the 4-bit VIND value into the VFTAVIND byte array and turns the
 * VLAN's bit in VFTA on or off.  Returns IXGBE_ERR_PARAM for VLAN ids
 * above 4095.
 */
static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array (each word = 32 VLANs) */
	regindex = (vlan >> 5) & 0x7F; /* upper seven bits */

	/* Determine the VIND location: four VFTAVIND registers per word,
	 * each holding eight 4-bit entries */
	vftabyte = ((vlan >> 3) & 0x03); /* bits 3 and 4 select the byte */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits -> nibble offset */

	/* Set the nibble for VMDq indication */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the bit location within the 32-bit VFTA word */
	bitindex = vlan & 0x1F;

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return 0;
}
902
903
904
905
906
907
908
909static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
910{
911 u32 offset;
912 u32 vlanbyte;
913
914 for (offset = 0; offset < hw->mac.vft_size; offset++)
915 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
916
917 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
918 for (offset = 0; offset < hw->mac.vft_size; offset++)
919 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
920 0);
921
922 return 0;
923}
924
925
926
927
928
929
930
931
932
/**
 * ixgbe_read_analog_reg8_82598 - read an 8-bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Triggers the read through ATLASCTL, waits 10us, and returns the low
 * byte of ATLASCTL as the register value.  Always returns 0.
 *
 * NOTE(review): the trigger uses IXGBE_ATLASCTL_WRITE_CMD even though
 * this is the read path -- presumably that bit doubles as the read
 * strobe on this part; confirm against the 82598 datasheet before
 * changing.
 */
static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 atlas_ctl;

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return 0;
}
946
947
948
949
950
951
952
953
954
955static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
956{
957 u32 atlas_ctl;
958
959 atlas_ctl = (reg << 8) | val;
960 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
961 IXGBE_WRITE_FLUSH(hw);
962 udelay(10);
963
964 return 0;
965}
966
967
968
969
970
971
972
973
974
975
/**
 * ixgbe_read_i2c_eeprom_82598 - read a byte of the SFP module's EEPROM
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * On boards with the NL PHY, performs the SFP+ EEPROM read through the
 * PHY's PMA/PMD SDA/SCL address, status, and data registers: write the
 * device address and offset (with the read-command bit), poll the status
 * register until the operation leaves the in-progress state, then fetch
 * the data.  Returns IXGBE_ERR_SFP_NOT_PRESENT when the read does not
 * report PASS, and IXGBE_ERR_PHY for any other PHY type.
 */
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data)
{
	s32 status = 0;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u32 i;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * Compose the I2C command: device address in the high
		 * byte, EEPROM byte offset in the low byte, plus the
		 * read-command flag, then issue it via the PHY.
		 */
		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg(hw,
				      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
				      MDIO_MMD_PMAPMD,
				      sfp_addr);

		/* Poll status until the I2C transaction completes
		 * (up to 100 iterations, 10 ms apart) */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg(hw,
					     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
					     MDIO_MMD_PMAPMD,
					     &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msleep(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			hw_dbg(hw, "EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data register; the byte is in the high half */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
				     MDIO_MMD_PMAPMD, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
		goto out;
	}

out:
	return status;
}
1029
1030
1031
1032
1033
1034
1035
/**
 * ixgbe_get_supported_physical_layer_82598 - determine physical layer(s)
 * @hw: pointer to hardware structure
 *
 * Determines the supported physical-layer bitmask.  Copper PHYs are
 * decided from the PMA/PMD extended-ability register; otherwise the
 * result is derived from the AUTOC link mode, refined by the SFP module
 * type for NL-PHY boards, and finally overridden for device IDs whose
 * physical layer is fixed by the board design.
 */
static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	if (hw->phy.type == ixgbe_phy_tn ||
	    hw->phy.type == ixgbe_phy_cu_unknown) {
		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
				     &ext_ability);
		if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	}

	/* derive the layer from the AUTOC link mode and PMA/PMD selects */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	/* NL-PHY boards: the SFP module type decides the layer */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	/* some device IDs have a board-fixed physical layer */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}
1126
/* MAC operations for 82598: mixes device-specific handlers defined above
 * with the shared generic implementations. */
static struct ixgbe_mac_operations mac_ops_82598 = {
	.init_hw		= &ixgbe_init_hw_generic,
	.reset_hw		= &ixgbe_reset_hw_82598,
	.start_hw		= &ixgbe_start_hw_82598,
	.clear_hw_cntrs		= &ixgbe_clear_hw_cntrs_generic,
	.get_media_type		= &ixgbe_get_media_type_82598,
	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
	.enable_rx_dma		= &ixgbe_enable_rx_dma_generic,
	.get_mac_addr		= &ixgbe_get_mac_addr_generic,
	.stop_adapter		= &ixgbe_stop_adapter_generic,
	.get_bus_info		= &ixgbe_get_bus_info_generic,
	.set_lan_id		= &ixgbe_set_lan_id_multi_port_pcie,
	.read_analog_reg8	= &ixgbe_read_analog_reg8_82598,
	.write_analog_reg8	= &ixgbe_write_analog_reg8_82598,
	.setup_link		= &ixgbe_setup_mac_link_82598,
	.check_link		= &ixgbe_check_mac_link_82598,
	.get_link_capabilities	= &ixgbe_get_link_capabilities_82598,
	.led_on			= &ixgbe_led_on_generic,
	.led_off		= &ixgbe_led_off_generic,
	.blink_led_start	= &ixgbe_blink_led_start_generic,
	.blink_led_stop		= &ixgbe_blink_led_stop_generic,
	.set_rar		= &ixgbe_set_rar_generic,
	.clear_rar		= &ixgbe_clear_rar_generic,
	.set_vmdq		= &ixgbe_set_vmdq_82598,
	.clear_vmdq		= &ixgbe_clear_vmdq_82598,
	.init_rx_addrs		= &ixgbe_init_rx_addrs_generic,
	.update_uc_addr_list	= &ixgbe_update_uc_addr_list_generic,
	.update_mc_addr_list	= &ixgbe_update_mc_addr_list_generic,
	.enable_mc		= &ixgbe_enable_mc_generic,
	.disable_mc		= &ixgbe_disable_mc_generic,
	.clear_vfta		= &ixgbe_clear_vfta_82598,
	.set_vfta		= &ixgbe_set_vfta_82598,
	.fc_enable		= &ixgbe_fc_enable_82598,
};
1161
/* EEPROM operations for 82598: all generic implementations. */
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
	.init_params		= &ixgbe_init_eeprom_params_generic,
	.read			= &ixgbe_read_eeprom_generic,
	.validate_checksum	= &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum	= &ixgbe_update_eeprom_checksum_generic,
};
1168
/* PHY operations for 82598; some entries (reset, check_link) may be
 * overridden per-PHY-type by ixgbe_init_phy_ops_82598(). */
static struct ixgbe_phy_operations phy_ops_82598 = {
	.identify		= &ixgbe_identify_phy_generic,
	.identify_sfp		= &ixgbe_identify_sfp_module_generic,
	.init			= &ixgbe_init_phy_ops_82598,
	.reset			= &ixgbe_reset_phy_generic,
	.read_reg		= &ixgbe_read_phy_reg_generic,
	.write_reg		= &ixgbe_write_phy_reg_generic,
	.setup_link		= &ixgbe_setup_phy_link_generic,
	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_82598,
};
1180
/* Top-level device descriptor exported to the core ixgbe driver. */
struct ixgbe_info ixgbe_82598_info = {
	.mac			= ixgbe_mac_82598EB,
	.get_invariants		= &ixgbe_get_invariants_82598,
	.mac_ops		= &mac_ops_82598,
	.eeprom_ops		= &eeprom_ops_82598,
	.phy_ops		= &phy_ops_82598,
};
1188
1189