1
2
3
4#include <linux/pci.h>
5#include <linux/delay.h>
6
7#include "igc_mac.h"
8#include "igc_hw.h"
9
10
11static s32 igc_set_fc_watermarks(struct igc_hw *hw);
12
13
14
15
16
17
18
19
20
21
22
23
24s32 igc_disable_pcie_master(struct igc_hw *hw)
25{
26 s32 timeout = MASTER_DISABLE_TIMEOUT;
27 s32 ret_val = 0;
28 u32 ctrl;
29
30 ctrl = rd32(IGC_CTRL);
31 ctrl |= IGC_CTRL_GIO_MASTER_DISABLE;
32 wr32(IGC_CTRL, ctrl);
33
34 while (timeout) {
35 if (!(rd32(IGC_STATUS) &
36 IGC_STATUS_GIO_MASTER_ENABLE))
37 break;
38 usleep_range(2000, 3000);
39 timeout--;
40 }
41
42 if (!timeout) {
43 hw_dbg("Master requests are pending.\n");
44 ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING;
45 goto out;
46 }
47
48out:
49 return ret_val;
50}
51
52
53
54
55
56
57
58
59
60
61void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count)
62{
63 u8 mac_addr[ETH_ALEN] = {0};
64 u32 i;
65
66
67 hw_dbg("Programming MAC Address into RAR[0]\n");
68
69 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
70
71
72 hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
73 for (i = 1; i < rar_count; i++)
74 hw->mac.ops.rar_set(hw, mac_addr, i);
75}
76
77
78
79
80
81
82
83
84
85
86
87s32 igc_setup_link(struct igc_hw *hw)
88{
89 s32 ret_val = 0;
90
91
92
93
94 if (igc_check_reset_block(hw))
95 goto out;
96
97
98
99
100 if (hw->fc.requested_mode == igc_fc_default)
101 hw->fc.requested_mode = igc_fc_full;
102
103
104
105
106
107 hw->fc.current_mode = hw->fc.requested_mode;
108
109 hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
110
111
112 ret_val = hw->mac.ops.setup_physical_interface(hw);
113 if (ret_val)
114 goto out;
115
116
117
118
119
120
121 hw_dbg("Initializing the Flow Control address, type and timer regs\n");
122 wr32(IGC_FCT, FLOW_CONTROL_TYPE);
123 wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
124 wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW);
125
126 wr32(IGC_FCTTV, hw->fc.pause_time);
127
128 ret_val = igc_set_fc_watermarks(hw);
129
130out:
131 return ret_val;
132}
133
134
135
136
137
138
139
140
141
142
143
144s32 igc_force_mac_fc(struct igc_hw *hw)
145{
146 s32 ret_val = 0;
147 u32 ctrl;
148
149 ctrl = rd32(IGC_CTRL);
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
169
170 switch (hw->fc.current_mode) {
171 case igc_fc_none:
172 ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE));
173 break;
174 case igc_fc_rx_pause:
175 ctrl &= (~IGC_CTRL_TFCE);
176 ctrl |= IGC_CTRL_RFCE;
177 break;
178 case igc_fc_tx_pause:
179 ctrl &= (~IGC_CTRL_RFCE);
180 ctrl |= IGC_CTRL_TFCE;
181 break;
182 case igc_fc_full:
183 ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE);
184 break;
185 default:
186 hw_dbg("Flow control param set incorrectly\n");
187 ret_val = -IGC_ERR_CONFIG;
188 goto out;
189 }
190
191 wr32(IGC_CTRL, ctrl);
192
193out:
194 return ret_val;
195}
196
197
198
199
200
201
202
203
204
205static s32 igc_set_fc_watermarks(struct igc_hw *hw)
206{
207 u32 fcrtl = 0, fcrth = 0;
208
209
210
211
212
213
214
215 if (hw->fc.current_mode & igc_fc_tx_pause) {
216
217
218
219
220 fcrtl = hw->fc.low_water;
221 if (hw->fc.send_xon)
222 fcrtl |= IGC_FCRTL_XONE;
223
224 fcrth = hw->fc.high_water;
225 }
226 wr32(IGC_FCRTL, fcrtl);
227 wr32(IGC_FCRTH, fcrth);
228
229 return 0;
230}
231
232
233
234
235
236
237
/**
 * igc_clear_hw_cntrs_base - Clear base hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the base hardware counters: the statistics registers are
 * clear-on-read, so reading each one resets it.  The read values are
 * intentionally discarded.
 */
void igc_clear_hw_cntrs_base(struct igc_hw *hw)
{
	/* Error, collision and flow-control counters */
	rd32(IGC_CRCERRS);
	rd32(IGC_SYMERRS);
	rd32(IGC_MPC);
	rd32(IGC_SCC);
	rd32(IGC_ECOL);
	rd32(IGC_MCC);
	rd32(IGC_LATECOL);
	rd32(IGC_COLC);
	rd32(IGC_DC);
	rd32(IGC_SEC);
	rd32(IGC_RLEC);
	rd32(IGC_XONRXC);
	rd32(IGC_XONTXC);
	rd32(IGC_XOFFRXC);
	rd32(IGC_XOFFTXC);
	rd32(IGC_FCRUC);
	/* Good packet / octet counters (L suffix must be read before H) */
	rd32(IGC_GPRC);
	rd32(IGC_BPRC);
	rd32(IGC_MPRC);
	rd32(IGC_GPTC);
	rd32(IGC_GORCL);
	rd32(IGC_GORCH);
	rd32(IGC_GOTCL);
	rd32(IGC_GOTCH);
	rd32(IGC_RNBC);
	rd32(IGC_RUC);
	rd32(IGC_RFC);
	rd32(IGC_ROC);
	rd32(IGC_RJC);
	rd32(IGC_TORL);
	rd32(IGC_TORH);
	rd32(IGC_TOTL);
	rd32(IGC_TOTH);
	rd32(IGC_TPR);
	rd32(IGC_TPT);
	rd32(IGC_MPTC);
	rd32(IGC_BPTC);

	/* Packet-size histogram counters, receive then transmit */
	rd32(IGC_PRC64);
	rd32(IGC_PRC127);
	rd32(IGC_PRC255);
	rd32(IGC_PRC511);
	rd32(IGC_PRC1023);
	rd32(IGC_PRC1522);
	rd32(IGC_PTC64);
	rd32(IGC_PTC127);
	rd32(IGC_PTC255);
	rd32(IGC_PTC511);
	rd32(IGC_PTC1023);
	rd32(IGC_PTC1522);

	/* Additional error and TCP segmentation counters */
	rd32(IGC_ALGNERRC);
	rd32(IGC_RXERRC);
	rd32(IGC_TNCRS);
	rd32(IGC_CEXTERR);
	rd32(IGC_TSCTC);
	rd32(IGC_TSCTFC);

	/* Management packet counters */
	rd32(IGC_MGTPRC);
	rd32(IGC_MGTPDC);
	rd32(IGC_MGTPTC);

	/* Interrupt assertion / cause counters */
	rd32(IGC_IAC);
	rd32(IGC_ICRXOC);

	rd32(IGC_ICRXPTC);
	rd32(IGC_ICRXATC);
	rd32(IGC_ICTXPTC);
	rd32(IGC_ICTXATC);
	rd32(IGC_ICTXQEC);
	rd32(IGC_ICTXQMTC);
	rd32(IGC_ICRXDMTC);

	/* Host/BMC traffic counters */
	rd32(IGC_CBTMPC);
	rd32(IGC_HTDPMC);
	rd32(IGC_CBRMPC);
	rd32(IGC_RPTHC);
	rd32(IGC_HGPTC);
	rd32(IGC_HTCBDPC);
	rd32(IGC_HGORCL);
	rd32(IGC_HGORCH);
	rd32(IGC_HGOTCL);
	rd32(IGC_HGOTCH);
	rd32(IGC_LENERRS);
}
325
326
327
328
329
330
331
332
333
334
335void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
336{
337 u32 rar_low, rar_high;
338
339
340
341
342 rar_low = ((u32)addr[0] |
343 ((u32)addr[1] << 8) |
344 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
345
346 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
347
348
349 if (rar_low || rar_high)
350 rar_high |= IGC_RAH_AV;
351
352
353
354
355
356 wr32(IGC_RAL(index), rar_low);
357 wrfl();
358 wr32(IGC_RAH(index), rar_high);
359 wrfl();
360}
361
362
363
364
365
366
367
368
369
370s32 igc_check_for_copper_link(struct igc_hw *hw)
371{
372 struct igc_mac_info *mac = &hw->mac;
373 s32 ret_val;
374 bool link;
375
376
377
378
379
380
381 if (!mac->get_link_status) {
382 ret_val = 0;
383 goto out;
384 }
385
386
387
388
389
390 ret_val = igc_phy_has_link(hw, 1, 0, &link);
391 if (ret_val)
392 goto out;
393
394 if (!link)
395 goto out;
396
397 mac->get_link_status = false;
398
399
400
401
402 igc_check_downshift(hw);
403
404
405
406
407 if (!mac->autoneg) {
408 ret_val = -IGC_ERR_CONFIG;
409 goto out;
410 }
411
412
413
414
415
416 igc_config_collision_dist(hw);
417
418
419
420
421
422
423 ret_val = igc_config_fc_after_link_up(hw);
424 if (ret_val)
425 hw_dbg("Error configuring flow control\n");
426
427out:
428 return ret_val;
429}
430
431
432
433
434
435
436
437
438
439void igc_config_collision_dist(struct igc_hw *hw)
440{
441 u32 tctl;
442
443 tctl = rd32(IGC_TCTL);
444
445 tctl &= ~IGC_TCTL_COLD;
446 tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT;
447
448 wr32(IGC_TCTL, tctl);
449 wrfl();
450}
451
452
453
454
455
456
457
458
459
460
461
462s32 igc_config_fc_after_link_up(struct igc_hw *hw)
463{
464 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
465 struct igc_mac_info *mac = &hw->mac;
466 u16 speed, duplex;
467 s32 ret_val = 0;
468
469
470
471
472
473 if (mac->autoneg_failed) {
474 if (hw->phy.media_type == igc_media_type_copper)
475 ret_val = igc_force_mac_fc(hw);
476 }
477
478 if (ret_val) {
479 hw_dbg("Error forcing flow control settings\n");
480 goto out;
481 }
482
483
484
485
486
487
488 if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) {
489
490
491
492
493 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
494 &mii_status_reg);
495 if (ret_val)
496 goto out;
497 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
498 &mii_status_reg);
499 if (ret_val)
500 goto out;
501
502 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
503 hw_dbg("Copper PHY and Auto Neg has not completed.\n");
504 goto out;
505 }
506
507
508
509
510
511
512
513 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
514 &mii_nway_adv_reg);
515 if (ret_val)
516 goto out;
517 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
518 &mii_nway_lp_ability_reg);
519 if (ret_val)
520 goto out;
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
555 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
556
557
558
559
560
561
562 if (hw->fc.requested_mode == igc_fc_full) {
563 hw->fc.current_mode = igc_fc_full;
564 hw_dbg("Flow Control = FULL.\n");
565 } else {
566 hw->fc.current_mode = igc_fc_rx_pause;
567 hw_dbg("Flow Control = RX PAUSE frames only.\n");
568 }
569 }
570
571
572
573
574
575
576
577
578 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
579 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
580 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
581 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
582 hw->fc.current_mode = igc_fc_tx_pause;
583 hw_dbg("Flow Control = TX PAUSE frames only.\n");
584 }
585
586
587
588
589
590
591
592 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
593 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
594 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
595 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
596 hw->fc.current_mode = igc_fc_rx_pause;
597 hw_dbg("Flow Control = RX PAUSE frames only.\n");
598 }
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619 else if ((hw->fc.requested_mode == igc_fc_none) ||
620 (hw->fc.requested_mode == igc_fc_tx_pause) ||
621 (hw->fc.strict_ieee)) {
622 hw->fc.current_mode = igc_fc_none;
623 hw_dbg("Flow Control = NONE.\n");
624 } else {
625 hw->fc.current_mode = igc_fc_rx_pause;
626 hw_dbg("Flow Control = RX PAUSE frames only.\n");
627 }
628
629
630
631
632
633 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
634 if (ret_val) {
635 hw_dbg("Error getting link speed and duplex\n");
636 goto out;
637 }
638
639 if (duplex == HALF_DUPLEX)
640 hw->fc.current_mode = igc_fc_none;
641
642
643
644
645 ret_val = igc_force_mac_fc(hw);
646 if (ret_val) {
647 hw_dbg("Error forcing flow control settings\n");
648 goto out;
649 }
650 }
651
652out:
653 return 0;
654}
655
656
657
658
659
660
661
662s32 igc_get_auto_rd_done(struct igc_hw *hw)
663{
664 s32 ret_val = 0;
665 s32 i = 0;
666
667 while (i < AUTO_READ_DONE_TIMEOUT) {
668 if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD)
669 break;
670 usleep_range(1000, 2000);
671 i++;
672 }
673
674 if (i == AUTO_READ_DONE_TIMEOUT) {
675 hw_dbg("Auto read by HW from NVM has not completed.\n");
676 ret_val = -IGC_ERR_RESET;
677 goto out;
678 }
679
680out:
681 return ret_val;
682}
683
684
685
686
687
688
689
690
691
692
693s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
694 u16 *duplex)
695{
696 u32 status;
697
698 status = rd32(IGC_STATUS);
699 if (status & IGC_STATUS_SPEED_1000) {
700
701
702
703
704 if (hw->mac.type == igc_i225 &&
705 (status & IGC_STATUS_SPEED_2500)) {
706 *speed = SPEED_2500;
707 hw_dbg("2500 Mbs, ");
708 } else {
709 *speed = SPEED_1000;
710 hw_dbg("1000 Mbs, ");
711 }
712 } else if (status & IGC_STATUS_SPEED_100) {
713 *speed = SPEED_100;
714 hw_dbg("100 Mbs, ");
715 } else {
716 *speed = SPEED_10;
717 hw_dbg("10 Mbs, ");
718 }
719
720 if (status & IGC_STATUS_FD) {
721 *duplex = FULL_DUPLEX;
722 hw_dbg("Full Duplex\n");
723 } else {
724 *duplex = HALF_DUPLEX;
725 hw_dbg("Half Duplex\n");
726 }
727
728 return 0;
729}
730
731
732
733
734
735
736
737void igc_put_hw_semaphore(struct igc_hw *hw)
738{
739 u32 swsm;
740
741 swsm = rd32(IGC_SWSM);
742
743 swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI);
744
745 wr32(IGC_SWSM, swsm);
746}
747
748
749
750
751
752
753
754
755bool igc_enable_mng_pass_thru(struct igc_hw *hw)
756{
757 bool ret_val = false;
758 u32 fwsm, factps;
759 u32 manc;
760
761 if (!hw->mac.asf_firmware_present)
762 goto out;
763
764 manc = rd32(IGC_MANC);
765
766 if (!(manc & IGC_MANC_RCV_TCO_EN))
767 goto out;
768
769 if (hw->mac.arc_subsystem_valid) {
770 fwsm = rd32(IGC_FWSM);
771 factps = rd32(IGC_FACTPS);
772
773 if (!(factps & IGC_FACTPS_MNGCG) &&
774 ((fwsm & IGC_FWSM_MODE_MASK) ==
775 (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) {
776 ret_val = true;
777 goto out;
778 }
779 } else {
780 if ((manc & IGC_MANC_SMBUS_EN) &&
781 !(manc & IGC_MANC_ASF_EN)) {
782 ret_val = true;
783 goto out;
784 }
785 }
786
787out:
788 return ret_val;
789}
790