1
2
3
4
5
6
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/pci_ids.h>
11#include "ixgb_hw.h"
12#include "ixgb_ids.h"
13
14#include <linux/etherdevice.h>
15
16
17
18static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
19
20static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
21
22static void ixgb_get_bus_info(struct ixgb_hw *hw);
23
24static bool ixgb_link_reset(struct ixgb_hw *hw);
25
26static void ixgb_optics_reset(struct ixgb_hw *hw);
27
28static void ixgb_optics_reset_bcm(struct ixgb_hw *hw);
29
30static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
31
32static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
33
34static void ixgb_clear_vfta(struct ixgb_hw *hw);
35
36static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
37
38static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
39 u32 reg_address,
40 u32 phy_address,
41 u32 device_type);
42
43static bool ixgb_setup_fc(struct ixgb_hw *hw);
44
45static bool mac_addr_valid(u8 *mac_addr);
46
47static u32 ixgb_mac_reset(struct ixgb_hw *hw)
48{
49 u32 ctrl_reg;
50
51 ctrl_reg = IXGB_CTRL0_RST |
52 IXGB_CTRL0_SDP3_DIR |
53 IXGB_CTRL0_SDP2_DIR |
54 IXGB_CTRL0_SDP1_DIR |
55 IXGB_CTRL0_SDP0_DIR |
56 IXGB_CTRL0_SDP3 |
57 IXGB_CTRL0_SDP2 |
58 IXGB_CTRL0_SDP0;
59
60#ifdef HP_ZX1
61
62 IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
63#else
64 IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
65#endif
66
67
68 msleep(IXGB_DELAY_AFTER_RESET);
69 ctrl_reg = IXGB_READ_REG(hw, CTRL0);
70#ifdef DBG
71
72 ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
73#endif
74
75 if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) {
76 ctrl_reg =
77 IXGB_CTRL1_GPI0_EN |
78 IXGB_CTRL1_SDP6_DIR |
79 IXGB_CTRL1_SDP7_DIR |
80 IXGB_CTRL1_SDP6 |
81 IXGB_CTRL1_SDP7;
82 IXGB_WRITE_REG(hw, CTRL1, ctrl_reg);
83 ixgb_optics_reset_bcm(hw);
84 }
85
86 if (hw->phy_type == ixgb_phy_type_txn17401)
87 ixgb_optics_reset(hw);
88
89 return ctrl_reg;
90}
91
92
93
94
95
96
97bool
98ixgb_adapter_stop(struct ixgb_hw *hw)
99{
100 u32 ctrl_reg;
101
102 ENTER();
103
104
105
106
107 if (hw->adapter_stopped) {
108 pr_debug("Exiting because the adapter is already stopped!!!\n");
109 return false;
110 }
111
112
113
114
115 hw->adapter_stopped = true;
116
117
118 pr_debug("Masking off all interrupts\n");
119 IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
120
121
122
123
124
125 IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
126 IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
127 IXGB_WRITE_FLUSH(hw);
128 msleep(IXGB_DELAY_BEFORE_RESET);
129
130
131
132
133
134
135 pr_debug("Issuing a global reset to MAC\n");
136
137 ctrl_reg = ixgb_mac_reset(hw);
138
139
140 pr_debug("Masking off all interrupts\n");
141 IXGB_WRITE_REG(hw, IMC, 0xffffffff);
142
143
144 IXGB_READ_REG(hw, ICR);
145
146 return ctrl_reg & IXGB_CTRL0_RST;
147}
148
149
150
151
152
153
154
155
156
157
158
159static ixgb_xpak_vendor
160ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
161{
162 u32 i;
163 u16 vendor_name[5];
164 ixgb_xpak_vendor xpak_vendor;
165
166 ENTER();
167
168
169
170
171 for (i = 0; i < 5; i++) {
172 vendor_name[i] = ixgb_read_phy_reg(hw,
173 MDIO_PMA_PMD_XPAK_VENDOR_NAME
174 + i, IXGB_PHY_ADDRESS,
175 MDIO_MMD_PMAPMD);
176 }
177
178
179 if (vendor_name[0] == 'I' &&
180 vendor_name[1] == 'N' &&
181 vendor_name[2] == 'T' &&
182 vendor_name[3] == 'E' && vendor_name[4] == 'L') {
183 xpak_vendor = ixgb_xpak_vendor_intel;
184 } else {
185 xpak_vendor = ixgb_xpak_vendor_infineon;
186 }
187
188 return xpak_vendor;
189}
190
191
192
193
194
195
196
197
198
199static ixgb_phy_type
200ixgb_identify_phy(struct ixgb_hw *hw)
201{
202 ixgb_phy_type phy_type;
203 ixgb_xpak_vendor xpak_vendor;
204
205 ENTER();
206
207
208 switch (hw->device_id) {
209 case IXGB_DEVICE_ID_82597EX:
210 pr_debug("Identified TXN17401 optics\n");
211 phy_type = ixgb_phy_type_txn17401;
212 break;
213
214 case IXGB_DEVICE_ID_82597EX_SR:
215
216
217
218 xpak_vendor = ixgb_identify_xpak_vendor(hw);
219 if (xpak_vendor == ixgb_xpak_vendor_intel) {
220 pr_debug("Identified TXN17201 optics\n");
221 phy_type = ixgb_phy_type_txn17201;
222 } else {
223 pr_debug("Identified G6005 optics\n");
224 phy_type = ixgb_phy_type_g6005;
225 }
226 break;
227 case IXGB_DEVICE_ID_82597EX_LR:
228 pr_debug("Identified G6104 optics\n");
229 phy_type = ixgb_phy_type_g6104;
230 break;
231 case IXGB_DEVICE_ID_82597EX_CX4:
232 pr_debug("Identified CX4\n");
233 xpak_vendor = ixgb_identify_xpak_vendor(hw);
234 if (xpak_vendor == ixgb_xpak_vendor_intel) {
235 pr_debug("Identified TXN17201 optics\n");
236 phy_type = ixgb_phy_type_txn17201;
237 } else {
238 pr_debug("Identified G6005 optics\n");
239 phy_type = ixgb_phy_type_g6005;
240 }
241 break;
242 default:
243 pr_debug("Unknown physical layer module\n");
244 phy_type = ixgb_phy_type_unknown;
245 break;
246 }
247
248
249 if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN)
250 phy_type = ixgb_phy_type_bcm;
251
252 return phy_type;
253}
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272bool
273ixgb_init_hw(struct ixgb_hw *hw)
274{
275 u32 i;
276 bool status;
277
278 ENTER();
279
280
281
282
283
284
285 pr_debug("Issuing a global reset to MAC\n");
286
287 ixgb_mac_reset(hw);
288
289 pr_debug("Issuing an EE reset to MAC\n");
290#ifdef HP_ZX1
291
292 IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
293#else
294 IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
295#endif
296
297
298 msleep(IXGB_DELAY_AFTER_EE_RESET);
299
300 if (!ixgb_get_eeprom_data(hw))
301 return false;
302
303
304 hw->device_id = ixgb_get_ee_device_id(hw);
305 hw->phy_type = ixgb_identify_phy(hw);
306
307
308
309
310 ixgb_init_rx_addrs(hw);
311
312
313
314
315
316 if (!mac_addr_valid(hw->curr_mac_addr)) {
317 pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
318 return(false);
319 }
320
321
322 hw->adapter_stopped = false;
323
324
325 ixgb_get_bus_info(hw);
326
327
328 pr_debug("Zeroing the MTA\n");
329 for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
330 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
331
332
333 ixgb_clear_vfta(hw);
334
335
336 ixgb_clear_hw_cntrs(hw);
337
338
339 status = ixgb_setup_fc(hw);
340
341
342 ixgb_check_for_link(hw);
343
344 return status;
345}
346
347
348
349
350
351
352
353
354
355
356static void
357ixgb_init_rx_addrs(struct ixgb_hw *hw)
358{
359 u32 i;
360
361 ENTER();
362
363
364
365
366
367
368 if (!mac_addr_valid(hw->curr_mac_addr)) {
369
370
371 ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
372
373 pr_debug("Keeping Permanent MAC Addr = %pM\n",
374 hw->curr_mac_addr);
375 } else {
376
377
378 pr_debug("Overriding MAC Address in RAR[0]\n");
379 pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
380
381 ixgb_rar_set(hw, hw->curr_mac_addr, 0);
382 }
383
384
385 pr_debug("Clearing RAR[1-15]\n");
386 for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
387
388 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
389 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
390 }
391}
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406void
407ixgb_mc_addr_list_update(struct ixgb_hw *hw,
408 u8 *mc_addr_list,
409 u32 mc_addr_count,
410 u32 pad)
411{
412 u32 hash_value;
413 u32 i;
414 u32 rar_used_count = 1;
415 u8 *mca;
416
417 ENTER();
418
419
420 hw->num_mc_addrs = mc_addr_count;
421
422
423 pr_debug("Clearing RAR[1-15]\n");
424 for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
425 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
426 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
427 }
428
429
430 pr_debug("Clearing MTA\n");
431 for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
432 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
433
434
435 mca = mc_addr_list;
436 for (i = 0; i < mc_addr_count; i++) {
437 pr_debug("Adding the multicast addresses:\n");
438 pr_debug("MC Addr #%d = %pM\n", i, mca);
439
440
441
442
443 if (rar_used_count < IXGB_RAR_ENTRIES) {
444 ixgb_rar_set(hw, mca, rar_used_count);
445 pr_debug("Added a multicast address to RAR[%d]\n", i);
446 rar_used_count++;
447 } else {
448 hash_value = ixgb_hash_mc_addr(hw, mca);
449
450 pr_debug("Hash value = 0x%03X\n", hash_value);
451
452 ixgb_mta_set(hw, hash_value);
453 }
454
455 mca += ETH_ALEN + pad;
456 }
457
458 pr_debug("MC Update Complete\n");
459}
460
461
462
463
464
465
466
467
468
469
470static u32
471ixgb_hash_mc_addr(struct ixgb_hw *hw,
472 u8 *mc_addr)
473{
474 u32 hash_value = 0;
475
476 ENTER();
477
478
479
480
481 switch (hw->mc_filter_type) {
482
483
484
485 case 0:
486
487 hash_value =
488 ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
489 break;
490 case 1:
491 hash_value =
492 ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
493 break;
494 case 2:
495 hash_value =
496 ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
497 break;
498 case 3:
499 hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
500 break;
501 default:
502
503 pr_debug("MC filter type param set incorrectly\n");
504 ASSERT(0);
505 break;
506 }
507
508 hash_value &= 0xFFF;
509 return hash_value;
510}
511
512
513
514
515
516
517
518static void
519ixgb_mta_set(struct ixgb_hw *hw,
520 u32 hash_value)
521{
522 u32 hash_bit, hash_reg;
523 u32 mta_reg;
524
525
526
527
528
529
530
531
532
533 hash_reg = (hash_value >> 5) & 0x7F;
534 hash_bit = hash_value & 0x1F;
535
536 mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
537
538 mta_reg |= (1 << hash_bit);
539
540 IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
541}
542
543
544
545
546
547
548
549
550void
551ixgb_rar_set(struct ixgb_hw *hw,
552 u8 *addr,
553 u32 index)
554{
555 u32 rar_low, rar_high;
556
557 ENTER();
558
559
560
561
562 rar_low = ((u32) addr[0] |
563 ((u32)addr[1] << 8) |
564 ((u32)addr[2] << 16) |
565 ((u32)addr[3] << 24));
566
567 rar_high = ((u32) addr[4] |
568 ((u32)addr[5] << 8) |
569 IXGB_RAH_AV);
570
571 IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
572 IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
573}
574
575
576
577
578
579
580
581
/*
 * ixgb_write_vfta - write one 32-bit word of the VLAN filter table
 * @hw: board private structure
 * @offset: register index within the VFTA array
 * @value: bitmask to store at that offset
 */
void
ixgb_write_vfta(struct ixgb_hw *hw,
		u32 offset,
		u32 value)
{
	IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
}
589
590
591
592
593
594
595static void
596ixgb_clear_vfta(struct ixgb_hw *hw)
597{
598 u32 offset;
599
600 for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
601 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
602}
603
604
605
606
607
608
609
610static bool
611ixgb_setup_fc(struct ixgb_hw *hw)
612{
613 u32 ctrl_reg;
614 u32 pap_reg = 0;
615 bool status = true;
616
617 ENTER();
618
619
620 ctrl_reg = IXGB_READ_REG(hw, CTRL0);
621
622
623 ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
624
625
626
627
628
629
630
631
632
633
634 switch (hw->fc.type) {
635 case ixgb_fc_none:
636
637 ctrl_reg |= (IXGB_CTRL0_CMDC);
638 break;
639 case ixgb_fc_rx_pause:
640
641
642
643 ctrl_reg |= (IXGB_CTRL0_RPE);
644 break;
645 case ixgb_fc_tx_pause:
646
647
648
649 ctrl_reg |= (IXGB_CTRL0_TPE);
650 pap_reg = hw->fc.pause_time;
651 break;
652 case ixgb_fc_full:
653
654
655
656 ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
657 pap_reg = hw->fc.pause_time;
658 break;
659 default:
660
661 pr_debug("Flow control param set incorrectly\n");
662 ASSERT(0);
663 break;
664 }
665
666
667 IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
668
669 if (pap_reg != 0)
670 IXGB_WRITE_REG(hw, PAP, pap_reg);
671
672
673
674
675
676
677
678 if (!(hw->fc.type & ixgb_fc_tx_pause)) {
679 IXGB_WRITE_REG(hw, FCRTL, 0);
680 IXGB_WRITE_REG(hw, FCRTH, 0);
681 } else {
682
683
684
685 if (hw->fc.send_xon) {
686 IXGB_WRITE_REG(hw, FCRTL,
687 (hw->fc.low_water | IXGB_FCRTL_XONE));
688 } else {
689 IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
690 }
691 IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
692 }
693 return status;
694}
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711static u16
712ixgb_read_phy_reg(struct ixgb_hw *hw,
713 u32 reg_address,
714 u32 phy_address,
715 u32 device_type)
716{
717 u32 i;
718 u32 data;
719 u32 command = 0;
720
721 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
722 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
723 ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
724
725
726 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
727 (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
728 (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
729 (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
730
731 IXGB_WRITE_REG(hw, MSCA, command);
732
733
734
735
736
737
738
739
740 for (i = 0; i < 10; i++)
741 {
742 udelay(10);
743
744 command = IXGB_READ_REG(hw, MSCA);
745
746 if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
747 break;
748 }
749
750 ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
751
752
753 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
754 (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
755 (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
756 (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));
757
758 IXGB_WRITE_REG(hw, MSCA, command);
759
760
761
762
763
764
765
766
767 for (i = 0; i < 10; i++)
768 {
769 udelay(10);
770
771 command = IXGB_READ_REG(hw, MSCA);
772
773 if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
774 break;
775 }
776
777 ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
778
779
780
781
782 data = IXGB_READ_REG(hw, MSRWD);
783 data >>= IXGB_MSRWD_READ_DATA_SHIFT;
784 return((u16) data);
785}
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
/*
 * ixgb_write_phy_reg - write a 16-bit value to a PHY register over MDIO
 * @hw: board private structure
 * @reg_address: PHY register to write (<= IXGB_MAX_PHY_REG_ADDRESS)
 * @phy_address: MDIO address of the PHY
 * @device_type: MMD device type within the PHY
 * @data: value to write
 *
 * Mirror of ixgb_read_phy_reg(): stages the data in MSRWD, performs an
 * address cycle to latch the target register, then a write cycle, each
 * polled for completion via the MSCA MDI_COMMAND bit.
 */
static void
ixgb_write_phy_reg(struct ixgb_hw *hw,
		   u32 reg_address,
		   u32 phy_address,
		   u32 device_type,
		   u16 data)
{
	u32 i;
	u32 command = 0;

	ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
	ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
	ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);

	/* Stage the data to be written in MSRWD. */
	IXGB_WRITE_REG(hw, MSRWD, (u32)data);

	/* Address cycle: latch the target register in the PHY. */
	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
		   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
		   (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));

	IXGB_WRITE_REG(hw, MSCA, command);

	/* Poll for MDI_COMMAND to self-clear (up to 10 * 10us). */
	for (i = 0; i < 10; i++)
	{
		udelay(10);

		command = IXGB_READ_REG(hw, MSCA);

		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
			break;
	}

	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);

	/* Write cycle: push the staged data to the latched register. */
	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
		   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
		   (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));

	IXGB_WRITE_REG(hw, MSCA, command);

	/* Poll again for the write cycle to complete. */
	for (i = 0; i < 10; i++)
	{
		udelay(10);

		command = IXGB_READ_REG(hw, MSCA);

		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
			break;
	}

	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
}
877
878
879
880
881
882
883
884
885void
886ixgb_check_for_link(struct ixgb_hw *hw)
887{
888 u32 status_reg;
889 u32 xpcss_reg;
890
891 ENTER();
892
893 xpcss_reg = IXGB_READ_REG(hw, XPCSS);
894 status_reg = IXGB_READ_REG(hw, STATUS);
895
896 if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
897 (status_reg & IXGB_STATUS_LU)) {
898 hw->link_up = true;
899 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
900 (status_reg & IXGB_STATUS_LU)) {
901 pr_debug("XPCSS Not Aligned while Status:LU is set\n");
902 hw->link_up = ixgb_link_reset(hw);
903 } else {
904
905
906
907
908 hw->link_up = ixgb_link_reset(hw);
909 }
910
911}
912
913
914
915
916
917
918
919
920
921
922bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
923{
924 u32 newLFC, newRFC;
925 bool bad_link_returncode = false;
926
927 if (hw->phy_type == ixgb_phy_type_txn17401) {
928 newLFC = IXGB_READ_REG(hw, LFC);
929 newRFC = IXGB_READ_REG(hw, RFC);
930 if ((hw->lastLFC + 250 < newLFC)
931 || (hw->lastRFC + 250 < newRFC)) {
932 pr_debug("BAD LINK! too many LFC/RFC since last check\n");
933 bad_link_returncode = true;
934 }
935 hw->lastLFC = newLFC;
936 hw->lastRFC = newRFC;
937 }
938
939 return bad_link_returncode;
940}
941
942
943
944
945
946
/*
 * ixgb_clear_hw_cntrs - clear all hardware statistics counters
 * @hw: board private structure
 *
 * Reads every statistics register and discards the value; the reads
 * are assumed to clear the counters (read-to-clear hardware — confirm
 * against the 82597EX register description). Skipped when the adapter
 * is stopped.
 */
static void
ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
	ENTER();

	/* A stopped adapter must not be touched. */
	if (hw->adapter_stopped) {
		pr_debug("Exiting because the adapter is stopped!!!\n");
		return;
	}

	/* Receive-side packet/byte/error counters. */
	IXGB_READ_REG(hw, TPRL);
	IXGB_READ_REG(hw, TPRH);
	IXGB_READ_REG(hw, GPRCL);
	IXGB_READ_REG(hw, GPRCH);
	IXGB_READ_REG(hw, BPRCL);
	IXGB_READ_REG(hw, BPRCH);
	IXGB_READ_REG(hw, MPRCL);
	IXGB_READ_REG(hw, MPRCH);
	IXGB_READ_REG(hw, UPRCL);
	IXGB_READ_REG(hw, UPRCH);
	IXGB_READ_REG(hw, VPRCL);
	IXGB_READ_REG(hw, VPRCH);
	IXGB_READ_REG(hw, JPRCL);
	IXGB_READ_REG(hw, JPRCH);
	IXGB_READ_REG(hw, GORCL);
	IXGB_READ_REG(hw, GORCH);
	IXGB_READ_REG(hw, TORL);
	IXGB_READ_REG(hw, TORH);
	IXGB_READ_REG(hw, RNBC);
	IXGB_READ_REG(hw, RUC);
	IXGB_READ_REG(hw, ROC);
	IXGB_READ_REG(hw, RLEC);
	IXGB_READ_REG(hw, CRCERRS);
	IXGB_READ_REG(hw, ICBC);
	IXGB_READ_REG(hw, ECBC);
	IXGB_READ_REG(hw, MPC);
	/* Transmit-side packet/byte counters. */
	IXGB_READ_REG(hw, TPTL);
	IXGB_READ_REG(hw, TPTH);
	IXGB_READ_REG(hw, GPTCL);
	IXGB_READ_REG(hw, GPTCH);
	IXGB_READ_REG(hw, BPTCL);
	IXGB_READ_REG(hw, BPTCH);
	IXGB_READ_REG(hw, MPTCL);
	IXGB_READ_REG(hw, MPTCH);
	IXGB_READ_REG(hw, UPTCL);
	IXGB_READ_REG(hw, UPTCH);
	IXGB_READ_REG(hw, VPTCL);
	IXGB_READ_REG(hw, VPTCH);
	IXGB_READ_REG(hw, JPTCL);
	IXGB_READ_REG(hw, JPTCH);
	IXGB_READ_REG(hw, GOTCL);
	IXGB_READ_REG(hw, GOTCH);
	IXGB_READ_REG(hw, TOTL);
	IXGB_READ_REG(hw, TOTH);
	IXGB_READ_REG(hw, DC);
	IXGB_READ_REG(hw, PLT64C);
	IXGB_READ_REG(hw, TSCTC);
	IXGB_READ_REG(hw, TSCTFC);
	IXGB_READ_REG(hw, IBIC);
	/* Link-fault and flow-control counters. */
	IXGB_READ_REG(hw, RFC);
	IXGB_READ_REG(hw, LFC);
	IXGB_READ_REG(hw, PFRC);
	IXGB_READ_REG(hw, PFTC);
	IXGB_READ_REG(hw, MCFRC);
	IXGB_READ_REG(hw, MCFTC);
	IXGB_READ_REG(hw, XONRXC);
	IXGB_READ_REG(hw, XONTXC);
	IXGB_READ_REG(hw, XOFFRXC);
	IXGB_READ_REG(hw, XOFFTXC);
	IXGB_READ_REG(hw, RJC);
}
1019
1020
1021
1022
1023
1024
1025void
1026ixgb_led_on(struct ixgb_hw *hw)
1027{
1028 u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1029
1030
1031 ctrl0_reg &= ~IXGB_CTRL0_SDP0;
1032 IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
1033}
1034
1035
1036
1037
1038
1039
1040void
1041ixgb_led_off(struct ixgb_hw *hw)
1042{
1043 u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1044
1045
1046 ctrl0_reg |= IXGB_CTRL0_SDP0;
1047 IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
1048}
1049
1050
1051
1052
1053
1054
1055static void
1056ixgb_get_bus_info(struct ixgb_hw *hw)
1057{
1058 u32 status_reg;
1059
1060 status_reg = IXGB_READ_REG(hw, STATUS);
1061
1062 hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ?
1063 ixgb_bus_type_pcix : ixgb_bus_type_pci;
1064
1065 if (hw->bus.type == ixgb_bus_type_pci) {
1066 hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ?
1067 ixgb_bus_speed_66 : ixgb_bus_speed_33;
1068 } else {
1069 switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
1070 case IXGB_STATUS_PCIX_SPD_66:
1071 hw->bus.speed = ixgb_bus_speed_66;
1072 break;
1073 case IXGB_STATUS_PCIX_SPD_100:
1074 hw->bus.speed = ixgb_bus_speed_100;
1075 break;
1076 case IXGB_STATUS_PCIX_SPD_133:
1077 hw->bus.speed = ixgb_bus_speed_133;
1078 break;
1079 default:
1080 hw->bus.speed = ixgb_bus_speed_reserved;
1081 break;
1082 }
1083 }
1084
1085 hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
1086 ixgb_bus_width_64 : ixgb_bus_width_32;
1087}
1088
1089
1090
1091
1092
1093
1094
1095static bool
1096mac_addr_valid(u8 *mac_addr)
1097{
1098 bool is_valid = true;
1099 ENTER();
1100
1101
1102 if (is_multicast_ether_addr(mac_addr)) {
1103 pr_debug("MAC address is multicast\n");
1104 is_valid = false;
1105 }
1106
1107 else if (is_broadcast_ether_addr(mac_addr)) {
1108 pr_debug("MAC address is broadcast\n");
1109 is_valid = false;
1110 }
1111
1112 else if (is_zero_ether_addr(mac_addr)) {
1113 pr_debug("MAC address is all zeros\n");
1114 is_valid = false;
1115 }
1116 return is_valid;
1117}
1118
1119
1120
1121
1122
1123
1124
1125static bool
1126ixgb_link_reset(struct ixgb_hw *hw)
1127{
1128 bool link_status = false;
1129 u8 wait_retries = MAX_RESET_ITERATIONS;
1130 u8 lrst_retries = MAX_RESET_ITERATIONS;
1131
1132 do {
1133
1134 IXGB_WRITE_REG(hw, CTRL0,
1135 IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);
1136
1137
1138 do {
1139 udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
1140 link_status =
1141 ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
1142 && (IXGB_READ_REG(hw, XPCSS) &
1143 IXGB_XPCSS_ALIGN_STATUS)) ? true : false;
1144 } while (!link_status && --wait_retries);
1145
1146 } while (!link_status && --lrst_retries);
1147
1148 return link_status;
1149}
1150
1151
1152
1153
1154
1155
1156static void
1157ixgb_optics_reset(struct ixgb_hw *hw)
1158{
1159 if (hw->phy_type == ixgb_phy_type_txn17401) {
1160 ixgb_write_phy_reg(hw,
1161 MDIO_CTRL1,
1162 IXGB_PHY_ADDRESS,
1163 MDIO_MMD_PMAPMD,
1164 MDIO_CTRL1_RESET);
1165
1166 ixgb_read_phy_reg(hw, MDIO_CTRL1, IXGB_PHY_ADDRESS, MDIO_MMD_PMAPMD);
1167 }
1168}
1169
1170
1171
1172
1173
1174
1175
1176#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG 0xC803
1177#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL 0x0164
1178#define IXGB_BCM8704_USER_CTRL_REG 0xC800
1179#define IXGB_BCM8704_USER_CTRL_REG_VAL 0x7FBF
1180#define IXGB_BCM8704_USER_DEV3_ADDR 0x0003
1181#define IXGB_SUN_PHY_ADDRESS 0x0000
1182#define IXGB_SUN_PHY_RESET_DELAY 305
1183
/*
 * ixgb_optics_reset_bcm - reset/configure the Broadcom BCM8704 optics
 * @hw: board private structure
 *
 * Used only on Sun-branded boards. Toggles SDP2/SDP3 in CTRL0
 * (presumably the PHY reset/enable lines — confirm against the board
 * schematic), waits for the PHY to settle, then programs the BCM8704
 * user PMD TX control and user control registers.
 */
static void
ixgb_optics_reset_bcm(struct ixgb_hw *hw)
{
	u32 ctrl = IXGB_READ_REG(hw, CTRL0);
	ctrl &= ~IXGB_CTRL0_SDP2;
	ctrl |= IXGB_CTRL0_SDP3;
	IXGB_WRITE_REG(hw, CTRL0, ctrl);
	IXGB_WRITE_FLUSH(hw);

	/* Give the PHY time to come out of reset. */
	msleep(IXGB_SUN_PHY_RESET_DELAY);

	/* Program the PMD transmit control register, then read it back
	 * twice. NOTE(review): the double read-back mirrors the vendor
	 * bring-up sequence; the values are intentionally discarded. */
	ixgb_write_phy_reg(hw,
			   IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
			   IXGB_SUN_PHY_ADDRESS,
			   IXGB_BCM8704_USER_DEV3_ADDR,
			   IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL);

	ixgb_read_phy_reg(hw,
			  IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
			  IXGB_SUN_PHY_ADDRESS,
			  IXGB_BCM8704_USER_DEV3_ADDR);
	ixgb_read_phy_reg(hw,
			  IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
			  IXGB_SUN_PHY_ADDRESS,
			  IXGB_BCM8704_USER_DEV3_ADDR);

	/* Same write-then-double-read pattern for the user control reg. */
	ixgb_write_phy_reg(hw,
			   IXGB_BCM8704_USER_CTRL_REG,
			   IXGB_SUN_PHY_ADDRESS,
			   IXGB_BCM8704_USER_DEV3_ADDR,
			   IXGB_BCM8704_USER_CTRL_REG_VAL);
	ixgb_read_phy_reg(hw,
			  IXGB_BCM8704_USER_CTRL_REG,
			  IXGB_SUN_PHY_ADDRESS,
			  IXGB_BCM8704_USER_DEV3_ADDR);
	ixgb_read_phy_reg(hw,
			  IXGB_BCM8704_USER_CTRL_REG,
			  IXGB_SUN_PHY_ADDRESS,
			  IXGB_BCM8704_USER_DEV3_ADDR);

	/* Let the new configuration settle before use. */
	msleep(IXGB_SUN_PHY_RESET_DELAY);
}
1230