1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/pci.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32#include <linux/netdevice.h>
33
34#include "ixgbe.h"
35#include "ixgbe_common.h"
36#include "ixgbe_phy.h"
37
38static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
39static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
40static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
41static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
42static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
43static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
44 u16 count);
45static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
46static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
49
50static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
51static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
52static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
53 u16 words, u16 *data);
54static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
55 u16 words, u16 *data);
56static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
57 u16 offset);
58static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
59
60
61
62
63
64
65
66
67
68
/**
 *  ixgbe_device_supports_autoneg_fc - Check PHY/device for autoneg FC support
 *  @hw: pointer to hardware structure
 *
 *  Returns true if the media type (and, for copper, the device ID) indicates
 *  the device can autonegotiate flow control.
 **/
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
	bool supported = false;
	ixgbe_link_speed speed;
	bool link_up;

	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber:
		hw->mac.ops.check_link(hw, &speed, &link_up, false);
		/* if link is down, assume supported; only 1G fiber
		 * supports FC autoneg once link is established */
		if (link_up)
			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
				true : false;
		else
			supported = true;
		break;
	case ixgbe_media_type_backplane:
		supported = true;
		break;
	case ixgbe_media_type_copper:
		/* only some copper devices support flow control autoneg */
		switch (hw->device_id) {
		case IXGBE_DEV_ID_82599_T3_LOM:
		case IXGBE_DEV_ID_X540T:
		case IXGBE_DEV_ID_X540T1:
			supported = true;
			break;
		default:
			break;
		}
		/* fall through - nothing more to do for copper */
	default:
		break;
	}

	return supported;
}
105
106
107
108
109
110
111
112static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
113{
114 s32 ret_val = 0;
115 u32 reg = 0, reg_bp = 0;
116 u16 reg_cu = 0;
117 bool locked = false;
118
119
120
121
122
123 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
124 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
125 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
126 goto out;
127 }
128
129
130
131
132
133 if (hw->fc.requested_mode == ixgbe_fc_default)
134 hw->fc.requested_mode = ixgbe_fc_full;
135
136
137
138
139
140
141 switch (hw->phy.media_type) {
142 case ixgbe_media_type_backplane:
143
144 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
145 if (ret_val)
146 goto out;
147
148
149 case ixgbe_media_type_fiber:
150 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
151
152 break;
153 case ixgbe_media_type_copper:
154 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
155 MDIO_MMD_AN, ®_cu);
156 break;
157 default:
158 break;
159 }
160
161
162
163
164
165
166
167
168
169
170
171 switch (hw->fc.requested_mode) {
172 case ixgbe_fc_none:
173
174 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
175 if (hw->phy.media_type == ixgbe_media_type_backplane)
176 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
177 IXGBE_AUTOC_ASM_PAUSE);
178 else if (hw->phy.media_type == ixgbe_media_type_copper)
179 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
180 break;
181 case ixgbe_fc_tx_pause:
182
183
184
185
186 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
187 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
188 if (hw->phy.media_type == ixgbe_media_type_backplane) {
189 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
190 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
191 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
192 reg_cu |= IXGBE_TAF_ASM_PAUSE;
193 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
194 }
195 break;
196 case ixgbe_fc_rx_pause:
197
198
199
200
201
202
203
204
205
206 case ixgbe_fc_full:
207
208 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
209 if (hw->phy.media_type == ixgbe_media_type_backplane)
210 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
211 IXGBE_AUTOC_ASM_PAUSE;
212 else if (hw->phy.media_type == ixgbe_media_type_copper)
213 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
214 break;
215 default:
216 hw_dbg(hw, "Flow control param set incorrectly\n");
217 ret_val = IXGBE_ERR_CONFIG;
218 goto out;
219 break;
220 }
221
222 if (hw->mac.type != ixgbe_mac_X540) {
223
224
225
226
227 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
228 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
229
230
231 if (hw->fc.strict_ieee)
232 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
233
234 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
235 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
236 }
237
238
239
240
241
242
243 if (hw->phy.media_type == ixgbe_media_type_backplane) {
244
245
246
247
248 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
249 if (ret_val)
250 goto out;
251
252 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
253 ixgbe_device_supports_autoneg_fc(hw)) {
254 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
255 MDIO_MMD_AN, reg_cu);
256 }
257
258 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
259out:
260 return ret_val;
261}
262
263
264
265
266
267
268
269
270
271
272s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
273{
274 s32 ret_val;
275 u32 ctrl_ext;
276
277
278 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
279
280
281 hw->phy.ops.identify(hw);
282
283
284 hw->mac.ops.clear_vfta(hw);
285
286
287 hw->mac.ops.clear_hw_cntrs(hw);
288
289
290 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
291 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
292 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
293 IXGBE_WRITE_FLUSH(hw);
294
295
296 ret_val = ixgbe_setup_fc(hw);
297 if (!ret_val)
298 goto out;
299
300
301 hw->adapter_stopped = false;
302
303out:
304 return ret_val;
305}
306
307
308
309
310
311
312
313
314
315
316
/**
 *  ixgbe_start_hw_gen2 - Init sequence for common device family
 *  @hw: pointer to hw structure
 *
 *  Performs the init sequence common to the second generation
 *  of 10 GbE devices: clears per-queue rate limiters and disables
 *  relaxed ordering on Tx/Rx descriptor and data write-back.
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	/* Clear the rate limiters */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	return 0;
}
345
346
347
348
349
350
351
352
353
354
355
356s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
357{
358 s32 status;
359
360
361 status = hw->mac.ops.reset_hw(hw);
362
363 if (status == 0) {
364
365 status = hw->mac.ops.start_hw(hw);
366 }
367
368 return status;
369}
370
371
372
373
374
375
376
377
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the
 *  hardware.  The statistics registers are clear-on-read, so each read
 *  resets the counter; the returned values are intentionally discarded.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* link xon/xoff rx counters moved on 82599 and later MACs */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* per-priority xon/xoff counters (8 traffic classes) */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* packet-size-binned rx counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* packet-size-binned tx counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* per-queue counters (16 queues) */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540 keeps extra error counters in the PHY's PCS MMD */
	if (hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			hw->phy.ops.identify(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
	}

	return 0;
}
478
479
480
481
482
483
484
485
486
/**
 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: stores the part number string from the EEPROM
 *  @pba_num_size: part number string buffer length
 *
 *  Reads the part number string from the EEPROM.  Handles both the legacy
 *  hex-encoded format and the newer string format selected by a guard
 *  word at IXGBE_PBANUM0_PTR.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	if (pba_num == NULL) {
		hw_dbg(hw, "PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		hw_dbg(hw, "NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			hw_dbg(hw, "PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return 0;
	}

	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		hw_dbg(hw, "NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		hw_dbg(hw, "PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word yields two string characters (hi byte, lo byte) */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return 0;
}
587
588
589
590
591
592
593
594
595
596
597s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
598{
599 u32 rar_high;
600 u32 rar_low;
601 u16 i;
602
603 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
604 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
605
606 for (i = 0; i < 4; i++)
607 mac_addr[i] = (u8)(rar_low >> (i*8));
608
609 for (i = 0; i < 2; i++)
610 mac_addr[i+4] = (u8)(rar_high >> (i*8));
611
612 return 0;
613}
614
615enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
616{
617 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
618 case IXGBE_PCI_LINK_WIDTH_1:
619 return ixgbe_bus_width_pcie_x1;
620 case IXGBE_PCI_LINK_WIDTH_2:
621 return ixgbe_bus_width_pcie_x2;
622 case IXGBE_PCI_LINK_WIDTH_4:
623 return ixgbe_bus_width_pcie_x4;
624 case IXGBE_PCI_LINK_WIDTH_8:
625 return ixgbe_bus_width_pcie_x8;
626 default:
627 return ixgbe_bus_width_unknown;
628 }
629}
630
631enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
632{
633 switch (link_status & IXGBE_PCI_LINK_SPEED) {
634 case IXGBE_PCI_LINK_SPEED_2500:
635 return ixgbe_bus_speed_2500;
636 case IXGBE_PCI_LINK_SPEED_5000:
637 return ixgbe_bus_speed_5000;
638 case IXGBE_PCI_LINK_SPEED_8000:
639 return ixgbe_bus_speed_8000;
640 default:
641 return ixgbe_bus_speed_unknown;
642 }
643}
644
645
646
647
648
649
650
/**
 *  ixgbe_get_bus_info_generic - Generic set PCI bus info
 *  @hw: pointer to hardware structure
 *
 *  Sets the PCI bus info (type, speed, width) within the ixgbe_hw
 *  structure from the PCIe Link Status config word, then lets the MAC
 *  determine its LAN ID.
 **/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	u16 link_status;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space */
	link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	hw->mac.ops.set_lan_id(hw);

	return 0;
}
667
668
669
670
671
672
673
674
/**
 *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 *  @hw: pointer to the HW structure
 *
 *  Determines the LAN function id by reading memory-mapped registers
 *  and swaps the port value if requested (FACTPS LAN Function Select).
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;
}
689
690
691
692
693
694
695
696
697
698
/**
 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	/*
	 * Set the adapter_stopped flag so other driver functions stop
	 * touching the hardware from now on
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 2000);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
741
742
743
744
745
746
747s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
748{
749 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
750
751
752 led_reg &= ~IXGBE_LED_MODE_MASK(index);
753 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
754 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
755 IXGBE_WRITE_FLUSH(hw);
756
757 return 0;
758}
759
760
761
762
763
764
765s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
766{
767 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
768
769
770 led_reg &= ~IXGBE_LED_MODE_MASK(index);
771 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
772 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
773 IXGBE_WRITE_FLUSH(hw);
774
775 return 0;
776}
777
778
779
780
781
782
783
784
/**
 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 *  @hw: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
 *  ixgbe_hw struct in order to set up EEPROM access.  Runs only once;
 *  subsequent calls are no-ops (type != uninitialized).
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
						  IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
		       eeprom->type, eeprom->word_size, eeprom->address_bits);
	}

	return 0;
}
827
828
829
830
831
832
833
834
835
836
/**
 *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of words
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = 0;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != 0)
			break;
	}

out:
	return status;
}
881
882
883
884
885
886
887
888
889
890
891
/**
 *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of word(s)
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.  Acquires the
 *  EEPROM via the SWFW semaphore, issues SPI write-enable + write
 *  commands, streaming up to a page per write cycle.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode) */
			ixgbe_shift_out_eeprom_bits(hw,
						  IXGBE_EEPROM_WREN_OPCODE_SPI,
						  IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI */
			do {
				word = data[i];
				/* byte-swap for the SPI wire format */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				if (page_size == 0)
					break;

				/* do not exceed the EEPROM page boundary */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			usleep_range(10000, 20000);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
962
963
964
965
966
967
968
969
970
971
972s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
973{
974 s32 status;
975
976 hw->eeprom.ops.init_params(hw);
977
978 if (offset >= hw->eeprom.word_size) {
979 status = IXGBE_ERR_EEPROM;
980 goto out;
981 }
982
983 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
984
985out:
986 return status;
987}
988
989
990
991
992
993
994
995
996
997
/**
 *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @words: number of word(s)
 *  @data: read 16 bit words(s) from EEPROM
 *
 *  Reads 16 bit word(s) from EEPROM through bit-bang method, splitting
 *  the request into bursts so the synchronization semaphore is not held
 *  too long.
 **/
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status = 0;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status != 0)
			break;
	}

out:
	return status;
}
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
/**
 *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @words: number of word(s)
 *  @data: read 16 bit word(s) from EEPROM
 *
 *  Reads 16 bit word(s) from EEPROM through bit-bang method.  Acquires
 *  the EEPROM, issues the SPI READ opcode + address per word, shifts the
 *  result in, and releases the EEPROM when done.
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data; byte-swap from the SPI wire format */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1101 u16 *data)
1102{
1103 s32 status;
1104
1105 hw->eeprom.ops.init_params(hw);
1106
1107 if (offset >= hw->eeprom.word_size) {
1108 status = IXGBE_ERR_EEPROM;
1109 goto out;
1110 }
1111
1112 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1113
1114out:
1115 return status;
1116}
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
/**
 *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of word(s)
 *  @data: 16 bit word(s) from the EEPROM
 *
 *  Reads a 16 bit word(s) from the EEPROM using the hardware EERD
 *  register (no bit-banging): write address + start bit, poll for done,
 *  read the data field.
 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = 0;
	u32 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == 0) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			hw_dbg(hw, "Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
/**
 *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be used as a scratch pad
 *
 *  Discovers the valid EEPROM page size by writing a sequence of
 *  ascending values and reading back where the page wrapped.  This
 *  function is called only when we are writing a new large buffer at
 *  given offset so the data would be overwritten anyway.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = 0;
	u16 i;

	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* temporarily assume max page size so the write streams freely */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status != 0)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != 0)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
	       hw->eeprom.word_page_size);
out:
	return status;
}
1206
1207
1208
1209
1210
1211
1212
1213
1214
/**
 *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
/**
 *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of  word in the EEPROM to write
 *  @words: number of words
 *  @data: word(s) write to the EEPROM
 *
 *  Write a 16 bit word(s) to the EEPROM using the hardware EEWR
 *  register: wait for the interface to be idle, write address + data +
 *  start bit, then wait for completion.
 **/
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	u32 eewr;
	s32 status = 0;
	u16 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		/* make sure a previous write has finished */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != 0) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			goto out;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		/* wait for this write to finish */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != 0) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			goto out;
		}
	}

out:
	return status;
}
1271
1272
1273
1274
1275
1276
1277
1278
1279
/**
 *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of  word in the EEPROM to write
 *  @data: word write to the EEPROM
 *
 *  Write a 16 bit word to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1294{
1295 u32 i;
1296 u32 reg;
1297 s32 status = IXGBE_ERR_EEPROM;
1298
1299 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1300 if (ee_reg == IXGBE_NVM_POLL_READ)
1301 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1302 else
1303 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1304
1305 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1306 status = 0;
1307 break;
1308 }
1309 udelay(5);
1310 }
1311 return status;
1312}
1313
1314
1315
1316
1317
1318
1319
1320
/**
 *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *
 *  Prepares EEPROM for access using bit-bang method. This function should
 *  be called before issuing a command to the EEPROM.  Takes the SWFW
 *  semaphore, requests hardware EEPROM access (EEC.REQ) and waits for the
 *  grant (EEC.GNT); on success leaves CS and SK low ready for SPI.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u32 eec;
	u32 i;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == 0) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			udelay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			hw_dbg(hw, "Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == 0) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			IXGBE_WRITE_FLUSH(hw);
			udelay(1);
		}
	}
	return status;
}
1365
1366
1367
1368
1369
1370
1371
/**
 *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  Sets the hardware semaphores so EEPROM access can occur for bit-bang
 *  method: first waits for the device-to-device SMBI bit to clear, then
 *  claims the software/firmware SWESMBI bit and verifies the claim took.
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = 0;
			break;
		}
		udelay(50);
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		udelay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = 0;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == 0) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			udelay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
	}

	return status;
}
1449
1450
1451
1452
1453
1454
1455
1456static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1457{
1458 u32 swsm;
1459
1460 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1461
1462
1463 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1464 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1465 IXGBE_WRITE_FLUSH(hw);
1466}
1467
1468
1469
1470
1471
/**
 *  ixgbe_ready_eeprom - Polls for EEPROM ready
 *  @hw: pointer to hardware structure
 *
 *  Polls the SPI status register until the device reports it is no
 *  longer busy with its previous write, or the retry budget is used up.
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 i;
	u8 spi_stat_reg;

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		udelay(5);
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		hw_dbg(hw, "SPI EEPROM Status error\n");
		status = IXGBE_ERR_EEPROM;
	}

	return status;
}
1506
1507
1508
1509
1510
/**
 *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 *  @hw: pointer to hardware structure
 *
 *  Pulses chip select high then low to terminate the current SPI
 *  command and return the device to standby.
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
1527
1528
1529
1530
1531
1532
1533
/**
 *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 *  @hw: pointer to hardware structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 *
 *  Bit-bangs @count bits of @data, MSB first, on the EEC data-in line,
 *  clocking each bit with a raise/lower of SK.
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
		IXGBE_WRITE_FLUSH(hw);

		udelay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
}
1582
1583
1584
1585
1586
/**
 *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to hardware structure
 *  @count: number of bits to shift in (at most 16)
 *
 *  Clocks @count bits in from the EEC "DO" line, MSB first, and returns
 *  the assembled value.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
1619
1620
1621
1622
1623
1624
/**
 *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 *  @hw: pointer to hardware structure
 *  @eec: EEC register's current value (updated in place)
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
1636
1637
1638
1639
1640
1641
/**
 *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 *  @hw: pointer to hardware structure
 *  @eec: EEC register's current value (updated in place)
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Lower the clock input to the EEPROM
	 * (clearing the SK bit), then delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
1653
1654
1655
1656
1657
/**
 *  ixgbe_release_eeprom - Release EEPROM, release semaphores
 *  @hw: pointer to hardware structure
 *
 *  Deselects the EEPROM (CS high, SK low), drops the hardware request
 *  bit, releases the SW/FW EEPROM semaphore, and then sleeps so the
 *  EEPROM can finish any in-flight operation before the next access.
 */
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* deselect the device: CS high, clock low */
	eec |= IXGBE_EEC_CS;
	eec &= ~IXGBE_EEC_SK;

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	udelay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/*
	 * Delay before attempt to obtain semaphore again to allow FW
	 * access. semaphore_delay is in ms, usleep_range wants us.
	 */
	usleep_range(hw->eeprom.semaphore_delay * 1000,
		     hw->eeprom.semaphore_delay * 2000);
}
1685
1686
1687
1688
1689
1690u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1691{
1692 u16 i;
1693 u16 j;
1694 u16 checksum = 0;
1695 u16 length = 0;
1696 u16 pointer = 0;
1697 u16 word = 0;
1698
1699
1700 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1701 if (hw->eeprom.ops.read(hw, i, &word) != 0) {
1702 hw_dbg(hw, "EEPROM read failed\n");
1703 break;
1704 }
1705 checksum += word;
1706 }
1707
1708
1709 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1710 hw->eeprom.ops.read(hw, i, &pointer);
1711
1712
1713 if (pointer != 0xFFFF && pointer != 0) {
1714 hw->eeprom.ops.read(hw, pointer, &length);
1715
1716 if (length != 0xFFFF && length != 0) {
1717 for (j = pointer+1; j <= pointer+length; j++) {
1718 hw->eeprom.ops.read(hw, j, &word);
1719 checksum += word;
1720 }
1721 }
1722 }
1723 }
1724
1725 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1726
1727 return checksum;
1728}
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1739 u16 *checksum_val)
1740{
1741 s32 status;
1742 u16 checksum;
1743 u16 read_checksum = 0;
1744
1745
1746
1747
1748
1749
1750 status = hw->eeprom.ops.read(hw, 0, &checksum);
1751
1752 if (status == 0) {
1753 checksum = hw->eeprom.ops.calc_checksum(hw);
1754
1755 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1756
1757
1758
1759
1760
1761 if (read_checksum != checksum)
1762 status = IXGBE_ERR_EEPROM_CHECKSUM;
1763
1764
1765 if (checksum_val)
1766 *checksum_val = checksum;
1767 } else {
1768 hw_dbg(hw, "EEPROM read failed\n");
1769 }
1770
1771 return status;
1772}
1773
1774
1775
1776
1777
1778s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1779{
1780 s32 status;
1781 u16 checksum;
1782
1783
1784
1785
1786
1787
1788 status = hw->eeprom.ops.read(hw, 0, &checksum);
1789
1790 if (status == 0) {
1791 checksum = hw->eeprom.ops.calc_checksum(hw);
1792 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1793 checksum);
1794 } else {
1795 hw_dbg(hw, "EEPROM read failed\n");
1796 }
1797
1798 return status;
1799}
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1812 u32 enable_addr)
1813{
1814 u32 rar_low, rar_high;
1815 u32 rar_entries = hw->mac.num_rar_entries;
1816
1817
1818 if (index >= rar_entries) {
1819 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1820 return IXGBE_ERR_INVALID_ARGUMENT;
1821 }
1822
1823
1824 hw->mac.ops.set_vmdq(hw, index, vmdq);
1825
1826
1827
1828
1829
1830 rar_low = ((u32)addr[0] |
1831 ((u32)addr[1] << 8) |
1832 ((u32)addr[2] << 16) |
1833 ((u32)addr[3] << 24));
1834
1835
1836
1837
1838
1839 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1840 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1841 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1842
1843 if (enable_addr != 0)
1844 rar_high |= IXGBE_RAH_AV;
1845
1846 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1847 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1848
1849 return 0;
1850}
1851
1852
1853
1854
1855
1856
1857
1858
1859s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1860{
1861 u32 rar_high;
1862 u32 rar_entries = hw->mac.num_rar_entries;
1863
1864
1865 if (index >= rar_entries) {
1866 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1867 return IXGBE_ERR_INVALID_ARGUMENT;
1868 }
1869
1870
1871
1872
1873
1874
1875 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1876 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1877
1878 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1879 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1880
1881
1882 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1883
1884 return 0;
1885}
1886
1887
1888
1889
1890
1891
1892
1893
1894
/**
 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters
 *  @hw: pointer to hardware structure
 *
 *  Places the MAC address in RAR0 (or keeps what is already there when
 *  the software copy is invalid), clears every other RAR, resets the
 *  multicast table array, and initializes the UTA tables when the MAC
 *  provides that operation.  Always returns 0.
 */
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software
	 * override to the permanent address; otherwise fetch the permanent
	 * address from the hardware.
	 */
	if (!is_valid_ether_addr(hw->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
	} else {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/*  clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR0 (the station address) is always in use */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* not every MAC implements UTA tables */
	if (hw->mac.ops.init_uta_tables)
		hw->mac.ops.init_uta_tables(hw);

	return 0;
}
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
1958{
1959 u32 vector = 0;
1960
1961 switch (hw->mac.mc_filter_type) {
1962 case 0:
1963 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
1964 break;
1965 case 1:
1966 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
1967 break;
1968 case 2:
1969 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
1970 break;
1971 case 3:
1972 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
1973 break;
1974 default:
1975 hw_dbg(hw, "MC filter type param set incorrectly\n");
1976 break;
1977 }
1978
1979
1980 vector &= 0xFFF;
1981 return vector;
1982}
1983
1984
1985
1986
1987
1988
1989
1990
1991static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1992{
1993 u32 vector;
1994 u32 vector_bit;
1995 u32 vector_reg;
1996
1997 hw->addr_ctrl.mta_in_use++;
1998
1999 vector = ixgbe_mta_vector(hw, mc_addr);
2000 hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011 vector_reg = (vector >> 5) & 0x7F;
2012 vector_bit = vector & 0x1F;
2013 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2014}
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
2027 struct net_device *netdev)
2028{
2029 struct netdev_hw_addr *ha;
2030 u32 i;
2031
2032
2033
2034
2035
2036 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
2037 hw->addr_ctrl.mta_in_use = 0;
2038
2039
2040 hw_dbg(hw, " Clearing MTA\n");
2041 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2042
2043
2044 netdev_for_each_mc_addr(ha, netdev) {
2045 hw_dbg(hw, " Adding the multicast addresses:\n");
2046 ixgbe_set_mta(hw, ha->addr);
2047 }
2048
2049
2050 for (i = 0; i < hw->mac.mcft_size; i++)
2051 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2052 hw->mac.mta_shadow[i]);
2053
2054 if (hw->addr_ctrl.mta_in_use > 0)
2055 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2056 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2057
2058 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
2059 return 0;
2060}
2061
2062
2063
2064
2065
2066
2067
2068s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2069{
2070 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2071
2072 if (a->mta_in_use > 0)
2073 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2074 hw->mac.mc_filter_type);
2075
2076 return 0;
2077}
2078
2079
2080
2081
2082
2083
2084
2085s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2086{
2087 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2088
2089 if (a->mta_in_use > 0)
2090 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2091
2092 return 0;
2093}
2094
2095
2096
2097
2098
2099
2100
/**
 *  ixgbe_fc_enable_generic - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Validates the watermark configuration, runs flow-control
 *  autonegotiation, then programs MFLCN/FCCFG for the negotiated mode
 *  and the per-TC FCRTL/FCRTH watermarks and pause timers.
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_LINK_SETTINGS on a bad
 *  pause time or watermark setup, or IXGBE_ERR_CONFIG for an unknown
 *  flow-control mode.
 */
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	/* Validate the pause time: 0 would stall XOFF refresh */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero (or >= high water) causes XOFF floods */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				hw_dbg(hw, "Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* refresh XOFF at half the pause time so it never expires early */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
/**
 *  ixgbe_negotiate_fc - Negotiate flow control
 *  @hw: pointer to hardware structure
 *  @adv_reg: flow control advertised settings
 *  @lp_reg: link partner's flow control settings
 *  @adv_sym: symmetric pause bit in advertisement
 *  @adv_asm: asymmetric pause bit in advertisement
 *  @lp_sym: symmetric pause bit in link partner advertisement
 *  @lp_asm: asymmetric pause bit in link partner advertisement
 *
 *  Resolves hw->fc.current_mode from the local and link-partner pause
 *  bits per the IEEE 802.3 Annex 28B pause resolution table:
 *    both sym            -> full (or rx_pause if full wasn't requested)
 *    local asm-only + lp sym&asm -> tx_pause
 *    local sym&asm + lp asm-only -> rx_pause
 *    anything else       -> none
 *
 *  Returns 0, or IXGBE_ERR_FC_NOT_NEGOTIATED if either register is 0
 *  (nothing advertised, so negotiation never completed).
 */
static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
                              u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	if ((!(adv_reg)) ||  (!(lp_reg)))
		return IXGBE_ERR_FC_NOT_NEGOTIATED;

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames.  In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			hw_dbg(hw, "Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
		hw_dbg(hw, "Flow Control = NONE.\n");
	}
	return 0;
}
2270
2271
2272
2273
2274
2275
2276
2277static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2278{
2279 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2280 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2281
2282
2283
2284
2285
2286
2287
2288 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2289 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2290 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2291 goto out;
2292
2293 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2294 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2295
2296 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2297 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2298 IXGBE_PCS1GANA_ASM_PAUSE,
2299 IXGBE_PCS1GANA_SYM_PAUSE,
2300 IXGBE_PCS1GANA_ASM_PAUSE);
2301
2302out:
2303 return ret_val;
2304}
2305
2306
2307
2308
2309
2310
2311
2312static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2313{
2314 u32 links2, anlp1_reg, autoc_reg, links;
2315 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2316
2317
2318
2319
2320
2321
2322 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2323 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2324 goto out;
2325
2326 if (hw->mac.type == ixgbe_mac_82599EB) {
2327 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2328 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2329 goto out;
2330 }
2331
2332
2333
2334
2335 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2336 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2337
2338 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2339 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2340 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2341
2342out:
2343 return ret_val;
2344}
2345
2346
2347
2348
2349
2350
2351
2352static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2353{
2354 u16 technology_ability_reg = 0;
2355 u16 lp_technology_ability_reg = 0;
2356
2357 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
2358 MDIO_MMD_AN,
2359 &technology_ability_reg);
2360 hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
2361 MDIO_MMD_AN,
2362 &lp_technology_ability_reg);
2363
2364 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2365 (u32)lp_technology_ability_reg,
2366 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2367 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2368}
2369
2370
2371
2372
2373
2374
2375
2376
2377void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2378{
2379 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2380 ixgbe_link_speed speed;
2381 bool link_up;
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392 if (hw->fc.disable_fc_autoneg)
2393 goto out;
2394
2395 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2396 if (!link_up)
2397 goto out;
2398
2399 switch (hw->phy.media_type) {
2400
2401 case ixgbe_media_type_fiber:
2402 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2403 ret_val = ixgbe_fc_autoneg_fiber(hw);
2404 break;
2405
2406
2407 case ixgbe_media_type_backplane:
2408 ret_val = ixgbe_fc_autoneg_backplane(hw);
2409 break;
2410
2411
2412 case ixgbe_media_type_copper:
2413 if (ixgbe_device_supports_autoneg_fc(hw))
2414 ret_val = ixgbe_fc_autoneg_copper(hw);
2415 break;
2416
2417 default:
2418 break;
2419 }
2420
2421out:
2422 if (ret_val == 0) {
2423 hw->fc.fc_was_autonegged = true;
2424 } else {
2425 hw->fc.fc_was_autonegged = false;
2426 hw->fc.current_mode = hw->fc.requested_mode;
2427 }
2428}
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2441{
2442 s16 devctl2;
2443 u32 pollcnt;
2444
2445 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
2446 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2447
2448 switch (devctl2) {
2449 case IXGBE_PCIDEVCTRL2_65_130ms:
2450 pollcnt = 1300;
2451 break;
2452 case IXGBE_PCIDEVCTRL2_260_520ms:
2453 pollcnt = 5200;
2454 break;
2455 case IXGBE_PCIDEVCTRL2_1_2s:
2456 pollcnt = 20000;
2457 break;
2458 case IXGBE_PCIDEVCTRL2_4_8s:
2459 pollcnt = 80000;
2460 break;
2461 case IXGBE_PCIDEVCTRL2_17_34s:
2462 pollcnt = 34000;
2463 break;
2464 case IXGBE_PCIDEVCTRL2_50_100us:
2465 case IXGBE_PCIDEVCTRL2_1_2ms:
2466 case IXGBE_PCIDEVCTRL2_16_32ms:
2467 case IXGBE_PCIDEVCTRL2_16_32ms_def:
2468 default:
2469 pollcnt = 800;
2470 break;
2471 }
2472
2473
2474 return (pollcnt * 11) / 10;
2475}
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
/**
 *  ixgbe_disable_pcie_master - Disable PCI-express master access
 *  @hw: pointer to hardware structure
 *
 *  Requests bus-master disable via CTRL.GIO_DIS and waits for
 *  STATUS.GIO to clear.  If the device does not report master-disabled
 *  within the first timeout, a double reset is flagged and the PCIe
 *  device-status transaction-pending bit is polled for the
 *  config-space completion timeout.  Returns 0 on success or
 *  IXGBE_ERR_MASTER_REQUESTS_PENDING if requests never drain.
 */
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u32 i, poll;
	u16 value;

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked or the adapter is gone */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    ixgbe_removed(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		udelay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset
	 * routine of this need.  The first reset prevents new master
	 * requests from being issued by our device.  We then must wait
	 * 1usec or more for any remaining completions from the PCIe bus
	 * to trickle in, and then reset again to clear out any effects
	 * they may have had on our device.
	 */
	hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		udelay(100);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		if (ixgbe_removed(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
2538
2539
2540
2541
2542
2543
2544
2545
2546
/**
 *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SWFW semaphore through the GSSR register for the
 *  specified function (CSR, PHY0, PHY1, EEPROM, Flash).  Both the SW
 *  bits (@mask) and the corresponding FW bits (@mask << 5) must be
 *  free.  Retries for up to ~1-2s (200 x 5-10ms) and, on final
 *  failure, force-releases any of the requested bits that appear stuck
 *  before returning IXGBE_ERR_SWFW_SYNC.
 */
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5;	/* FW owns the bits 5 positions up */
	u32 timeout = 200;
	u32 i;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* resource free: claim it and drop the semaphore */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return 0;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			usleep_range(5000, 10000);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	usleep_range(5000, 10000);
	return IXGBE_ERR_SWFW_SYNC;
}
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2593{
2594 u32 gssr;
2595 u32 swmask = mask;
2596
2597 ixgbe_get_eeprom_semaphore(hw);
2598
2599 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2600 gssr &= ~swmask;
2601 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2602
2603 ixgbe_release_eeprom_semaphore(hw);
2604}
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
2616{
2617 *locked = false;
2618 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2619 return 0;
2620}
2621
2622
2623
2624
2625
2626
2627
2628
2629s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
2630{
2631 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
2632 return 0;
2633}
2634
2635
2636
2637
2638
2639
2640
2641
2642s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
2643{
2644#define IXGBE_MAX_SECRX_POLL 40
2645 int i;
2646 int secrxreg;
2647
2648 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2649 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2650 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2651 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2652 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2653 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2654 break;
2655 else
2656
2657 udelay(1000);
2658 }
2659
2660
2661 if (i >= IXGBE_MAX_SECRX_POLL)
2662 hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
2663
2664 return 0;
2665
2666}
2667
2668
2669
2670
2671
2672
2673
2674s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
2675{
2676 int secrxreg;
2677
2678 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2679 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2680 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2681 IXGBE_WRITE_FLUSH(hw);
2682
2683 return 0;
2684}
2685
2686
2687
2688
2689
2690
2691
2692
2693s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2694{
2695 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2696
2697 return 0;
2698}
2699
2700
2701
2702
2703
2704
/**
 *  ixgbe_blink_led_start_generic - Blink LED based on index
 *  @hw: pointer to hardware structure
 *  @index: led number to blink
 *
 *  If there is no link, forces the MAC link up (AUTOC FLU + AN restart,
 *  via the protected AUTOC accessors since MNG FW may own AUTOC) so the
 *  LED block has something to blink against, then programs the LED to
 *  blink mode.  Returns 0 or the error from the protected AUTOC access.
 */
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = false;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = 0;
	bool locked = false;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (!link_up) {
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
		if (ret_val)
			goto out;

		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;

		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
		if (ret_val)
			goto out;

		IXGBE_WRITE_FLUSH(hw);

		/* give the forced link time to come up */
		usleep_range(10000, 20000);
	}

	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
2745
2746
2747
2748
2749
2750
/**
 *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index
 *  @hw: pointer to hardware structure
 *  @index: led number to stop blinking
 *
 *  Undoes the forced link-up (clears AUTOC FLU, restarts AN via the
 *  protected AUTOC accessors) and restores the LED to link/activity
 *  mode.  Returns 0 or the error from the protected AUTOC access.
 */
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = 0;
	bool locked = false;

	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
	if (ret_val)
		goto out;

	/* drop the forced-link-up override set by blink_led_start */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;

	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
	if (ret_val)
		goto out;

	/* back to normal link/activity indication for this LED */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
2789 u16 *san_mac_offset)
2790{
2791 s32 ret_val;
2792
2793
2794
2795
2796
2797 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
2798 san_mac_offset);
2799 if (ret_val)
2800 hw_err(hw, "eeprom read at offset %d failed\n",
2801 IXGBE_SAN_MAC_ADDR_PTR);
2802
2803 return ret_val;
2804}
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2817{
2818 u16 san_mac_data, san_mac_offset;
2819 u8 i;
2820 s32 ret_val;
2821
2822
2823
2824
2825
2826 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2827 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
2828
2829 goto san_mac_addr_clr;
2830
2831
2832 hw->mac.ops.set_lan_id(hw);
2833
2834 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2835 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2836 for (i = 0; i < 3; i++) {
2837 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
2838 &san_mac_data);
2839 if (ret_val) {
2840 hw_err(hw, "eeprom read at offset %d failed\n",
2841 san_mac_offset);
2842 goto san_mac_addr_clr;
2843 }
2844 san_mac_addr[i * 2] = (u8)(san_mac_data);
2845 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2846 san_mac_offset++;
2847 }
2848 return 0;
2849
2850san_mac_addr_clr:
2851
2852
2853
2854 for (i = 0; i < 6; i++)
2855 san_mac_addr[i] = 0xFF;
2856 return ret_val;
2857}
2858
2859
2860
2861
2862
2863
2864
2865
2866u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2867{
2868 u16 msix_count = 1;
2869 u16 max_msix_count;
2870 u16 pcie_offset;
2871
2872 switch (hw->mac.type) {
2873 case ixgbe_mac_82598EB:
2874 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2875 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2876 break;
2877 case ixgbe_mac_82599EB:
2878 case ixgbe_mac_X540:
2879 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2880 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2881 break;
2882 default:
2883 return msix_count;
2884 }
2885
2886 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
2887 if (ixgbe_removed(hw->hw_addr))
2888 msix_count = 0;
2889 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2890
2891
2892 msix_count++;
2893
2894 if (msix_count > max_msix_count)
2895 msix_count = max_msix_count;
2896
2897 return msix_count;
2898}
2899
2900
2901
2902
2903
2904
2905
2906s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2907{
2908 u32 mpsar_lo, mpsar_hi;
2909 u32 rar_entries = hw->mac.num_rar_entries;
2910
2911
2912 if (rar >= rar_entries) {
2913 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2914 return IXGBE_ERR_INVALID_ARGUMENT;
2915 }
2916
2917 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2918 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2919
2920 if (ixgbe_removed(hw->hw_addr))
2921 goto done;
2922
2923 if (!mpsar_lo && !mpsar_hi)
2924 goto done;
2925
2926 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2927 if (mpsar_lo) {
2928 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2929 mpsar_lo = 0;
2930 }
2931 if (mpsar_hi) {
2932 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2933 mpsar_hi = 0;
2934 }
2935 } else if (vmdq < 32) {
2936 mpsar_lo &= ~(1 << vmdq);
2937 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2938 } else {
2939 mpsar_hi &= ~(1 << (vmdq - 32));
2940 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2941 }
2942
2943
2944 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2945 hw->mac.ops.clear_rar(hw, rar);
2946done:
2947 return 0;
2948}
2949
2950
2951
2952
2953
2954
2955
2956s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2957{
2958 u32 mpsar;
2959 u32 rar_entries = hw->mac.num_rar_entries;
2960
2961
2962 if (rar >= rar_entries) {
2963 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2964 return IXGBE_ERR_INVALID_ARGUMENT;
2965 }
2966
2967 if (vmdq < 32) {
2968 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2969 mpsar |= 1 << vmdq;
2970 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2971 } else {
2972 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2973 mpsar |= 1 << (vmdq - 32);
2974 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2975 }
2976 return 0;
2977}
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
2990{
2991 u32 rar = hw->mac.san_mac_rar_index;
2992
2993 if (vmdq < 32) {
2994 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
2995 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2996 } else {
2997 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2998 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
2999 }
3000
3001 return 0;
3002}
3003
3004
3005
3006
3007
3008s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3009{
3010 int i;
3011
3012 for (i = 0; i < 128; i++)
3013 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3014
3015 return 0;
3016}
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3027{
3028 u32 bits = 0;
3029 u32 first_empty_slot = 0;
3030 s32 regindex;
3031
3032
3033 if (vlan == 0)
3034 return 0;
3035
3036
3037
3038
3039
3040 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3041 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3042 if (!bits && !(first_empty_slot))
3043 first_empty_slot = regindex;
3044 else if ((bits & 0x0FFF) == vlan)
3045 break;
3046 }
3047
3048
3049
3050
3051
3052
3053 if (regindex >= IXGBE_VLVF_ENTRIES) {
3054 if (first_empty_slot)
3055 regindex = first_empty_slot;
3056 else {
3057 hw_dbg(hw, "No space in VLVF.\n");
3058 regindex = IXGBE_ERR_NO_SPACE;
3059 }
3060 }
3061
3062 return regindex;
3063}
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
/**
 *  ixgbe_set_vfta_generic - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id
 *  @vlan_on: true to add the VLAN, false to remove it
 *
 *  Turns on/off the VLAN filter table array bit for the given vlan id,
 *  and, when VT mode is enabled, maintains the matching VLVF/VLVFB
 *  pool-membership entry.  Returns 0, IXGBE_ERR_PARAM for vlan > 4095,
 *  or the error from the VLVF slot search.
 */
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                           bool vlan_on)
{
	s32 regindex;
	u32 bitindex;
	u32 vfta;
	u32 bits;
	u32 vt;
	u32 targetbit;
	bool vfta_changed = false;

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	/* only write VFTA back (later) if the bit actually changes */
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = true;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = true;
		}
	}

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;

		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index*2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
				bits |= (1 << (vind-32));
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1),
						bits);
			}
		} else {
			/* clear the pool bit; afterwards "bits" holds the OR
			 * of both VLVFB halves so we can tell whether any
			 * pool still references this vlan */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index*2),
						bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
			} else {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
				bits &= ~(1 << (vind-32));
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1),
						bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then we need to leave the VFTA entry bit set so
		 * that the other pools/VFs can still use the VLAN in the
		 * VFTA.  The VLVF entry itself stays valid (VIEN) as long
		 * as any pool references the vlan.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if (!vlan_on) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				vfta_changed = false;
			}
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
		}
	}

	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return 0;
}
3207
3208
3209
3210
3211
3212
3213
3214s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3215{
3216 u32 offset;
3217
3218 for (offset = 0; offset < hw->mac.vft_size; offset++)
3219 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3220
3221 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3222 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3223 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
3224 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
3225 }
3226
3227 return 0;
3228}
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3240 bool *link_up, bool link_up_wait_to_complete)
3241{
3242 u32 links_reg, links_orig;
3243 u32 i;
3244
3245
3246 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3247
3248 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3249
3250 if (links_orig != links_reg) {
3251 hw_dbg(hw, "LINKS changed from %08X to %08X\n",
3252 links_orig, links_reg);
3253 }
3254
3255 if (link_up_wait_to_complete) {
3256 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3257 if (links_reg & IXGBE_LINKS_UP) {
3258 *link_up = true;
3259 break;
3260 } else {
3261 *link_up = false;
3262 }
3263 msleep(100);
3264 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3265 }
3266 } else {
3267 if (links_reg & IXGBE_LINKS_UP)
3268 *link_up = true;
3269 else
3270 *link_up = false;
3271 }
3272
3273 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3274 IXGBE_LINKS_SPEED_10G_82599)
3275 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3276 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3277 IXGBE_LINKS_SPEED_1G_82599)
3278 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3279 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3280 IXGBE_LINKS_SPEED_100_82599)
3281 *speed = IXGBE_LINK_SPEED_100_FULL;
3282 else
3283 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3284
3285 return 0;
3286}
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3299 u16 *wwpn_prefix)
3300{
3301 u16 offset, caps;
3302 u16 alt_san_mac_blk_offset;
3303
3304
3305 *wwnn_prefix = 0xFFFF;
3306 *wwpn_prefix = 0xFFFF;
3307
3308
3309 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
3310 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
3311 goto wwn_prefix_err;
3312
3313 if ((alt_san_mac_blk_offset == 0) ||
3314 (alt_san_mac_blk_offset == 0xFFFF))
3315 goto wwn_prefix_out;
3316
3317
3318 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3319 if (hw->eeprom.ops.read(hw, offset, &caps))
3320 goto wwn_prefix_err;
3321 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3322 goto wwn_prefix_out;
3323
3324
3325 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3326 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
3327 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3328
3329 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3330 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
3331 goto wwn_prefix_err;
3332
3333wwn_prefix_out:
3334 return 0;
3335
3336wwn_prefix_err:
3337 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3338 return 0;
3339}
3340
3341
3342
3343
3344
3345
3346
3347
/**
 *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 *  @hw: pointer to hardware structure
 *  @enable: enable or disable switch for anti-spoofing
 *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	int pf_target_reg = pf >> 3;
	int pf_target_shift = pf % 8;
	u32 pfvfspoof = 0;

	/* 82598 has no anti-spoofing support */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/* PFVFSPOOF register array covers 8 pools per element, one MACAS
	 * enable bit each.  First fully program all registers holding
	 * pools numbered below the register containing the PF.
	 */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/* In the register that contains the PF, only keep the enable bits
	 * for pools below the PF's bit position -- the PF itself and the
	 * pools after it in this register are left with spoofing disabled.
	 */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/* Remaining registers hold pools at or above the PF; clear their
	 * MACAS enables entirely.
	 */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
3382
3383
3384
3385
3386
3387
3388
3389
3390void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3391{
3392 int vf_target_reg = vf >> 3;
3393 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3394 u32 pfvfspoof;
3395
3396 if (hw->mac.type == ixgbe_mac_82598EB)
3397 return;
3398
3399 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3400 if (enable)
3401 pfvfspoof |= (1 << vf_target_shift);
3402 else
3403 pfvfspoof &= ~(1 << vf_target_shift);
3404 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3405}
3406
3407
3408
3409
3410
3411
3412
3413
3414
/**
 *  ixgbe_get_device_caps_generic - Get additional device capabilities
 *  @hw: pointer to hardware structure
 *  @device_caps: the EEPROM word with the extra device capabilities
 *
 *  This function will read the EEPROM location for the device capabilities,
 *  and return the word through device_caps.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	/* NOTE(review): the read's return status is ignored, so on an EEPROM
	 * failure *device_caps holds whatever the eeprom op left there --
	 * confirm callers tolerate this.
	 */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return 0;
}
3421
3422
3423
3424
3425
3426
3427
3428
/**
 *  ixgbe_set_rxpba_generic - Initialize Rx packet buffer sizing
 *  @hw: pointer to hardware structure
 *  @num_pb: number of packet buffers to allocate
 *  @headroom: packet buffer space to reserve (subtracted from rx_pb_size)
 *  @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
			     int num_pb,
			     u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number
	 * of packet buffers requested using the supplied strategy.
	 */
	switch (strategy) {
	case (PBA_STRATEGY_WEIGHTED):
		/* Weighted: give the first half of the packet buffers
		 * 5/8 of the available space (5/8 per buffer over num_pb,
		 * scaled by 2 because only num_pb/2 buffers get it).
		 */
		rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* fall through - remaining buffers get an equal share */
	case (PBA_STRATEGY_EQUAL):
		/* Divide what is left evenly among the remaining buffers;
		 * i carries over from the weighted case above.
		 */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Tx packet buffer is always divided equally; the threshold is
	 * the buffer size in KB minus the max packet size.
	 */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused packet buffers, if any, to zero size */
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3497{
3498 u32 i;
3499 u8 sum = 0;
3500
3501 if (!buffer)
3502 return 0;
3503
3504 for (i = 0; i < length; i++)
3505 sum += buffer[i];
3506
3507 return (u8) (0 - sum);
3508}
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
/**
 *  ixgbe_host_interface_command - Issue command to manageability block
 *  @hw: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *           be placed
 *  @length: length of buffer in bytes, must be a multiple of 4
 *
 *  Communicates with the manageability block.  On success returns 0,
 *  else returns IXGBE_ERR_HOST_INTERFACE_COMMAND.
 **/
static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
					u32 length)
{
	u32 hicr, i, bi;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u8 buf_len, dword_len;

	s32 ret_val = 0;

	if (length == 0 || length & 0x3 ||
	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		hw_dbg(hw, "Buffer length failure.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, cpu_to_le32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Wait for the command-pending bit to clear or time out */
	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		usleep_range(1000, 2000);
	}

	/* Check command successful completion. */
	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
		hw_dbg(hw, "Command has failed with no status valid.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate header length in DWORDs */
	dword_len = hdr_size >> 2;

	/* First pull in the header so we know the reply buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&buffer[bi]);
	}

	/* If there is anything in the data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto out;

	if (length < (buf_len + hdr_size)) {
		hw_dbg(hw, "Buffer not large enough for reply message.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate payload length in DWORDs, rounding up odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off);
	 * <= is correct here because the header is exactly one DWORD,
	 * so the last payload index is hdr(1) + dword_len - 1 == dword_len
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&buffer[bi]);
	}

out:
	return ret_val;
}
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3620 u8 build, u8 sub)
3621{
3622 struct ixgbe_hic_drv_info fw_cmd;
3623 int i;
3624 s32 ret_val = 0;
3625
3626 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
3627 ret_val = IXGBE_ERR_SWFW_SYNC;
3628 goto out;
3629 }
3630
3631 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
3632 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
3633 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
3634 fw_cmd.port_num = (u8)hw->bus.func;
3635 fw_cmd.ver_maj = maj;
3636 fw_cmd.ver_min = min;
3637 fw_cmd.ver_build = build;
3638 fw_cmd.ver_sub = sub;
3639 fw_cmd.hdr.checksum = 0;
3640 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
3641 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
3642 fw_cmd.pad = 0;
3643 fw_cmd.pad2 = 0;
3644
3645 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
3646 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3647 sizeof(fw_cmd));
3648 if (ret_val != 0)
3649 continue;
3650
3651 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
3652 FW_CEM_RESP_STATUS_SUCCESS)
3653 ret_val = 0;
3654 else
3655 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3656
3657 break;
3658 }
3659
3660 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
3661out:
3662 return ret_val;
3663}
3664
3665
3666
3667
3668
3669
3670
3671
3672
/**
 *  ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 *  @hw: pointer to the hardware structure
 *
 *  Flushes pending transmit work out of the PCIe buffers ahead of a reset,
 *  on parts that flag IXGBE_FLAGS_DOUBLE_RESET_REQUIRED.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0;

	/* If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/* Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  NOTE(review): this presumably assumes
	 * receives have already been disabled -- confirm against callers.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	udelay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
3705
/* EMC thermal sensor register addresses, indexed by the ETS sensor index
 * decoded from the EEPROM: where to read each diode's temperature...
 */
static const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
/* ...and where to write each diode's thermal limit. */
static const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
3728 u16 *ets_offset)
3729{
3730 s32 status = 0;
3731
3732 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
3733 if (status)
3734 goto out;
3735
3736 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) {
3737 status = IXGBE_NOT_IMPLEMENTED;
3738 goto out;
3739 }
3740
3741 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
3742 if (status)
3743 goto out;
3744
3745 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) {
3746 status = IXGBE_NOT_IMPLEMENTED;
3747 goto out;
3748 }
3749
3750out:
3751 return status;
3752}
3753
3754
3755
3756
3757
3758
3759
3760s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
3761{
3762 s32 status = 0;
3763 u16 ets_offset;
3764 u16 ets_cfg;
3765 u16 ets_sensor;
3766 u8 num_sensors;
3767 u8 i;
3768 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3769
3770
3771 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
3772 status = IXGBE_NOT_IMPLEMENTED;
3773 goto out;
3774 }
3775
3776 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3777 if (status)
3778 goto out;
3779
3780 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3781 if (num_sensors > IXGBE_MAX_SENSORS)
3782 num_sensors = IXGBE_MAX_SENSORS;
3783
3784 for (i = 0; i < num_sensors; i++) {
3785 u8 sensor_index;
3786 u8 sensor_location;
3787
3788 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
3789 &ets_sensor);
3790 if (status)
3791 goto out;
3792
3793 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3794 IXGBE_ETS_DATA_INDEX_SHIFT);
3795 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3796 IXGBE_ETS_DATA_LOC_SHIFT);
3797
3798 if (sensor_location != 0) {
3799 status = hw->phy.ops.read_i2c_byte(hw,
3800 ixgbe_emc_temp_data[sensor_index],
3801 IXGBE_I2C_THERMAL_SENSOR_ADDR,
3802 &data->sensor[i].temp);
3803 if (status)
3804 goto out;
3805 }
3806 }
3807out:
3808 return status;
3809}
3810
3811
3812
3813
3814
3815
3816
3817
3818s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3819{
3820 s32 status = 0;
3821 u16 ets_offset;
3822 u16 ets_cfg;
3823 u16 ets_sensor;
3824 u8 low_thresh_delta;
3825 u8 num_sensors;
3826 u8 therm_limit;
3827 u8 i;
3828 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3829
3830 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
3831
3832
3833 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
3834 status = IXGBE_NOT_IMPLEMENTED;
3835 goto out;
3836 }
3837
3838 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3839 if (status)
3840 goto out;
3841
3842 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
3843 IXGBE_ETS_LTHRES_DELTA_SHIFT);
3844 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3845 if (num_sensors > IXGBE_MAX_SENSORS)
3846 num_sensors = IXGBE_MAX_SENSORS;
3847
3848 for (i = 0; i < num_sensors; i++) {
3849 u8 sensor_index;
3850 u8 sensor_location;
3851
3852 if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
3853 hw_err(hw, "eeprom read at offset %d failed\n",
3854 ets_offset + 1 + i);
3855 continue;
3856 }
3857 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3858 IXGBE_ETS_DATA_INDEX_SHIFT);
3859 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3860 IXGBE_ETS_DATA_LOC_SHIFT);
3861 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
3862
3863 hw->phy.ops.write_i2c_byte(hw,
3864 ixgbe_emc_therm_limit[sensor_index],
3865 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
3866
3867 if (sensor_location == 0)
3868 continue;
3869
3870 data->sensor[i].location = sensor_location;
3871 data->sensor[i].caution_thresh = therm_limit;
3872 data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
3873 }
3874out:
3875 return status;
3876}
3877
3878