1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/pci.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32#include <linux/netdevice.h>
33
34#include "ixgbe.h"
35#include "ixgbe_common.h"
36#include "ixgbe_phy.h"
37
/* Bit-bang (software SPI) EEPROM access helpers. */
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);

/* Multicast/EEPROM/PCIe helpers used by the generic MAC operations. */
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data);
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset);
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
59
60
/* Register offset table for 8259X-family MACs, indexed by IXGBE_MVALS_IDX_*. */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(8259X)
};
64
65
66
67
68
69
70
71
72
73
74bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
75{
76 bool supported = false;
77 ixgbe_link_speed speed;
78 bool link_up;
79
80 switch (hw->phy.media_type) {
81 case ixgbe_media_type_fiber:
82 hw->mac.ops.check_link(hw, &speed, &link_up, false);
83
84 if (link_up)
85 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
86 true : false;
87 else
88 supported = true;
89 break;
90 case ixgbe_media_type_backplane:
91 supported = true;
92 break;
93 case ixgbe_media_type_copper:
94
95 switch (hw->device_id) {
96 case IXGBE_DEV_ID_82599_T3_LOM:
97 case IXGBE_DEV_ID_X540T:
98 case IXGBE_DEV_ID_X540T1:
99 case IXGBE_DEV_ID_X550T:
100 case IXGBE_DEV_ID_X550T1:
101 case IXGBE_DEV_ID_X550EM_X_10G_T:
102 case IXGBE_DEV_ID_X550EM_A_10G_T:
103 case IXGBE_DEV_ID_X550EM_A_1G_T:
104 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
105 supported = true;
106 break;
107 default:
108 break;
109 }
110 default:
111 break;
112 }
113
114 return supported;
115}
116
117
118
119
120
121
122
123s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
124{
125 s32 ret_val = 0;
126 u32 reg = 0, reg_bp = 0;
127 u16 reg_cu = 0;
128 bool locked = false;
129
130
131
132
133
134 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
135 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
136 return IXGBE_ERR_INVALID_LINK_SETTINGS;
137 }
138
139
140
141
142
143 if (hw->fc.requested_mode == ixgbe_fc_default)
144 hw->fc.requested_mode = ixgbe_fc_full;
145
146
147
148
149
150
151 switch (hw->phy.media_type) {
152 case ixgbe_media_type_backplane:
153
154 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
155 if (ret_val)
156 return ret_val;
157
158
159 case ixgbe_media_type_fiber:
160 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
161
162 break;
163 case ixgbe_media_type_copper:
164 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
165 MDIO_MMD_AN, ®_cu);
166 break;
167 default:
168 break;
169 }
170
171
172
173
174
175
176
177
178
179
180
181 switch (hw->fc.requested_mode) {
182 case ixgbe_fc_none:
183
184 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
185 if (hw->phy.media_type == ixgbe_media_type_backplane)
186 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
187 IXGBE_AUTOC_ASM_PAUSE);
188 else if (hw->phy.media_type == ixgbe_media_type_copper)
189 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
190 break;
191 case ixgbe_fc_tx_pause:
192
193
194
195
196 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
197 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
198 if (hw->phy.media_type == ixgbe_media_type_backplane) {
199 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
200 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
201 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
202 reg_cu |= IXGBE_TAF_ASM_PAUSE;
203 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
204 }
205 break;
206 case ixgbe_fc_rx_pause:
207
208
209
210
211
212
213
214
215
216 case ixgbe_fc_full:
217
218 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
219 if (hw->phy.media_type == ixgbe_media_type_backplane)
220 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
221 IXGBE_AUTOC_ASM_PAUSE;
222 else if (hw->phy.media_type == ixgbe_media_type_copper)
223 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
224 break;
225 default:
226 hw_dbg(hw, "Flow control param set incorrectly\n");
227 return IXGBE_ERR_CONFIG;
228 }
229
230 if (hw->mac.type != ixgbe_mac_X540) {
231
232
233
234
235 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
236 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
237
238
239 if (hw->fc.strict_ieee)
240 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
241
242 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
243 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
244 }
245
246
247
248
249
250
251 if (hw->phy.media_type == ixgbe_media_type_backplane) {
252
253
254
255
256 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
257 if (ret_val)
258 return ret_val;
259
260 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
261 ixgbe_device_supports_autoneg_fc(hw)) {
262 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
263 MDIO_MMD_AN, reg_cu);
264 }
265
266 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
267 return ret_val;
268}
269
270
271
272
273
274
275
276
277
278
/**
 *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware: identifies media type and PHY, clears the VLAN
 *  filter table and statistics counters, disables relaxed ordering, sets
 *  up flow control, and determines whether the crosstalk fix is needed.
 *  Returns 0 on success or the error from setup_fc.
 */
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* Identify the PHY */
	hw->phy.ops.identify(hw);

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control if the method exists */
	if (hw->mac.ops.setup_fc) {
		ret_val = hw->mac.ops.setup_fc(hw);
		if (ret_val)
			return ret_val;
	}

	/* Cashe bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = false;
		else
			hw->need_crosstalk_fix = true;
		break;
	default:
		hw->need_crosstalk_fix = false;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}
331
332
333
334
335
336
337
338
339
340
341
/**
 *  ixgbe_start_hw_gen2 - Init sequence for common device family
 *  @hw: pointer to hw structure
 *
 *  Performs the init sequence common to the second generation
 *  of 10 GbE devices: clears the per-queue rate limiters and (unless
 *  CONFIG_ARCH_WANT_RELAX_ORDER is set) disables relaxed ordering on
 *  all Tx and Rx DCA control registers.  Always returns 0.
 */
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;

	/* Clear the rate limiters */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
	/* Disable relaxed ordering */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		u32 regval;

		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		u32 regval;

		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}
#endif
	return 0;
}
374
375
376
377
378
379
380
381
382
383
384
385s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
386{
387 s32 status;
388
389
390 status = hw->mac.ops.reset_hw(hw);
391
392 if (status == 0) {
393
394 status = hw->mac.ops.start_hw(hw);
395 }
396
397
398 if (hw->mac.ops.init_led_link_act)
399 hw->mac.ops.init_led_link_act(hw);
400
401 return status;
402}
403
404
405
406
407
408
409
410
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears the hardware statistics counters by reading them (the counters
 *  are clear-on-read); the values are intentionally discarded.  Which
 *  register variant is read depends on the MAC generation.  Always
 *  returns 0.
 */
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* 82599+ uses the *CNT variants of the link XON/XOFF Rx counters */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority XON/XOFF counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Packet size histogram and good/broadcast/multicast counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* RNBC only exists on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters; 82599+ splits byte counts into L/H halves */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X550/X540 also clear PHY-resident error counters over MDIO */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			hw->phy.ops.identify(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
	}

	return 0;
}
511
512
513
514
515
516
517
518
519
/**
 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: stores the part number string from the EEPROM
 *  @pba_num_size: part number string buffer length
 *
 *  Reads the part number string from the EEPROM.  Supports both the legacy
 *  hex-encoded format (two words holding eight hex digits) and the newer
 *  string format reached via a pointer word.  Returns 0 on success or an
 *  IXGBE_ERR_* code on NVM read failure, too-small buffer, or an invalid
 *  PBA section.
 */
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	if (pba_num == NULL) {
		hw_dbg(hw, "PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	/* if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		hw_dbg(hw, "NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			hw_dbg(hw, "PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return 0;
	}

	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		hw_dbg(hw, "NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough: (length - 1) data words,
	 * two chars per word, plus a terminating NUL
	 */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		hw_dbg(hw, "PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		/* each word holds two ASCII characters, high byte first */
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return 0;
}
620
621
622
623
624
625
626
627
628
629
630s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
631{
632 u32 rar_high;
633 u32 rar_low;
634 u16 i;
635
636 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
637 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
638
639 for (i = 0; i < 4; i++)
640 mac_addr[i] = (u8)(rar_low >> (i*8));
641
642 for (i = 0; i < 2; i++)
643 mac_addr[i+4] = (u8)(rar_high >> (i*8));
644
645 return 0;
646}
647
648enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
649{
650 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
651 case IXGBE_PCI_LINK_WIDTH_1:
652 return ixgbe_bus_width_pcie_x1;
653 case IXGBE_PCI_LINK_WIDTH_2:
654 return ixgbe_bus_width_pcie_x2;
655 case IXGBE_PCI_LINK_WIDTH_4:
656 return ixgbe_bus_width_pcie_x4;
657 case IXGBE_PCI_LINK_WIDTH_8:
658 return ixgbe_bus_width_pcie_x8;
659 default:
660 return ixgbe_bus_width_unknown;
661 }
662}
663
664enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
665{
666 switch (link_status & IXGBE_PCI_LINK_SPEED) {
667 case IXGBE_PCI_LINK_SPEED_2500:
668 return ixgbe_bus_speed_2500;
669 case IXGBE_PCI_LINK_SPEED_5000:
670 return ixgbe_bus_speed_5000;
671 case IXGBE_PCI_LINK_SPEED_8000:
672 return ixgbe_bus_speed_8000;
673 default:
674 return ixgbe_bus_speed_unknown;
675 }
676}
677
678
679
680
681
682
683
684s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
685{
686 u16 link_status;
687
688 hw->bus.type = ixgbe_bus_type_pci_express;
689
690
691 link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
692
693 hw->bus.width = ixgbe_convert_bus_width(link_status);
694 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
695
696 hw->mac.ops.set_lan_id(hw);
697
698 return 0;
699}
700
701
702
703
704
705
706
707
/**
 *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 *  @hw: pointer to the HW structure
 *
 *  Determines the LAN function id by reading the STATUS register's LAN ID
 *  field and stores it in hw->bus.  The function number is inverted when
 *  the FACTPS register reports LAN function select swap.  On X550EM_A SFP
 *  parts the NVM instance id is also read from the EEPROM.
 */
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 ee_ctrl_4;
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;

	/* Get MAC instance from EEPROM for configuring CS4227 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
	}
}
730
731
732
733
734
735
736
737
738
739
/**
 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct.  Clears interrupts,
 *  disables transmit and receive units, masks and clears all interrupts,
 *  flushes the Tx/Rx queues, and disables PCIe master access.  Returns the
 *  status of ixgbe_disable_pcie_master().
 */
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop
	 * touching the hardware from this point on.
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	hw->mac.ops.disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables and allow pending descriptors to drain */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 2000);

	/* Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
782
783
784
785
786
787
788
789
/**
 *  ixgbe_init_led_link_act_generic - Store the LED index for link/activity
 *  @hw: pointer to hardware structure
 *
 *  Scans the LEDCTL register for an LED already configured in link/activity
 *  mode and stores its index in mac->led_link_act.  If none is found, falls
 *  back to a per-MAC default index.  Always returns 0.
 */
s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 led_reg, led_mode;
	u16 i;

	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* Get LED link active from the LEDCTL register */
	for (i = 0; i < 4; i++) {
		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);

		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
		    IXGBE_LED_LINK_ACTIVE) {
			mac->led_link_act = i;
			return 0;
		}
	}

	/* If LEDCTL register does not have the LED link active set, then use
	 * known MAC defaults.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_x550em_a:
		mac->led_link_act = 0;
		break;
	case ixgbe_mac_X550EM_x:
		mac->led_link_act = 1;
		break;
	default:
		mac->led_link_act = 2;
	}

	return 0;
}
825
826
827
828
829
830
831s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
832{
833 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
834
835 if (index > 3)
836 return IXGBE_ERR_PARAM;
837
838
839 led_reg &= ~IXGBE_LED_MODE_MASK(index);
840 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
841 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
842 IXGBE_WRITE_FLUSH(hw);
843
844 return 0;
845}
846
847
848
849
850
851
852s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
853{
854 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
855
856 if (index > 3)
857 return IXGBE_ERR_PARAM;
858
859
860 led_reg &= ~IXGBE_LED_MODE_MASK(index);
861 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
862 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
863 IXGBE_WRITE_FLUSH(hw);
864
865 return 0;
866}
867
868
869
870
871
872
873
874
/**
 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 *  @hw: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
 *  ixgbe_hw struct on first call (type, word size, address bits and
 *  semaphore delay); subsequent calls are no-ops.  Always returns 0.
 */
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay (in ms) */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/* Check for SPI EEPROM presence; if attached, derive the
		 * word size from the size field of the EEC register.
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/* SPI EEPROM is assumed to be word addressable; the
			 * EEC size field encodes log2 of the word count.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = BIT(eeprom_size +
						IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* Address width: 16 bits when the ADDR_SIZE bit is set */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
		       eeprom->type, eeprom->word_size, eeprom->address_bits);
	}

	return 0;
}
917
918
919
920
921
922
923
924
925
926
/**
 *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of words
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  Writes a 16 bit word(s) to the EEPROM via the software SPI interface,
 *  splitting the request into chunks of IXGBE_EEPROM_RD_BUFFER_MAX_COUNT
 *  words.  If the requested page size is unknown and the write is larger
 *  than one page, the page size is detected first.  Returns 0 on success
 *  or an IXGBE_ERR_* code.
 */
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0)
		return IXGBE_ERR_INVALID_ARGUMENT;

	if (offset + words > hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	/* The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/* We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != 0)
			break;
	}

	return status;
}
966
967
968
969
970
971
972
973
974
975
976
/**
 *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of word(s)
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.  Acquires the
 *  EEPROM via software SPI, then for each word issues a write-enable,
 *  the (possibly A8-modified) write opcode and byte address, and clocks
 *  out words until a page boundary.  Returns 0 on success or an
 *  IXGBE_ERR_* code when the EEPROM cannot be acquired or is not ready.
 */
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);
	if (status)
		return status;

	if (ixgbe_ready_eeprom(hw) != 0) {
		ixgbe_release_eeprom(hw);
		return IXGBE_ERR_EEPROM;
	}

	for (i = 0; i < words; i++) {
		ixgbe_standby_eeprom(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		ixgbe_shift_out_eeprom_bits(hw,
					    IXGBE_EEPROM_WREN_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);

		ixgbe_standby_eeprom(hw);

		/* Some SPI eeproms use the 8th address bit embedded
		 * in the opcode
		 */
		if ((hw->eeprom.address_bits == 8) &&
		    ((offset + i) >= 128))
			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
					    hw->eeprom.address_bits);

		page_size = hw->eeprom.word_page_size;

		/* Send the data in burst via SPI */
		do {
			word = data[i];
			/* byte-swap: SPI device expects high byte first */
			word = (word >> 8) | (word << 8);
			ixgbe_shift_out_eeprom_bits(hw, word, 16);

			if (page_size == 0)
				break;

			/* do not wrap around page */
			if (((offset + i) & (page_size - 1)) ==
			    (page_size - 1))
				break;
		} while (++i < words);

		ixgbe_standby_eeprom(hw);
		/* wait for the internal write cycle to complete */
		usleep_range(10000, 20000);
	}

	/* Done with writing - release the EEPROM */
	ixgbe_release_eeprom(hw);

	return 0;
}
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1055{
1056 hw->eeprom.ops.init_params(hw);
1057
1058 if (offset >= hw->eeprom.word_size)
1059 return IXGBE_ERR_EEPROM;
1060
1061 return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1062}
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
/**
 *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @words: number of word(s)
 *  @data: read 16 bit words(s) from EEPROM
 *
 *  Reads 16 bit word(s) from the EEPROM via the software SPI interface,
 *  in chunks of IXGBE_EEPROM_RD_BUFFER_MAX_COUNT words so the hardware
 *  semaphore is not held for too long.  Returns 0 on success or an
 *  IXGBE_ERR_* code.
 */
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0)
		return IXGBE_ERR_INVALID_ARGUMENT;

	if (offset + words > hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	/* We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status)
			return status;
	}

	return 0;
}
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
/**
 *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @words: number of word(s)
 *  @data: read 16 bit word(s) from EEPROM
 *
 *  Reads 16 bit word(s) from EEPROM through bit-bang method: acquires the
 *  EEPROM, then for each word shifts out the (possibly A8-modified) read
 *  opcode and byte address and shifts in the data.  Returns 0 on success
 *  or an IXGBE_ERR_* code when the EEPROM cannot be acquired or is not
 *  ready.
 */
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);
	if (status)
		return status;

	if (ixgbe_ready_eeprom(hw) != 0) {
		ixgbe_release_eeprom(hw);
		return IXGBE_ERR_EEPROM;
	}

	for (i = 0; i < words; i++) {
		ixgbe_standby_eeprom(hw);
		/* Some SPI eeproms use the 8th address bit embedded
		 * in the opcode
		 */
		if ((hw->eeprom.address_bits == 8) &&
		    ((offset + i) >= 128))
			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the READ command (opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
					    hw->eeprom.address_bits);

		/* Read the data; byte-swap from SPI wire order */
		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
		data[i] = (word_in >> 8) | (word_in << 8);
	}

	/* End this read operation */
	ixgbe_release_eeprom(hw);

	return 0;
}
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1168 u16 *data)
1169{
1170 hw->eeprom.ops.init_params(hw);
1171
1172 if (offset >= hw->eeprom.word_size)
1173 return IXGBE_ERR_EEPROM;
1174
1175 return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1176}
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1188 u16 words, u16 *data)
1189{
1190 u32 eerd;
1191 s32 status;
1192 u32 i;
1193
1194 hw->eeprom.ops.init_params(hw);
1195
1196 if (words == 0)
1197 return IXGBE_ERR_INVALID_ARGUMENT;
1198
1199 if (offset >= hw->eeprom.word_size)
1200 return IXGBE_ERR_EEPROM;
1201
1202 for (i = 0; i < words; i++) {
1203 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1204 IXGBE_EEPROM_RW_REG_START;
1205
1206 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1207 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1208
1209 if (status == 0) {
1210 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1211 IXGBE_EEPROM_RW_REG_DATA);
1212 } else {
1213 hw_dbg(hw, "Eeprom read timed out\n");
1214 return status;
1215 }
1216 }
1217
1218 return 0;
1219}
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
/**
 *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be used as a scratch pad
 *
 *  Discovers EEPROM page size by writing marching data at given offset.
 *  This function is called before writing arbitrary data to the EEPROM
 *  buffer — the offset is assumed to contain data that can be overwritten.
 *  Stores the detected size in hw->eeprom.word_page_size.  Returns 0 on
 *  success or the error from the underlying write/read.
 */
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status;
	u16 i;

	/* Marching pattern 0..MAX-1 */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Write with the maximum assumed page size, then reset to unknown */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status)
		return status;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status)
		return status;

	/* When writing in burst more than the actual page size,
	 * EEPROM address wraps around current page: the word read
	 * back at @offset reveals how far the write wrapped.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
	       hw->eeprom.word_page_size);
	return 0;
}
1261
1262
1263
1264
1265
1266
1267
1268
1269
/**
 *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a single 16 bit word from the EEPROM using the EERD register.
 */
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
/**
 *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of  word in the EEPROM to write
 *  @words: number of words
 *  @data: word(s) write to the EEPROM
 *
 *  Write a 16 bit word(s) to the EEPROM using the EEWR register, polling
 *  for idle before and for completion after each write.  Returns 0 on
 *  success or an IXGBE_ERR_* code on bad arguments or timeout.
 */
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	u32 eewr;
	s32 status;
	u16 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0)
		return IXGBE_ERR_INVALID_ARGUMENT;

	if (offset >= hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	for (i = 0; i < words; i++) {
		/* Compose the EEWR command: address, data, and start bit */
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		/* Wait until any previous write has finished */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			return status;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		/* Wait for this write to complete */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			return status;
		}
	}

	return 0;
}
1321
1322
1323
1324
1325
1326
1327
1328
1329
/**
 *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of  word in the EEPROM to write
 *  @data: word write to the EEPROM
 *
 *  Write a single 16 bit word to the EEPROM using the EEWR register.
 */
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1344{
1345 u32 i;
1346 u32 reg;
1347
1348 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1349 if (ee_reg == IXGBE_NVM_POLL_READ)
1350 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1351 else
1352 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1353
1354 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1355 return 0;
1356 }
1357 udelay(5);
1358 }
1359 return IXGBE_ERR_EEPROM;
1360}
1361
1362
1363
1364
1365
1366
1367
1368
/**
 *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *
 *  Prepares EEPROM for access using bit-bang method: takes the SW/FW
 *  EEPROM semaphore, requests EEPROM access via the EEC REQ bit and polls
 *  for the GNT bit, then clears CS and SK.  On grant failure the request
 *  and semaphore are released.  Returns 0 on success, IXGBE_ERR_SWFW_SYNC
 *  or IXGBE_ERR_EEPROM on failure.
 */
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;
	u32 i;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		return IXGBE_ERR_SWFW_SYNC;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Request EEPROM Access */
	eec |= IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

	for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		if (eec & IXGBE_EEC_GNT)
			break;
		udelay(5);
	}

	/* Release if grant not acquired */
	if (!(eec & IXGBE_EEC_GNT)) {
		eec &= ~IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		hw_dbg(hw, "Could not acquire EEPROM grant\n");

		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
		return IXGBE_ERR_EEPROM;
	}

	/* Setup EEPROM for Read/Write: clear CS and SK so the device is
	 * deselected with the clock low.
	 */
	eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	return 0;
}
1408
1409
1410
1411
1412
1413
1414
/**
 *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  Sets the hardware semaphores so EEPROM access can occur for bit-bang
 *  method: first waits for the SMBI bit in SWSM to clear (forcibly
 *  releasing it after a timeout, which may be needed after an improper
 *  shutdown), then claims the SWESMBI bit and verifies it stuck.
 *  Returns 0 on success or IXGBE_ERR_EEPROM on timeout.
 */
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/* If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			break;
		usleep_range(50, 100);
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
		/* this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usleep_range(50, 100);

		/* one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SMBI) {
			hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
			return IXGBE_ERR_EEPROM;
		}
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	for (i = 0; i < timeout; i++) {
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

		/* Set the SW EEPROM semaphore bit to request access */
		swsm |= IXGBE_SWSM_SWESMBI;
		IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);

		/* If we set the bit successfully then we got the
		 * semaphore.
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SWESMBI)
			break;

		usleep_range(50, 100);
	}

	/* Release semaphores and return error if SW EEPROM semaphore
	 * was not granted because we don't have access to the EEPROM
	 */
	if (i >= timeout) {
		hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
		ixgbe_release_eeprom_semaphore(hw);
		return IXGBE_ERR_EEPROM;
	}

	return 0;
}
1483
1484
1485
1486
1487
1488
1489
/**
 *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  This function clears hardware semaphore bits (SWESMBI and SMBI in the
 *  SWSM register) and flushes the write.
 */
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
	IXGBE_WRITE_FLUSH(hw);
}
1501
1502
1503
1504
1505
/**
 *  ixgbe_ready_eeprom - Polls for EEPROM ready
 *  @hw: pointer to hardware structure
 *
 *  Repeatedly sends the SPI Read Status Register opcode and checks the
 *  busy bit until the EEPROM reports ready or IXGBE_EEPROM_MAX_RETRY_SPI
 *  microseconds have elapsed (5 us per attempt).  Returns 0 when ready
 *  or IXGBE_ERR_EEPROM on timeout.
 */
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	u16 i;
	u8 spi_stat_reg;

	/* Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		udelay(5);
		ixgbe_standby_eeprom(hw);
	}

	/* On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		hw_dbg(hw, "SPI EEPROM Status error\n");
		return IXGBE_ERR_EEPROM;
	}

	return 0;
}
1539
1540
1541
1542
1543
/**
 *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 *  @hw: pointer to hardware structure
 *
 *  Pulses the chip select line (CS high, then low) with a 1 us settle
 *  after each transition, deselecting and reselecting the SPI device.
 */
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
1560
1561
1562
1563
1564
1565
1566
/**
 *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 *  @hw: pointer to hardware structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 *
 *  Bit-bangs @count bits of @data to the EEPROM, most significant bit
 *  first, by driving the DI line and pulsing the clock for each bit.
 */
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = BIT(count - 1);

	for (i = 0; i < count; i++) {
		/* A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		udelay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/* Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}
1615
1616
1617
1618
1619
/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in (up to 16)
 *
 * Returns the bits read from the EEPROM "DO" line, MSB first.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
1652
1653
1654
1655
1656
1657
/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
1669
1670
1671
1672
1673
1674
/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Lower the clock input to the EEPROM
	 * (clearing the SK bit), then delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
1686
1687
1688
1689
1690
/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deselects the EEPROM, stops its request on the EEC register, and drops
 * the SW/FW synchronization semaphore acquired by ixgbe_acquire_eeprom().
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	udelay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/*
	 * Delay before attempt to obtain semaphore again to allow FW
	 * access. semaphore_delay is in ms we need us for usleep_range
	 */
	usleep_range(hw->eeprom.semaphore_delay * 1000,
		     hw->eeprom.semaphore_delay * 2000);
}
1718
1719
1720
1721
1722
/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				hw_dbg(hw, "EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}
1773
1774
1775
1776
1777
1778
1779
1780
1781
/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum.  If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns the checksum (>= 0) or a negative error */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = IXGBE_ERR_EEPROM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}
1824
1825
1826
1827
1828
/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recalculates the EEPROM checksum and writes it to its dedicated word.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns the checksum (>= 0) or a negative error */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	/* Record lower 32 bits of MAC address and then make
	 * sure setting of RAH gets the upper 16 bits and AV bit last
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return 0;
}
1906
1907
1908
1909
1910
1911
1912
1913
/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}
1941
1942
1943
1944
1945
1946
1947
1948
1949
/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software
	 * override to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(hw->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
	} else {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/*  clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	if (hw->mac.ops.init_uta_tables)
		hw->mac.ops.init_uta_tables(hw);

	return 0;
}
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2014{
2015 u32 vector = 0;
2016
2017 switch (hw->mac.mc_filter_type) {
2018 case 0:
2019 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2020 break;
2021 case 1:
2022 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2023 break;
2024 case 2:
2025 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2026 break;
2027 case 3:
2028 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2029 break;
2030 default:
2031 hw_dbg(hw, "MC filter type param set incorrectly\n");
2032 break;
2033 }
2034
2035
2036 vector &= 0xFFF;
2037 return vector;
2038}
2039
2040
2041
2042
2043
2044
2045
2046
/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: Multicast address
 *
 * Sets the bit-vector in the multicast table shadow.
 **/
static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits.  We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
}
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @netdev: pointer to net device structure
 *
 * The given list replaces any existing list. Clears the MC addrs from receive
 * address registers and the multicast table. Uses unused receive address
 * registers for the first multicast addresses, and hashes the rest into the
 * multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
				      struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i;

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	hw_dbg(hw, " Clearing MTA\n");
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* Update mta shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		hw_dbg(hw, " Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, ha->addr);
	}

	/* Enable mta */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
	return 0;
}
2117
2118
2119
2120
2121
2122
2123
2124s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2125{
2126 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2127
2128 if (a->mta_in_use > 0)
2129 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2130 hw->mac.mc_filter_type);
2131
2132 return 0;
2133}
2134
2135
2136
2137
2138
2139
2140
2141s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2142{
2143 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2144
2145 if (a->mta_in_use > 0)
2146 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2147
2148 return 0;
2149}
2150
2151
2152
2153
2154
2155
2156
/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	/* Validate the pause time: a value of zero causes XOFF floods */
	if (!hw->fc.pause_time)
		return IXGBE_ERR_INVALID_LINK_SETTINGS;

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				hw_dbg(hw, "Invalid water mark configuration\n");
				return IXGBE_ERR_INVALID_LINK_SETTINGS;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return 0;
}
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2286 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2287{
2288 if ((!(adv_reg)) || (!(lp_reg)))
2289 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2290
2291 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2292
2293
2294
2295
2296
2297
2298
2299 if (hw->fc.requested_mode == ixgbe_fc_full) {
2300 hw->fc.current_mode = ixgbe_fc_full;
2301 hw_dbg(hw, "Flow Control = FULL.\n");
2302 } else {
2303 hw->fc.current_mode = ixgbe_fc_rx_pause;
2304 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
2305 }
2306 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2307 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2308 hw->fc.current_mode = ixgbe_fc_tx_pause;
2309 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
2310 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2311 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2312 hw->fc.current_mode = ixgbe_fc_rx_pause;
2313 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
2314 } else {
2315 hw->fc.current_mode = ixgbe_fc_none;
2316 hw_dbg(hw, "Flow Control = NONE.\n");
2317 }
2318 return 0;
2319}
2320
2321
2322
2323
2324
2325
2326
2327static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2328{
2329 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2330 s32 ret_val;
2331
2332
2333
2334
2335
2336
2337
2338 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2339 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2340 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2341 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2342
2343 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2344 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2345
2346 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2347 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2348 IXGBE_PCS1GANA_ASM_PAUSE,
2349 IXGBE_PCS1GANA_SYM_PAUSE,
2350 IXGBE_PCS1GANA_ASM_PAUSE);
2351
2352 return ret_val;
2353}
2354
2355
2356
2357
2358
2359
2360
/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
		return IXGBE_ERR_FC_NOT_NEGOTIATED;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
			return IXGBE_ERR_FC_NOT_NEGOTIATED;
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

	return ret_val;
}
2393
2394
2395
2396
2397
2398
2399
/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	/* Read local and link-partner pause advertisements over MDIO */
	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
			     MDIO_MMD_AN,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
			     MDIO_MMD_AN,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
				  (u32)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}
2417
2418
2419
2420
2421
2422
2423
2424
/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 *
	 * Since we're being called from an LSC, link is already known to be
	 * up.  So use link_up_wait_to_complete=false.
	 */
	if (hw->fc.disable_fc_autoneg)
		goto out;

	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (!link_up)
		goto out;

	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw))
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == 0) {
		hw->fc.fc_was_autonegged = true;
	} else {
		/* Fall back to the user-requested mode on any failure */
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2489{
2490 s16 devctl2;
2491 u32 pollcnt;
2492
2493 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
2494 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2495
2496 switch (devctl2) {
2497 case IXGBE_PCIDEVCTRL2_65_130ms:
2498 pollcnt = 1300;
2499 break;
2500 case IXGBE_PCIDEVCTRL2_260_520ms:
2501 pollcnt = 5200;
2502 break;
2503 case IXGBE_PCIDEVCTRL2_1_2s:
2504 pollcnt = 20000;
2505 break;
2506 case IXGBE_PCIDEVCTRL2_4_8s:
2507 pollcnt = 80000;
2508 break;
2509 case IXGBE_PCIDEVCTRL2_17_34s:
2510 pollcnt = 34000;
2511 break;
2512 case IXGBE_PCIDEVCTRL2_50_100us:
2513 case IXGBE_PCIDEVCTRL2_1_2ms:
2514 case IXGBE_PCIDEVCTRL2_16_32ms:
2515 case IXGBE_PCIDEVCTRL2_16_32ms_def:
2516 default:
2517 pollcnt = 800;
2518 break;
2519 }
2520
2521
2522 return (pollcnt * 11) / 10;
2523}
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else 0
 * is returned signifying master requests disabled.
 **/
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	u32 i, poll;
	u16 value;

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Poll for bit to read as set */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
			break;
		usleep_range(100, 120);
	}
	if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) {
		hw_dbg(hw, "GIO disable did not set - requesting resets\n");
		goto gio_disable_fail;
	}

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    ixgbe_removed(hw->hw_addr))
		return 0;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		udelay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			return 0;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
gio_disable_fail:
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	if (hw->mac.type >= ixgbe_mac_X550)
		return 0;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		udelay(100);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		if (ixgbe_removed(hw->hw_addr))
			return 0;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			return 0;
	}

	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
	return IXGBE_ERR_MASTER_REQUESTS_PENDING;
}
2597
2598
2599
2600
2601
2602
2603
2604
2605
/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5; /* FW bits sit 5 positions above the SW bits */
	u32 timeout = 200;
	u32 i;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return 0;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			usleep_range(5000, 10000);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	usleep_range(5000, 10000);
	return IXGBE_ERR_SWFW_SYNC;
}
2642
2643
2644
2645
2646
2647
2648
2649
2650
/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr;
	u32 swmask = mask;

	/* Serialize the GSSR read-modify-write with the HW semaphore */
	ixgbe_get_eeprom_semaphore(hw);

	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	gssr &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
}
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
/**
 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: bool to indicate whether the SW/FW lock was taken (always false
 *	    here; no locking is needed for this MAC)
 * @reg_val: Value we read from AUTOC
 *
 * The default case requires no protection so just to the register read.
 **/
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	*locked = false;
	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return 0;
}
2680
2681
2682
2683
2684
2685
2686
2687
/**
 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @reg_val: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    previous read (unused here; no locking is needed for this MAC)
 **/
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
	return 0;
}
2693
2694
2695
2696
2697
2698
2699
2700
/**
 * ixgbe_disable_rx_buff_generic - Stops the receive data path
 * @hw: pointer to hardware structure
 *
 * Stops the receive data path and waits for the HW to internally
 * empty the Rx security block.
 **/
s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL 40
	int i;
	int secrxreg;

	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			udelay(1000);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");

	return 0;

}
2726
2727
2728
2729
2730
2731
2732
/**
 * ixgbe_enable_rx_buff_generic - Enables the receive data path
 * @hw: pointer to hardware structure
 *
 * Enables the receive data path
 **/
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
{
	u32 secrxreg;

	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
2744
2745
2746
2747
2748
2749
2750
2751
2752s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2753{
2754 if (regval & IXGBE_RXCTRL_RXEN)
2755 hw->mac.ops.enable_rx(hw);
2756 else
2757 hw->mac.ops.disable_rx(hw);
2758
2759 return 0;
2760}
2761
2762
2763
2764
2765
2766
/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink (0-3)
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = false;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	bool locked = false;
	s32 ret_val;

	if (index > 3)
		return IXGBE_ERR_PARAM;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (!link_up) {
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
		if (ret_val)
			return ret_val;

		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;

		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
		if (ret_val)
			return ret_val;

		IXGBE_WRITE_FLUSH(hw);

		usleep_range(10000, 20000);
	}

	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
2809
2810
2811
2812
2813
2814
/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking (0-3)
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	bool locked = false;
	s32 ret_val;

	if (index > 3)
		return IXGBE_ERR_PARAM;

	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
	if (ret_val)
		return ret_val;

	/* Drop forced link-up and restart autoneg */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;

	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
	if (ret_val)
		return ret_val;

	/* Restore the LED to link-activity mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
/**
 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_offset: SAN MAC address offset
 *
 * This function will read the EEPROM location for the SAN MAC address
 * pointer, and returns the value at that location.  This is used in both
 * get and set mac_addr routines.
 **/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset)
{
	s32 ret_val;

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.
	 */
	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
				      san_mac_offset);
	if (ret_val)
		hw_err(hw, "eeprom read at offset %d failed\n",
		       IXGBE_SAN_MAC_ADDR_PTR);

	return ret_val;
}
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
/**
 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address
 *
 * Reads the SAN MAC address from the EEPROM, if it's available.  This is
 * per-port, so set_lan_id() must be called before reading the addresses.
 * set_lan_id() is called by identify_sfp(), but this cannot be relied
 * upon for non-SFP connections, so we must call it here.
 **/
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
	u16 san_mac_data, san_mac_offset;
	u8 i;
	s32 ret_val;

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.  If they're not, no point in calling set_lan_id() here.
	 */
	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
		/* No addresses available in this EEPROM */
		goto san_mac_addr_clr;

	/* make sure we know which port we need to program */
	hw->mac.ops.set_lan_id(hw);
	/* apply the port offset to the address offset */
	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
	for (i = 0; i < 3; i++) {
		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
					      &san_mac_data);
		if (ret_val) {
			hw_err(hw, "eeprom read at offset %d failed\n",
			       san_mac_offset);
			goto san_mac_addr_clr;
		}
		san_mac_addr[i * 2] = (u8)(san_mac_data);
		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
		san_mac_offset++;
	}
	return 0;

san_mac_addr_clr:
	/* No addresses available in this EEPROM.  It's not necessarily an
	 * error though, so just wipe the local address and return.
	 */
	for (i = 0; i < 6; i++)
		san_mac_addr[i] = 0xFF;
	return ret_val;
}
2924
2925
2926
2927
2928
2929
2930
2931
/**
 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
 * @hw: pointer to hardware structure
 *
 * Read PCIe configuration space, and get the MSI-X vector count from
 * the capabilities table.
 **/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
	u16 msix_count;
	u16 max_msix_count;
	u16 pcie_offset;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
		break;
	default:
		return 1;
	}

	msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
	if (ixgbe_removed(hw->hw_addr))
		msix_count = 0;
	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

	/* MSI-X count is zero-based in HW */
	msix_count++;

	if (msix_count > max_msix_count)
		msix_count = max_msix_count;

	return msix_count;
}
2968
2969
2970
2971
2972
2973
2974
/**
 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar
 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	if (ixgbe_removed(hw->hw_addr))
		return 0;

	if (!mpsar_lo && !mpsar_hi)
		return 0;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		mpsar_lo &= ~BIT(vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		mpsar_hi &= ~BIT(vmdq - 32);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 &&
	    rar != 0 && rar != hw->mac.san_mac_rar_index)
		hw->mac.ops.clear_rar(hw, rar);

	return 0;
}
3019
3020
3021
3022
3023
3024
3025
/**
 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq pool index
 **/
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* The pool bitmap spans two 32-bit registers (LO: 0-31, HI: 32-63) */
	if (vmdq < 32) {
		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
		mpsar |= BIT(vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
	} else {
		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
		mpsar |= BIT(vmdq - 32);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
	}
	return 0;
}
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
/**
 * ixgbe_set_vmdq_san_mac_generic - Associate VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @vmdq: VMDq pool index
 *
 * This function should only be involved in the IOV mode.
 * In IOV mode, Default pool is next pool after the number of
 * VFs advertized and not 0.
 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
 **/
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
	u32 rar = hw->mac.san_mac_rar_index;

	/* Replace (not OR) the pool bitmap: exactly one pool bit is set */
	if (vmdq < 32) {
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
	}

	return 0;
}
3073
3074
3075
3076
3077
3078s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3079{
3080 int i;
3081
3082 for (i = 0; i < 128; i++)
3083 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3084
3085 return 0;
3086}
3087
3088
3089
3090
3091
3092
3093
3094
3095
/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
 *		 vlanid not found
 *
 * return the VLVF index where this VLAN id should be placed
 *
 **/
static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;

	/* add VLAN enable bit for comparison */
	vlan |= IXGBE_VLVF_VIEN;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
	 */
	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN.  Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		hw_dbg(hw, "No space in VLVF.\n");

	return first_empty_slot ? : IXGBE_ERR_NO_SPACE;
}
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 * @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool vlvf_bypass)
{
	u32 regidx, vfta_delta, vfta, bits;
	s32 vlvf_index;

	if ((vlan > 4095) || (vind > 63))
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));

	/* vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update vfta using an XOR.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
		goto vfta_update;

	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));

	/* set the pool bit */
	bits |= BIT(vind % 32);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit */
	bits ^= BIT(vind % 32);

	if (!bits &&
	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (vfta_delta)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);

		/* disable VLVF and clear remaining bit from pool */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);

		return 0;
	}

	/* If there are still bits set in the VLVFB registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the VFTA entry bit.
	 * If the caller has requested that we clear the VFTA
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the VFTA VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * VFTA bit until all pools/VFs using that VLAN ID have also
	 * been cleared.  This will be indicated by "bits" being
	 * zero.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);

vfta_update:
	/* Update VFTA now that we are ready for traffic */
	if (vfta_delta)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);

	return 0;
}
3250
3251
3252
3253
3254
3255
3256
3257s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3258{
3259 u32 offset;
3260
3261 for (offset = 0; offset < hw->mac.vft_size; offset++)
3262 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3263
3264 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3265 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3266 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3267 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
3268 }
3269
3270 return 0;
3271}
3272
3273
3274
3275
3276
3277
3278
3279
3280static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
3281{
3282
3283 if (!hw->need_crosstalk_fix)
3284 return false;
3285
3286
3287 switch (hw->mac.ops.get_media_type(hw)) {
3288 case ixgbe_media_type_fiber:
3289 case ixgbe_media_type_fiber_qsfp:
3290 break;
3291 default:
3292 return false;
3293 }
3294
3295 return true;
3296}
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
/**
 *  ixgbe_check_mac_link_generic - Determine link and speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true when link is up
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		u32 sfp_cage_full;

		/* Cage-present indication lives on a different SDP pin
		 * depending on the MAC generation.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP0;
			break;
		default:
			/* sanity check - No SFP+ devices here */
			sfp_cage_full = false;
			break;
		}

		/* Empty cage means no module, so no link */
		if (!sfp_cage_full) {
			*link_up = false;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return 0;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		hw_dbg(hw, "LINKS changed from %08X to %08X\n",
		       links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* Poll for up to IXGBE_LINK_UP_TIME iterations, 100 ms apart */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	/* Decode speed; X550 and newer reuse the 82599 encoding and signal
	 * the non-standard 2.5G/5G rates via IXGBE_LINKS_SPEED_NON_STD.
	 */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (links_reg & IXGBE_LINKS_SPEED_NON_STD))
			*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		else
			*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (links_reg & IXGBE_LINKS_SPEED_NON_STD))
			*speed = IXGBE_LINK_SPEED_5GB_FULL;
		else
			*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* 10 Mb/s is only reported for the X550EM_A 1G copper parts */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
			*speed = IXGBE_LINK_SPEED_10_FULL;
		}
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return 0;
}
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3413 u16 *wwpn_prefix)
3414{
3415 u16 offset, caps;
3416 u16 alt_san_mac_blk_offset;
3417
3418
3419 *wwnn_prefix = 0xFFFF;
3420 *wwpn_prefix = 0xFFFF;
3421
3422
3423 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
3424 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
3425 goto wwn_prefix_err;
3426
3427 if ((alt_san_mac_blk_offset == 0) ||
3428 (alt_san_mac_blk_offset == 0xFFFF))
3429 return 0;
3430
3431
3432 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3433 if (hw->eeprom.ops.read(hw, offset, &caps))
3434 goto wwn_prefix_err;
3435 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3436 return 0;
3437
3438
3439 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3440 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
3441 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3442
3443 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3444 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
3445 goto wwn_prefix_err;
3446
3447 return 0;
3448
3449wwn_prefix_err:
3450 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3451 return 0;
3452}
3453
3454
3455
3456
3457
3458
3459
3460
3461void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3462{
3463 int vf_target_reg = vf >> 3;
3464 int vf_target_shift = vf % 8;
3465 u32 pfvfspoof;
3466
3467 if (hw->mac.type == ixgbe_mac_82598EB)
3468 return;
3469
3470 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3471 if (enable)
3472 pfvfspoof |= BIT(vf_target_shift);
3473 else
3474 pfvfspoof &= ~BIT(vf_target_shift);
3475 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3476}
3477
3478
3479
3480
3481
3482
3483
3484
3485void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3486{
3487 int vf_target_reg = vf >> 3;
3488 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3489 u32 pfvfspoof;
3490
3491 if (hw->mac.type == ixgbe_mac_82598EB)
3492 return;
3493
3494 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3495 if (enable)
3496 pfvfspoof |= BIT(vf_target_shift);
3497 else
3498 pfvfspoof &= ~BIT(vf_target_shift);
3499 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3500}
3501
3502
3503
3504
3505
3506
3507
3508
3509
/**
 *  ixgbe_get_device_caps_generic - Get additional device capabilities
 *  @hw: pointer to hardware structure
 *  @device_caps: the EEPROM word with the extra device capabilities
 *
 *  This function will read the EEPROM location for the device capabilities,
 *  and return the word through device_caps.
 *  NOTE(review): the EEPROM read status is ignored, so on a failed read
 *  *device_caps may be left unmodified — confirm callers tolerate this.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return 0;
}
3516
3517
3518
3519
3520
3521
3522
3523
/**
 *  ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 *  @hw: pointer to hardware structure
 *  @num_pb: number of packet buffers to allocate
 *  @headroom: reserve n KB of headroom
 *  @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
			     int num_pb,
			     u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number
	 * of packet buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case (PBA_STRATEGY_WEIGHTED):
		/* Weighted strategy: the first half of the packet buffers
		 * gets 5/8 of the packet buffer space (5/8 / (num_pb/2) each).
		 */
		rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* fall through - remaining buffers get an equal share */
	case (PBA_STRATEGY_EQUAL):
		/* Divide what is left evenly among the remaining buffers */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy.
	 * The threshold is expressed in KB units below the buffer size.
	 */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size */
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3592{
3593 u32 i;
3594 u8 sum = 0;
3595
3596 if (!buffer)
3597 return 0;
3598
3599 for (i = 0; i < length; i++)
3600 sum += buffer[i];
3601
3602 return (u8) (0 - sum);
3603}
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
/**
 *  ixgbe_hic_unlocked - Issue command to manageability block unlocked
 *  @hw: pointer to the HW structure
 *  @buffer: command to write and where the return status will be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion
 *
 *  Communicates with the manageability block. On success return 0
 *  else returns semaphore error when encountering an error acquiring
 *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 *  This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 *  by the caller.
 **/
s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(u32)) {
		hw_dbg(hw, "Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, cpu_to_le32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll for command completion; firmware clears IXGBE_HICR_C */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		usleep_range(1000, 2000);
	}

	/* Check command successful completion. */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;

	return 0;
}
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
/**
 *  ixgbe_host_interface_command - Issue command to manageability block
 *  @hw: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *           be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion
 *  @return_data: read and return data from the buffer (true) or not (false)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 *  Communicates with the manageability block.  On success return 0
 *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
				 u32 length, u32 timeout,
				 bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	union {
		struct ixgbe_hic_hdr hdr;
		u32 u32arr[1];
	} *bp = buffer;	/* view the caller's buffer as header or dwords */
	u16 buf_len, dword_len;
	s32 status;
	u32 bi;

	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&bp->u32arr[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = bp->hdr.buf_len;
	if (!buf_len)
		goto rel_out;

	if (length < round_up(buf_len, 4) + hdr_size) {
		hw_dbg(hw, "Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&bp->u32arr[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3771 u8 build, u8 sub, __always_unused u16 len,
3772 __always_unused const char *driver_ver)
3773{
3774 struct ixgbe_hic_drv_info fw_cmd;
3775 int i;
3776 s32 ret_val;
3777
3778 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
3779 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
3780 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
3781 fw_cmd.port_num = hw->bus.func;
3782 fw_cmd.ver_maj = maj;
3783 fw_cmd.ver_min = min;
3784 fw_cmd.ver_build = build;
3785 fw_cmd.ver_sub = sub;
3786 fw_cmd.hdr.checksum = 0;
3787 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
3788 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
3789 fw_cmd.pad = 0;
3790 fw_cmd.pad2 = 0;
3791
3792 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
3793 ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
3794 sizeof(fw_cmd),
3795 IXGBE_HI_COMMAND_TIMEOUT,
3796 true);
3797 if (ret_val != 0)
3798 continue;
3799
3800 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
3801 FW_CEM_RESP_STATUS_SUCCESS)
3802 ret_val = 0;
3803 else
3804 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3805
3806 break;
3807 }
3808
3809 return ret_val;
3810}
3811
3812
3813
3814
3815
3816
3817
3818
3819
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/* If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/* Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(3000, 6000);

	/* Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usleep_range(100, 200);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		/* bail out if the device has been surprise-removed */
		if (ixgbe_removed(hw->hw_addr))
			break;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			break;
	}

	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	udelay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
3870
/* Register addresses in the external EMC thermal sensor for each sensor's
 * temperature data, indexed by the sensor index read from the EEPROM ETS
 * block (0 = internal diode, 1-3 = external diodes).
 */
static const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
/* Matching register addresses for each sensor's thermal limit threshold. */
static const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
3893 u16 *ets_offset)
3894{
3895 s32 status;
3896
3897 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
3898 if (status)
3899 return status;
3900
3901 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
3902 return IXGBE_NOT_IMPLEMENTED;
3903
3904 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
3905 if (status)
3906 return status;
3907
3908 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
3909 return IXGBE_NOT_IMPLEMENTED;
3910
3911 return 0;
3912}
3913
3914
3915
3916
3917
3918
3919
/**
 *  ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
 *  @hw: pointer to hardware structure
 *
 *  Returns the thermal sensor data structure
 **/
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8  num_sensors;
	u8  i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return IXGBE_NOT_IMPLEMENTED;

	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		return status;

	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8  sensor_index;
		u8  sensor_location;

		/* each sensor record follows the config word in the EEPROM */
		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
					     &ets_sensor);
		if (status)
			return status;

		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);

		/* location 0 means the sensor record is unused.
		 * NOTE(review): sensor_index is not range-checked against the
		 * 4-entry ixgbe_emc_temp_data table; this relies on
		 * well-formed EEPROM data - verify.
		 */
		if (sensor_location != 0) {
			status = hw->phy.ops.read_i2c_byte(hw,
					ixgbe_emc_temp_data[sensor_index],
					IXGBE_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
			if (status)
				return status;
		}
	}

	return 0;
}
3968
3969
3970
3971
3972
3973
3974
3975
/**
 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Inits the thermal sensor thresholds according to the NVM map
 * and save off the threshold and location values into mac.thermal_sensor_data
 **/
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8  low_thresh_delta;
	u8  num_sensors;
	u8  therm_limit;
	u8  i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return IXGBE_NOT_IMPLEMENTED;

	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		return status;

	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
			     IXGBE_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8  sensor_index;
		u8  sensor_location;

		/* a bad record is skipped rather than aborting the init */
		if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
			hw_err(hw, "eeprom read at offset %d failed\n",
			       ets_offset + 1 + i);
			continue;
		}
		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

		/* NOTE(review): sensor_index is not range-checked against the
		 * 4-entry ixgbe_emc_therm_limit table; relies on well-formed
		 * EEPROM data - verify.
		 */
		hw->phy.ops.write_i2c_byte(hw,
			ixgbe_emc_therm_limit[sensor_index],
			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);

		/* location 0 means the sensor record is unused */
		if (sensor_location == 0)
			continue;

		data->sensor[i].location = sensor_location;
		data->sensor[i].caution_thresh = therm_limit;
		data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
	}

	return 0;
}
4033
4034void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
4035{
4036 u32 rxctrl;
4037
4038 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4039 if (rxctrl & IXGBE_RXCTRL_RXEN) {
4040 if (hw->mac.type != ixgbe_mac_82598EB) {
4041 u32 pfdtxgswc;
4042
4043 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4044 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
4045 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
4046 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4047 hw->mac.set_lben = true;
4048 } else {
4049 hw->mac.set_lben = false;
4050 }
4051 }
4052 rxctrl &= ~IXGBE_RXCTRL_RXEN;
4053 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
4054 }
4055}
4056
4057void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
4058{
4059 u32 rxctrl;
4060
4061 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4062 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
4063
4064 if (hw->mac.type != ixgbe_mac_82598EB) {
4065 if (hw->mac.set_lben) {
4066 u32 pfdtxgswc;
4067
4068 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4069 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
4070 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4071 hw->mac.set_lben = false;
4072 }
4073 }
4074}
4075
4076
4077
4078
4079bool ixgbe_mng_present(struct ixgbe_hw *hw)
4080{
4081 u32 fwsm;
4082
4083 if (hw->mac.type < ixgbe_mac_82599EB)
4084 return false;
4085
4086 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
4087 fwsm &= IXGBE_FWSM_MODE_MASK;
4088 return fwsm == IXGBE_FWSM_FW_MODE_PT;
4089}
4090
4091
4092
4093
4094
4095
4096
4097
4098
/**
 *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 *  Set the link speed in the MAC and/or PHY register and restarts link.
 *  Tries 10G first, falls back to 1G, and if neither links retries at the
 *  highest speed that was attempted.
 */
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = 0;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = false;

	/* Mask off requested but non-supported speeds */
	status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
	if (status)
		return status;

	speed &= link_speed;

	/* Try each speed in turn, highest first.
	 * 10G attempt:
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			hw->mac.ops.set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			hw_dbg(hw, "Unexpected media type\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msleep(40);

		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg_wait_to_complete);
		if (status)
			return status;

		/* Flap the Tx laser if it has not already been done */
		if (hw->mac.ops.flap_tx_laser)
			hw->mac.ops.flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msleep(100);

			/* If we have link, just jump out */
			status = hw->mac.ops.check_link(hw, &link_speed,
							&link_up, false);
			if (status)
				return status;

			if (link_up)
				goto out;
		}
	}

	/* 1G attempt: */
	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			hw->mac.ops.set_rate_select_speed(hw,
						     IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			hw_dbg(hw, "Unexpected media type\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msleep(40);

		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg_wait_to_complete);
		if (status)
			return status;

		/* Flap the Tx laser if it has not already been done */
		if (hw->mac.ops.flap_tx_laser)
			hw->mac.ops.flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msleep(100);

		/* If we have link, just jump out */
		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
						false);
		if (status)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						       highest_link_speed,
						       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
4236
4237
4238
4239
4240
4241
4242
4243
/**
 *  ixgbe_set_soft_rate_select_speed - Set module link speed
 *  @hw: pointer to hardware structure
 *  @speed: link speed to set
 *
 *  Set module link speed via the soft rate select (RS0/RS1) bytes in the
 *  SFF-8472 module EEPROM.  Errors are logged and the function returns
 *  without reporting them to the caller (best effort).
 */
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	s32 status;
	u8 rs, eeprom_data;

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		/* one bit mask same as setting on */
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		hw_dbg(hw, "Invalid fixed module speed\n");
		return;
	}

	/* Set RS0: read-modify-write the rate select bits */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
		return;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
		return;
	}

	/* Set RS1: same read-modify-write on the extended status byte */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
		return;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
		return;
	}
}
4301