1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include <linux/sched.h>
31#include <linux/netdevice.h>
32
33#include "ixgbe.h"
34#include "ixgbe_common.h"
35#include "ixgbe_phy.h"
36
/* Forward declarations for the serial (bit-bang) EEPROM access helpers
 * used by the generic EEPROM read/write routines below.
 */
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);

/* Other file-local helpers. */
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data);
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset);
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
58
59
60
61
62
63
64
65
66
67
68s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
69{
70
71 switch (hw->device_id) {
72 case IXGBE_DEV_ID_X540T:
73 case IXGBE_DEV_ID_X540T1:
74 case IXGBE_DEV_ID_82599_T3_LOM:
75 return 0;
76 default:
77 return IXGBE_ERR_FC_NOT_SUPPORTED;
78 }
79}
80
81
82
83
84
85
86
87static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
88{
89 s32 ret_val = 0;
90 u32 reg = 0, reg_bp = 0;
91 u16 reg_cu = 0;
92 bool got_lock = false;
93
94
95
96
97
98 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
99 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
100 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
101 goto out;
102 }
103
104
105
106
107
108 if (hw->fc.requested_mode == ixgbe_fc_default)
109 hw->fc.requested_mode = ixgbe_fc_full;
110
111
112
113
114
115
116 switch (hw->phy.media_type) {
117 case ixgbe_media_type_fiber:
118 case ixgbe_media_type_backplane:
119 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
120 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
121 break;
122 case ixgbe_media_type_copper:
123 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
124 MDIO_MMD_AN, ®_cu);
125 break;
126 default:
127 break;
128 }
129
130
131
132
133
134
135
136
137
138
139
140 switch (hw->fc.requested_mode) {
141 case ixgbe_fc_none:
142
143 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
144 if (hw->phy.media_type == ixgbe_media_type_backplane)
145 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
146 IXGBE_AUTOC_ASM_PAUSE);
147 else if (hw->phy.media_type == ixgbe_media_type_copper)
148 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
149 break;
150 case ixgbe_fc_tx_pause:
151
152
153
154
155 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
156 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
157 if (hw->phy.media_type == ixgbe_media_type_backplane) {
158 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
159 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
160 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
161 reg_cu |= IXGBE_TAF_ASM_PAUSE;
162 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
163 }
164 break;
165 case ixgbe_fc_rx_pause:
166
167
168
169
170
171
172
173
174
175 case ixgbe_fc_full:
176
177 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
178 if (hw->phy.media_type == ixgbe_media_type_backplane)
179 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
180 IXGBE_AUTOC_ASM_PAUSE;
181 else if (hw->phy.media_type == ixgbe_media_type_copper)
182 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
183 break;
184 default:
185 hw_dbg(hw, "Flow control param set incorrectly\n");
186 ret_val = IXGBE_ERR_CONFIG;
187 goto out;
188 break;
189 }
190
191 if (hw->mac.type != ixgbe_mac_X540) {
192
193
194
195
196 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
197 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
198
199
200 if (hw->fc.strict_ieee)
201 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
202
203 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
204 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
205 }
206
207
208
209
210
211
212 if (hw->phy.media_type == ixgbe_media_type_backplane) {
213
214
215
216
217 if ((hw->mac.type == ixgbe_mac_82599EB) &&
218 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
219 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
220 IXGBE_GSSR_MAC_CSR_SM);
221 if (ret_val)
222 goto out;
223
224 got_lock = true;
225 }
226
227 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
228
229 if (hw->mac.type == ixgbe_mac_82599EB)
230 ixgbe_reset_pipeline_82599(hw);
231
232 if (got_lock)
233 hw->mac.ops.release_swfw_sync(hw,
234 IXGBE_GSSR_MAC_CSR_SM);
235
236 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
237 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
238 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
239 MDIO_MMD_AN, reg_cu);
240 }
241
242 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
243out:
244 return ret_val;
245}
246
247
248
249
250
251
252
253
254
255
/**
 *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Startup sequence run after reset: determine media type, identify the
 *  PHY, clear the VLAN filter table and statistics counters, disable the
 *  relaxed-ordering "no snoop", and set up flow control.  Always returns 0.
 */
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	u32 ctrl_ext;

	/* Determine media type (fiber/backplane/copper) first; later steps
	 * depend on it.
	 */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* Identify the attached PHY. */
	hw->phy.ops.identify(hw);

	/* Clear the VLAN filter table. */
	hw->mac.ops.clear_vfta(hw);

	/* Clear the statistics registers (read-to-clear). */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set the No Snoop Disable bit in CTRL_EXT. */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Program flow control advertisement; return value deliberately
	 * ignored here (best-effort during start).
	 */
	ixgbe_setup_fc(hw);

	/* The adapter is now running. */
	hw->adapter_stopped = false;

	return 0;
}
286
287
288
289
290
291
292
293
294
295
296
/**
 *  ixgbe_start_hw_gen2 - Init sequence common to gen2 (82599/X540) parts
 *  @hw: pointer to hardware structure
 *
 *  Clears the per-queue rate limiters and disables relaxed ordering for
 *  Tx descriptor writeback and Rx data/header writes.  Always returns 0.
 */
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	/* Zero the per-Tx-queue rate-limiter (RTTBCNRC), selecting each
	 * queue via RTTDQSEL before the write.
	 */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering for Tx descriptor writeback. */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	/* Disable relaxed ordering for Rx data and header writes. */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	return 0;
}
325
326
327
328
329
330
331
332
333
334
335
336s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
337{
338 s32 status;
339
340
341 status = hw->mac.ops.reset_hw(hw);
342
343 if (status == 0) {
344
345 status = hw->mac.ops.start_hw(hw);
346 }
347
348 return status;
349}
350
351
352
353
354
355
356
357
/**
 *  ixgbe_clear_hw_cntrs_generic - Clear hardware statistics counters
 *  @hw: pointer to hardware structure
 *
 *  The statistics registers are read-to-clear; each IXGBE_READ_REG below
 *  is issued purely for that side effect and the value is discarded.
 *  Register sets differ between 82598 and 82599/X540 generations, hence
 *  the mac.type branches.  Always returns 0.
 */
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	/* Error counters. */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Link-level flow-control counters; Rx pause counters moved on
	 * 82599 and later.
	 */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority flow-control counters (8 traffic classes). */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));

	/* Packet-size-bucketed Rx counters. */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* Good packet/octet counters. */
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);

	/* Packet-size-bucketed Tx counters. */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Per-queue counters (16 queues). */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540 keeps extra PHY-side error counters, read via MDIO; identify
	 * the PHY first if that has not happened yet.  'i' is reused as a
	 * scratch read target here.
	 */
	if (hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			hw->phy.ops.identify(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
	}

	return 0;
}
458
459
460
461
462
463
464
465
466
/**
 *  ixgbe_read_pba_string_generic - Read PBA string from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: caller-provided output buffer for the PBA string
 *  @pba_num_size: size of @pba_num in bytes
 *
 *  Reads the Printed Board Assembly number.  Two NVM layouts are handled:
 *  the legacy layout where the PBA is packed as hex nibbles in two words,
 *  and the newer layout where word 0x15 holds a guard value and word 0x16
 *  points to a length-prefixed string section.
 *
 *  Returns 0 on success or an IXGBE_ERR_* code (invalid argument, NVM read
 *  failure, bad section, or too-small buffer).
 */
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	if (pba_num == NULL) {
		hw_dbg(hw, "PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	/* If the first word is not the guard value, the PBA is stored in
	 * the legacy packed-hex format: decode the two words directly.
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		hw_dbg(hw, "NVM PBA number is not stored as string\n");

		/* Legacy output is always 10 chars + NUL. */
		if (pba_num_size < 11) {
			hw_dbg(hw, "PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* Extract hex nibbles; note index 7 gets 0 and the dash
		 * sits at index 6 (fixed legacy layout).
		 */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* Terminate the string. */
		pba_num[10] = '\0';

		/* Convert raw nibble values to ASCII hex digits in place. */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return 0;
	}

	/* String format: pba_ptr points to a section whose first word is
	 * the section length in words (including that length word).
	 */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		hw_dbg(hw, "NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* Each data word yields 2 chars; the final word's low byte is
	 * replaced by the NUL, hence (length * 2) - 1 bytes needed.
	 */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		hw_dbg(hw, "PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* Skip the length word and adjust the count accordingly. */
	pba_ptr++;
	length--;

	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		/* Words are stored big-endian: high byte first. */
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return 0;
}
567
568
569
570
571
572
573
574
575
576
577s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
578{
579 u32 rar_high;
580 u32 rar_low;
581 u16 i;
582
583 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
584 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
585
586 for (i = 0; i < 4; i++)
587 mac_addr[i] = (u8)(rar_low >> (i*8));
588
589 for (i = 0; i < 2; i++)
590 mac_addr[i+4] = (u8)(rar_high >> (i*8));
591
592 return 0;
593}
594
595enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
596{
597 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
598 case IXGBE_PCI_LINK_WIDTH_1:
599 return ixgbe_bus_width_pcie_x1;
600 case IXGBE_PCI_LINK_WIDTH_2:
601 return ixgbe_bus_width_pcie_x2;
602 case IXGBE_PCI_LINK_WIDTH_4:
603 return ixgbe_bus_width_pcie_x4;
604 case IXGBE_PCI_LINK_WIDTH_8:
605 return ixgbe_bus_width_pcie_x8;
606 default:
607 return ixgbe_bus_width_unknown;
608 }
609}
610
611enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
612{
613 switch (link_status & IXGBE_PCI_LINK_SPEED) {
614 case IXGBE_PCI_LINK_SPEED_2500:
615 return ixgbe_bus_speed_2500;
616 case IXGBE_PCI_LINK_SPEED_5000:
617 return ixgbe_bus_speed_5000;
618 case IXGBE_PCI_LINK_SPEED_8000:
619 return ixgbe_bus_speed_8000;
620 default:
621 return ixgbe_bus_speed_unknown;
622 }
623}
624
625
626
627
628
629
630
/**
 *  ixgbe_get_bus_info_generic - Populate hw->bus from PCI config space
 *  @hw: pointer to hardware structure
 *
 *  Reads the PCIe Link Status register and records the bus type, width and
 *  speed, then asks the MAC layer to set the LAN ID.  Always returns 0.
 */
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	struct ixgbe_mac_info *mac = &hw->mac;
	u16 link_status;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Read the negotiated link width/speed from PCI config space. */
	pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
			     &link_status);

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	mac->ops.set_lan_id(hw);

	return 0;
}
650
651
652
653
654
655
656
657
/**
 *  ixgbe_set_lan_id_multi_port_pcie - Determine the LAN/PCI function ID
 *  @hw: pointer to hardware structure
 *
 *  Reads the LAN ID from the STATUS register and stores it in hw->bus.
 *  If the force-LAN-ID-from-EEPROM bit (FACTPS.LFS) is set, the function
 *  number is flipped so it matches the port swap.
 */
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = bus->func;

	/* Honor a port swap requested via NVM (FACTPS.LFS). */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;
}
672
673
674
675
676
677
678
679
680
681
/**
 *  ixgbe_stop_adapter_generic - Stop Tx/Rx units and disable interrupts
 *  @hw: pointer to hardware structure
 *
 *  Quiesces the adapter: marks it stopped, disables Rx, masks and clears
 *  interrupts, flushes every Tx and Rx queue, then disables PCIe master
 *  access.  Returns the result of ixgbe_disable_pcie_master().
 */
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	/* Mark stopped first so other code paths see the adapter as down
	 * while the hardware is being quiesced.
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit. */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);

	/* Mask all interrupts... */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* ...and clear any that are already pending (read-to-clear). */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Flush all Tx queues. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable and flush all Rx queues. */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* Flush the writes and give the hardware time to settle. */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 2000);

	/* Finally stop the device from issuing PCIe master requests. */
	return ixgbe_disable_pcie_master(hw);
}
724
725
726
727
728
729
730s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
731{
732 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
733
734
735 led_reg &= ~IXGBE_LED_MODE_MASK(index);
736 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
737 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
738 IXGBE_WRITE_FLUSH(hw);
739
740 return 0;
741}
742
743
744
745
746
747
748s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
749{
750 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
751
752
753 led_reg &= ~IXGBE_LED_MODE_MASK(index);
754 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
755 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
756 IXGBE_WRITE_FLUSH(hw);
757
758 return 0;
759}
760
761
762
763
764
765
766
767
/**
 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM parameters
 *  @hw: pointer to hardware structure
 *
 *  First-call initialization of hw->eeprom: detects presence and size of
 *  an attached SPI EEPROM from the EEC register.  Subsequent calls are
 *  no-ops (guarded by the 'uninitialized' type check).  Always returns 0.
 */
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;

		/* Delay (in ms) to wait after releasing the semaphore. */
		eeprom->semaphore_delay = 10;
		/* 0 = page size not yet detected (see
		 * ixgbe_detect_eeprom_page_size_generic).
		 */
		eeprom->word_page_size = 0;

		/* EEC reports whether an EEPROM is present and, if so,
		 * encodes its size.
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/* Size field is an exponent: word count is
			 * 2^(field + IXGBE_EEPROM_WORD_SIZE_SHIFT).
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
						  IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* 16-bit vs 8-bit SPI addressing. */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
		       "%d\n", eeprom->type, eeprom->word_size,
		       eeprom->address_bits);
	}

	return 0;
}
811
812
813
814
815
816
817
818
819
820
/**
 *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write words to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: word offset within the EEPROM
 *  @words: number of words to write
 *  @data: words to write
 *
 *  Writes a buffer of 16-bit words via the bit-bang interface, splitting
 *  the transfer into chunks of at most IXGBE_EEPROM_RD_BUFFER_MAX_COUNT
 *  words.  If the device's write page size is unknown and the transfer is
 *  large, the page size is probed first.
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT for a zero word
 *  count, or IXGBE_ERR_EEPROM on range/access errors.
 */
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = 0;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Probe the device's write page size if not yet known and the
	 * request is big enough to span pages.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/* Write in bounded chunks; the semaphore is taken and released per
	 * chunk inside ixgbe_write_eeprom_buffer_bit_bang().
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != 0)
			break;
	}

out:
	return status;
}
865
866
867
868
869
870
871
872
873
874
875
/**
 *  ixgbe_write_eeprom_buffer_bit_bang - Bit-bang words into the SPI EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: word offset to start writing at
 *  @words: number of words to write
 *  @data: words to write
 *
 *  Low-level write path: acquires the EEPROM, then for each word issues
 *  the SPI write-enable opcode, the (possibly A8-modified) write opcode
 *  and byte address, and clocks out data — continuing within a device
 *  page when the page size is known.  The exact opcode/address/data
 *  ordering is mandated by the SPI EEPROM protocol; do not reorder.
 *
 *  Returns 0 on success or IXGBE_ERR_EEPROM/IXGBE_ERR_SWFW_SYNC on
 *  acquisition or readiness failure.
 */
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	/* Take ownership of the EEPROM interface. */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Enable writing before each write burst. */
			ixgbe_shift_out_eeprom_bits(hw,
						    IXGBE_EEPROM_WREN_OPCODE_SPI,
						    IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/* With 8-bit addressing, byte addresses >= 256
			 * (word offset >= 128) need the A8 opcode bit.
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send write opcode and byte address (word * 2). */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Stream words, byte-swapped to the device's big-
			 * endian order, until the device page boundary.
			 */
			do {
				word = data[i];
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* Unknown page size: one word per burst. */
				if (page_size == 0)
					break;

				/* Stop at the last word of the page. */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* Wait out the device's internal write cycle. */
			usleep_range(10000, 20000);
		}

		ixgbe_release_eeprom(hw);
	}

	return status;
}
946
947
948
949
950
951
952
953
954
955
956s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
957{
958 s32 status;
959
960 hw->eeprom.ops.init_params(hw);
961
962 if (offset >= hw->eeprom.word_size) {
963 status = IXGBE_ERR_EEPROM;
964 goto out;
965 }
966
967 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
968
969out:
970 return status;
971}
972
973
974
975
976
977
978
979
980
981
/**
 *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read words from EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: word offset to start reading at
 *  @words: number of words to read
 *  @data: output buffer for the words read
 *
 *  Reads a buffer of 16-bit words via the bit-bang interface, splitting
 *  the transfer into chunks of at most IXGBE_EEPROM_RD_BUFFER_MAX_COUNT
 *  words so the EEPROM semaphore is not held for too long.
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT for a zero word
 *  count, or IXGBE_ERR_EEPROM on range/access errors.
 */
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status = 0;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Read in bounded chunks; the semaphore is taken and released per
	 * chunk inside ixgbe_read_eeprom_buffer_bit_bang().
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status != 0)
			break;
	}

out:
	return status;
}
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
/**
 *  ixgbe_read_eeprom_buffer_bit_bang - Bit-bang words out of the SPI EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: word offset to start reading at
 *  @words: number of words to read
 *  @data: output buffer for the words read
 *
 *  Low-level read path: acquires the EEPROM, then for each word sends the
 *  (possibly A8-modified) read opcode and byte address and clocks 16 bits
 *  back in, byte-swapping from the device's big-endian word order.
 *
 *  Returns 0 on success or IXGBE_ERR_EEPROM/IXGBE_ERR_SWFW_SYNC on
 *  acquisition or readiness failure.
 */
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	/* Take ownership of the EEPROM interface. */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* With 8-bit addressing, byte addresses >= 256
			 * (word offset >= 128) need the A8 opcode bit.
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send read opcode and byte address (word * 2). */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Clock the word in and swap to host byte order. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* Release ownership of the EEPROM interface. */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1085 u16 *data)
1086{
1087 s32 status;
1088
1089 hw->eeprom.ops.init_params(hw);
1090
1091 if (offset >= hw->eeprom.word_size) {
1092 status = IXGBE_ERR_EEPROM;
1093 goto out;
1094 }
1095
1096 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1097
1098out:
1099 return status;
1100}
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
/**
 *  ixgbe_read_eerd_buffer_generic - Read words from EEPROM using EERD
 *  @hw: pointer to hardware structure
 *  @offset: word offset to start reading at
 *  @words: number of words to read
 *  @data: output buffer for the words read
 *
 *  Reads words through the EERD (EEPROM Read) register rather than the
 *  bit-bang interface: for each word, program the address with the START
 *  bit set, poll for completion, then read the data field.
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT for a zero word
 *  count, or IXGBE_ERR_EEPROM on range errors or poll timeout.
 */
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = 0;
	u32 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Address plus START bit kicks off the hardware read. */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == 0) {
			/* Data lives in the upper bits of EERD. */
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			hw_dbg(hw, "Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
/**
 *  ixgbe_detect_eeprom_page_size_generic - Probe the EEPROM write page size
 *  @hw: pointer to hardware structure
 *  @offset: scratch word offset used for the probe write
 *
 *  Writes a known ascending pattern of IXGBE_EEPROM_PAGE_SIZE_MAX words,
 *  then reads back the first word: when the device's page wraps during
 *  the burst, the first word is overwritten by a later value, and
 *  (MAX - data[0]) recovers the true page size.  The result is stored in
 *  hw->eeprom.word_page_size.  Note: this destroys the EEPROM contents
 *  at @offset, which is why a scratch offset must be used.
 *
 *  Returns 0 on success or the error from the underlying write/read.
 */
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = 0;
	u16 i;

	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Temporarily assume the maximum page size for the probe write,
	 * then reset to "unknown" until the real size is computed.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX,
						    data);
	hw->eeprom.word_page_size = 0;
	if (status != 0)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != 0)
		goto out;

	/* If the burst wrapped, data[0] holds the value written at the
	 * wrap point; the difference from MAX is the page size.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	hw_dbg(hw, "Detected EEPROM page size = %d words.",
	       hw->eeprom.word_page_size);
out:
	return status;
}
1190
1191
1192
1193
1194
1195
1196
1197
1198
/**
 *  ixgbe_read_eerd_generic - Read a single EEPROM word using EERD
 *  @hw: pointer to hardware structure
 *  @offset: word offset to read
 *  @data: output location for the word read
 *
 *  One-word convenience wrapper around ixgbe_read_eerd_buffer_generic().
 */
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
/**
 *  ixgbe_write_eewr_buffer_generic - Write words to EEPROM using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: word offset to start writing at
 *  @words: number of words to write
 *  @data: words to write
 *
 *  Writes words through the EEWR (EEPROM Write) register: for each word,
 *  wait for the interface to be idle, program address + data + START,
 *  then wait for completion before moving on.
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT for a zero word
 *  count, or IXGBE_ERR_EEPROM on range errors or poll timeout.
 */
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	u32 eewr;
	s32 status = 0;
	u16 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		/* Make sure the previous operation has finished. */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != 0) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			goto out;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		/* Wait for this write to complete. */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != 0) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			goto out;
		}
	}

out:
	return status;
}
1255
1256
1257
1258
1259
1260
1261
1262
1263
/**
 *  ixgbe_write_eewr_generic - Write a single EEPROM word using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: word offset to write
 *  @data: word to write
 *
 *  One-word convenience wrapper around ixgbe_write_eewr_buffer_generic().
 */
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1278{
1279 u32 i;
1280 u32 reg;
1281 s32 status = IXGBE_ERR_EEPROM;
1282
1283 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1284 if (ee_reg == IXGBE_NVM_POLL_READ)
1285 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1286 else
1287 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1288
1289 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1290 status = 0;
1291 break;
1292 }
1293 udelay(5);
1294 }
1295 return status;
1296}
1297
1298
1299
1300
1301
1302
1303
1304
/**
 *  ixgbe_acquire_eeprom - Acquire EEPROM for bit-bang access
 *  @hw: pointer to hardware structure
 *
 *  Two-stage acquisition: first the SW/FW EEPROM semaphore, then the
 *  hardware request/grant handshake via EEC.REQ/EEC.GNT.  On grant, the
 *  chip-select and clock lines are driven low to put the SPI bus in a
 *  known state.  On grant timeout, the request is withdrawn and the
 *  semaphore released.
 *
 *  Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore could not
 *  be taken, or IXGBE_ERR_EEPROM on grant timeout.
 */
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u32 eec;
	u32 i;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == 0) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request access to the EEPROM interface. */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		/* Poll for the hardware grant. */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			udelay(5);
		}

		/* No grant: withdraw the request and release the semaphore. */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			hw_dbg(hw, "Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Granted: drive CS and SK low to start in a known state. */
		if (status == 0) {
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			IXGBE_WRITE_FLUSH(hw);
			udelay(1);
		}
	}
	return status;
}
1349
1350
1351
1352
1353
1354
1355
/**
 *  ixgbe_get_eeprom_semaphore - Take the driver/firmware EEPROM semaphore
 *  @hw: pointer to hardware structure
 *
 *  Acquires the two-level semaphore in SWSM: SMBI (between device
 *  drivers/functions) then SWESMBI (between software and firmware).
 *  If SMBI is stuck, it is forcibly released once and retried, on the
 *  assumption a previous owner died without releasing it.
 *
 *  Returns 0 on success or IXGBE_ERR_EEPROM on timeout at either level.
 */
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Stage 1: poll for SMBI to be clear (reading SWSM with SMBI clear
	 * atomically sets it, granting us the inter-driver semaphore).
	 */
	for (i = 0; i < timeout; i++) {
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = 0;
			break;
		}
		udelay(50);
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
		       "not granted.\n");
		/* SMBI appears stuck — likely an earlier owner failed to
		 * release it.  Force-release both bits and try once more.
		 */
		ixgbe_release_eeprom_semaphore(hw);

		udelay(50);

		/* One more attempt after the forced release. */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = 0;
	}

	/* Stage 2: with SMBI held, claim SWESMBI against firmware. */
	if (status == 0) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Request the software/firmware semaphore. */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/* Read back: the bit only sticks if firmware is not
			 * holding it.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			udelay(50);
		}

		/* Could not get SWESMBI: give SMBI back too. */
		if (i >= timeout) {
			hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
			       "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		hw_dbg(hw, "Software semaphore SMBI between device drivers "
		       "not granted.\n");
	}

	return status;
}
1436
1437
1438
1439
1440
1441
1442
/**
 *  ixgbe_release_eeprom_semaphore - Release the EEPROM semaphore
 *  @hw: pointer to hardware structure
 *
 *  Clears both semaphore bits in SWSM (SWESMBI and SMBI), releasing both
 *  the software/firmware and inter-driver semaphores.
 */
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores in a single write. */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	IXGBE_WRITE_FLUSH(hw);
}
1454
1455
1456
1457
1458
/**
 *  ixgbe_ready_eeprom - Poll the SPI EEPROM status register until ready
 *  @hw: pointer to hardware structure
 *
 *  Repeatedly issues the SPI Read-Status-Register opcode and checks the
 *  busy (RDY) bit, waiting out any in-progress internal write cycle.
 *  Each retry costs ~5 us, hence the loop steps by 5 against the total
 *  retry budget IXGBE_EEPROM_MAX_RETRY_SPI (in microseconds).
 *
 *  Returns 0 when the device is ready, IXGBE_ERR_EEPROM on timeout.
 */
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 i;
	u8 spi_stat_reg;

	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		udelay(5);
		/* Deassert/reassert CS between status polls. */
		ixgbe_standby_eeprom(hw);
	}

	/* Still busy after the full retry budget: give up. */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		hw_dbg(hw, "SPI EEPROM Status error\n");
		status = IXGBE_ERR_EEPROM;
	}

	return status;
}
1493
1494
1495
1496
1497
/**
 *  ixgbe_standby_eeprom - Pulse chip select to return EEPROM to standby
 *  @hw: pointer to hardware structure
 *
 *  Toggles the SPI chip-select line high then low (with 1 us holds),
 *  which terminates the current SPI command and leaves the device ready
 *  for the next opcode.
 */
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Deselect the EEPROM (CS high). */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	/* Reselect it (CS low). */
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
1514
1515
1516
1517
1518
1519
1520
/**
 *  ixgbe_shift_out_eeprom_bits - Clock bits out to the SPI EEPROM
 *  @hw: pointer to hardware structure
 *  @data: value to shift out, MSB first
 *  @count: number of bits of @data to send (low @count bits used)
 *
 *  Bit-bangs @count bits onto the DI line, most-significant first,
 *  raising and lowering the clock around each bit.  DI is left low on
 *  exit.
 */
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Start with the mask on the most significant bit to send. */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/* Drive DI to the current bit's value. */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
		IXGBE_WRITE_FLUSH(hw);

		udelay(1);

		/* One full clock pulse latches the bit into the device. */
		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/* Advance to the next (less significant) bit. */
		mask = mask >> 1;
	}

	/* Leave the data line low when done. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
}
1569
1570
1571
1572
1573
/**
 *  ixgbe_shift_in_eeprom_bits - Clock bits in from the SPI EEPROM
 *  @hw: pointer to hardware structure
 *  @count: number of bits to read (up to 16)
 *
 *  Bit-bangs @count bits off the DO line, most-significant first: raise
 *  the clock, sample DO, lower the clock.  Returns the assembled value.
 */
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/* Clear the data lines before clocking anything in. */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		/* Make room for the next bit, then clock it out of the
		 * device and sample DO.
		 */
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
1606
1607
1608
1609
1610
1611
1612static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1613{
1614
1615
1616
1617
1618 *eec = *eec | IXGBE_EEC_SK;
1619 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1620 IXGBE_WRITE_FLUSH(hw);
1621 udelay(1);
1622}
1623
1624
1625
1626
1627
1628
1629static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1630{
1631
1632
1633
1634
1635 *eec = *eec & ~IXGBE_EEC_SK;
1636 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1637 IXGBE_WRITE_FLUSH(hw);
1638 udelay(1);
1639}
1640
1641
1642
1643
1644
/**
 *  ixgbe_release_eeprom - Release EEPROM after bit-bang access
 *  @hw: pointer to hardware structure
 *
 *  Deselects the SPI device (CS high, SK low), drops the hardware
 *  request bit, releases the SW/FW semaphore, and then sleeps for the
 *  configured semaphore delay so other agents (e.g. firmware) get a
 *  chance to grab the EEPROM.
 */
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Deselect the device and stop the clock. */
	eec |= IXGBE_EEC_CS;
	eec &= ~IXGBE_EEC_SK;

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	udelay(1);

	/* Withdraw the hardware access request. */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Back off so other semaphore users can get in;
	 * semaphore_delay is in milliseconds.
	 */
	usleep_range(hw->eeprom.semaphore_delay * 1000,
		     hw->eeprom.semaphore_delay * 2000);
}
1672
/**
 *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 *  @hw: pointer to hardware structure
 *
 *  Sums EEPROM words 0 through IXGBE_EEPROM_CHECKSUM-1 plus all words in the
 *  pointed-to regions between IXGBE_PCIE_ANALOG_PTR and IXGBE_FW_PTR, then
 *  returns IXGBE_EEPROM_SUM minus that total.
 **/
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		/* NOTE(review): read failures in this region are silently
		 * ignored and stale values may be summed — verify intent. */
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid */
		if (pointer != 0xFFFF && pointer != 0) {
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}
1716
/**
 *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 *  @hw: pointer to hardware structure
 *  @checksum_val: calculated checksum
 *
 *  Performs checksum calculation and validates the EEPROM checksum.  If the
 *  caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == 0) {
		checksum = hw->eeprom.ops.calc_checksum(hw);

		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);

		/*
		 * Verify read checksum from EEPROM is the same as
		 * calculated checksum
		 */
		if (read_checksum != checksum)
			status = IXGBE_ERR_EEPROM_CHECKSUM;

		/* If the user cares, return the calculated checksum */
		if (checksum_val)
			*checksum_val = checksum;
	} else {
		hw_dbg(hw, "EEPROM read failed\n");
	}

	return status;
}
1760
/**
 *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 *  @hw: pointer to hardware structure
 *
 *  Recomputes the checksum and writes it to the checksum word in the EEPROM.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == 0) {
		checksum = hw->eeprom.ops.calc_checksum(hw);
		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
					      checksum);
	} else {
		hw_dbg(hw, "EEPROM read failed\n");
	}

	return status;
}
1787
/**
 *  ixgbe_set_rar_generic - Set Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: VMDq "set" or "pool" index
 *  @enable_addr: set flag that address is active
 *
 *  Puts an ethernet address into a receive address register.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return 0;
}
1838
/**
 *  ixgbe_clear_rar_generic - Remove Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *
 *  Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}
1873
/**
 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 *  @hw: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(hw->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
	} else {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	if (hw->mac.ops.init_uta_tables)
		hw->mac.ops.init_uta_tables(hw);

	return 0;
}
1931
/**
 *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
 *  @hw: pointer to hardware structure
 *  @mc_addr: the multicast address
 *
 *  Extracts the 12 bits, from a multicast address, to determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 *  incoming rx multicast addresses, to determine the bit-vector to check in
 *  the MTA. Which of the 4 combinations of 12 bits, the hardware uses is set
 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 *  to mc_filter_type.
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		hw_dbg(hw, "MC filter type param set incorrectly\n");
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
1970
/**
 *  ixgbe_set_mta - Set bit-vector in multicast table
 *  @hw: pointer to hardware structure
 *  @mc_addr: Multicast address
 *
 *  Sets the bit-vector in the multicast table (shadow copy only; the
 *  caller flushes mta_shadow to the hardware registers).
 **/
static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits.  We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
2002
/**
 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 *  @hw: pointer to hardware structure
 *  @netdev: pointer to net device structure
 *
 *  The given list replaces any existing list. Clears the MC addrs from receive
 *  address registers and the multicast table. Uses unused receive address
 *  registers for the first multicast addresses, and hashes the rest into the
 *  multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
				      struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i;

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	hw_dbg(hw, " Clearing MTA\n");
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* Update mta shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		hw_dbg(hw, " Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, ha->addr);
	}

	/* Enable mta */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
	return 0;
}
2048
2049
2050
2051
2052
2053
2054
2055s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2056{
2057 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2058
2059 if (a->mta_in_use > 0)
2060 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2061 hw->mac.mc_filter_type);
2062
2063 return 0;
2064}
2065
2066
2067
2068
2069
2070
2071
2072s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2073{
2074 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2075
2076 if (a->mta_in_use > 0)
2077 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2078
2079 return 0;
2080}
2081
/**
 *  ixgbe_fc_enable_generic - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	/*
	 * Validate the water mark configuration for packet buffer 0.  Zero
	 * water marks indicate that the packet buffer was not configured
	 * and the watermarks for packet buffer 0 should always be configured.
	 */
	if (!hw->fc.low_water ||
	    !hw->fc.high_water[0] ||
	    !hw->fc.pause_time) {
		hw_dbg(hw, "Invalid water mark configuration\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the maximum FCRTH value.  This allows the Tx
			 * switch to function even under heavy Rx workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2203
/**
 *  ixgbe_negotiate_fc - Negotiate flow control
 *  @hw: pointer to hardware structure
 *  @adv_reg: flow control advertised settings
 *  @lp_reg: link partner's flow control settings
 *  @adv_sym: symmetric pause bit in advertisement
 *  @adv_asm: asymmetric pause bit in advertisement
 *  @lp_sym: symmetric pause bit in link partner advertisement
 *  @lp_asm: asymmetric pause bit in link partner advertisement
 *
 *  Find the intersection between advertised settings and link partner's
 *  advertised settings
 **/
static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	if ((!(adv_reg)) ||  (!(lp_reg)))
		return IXGBE_ERR_FC_NOT_NEGOTIATED;

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames.  In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			hw_dbg(hw, "Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
		hw_dbg(hw, "Flow Control = NONE.\n");
	}
	return 0;
}
2252
2253
2254
2255
2256
2257
2258
2259static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2260{
2261 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2262 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2263
2264
2265
2266
2267
2268
2269
2270 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2271 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2272 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2273 goto out;
2274
2275 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2276 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2277
2278 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2279 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2280 IXGBE_PCS1GANA_ASM_PAUSE,
2281 IXGBE_PCS1GANA_SYM_PAUSE,
2282 IXGBE_PCS1GANA_ASM_PAUSE);
2283
2284out:
2285 return ret_val;
2286}
2287
/**
 *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
			goto out;
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}
2327
2328
2329
2330
2331
2332
2333
2334static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2335{
2336 u16 technology_ability_reg = 0;
2337 u16 lp_technology_ability_reg = 0;
2338
2339 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
2340 MDIO_MMD_AN,
2341 &technology_ability_reg);
2342 hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
2343 MDIO_MMD_AN,
2344 &lp_technology_ability_reg);
2345
2346 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2347 (u32)lp_technology_ability_reg,
2348 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2349 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2350}
2351
/**
 *  ixgbe_fc_autoneg - Configure flow control
 *  @hw: pointer to hardware structure
 *
 *  Compares our advertised flow control capabilities to those advertised by
 *  our link partner, and determines the proper flow control mode to use.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 *
	 * Since we're being called from an LSC, link is already known to be
	 * up.  So use link_up_wait_to_complete=false.
	 */
	if (hw->fc.disable_fc_autoneg)
		goto out;

	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (!link_up)
		goto out;

	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw) == 0)
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == 0) {
		hw->fc.fc_was_autonegged = true;
	} else {
		/* Negotiation failed; fall back to the requested mode. */
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}
2411
/**
 *  ixgbe_disable_pcie_master - Disable PCI-express master access
 *  @hw: pointer to hardware structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 *  bit hasn't caused the master requests to be disabled, else 0
 *  is returned signifying master requests disabled.
 **/
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	s32 status = 0;
	u32 i;
	u16 value;

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		udelay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		udelay(100);
		pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
				     &value);
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
2471
/**
 *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SWFW semaphore through the GSSR register for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;
	u32 fwmask = mask << 5;	/* FW semaphore bits sit 5 above the SW bits */
	s32 timeout = 200;

	while (timeout) {
		/*
		 * SW EEPROM semaphore bit is used for access to all
		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask) or other software
		 * thread currently using resource (swmask)
		 */
		ixgbe_release_eeprom_semaphore(hw);
		usleep_range(5000, 10000);
		timeout--;
	}

	if (!timeout) {
		hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
		return IXGBE_ERR_SWFW_SYNC;
	}

	gssr |= swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
	return 0;
}
2519
/**
 *  ixgbe_release_swfw_sync - Release SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to release
 *
 *  Releases the SWFW semaphore through the GSSR register for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;

	/* GSSR itself is protected by the EEPROM semaphore. */
	ixgbe_get_eeprom_semaphore(hw);

	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	gssr &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
}
2541
2542
2543
2544
2545
2546
2547
2548
2549s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
2550{
2551#define IXGBE_MAX_SECRX_POLL 40
2552 int i;
2553 int secrxreg;
2554
2555 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2556 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2557 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2558 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2559 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2560 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2561 break;
2562 else
2563
2564 udelay(1000);
2565 }
2566
2567
2568 if (i >= IXGBE_MAX_SECRX_POLL)
2569 hw_dbg(hw, "Rx unit being enabled before security "
2570 "path fully disabled. Continuing with init.\n");
2571
2572 return 0;
2573
2574}
2575
2576
2577
2578
2579
2580
2581
2582s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
2583{
2584 int secrxreg;
2585
2586 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2587 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2588 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2589 IXGBE_WRITE_FLUSH(hw);
2590
2591 return 0;
2592}
2593
2594
2595
2596
2597
2598
2599
2600
2601s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2602{
2603 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2604
2605 return 0;
2606}
2607
/**
 *  ixgbe_blink_led_start_generic - Blink LED based on index.
 *  @hw: pointer to hardware structure
 *  @index: led number to blink
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = false;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = 0;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (!link_up) {
		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
		 * LESM is on.
		 */
		bool got_lock = false;

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (ret_val)
				goto out;

			got_lock = true;
		}
		/* Force link up and restart AN so LED blinking works. */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
		IXGBE_WRITE_FLUSH(hw);

		if (got_lock)
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
		usleep_range(10000, 20000);
	}

	/* Put the LED in blink mode. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
2661
/**
 *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 *  @hw: pointer to hardware structure
 *  @index: led number to stop blinking
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = 0;
	bool got_lock = false;

	/* Need the SW/FW semaphore around AUTOC writes if 82599 and
	 * LESM is on.
	 */
	if ((hw->mac.type == ixgbe_mac_82599EB) &&
	    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
						IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val)
			goto out;

		got_lock = true;
	}

	/* Drop forced link-up and restart autonegotiation. */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Restore the LED to link-activity mode. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
2706
/**
 *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 *  @hw: pointer to hardware structure
 *  @san_mac_offset: SAN MAC address offset
 *
 *  This function will read the EEPROM location for the SAN MAC address
 *  pointer, and returns the value at that location.
 **/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset)
{
	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * set up.
	 */
	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);

	return 0;
}
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2739{
2740 u16 san_mac_data, san_mac_offset;
2741 u8 i;
2742
2743
2744
2745
2746
2747 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2748
2749 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2750
2751
2752
2753
2754 for (i = 0; i < 6; i++)
2755 san_mac_addr[i] = 0xFF;
2756
2757 goto san_mac_addr_out;
2758 }
2759
2760
2761 hw->mac.ops.set_lan_id(hw);
2762
2763 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2764 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2765 for (i = 0; i < 3; i++) {
2766 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
2767 san_mac_addr[i * 2] = (u8)(san_mac_data);
2768 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2769 san_mac_offset++;
2770 }
2771
2772san_mac_addr_out:
2773 return 0;
2774}
2775
/**
 *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
 *  @hw: pointer to hardware structure
 *
 *  Read PCIe configuration space, and get the MSI-X vector count from
 *  the capabilities table.
 **/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 msix_count = 1;
	u16 max_msix_count;
	u16 pcie_offset;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
		break;
	default:
		/* Unknown MAC: report a single vector. */
		return msix_count;
	}

	pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

	/* MSI-X count is zero-based in HW */
	msix_count++;

	if (msix_count > max_msix_count)
		msix_count = max_msix_count;

	return msix_count;
}
2815
/**
 *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to disassociate
 *  @vmdq: VMDq pool index to remove from the rar
 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* Nothing to clear if no pool bits are set. */
	if (!mpsar_lo && !mpsar_hi)
		goto done;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		mpsar_lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		mpsar_hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return 0;
}
2862
2863
2864
2865
2866
2867
2868
2869s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2870{
2871 u32 mpsar;
2872 u32 rar_entries = hw->mac.num_rar_entries;
2873
2874
2875 if (rar >= rar_entries) {
2876 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2877 return IXGBE_ERR_INVALID_ARGUMENT;
2878 }
2879
2880 if (vmdq < 32) {
2881 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2882 mpsar |= 1 << vmdq;
2883 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2884 } else {
2885 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2886 mpsar |= 1 << (vmdq - 32);
2887 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2888 }
2889 return 0;
2890}
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
2903{
2904 u32 rar = hw->mac.san_mac_rar_index;
2905
2906 if (vmdq < 32) {
2907 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
2908 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2909 } else {
2910 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2911 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
2912 }
2913
2914 return 0;
2915}
2916
2917
2918
2919
2920
2921s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2922{
2923 int i;
2924
2925 for (i = 0; i < 128; i++)
2926 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2927
2928 return 0;
2929}
2930
/**
 *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *
 *  return the VLVF index where this VLAN id should be placed
 *
 **/
static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
{
	u32 bits = 0;
	u32 first_empty_slot = 0;
	s32 regindex;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/*
	 * Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way
	 */
	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (!bits && !(first_empty_slot))
			first_empty_slot = regindex;
		else if ((bits & 0x0FFF) == vlan)
			break;
	}

	/*
	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
	 * in the VLVF. Else use the first empty VLVF register for this
	 * vlan id.
	 */
	if (regindex >= IXGBE_VLVF_ENTRIES) {
		if (first_empty_slot)
			regindex = first_empty_slot;
		else {
			hw_dbg(hw, "No space in VLVF.\n");
			regindex = IXGBE_ERR_NO_SPACE;
		}
	}

	return regindex;
}
2977
/**
 *  ixgbe_set_vfta_generic - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
 *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on)
{
	s32 regindex;
	u32 bitindex;
	u32 vfta;
	u32 bits;
	u32 vt;
	u32 targetbit;
	bool vfta_changed = false;

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = true;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = true;
		}
	}

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;

		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index*2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
				bits |= (1 << (vind-32));
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1),
						bits);
			}
		} else {
			/* clear the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index*2),
						bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
			} else {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
				bits &= ~(1 << (vind-32));
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1),
						bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request.  We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry.  In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared.  This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if (!vlan_on) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				vfta_changed = false;
			}
		}
		else
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return 0;
}
3120
3121
3122
3123
3124
3125
3126
3127s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3128{
3129 u32 offset;
3130
3131 for (offset = 0; offset < hw->mac.vft_size; offset++)
3132 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3133
3134 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3135 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3136 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
3137 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
3138 }
3139
3140 return 0;
3141}
3142
/**
 *  ixgbe_check_mac_link_generic - Determine link and speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true when link is up
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		hw_dbg(hw, "LINKS changed from %08X to %08X\n",
		       links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* Poll up to IXGBE_LINK_UP_TIME * 100ms for link. */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	/* Decode link speed from the LINKS speed field. */
	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
	    IXGBE_LINKS_SPEED_10G_82599)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
		 IXGBE_LINKS_SPEED_1G_82599)
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
		 IXGBE_LINKS_SPEED_100_82599)
		*speed = IXGBE_LINK_SPEED_100_FULL;
	else
		*speed = IXGBE_LINK_SPEED_UNKNOWN;

	return 0;
}
3200
/**
 *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 *  the EEPROM
 *  @hw: pointer to hardware structure
 *  @wwnn_prefix: the alternative WWNN prefix
 *  @wwpn_prefix: the alternative WWPN prefix
 *
 *  This function will read the EEPROM from the alternative SAN MAC address
 *  block to check the support for the alternative WWNN/WWPN prefix support.
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
			    &alt_san_mac_blk_offset);

	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	hw->eeprom.ops.read(hw, offset, &caps);
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	hw->eeprom.ops.read(hw, offset, wwnn_prefix);

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	hw->eeprom.ops.read(hw, offset, wwpn_prefix);

wwn_prefix_out:
	return 0;
}
3245
/**
 *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 *  @hw: pointer to hardware structure
 *  @enable: enable or disable switch for anti-spoofing
 *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	int pf_target_reg = pf >> 3;	/* 8 VFs per PFVFSPOOF register */
	int pf_target_shift = pf % 8;	/* PF's bit within its register */
	u32 pfvfspoof = 0;

	/* 82598 has no anti-spoofing support */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs.  Reset the bit assigned to the PF
	 * and all the bits above it in the same register.
	 */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * Remaining pools belong to the PF so they do not need to have
	 * anti-spoofing enabled.
	 */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
3287
3288
3289
3290
3291
3292
3293
3294
3295void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3296{
3297 int vf_target_reg = vf >> 3;
3298 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3299 u32 pfvfspoof;
3300
3301 if (hw->mac.type == ixgbe_mac_82598EB)
3302 return;
3303
3304 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3305 if (enable)
3306 pfvfspoof |= (1 << vf_target_shift);
3307 else
3308 pfvfspoof &= ~(1 << vf_target_shift);
3309 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3310}
3311
3312
3313
3314
3315
3316
3317
3318
3319
/**
 *  ixgbe_get_device_caps_generic - Get additional device capabilities
 *  @hw: pointer to hardware structure
 *  @device_caps: the EEPROM word with the extra device capabilities
 *
 *  Reads the IXGBE_DEVICE_CAPS EEPROM word into @device_caps.  Always
 *  returns 0.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	/* NOTE(review): the read's status is ignored; on failure
	 * *device_caps may be left unmodified - confirm callers tolerate
	 * this before relying on the value. */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return 0;
}
3326
3327
3328
3329
3330
3331
3332
3333
/**
 *  ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 *  @hw: pointer to hardware structure
 *  @num_pb: number of packet buffers to allocate
 *  @headroom: reserve n KB of headroom
 *  @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
			     int num_pb,
			     u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	/* guard the divisions below against num_pb == 0 */
	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number
	 * of packet buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case (PBA_STRATEGY_WEIGHTED):
		/* Weighted strategy: the first half of the packet buffers
		 * each get 5/4 of an equal share (i.e. the first half of
		 * the buffer space is weighted with 5/8 of the total).
		 */
		rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Fall through - remaining buffers get an equal share */
	case (PBA_STRATEGY_EQUAL):
		/* Divide what is left evenly among the remaining buffers */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/*
	 * Setup Tx packet buffer and threshold equally for all TCs.
	 * TXPBTHRESH is in KB units, hence the divide by 1024; the
	 * IXGBE_TXPKT_SIZE_MAX subtraction keeps room for the largest
	 * supported Tx packet (presumably ~9K jumbo - confirm against
	 * the register definition).
	 */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused packet buffers, if any, to zero size */
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3402{
3403 u32 i;
3404 u8 sum = 0;
3405
3406 if (!buffer)
3407 return 0;
3408
3409 for (i = 0; i < length; i++)
3410 sum += buffer[i];
3411
3412 return (u8) (0 - sum);
3413}
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
/**
 *  ixgbe_host_interface_command - Issue command to manageability block
 *  @hw: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *           be placed
 *  @length: length of buffer, must be a multiple of 4 bytes
 *
 *  Communicates with the manageability block.  On success returns 0,
 *  else returns IXGBE_ERR_HOST_INTERFACE_COMMAND.  On success the reply
 *  (header plus optional payload) is copied back into @buffer.
 **/
static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
					u32 length)
{
	u32 hicr, i, bi;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u8 buf_len, dword_len;

	s32 ret_val = 0;

	/* reject zero-length, unaligned, or oversized buffers up front */
	if (length == 0 || length & 0x3 ||
	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		hw_dbg(hw, "Buffer length failure.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* the host interface must be enabled before issuing any command */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* calculate command length in DWORDs */
	dword_len = length >> 2;

	/*
	 * Write the command block into the FLEX_MNG ram area, converting
	 * each DWORD to little-endian on the way in.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, cpu_to_le32(buffer[i]));

	/* Setting the command bit tells the ARC that a new command is pending */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* poll for completion: firmware clears IXGBE_HICR_C when done */
	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		usleep_range(1000, 2000);
	}

	/* fail on timeout or if firmware did not set the status-valid bit */
	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
		hw_dbg(hw, "Command has failed with no status valid.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* calculate header length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the reply buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&buffer[bi]);
	}

	/* if there is no payload in the reply we are done */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto out;

	if (length < (buf_len + hdr_size)) {
		hw_dbg(hw, "Buffer not large enough for reply message.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* calculate payload length in DWORDs, rounding up odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/*
	 * Pull in the rest of the buffer; bi is where the header loop left
	 * off.  NOTE(review): the "<=" bound is only correct while the
	 * header is exactly one DWORD (bi == 1 here), so the last index
	 * read equals header DWORDs + payload DWORDs - 1 - confirm if the
	 * ixgbe_hic_hdr layout ever changes.
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&buffer[bi]);
	}

out:
	return ret_val;
}
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3525 u8 build, u8 sub)
3526{
3527 struct ixgbe_hic_drv_info fw_cmd;
3528 int i;
3529 s32 ret_val = 0;
3530
3531 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
3532 ret_val = IXGBE_ERR_SWFW_SYNC;
3533 goto out;
3534 }
3535
3536 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
3537 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
3538 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
3539 fw_cmd.port_num = (u8)hw->bus.func;
3540 fw_cmd.ver_maj = maj;
3541 fw_cmd.ver_min = min;
3542 fw_cmd.ver_build = build;
3543 fw_cmd.ver_sub = sub;
3544 fw_cmd.hdr.checksum = 0;
3545 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
3546 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
3547 fw_cmd.pad = 0;
3548 fw_cmd.pad2 = 0;
3549
3550 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
3551 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3552 sizeof(fw_cmd));
3553 if (ret_val != 0)
3554 continue;
3555
3556 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
3557 FW_CEM_RESP_STATUS_SUCCESS)
3558 ret_val = 0;
3559 else
3560 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3561
3562 break;
3563 }
3564
3565 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
3566out:
3567 return ret_val;
3568}
3569
3570
3571
3572
3573
3574
3575
3576
3577
/**
 *  ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 *  @hw: pointer to the hardware structure
 *
 *  Flushes pending Tx transactions from the PCIe buffers before a reset,
 *  for MACs that flag IXGBE_FLAGS_DOUBLE_RESET_REQUIRED.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0;

	/*
	 * If a double reset is not required then all transactions should
	 * already be clear and there is no work to do.
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Enable MAC loopback to prevent any transmits from being sent
	 * should the link come up while we flush.  NOTE(review): this
	 * presumably assumes receive has already been disabled by the
	 * caller - confirm.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* initiate the cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* flush all posted writes, then allow 20usec for transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	udelay(20);

	/* restore the previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
3610
/* EMC temperature-data register addresses, indexed by the ETS record's
 * sensor index (constant names indicate 0 = internal sensor,
 * 1-3 = external diodes).  Used when reading sensor temperatures over I2C.
 */
static const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
/* Matching EMC thermal-limit register addresses for the same four sensors,
 * used when programming high-threshold limits.
 */
static const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
3633 u16 *ets_offset)
3634{
3635 s32 status = 0;
3636
3637 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
3638 if (status)
3639 goto out;
3640
3641 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) {
3642 status = IXGBE_NOT_IMPLEMENTED;
3643 goto out;
3644 }
3645
3646 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
3647 if (status)
3648 goto out;
3649
3650 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) {
3651 status = IXGBE_NOT_IMPLEMENTED;
3652 goto out;
3653 }
3654
3655out:
3656 return status;
3657}
3658
3659
3660
3661
3662
3663
3664
/**
 *  ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
 *  @hw: pointer to hardware structure
 *
 *  Reads the temperature of each ETS-described sensor from the EMC part
 *  over I2C into hw->mac.thermal_sensor_data.  Returns 0 on success,
 *  IXGBE_NOT_IMPLEMENTED when thermal data is unavailable, or an
 *  EEPROM/I2C error code.
 **/
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 num_sensors;
	u8 i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	/* locate and validate the ETS block in the EEPROM */
	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		goto out;

	/* clamp the sensor count to what the data structure can hold */
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8 sensor_index;
		u8 sensor_location;

		/* each sensor record follows the ETS config word */
		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
					     &ets_sensor);
		if (status)
			goto out;

		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);

		/* location 0 marks an unused record - skip the I2C read */
		if (sensor_location != 0) {
			status = hw->phy.ops.read_i2c_byte(hw,
					ixgbe_emc_temp_data[sensor_index],
					IXGBE_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
			if (status)
				goto out;
		}
	}
out:
	return status;
}
3715
3716
3717
3718
3719
3720
3721
3722
/**
 *  ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 *  @hw: pointer to hardware structure
 *
 *  Programs the EMC thermal-limit registers from the EEPROM's ETS block and
 *  records per-sensor location and thresholds in
 *  hw->mac.thermal_sensor_data.  Returns 0 on success,
 *  IXGBE_NOT_IMPLEMENTED when thermal data is unavailable, or an EEPROM
 *  error code from the ETS lookup.
 **/
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 low_thresh_delta;
	u8 num_sensors;
	u8 therm_limit;
	u8 i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* start from a clean slate - stale entries would otherwise persist */
	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	/* locate and validate the ETS block in the EEPROM */
	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		goto out;

	/* delta subtracted from the caution threshold to get max-operating */
	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
			    IXGBE_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8 sensor_index;
		u8 sensor_location;

		/* NOTE(review): this read's status is ignored; a failed read
		 * would leave ets_sensor stale/unset for this iteration -
		 * confirm whether that is acceptable here. */
		hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

		/* program the EMC limit register for this sensor over I2C */
		hw->phy.ops.write_i2c_byte(hw,
			ixgbe_emc_therm_limit[sensor_index],
			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);

		/* location 0 marks an unused record - do not publish it */
		if (sensor_location == 0)
			continue;

		data->sensor[i].location = sensor_location;
		data->sensor[i].caution_thresh = therm_limit;
		data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
	}
out:
	return status;
}
3778
3779