1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/device.h>
24#include <linux/timer.h>
25#include <linux/errno.h>
26#include <linux/ioport.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/dma-mapping.h>
35#include <linux/bitops.h>
36#include <linux/irq.h>
37#include <linux/delay.h>
38#include <asm/byteorder.h>
39#include <linux/time.h>
40#include <linux/ethtool.h>
41#include <linux/mii.h>
42#include <linux/if_vlan.h>
43#include <net/ip.h>
44#include <net/ipv6.h>
45#include <net/tcp.h>
46#include <net/checksum.h>
47#include <net/ip6_checksum.h>
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/crc32c.h>
51#include <linux/prefetch.h>
52#include <linux/zlib.h>
53#include <linux/io.h>
54#include <linux/semaphore.h>
55#include <linux/stringify.h>
56#include <linux/vmalloc.h>
57
58#include "bnx2x.h"
59#include "bnx2x_init.h"
60#include "bnx2x_init_ops.h"
61#include "bnx2x_cmn.h"
62#include "bnx2x_dcb.h"
63#include "bnx2x_sp.h"
64
65#include <linux/firmware.h>
66#include "bnx2x_fw_file_hdr.h"
67
68#define FW_FILE_VERSION \
69 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
70 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
71 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
72 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
73#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
74#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
75#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
76
77#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
78
79
80#define TX_TIMEOUT (5*HZ)
81
82static char version[] =
83 "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
84 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
85
86MODULE_AUTHOR("Eliezer Tamir");
87MODULE_DESCRIPTION("Broadcom NetXtreme II "
88 "BCM57710/57711/57711E/"
89 "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
90 "57840/57840_MF Driver");
91MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_MODULE_VERSION);
93MODULE_FIRMWARE(FW_FILE_NAME_E1);
94MODULE_FIRMWARE(FW_FILE_NAME_E1H);
95MODULE_FIRMWARE(FW_FILE_NAME_E2);
96
97
98int num_queues;
99module_param(num_queues, int, 0);
100MODULE_PARM_DESC(num_queues,
101 " Set number of queues (default is as a number of CPUs)");
102
103static int disable_tpa;
104module_param(disable_tpa, int, 0);
105MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
106
107#define INT_MODE_INTx 1
108#define INT_MODE_MSI 2
109int int_mode;
110module_param(int_mode, int, 0);
111MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
112 "(1 INT#x; 2 MSI)");
113
114static int dropless_fc;
115module_param(dropless_fc, int, 0);
116MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
117
118static int mrrs = -1;
119module_param(mrrs, int, 0);
120MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
121
122static int debug;
123module_param(debug, int, 0);
124MODULE_PARM_DESC(debug, " Default debug msglevel");
125
126
127
128struct workqueue_struct *bnx2x_wq;
129
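/* Saved MAC-block register addresses and their original values (XMAC,
 * EMAC, UMAC, BMAC), captured so the registers can be restored later --
 * e.g. when undoing a previous driver instance's configuration.
 */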
130struct bnx2x_mac_vals {
131 u32 xmac_addr;
132 u32 xmac_val;
133 u32 emac_addr;
134 u32 emac_val;
135 u32 umac_addr;
136 u32 umac_val;
137 u32 bmac_addr;
138 u32 bmac_val[2];
139};
140
141enum bnx2x_board_type {
142 BCM57710 = 0,
143 BCM57711,
144 BCM57711E,
145 BCM57712,
146 BCM57712_MF,
147 BCM57800,
148 BCM57800_MF,
149 BCM57810,
150 BCM57810_MF,
151 BCM57840_O,
152 BCM57840_4_10,
153 BCM57840_2_20,
154 BCM57840_MFO,
155 BCM57840_MF,
156 BCM57811,
157 BCM57811_MF
158};
159
160
161static struct {
162 char *name;
163} board_info[] = {
164 { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
165 { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
166 { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
167 { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
168 { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
169 { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
170 { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
171 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
172 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
173 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
174 { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
175 { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
176 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
177 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
178 { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
179 { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
180};
181
182#ifndef PCI_DEVICE_ID_NX2_57710
183#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
184#endif
185#ifndef PCI_DEVICE_ID_NX2_57711
186#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
187#endif
188#ifndef PCI_DEVICE_ID_NX2_57711E
189#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
190#endif
191#ifndef PCI_DEVICE_ID_NX2_57712
192#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
193#endif
194#ifndef PCI_DEVICE_ID_NX2_57712_MF
195#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
196#endif
197#ifndef PCI_DEVICE_ID_NX2_57800
198#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
199#endif
200#ifndef PCI_DEVICE_ID_NX2_57800_MF
201#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
202#endif
203#ifndef PCI_DEVICE_ID_NX2_57810
204#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
205#endif
206#ifndef PCI_DEVICE_ID_NX2_57810_MF
207#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
208#endif
209#ifndef PCI_DEVICE_ID_NX2_57840_O
210#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
211#endif
212#ifndef PCI_DEVICE_ID_NX2_57840_4_10
213#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
214#endif
215#ifndef PCI_DEVICE_ID_NX2_57840_2_20
216#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
217#endif
218#ifndef PCI_DEVICE_ID_NX2_57840_MFO
219#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
220#endif
221#ifndef PCI_DEVICE_ID_NX2_57840_MF
222#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
223#endif
224#ifndef PCI_DEVICE_ID_NX2_57811
225#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
226#endif
227#ifndef PCI_DEVICE_ID_NX2_57811_MF
228#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
229#endif
230static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
231 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
232 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
233 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
234 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
235 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
236 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
237 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
238 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
239 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
240 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
241 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
242 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
243 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
244 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
245 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
246 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
247 { 0 }
248};
249
250MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
251
252
253#define BNX2X_PREV_WAIT_NEEDED 1
254static DEFINE_SEMAPHORE(bnx2x_prev_sem);
255static LIST_HEAD(bnx2x_prev_list);
256
257
258
259
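/* Helpers that write driver state into the STORM processors' internal
 * memory. A 64-bit DMA address is stored as two 32-bit writes: the low
 * dword at 'addr' and the high dword at 'addr + 4'.
 */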
260static void __storm_memset_dma_mapping(struct bnx2x *bp,
261 u32 addr, dma_addr_t mapping)
262{
263 REG_WR(bp, addr, U64_LO(mapping));
264 REG_WR(bp, addr + 4, U64_HI(mapping));
265}
266
267static void storm_memset_spq_addr(struct bnx2x *bp,
268 dma_addr_t mapping, u16 abs_fid)
269{
270 u32 addr = XSEM_REG_FAST_MEMORY +
271 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
272
273 __storm_memset_dma_mapping(bp, addr, mapping);
274}
275
276static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
277 u16 pf_id)
278{
279 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
280 pf_id);
281 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
282 pf_id);
283 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
284 pf_id);
285 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
286 pf_id);
287}
288
289static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
290 u8 enable)
291{
292 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
293 enable);
294 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
295 enable);
296 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
297 enable);
298 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
299 enable);
300}
301
302static void storm_memset_eq_data(struct bnx2x *bp,
303 struct event_ring_data *eq_data,
304 u16 pfid)
305{
306 size_t size = sizeof(struct event_ring_data);
307
308 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
309
310 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
311}
312
313static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
314 u16 pfid)
315{
316 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
317 REG_WR16(bp, addr, eq_prod);
318}
319
320
321
322
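/* Indirect register access through the PCI config space GRC window: the
 * target GRC address is written to PCICFG_GRC_ADDRESS and the data is
 * moved through PCICFG_GRC_DATA. The window is parked back at
 * PCICFG_VENDOR_ID_OFFSET afterwards so normal config cycles keep working.
 */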
323static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
324{
325 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
326 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
327 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
328 PCICFG_VENDOR_ID_OFFSET);
329}
330
331static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
332{
333 u32 val;
334
335 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
336 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
337 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
338 PCICFG_VENDOR_ID_OFFSET);
339
340 return val;
341}
342
343#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
344#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
345#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
346#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
347#define DMAE_DP_DST_NONE "dst_addr [none]"
348
349
350
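/* DMAE (DMA engine) support: used for bulk copies between host memory
 * and device (GRC) addresses. bnx2x_post_dmae() copies a prepared
 * command into the DMAE command memory and rings the per-channel GO
 * register to start it.
 */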
351void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
352{
353 u32 cmd_offset;
354 int i;
355
356 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
357 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
358 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
359 }
360 REG_WR(bp, dmae_reg_go_c[idx], 1);
361}
362
363u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
364{
365 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
366 DMAE_CMD_C_ENABLE);
367}
368
369u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
370{
371 return opcode & ~DMAE_CMD_SRC_RESET;
372}
373
374u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
375 bool with_comp, u8 comp_type)
376{
377 u32 opcode = 0;
378
379 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
380 (dst_type << DMAE_COMMAND_DST_SHIFT));
381
382 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
383
384 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
385 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
386 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
387 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
388
389#ifdef __BIG_ENDIAN
390 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
391#else
392 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
393#endif
394 if (with_comp)
395 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
396 return opcode;
397}
398
399static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
400 struct dmae_command *dmae,
401 u8 src_type, u8 dst_type)
402{
403 memset(dmae, 0, sizeof(struct dmae_command));
404
405
406 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
407 true, DMAE_COMP_PCI);
408
409
410 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
411 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
412 dmae->comp_val = DMAE_COMP_VAL;
413}
414
415
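/* Post a DMAE command that reports completion by writing DMAE_COMP_VAL
 * into the slow-path wb_comp word, then busy-wait for that value under
 * dmae_lock. Polling is every 50 usec; on a regular ASIC the budget is
 * 4000 iterations (roughly 200 ms), much larger on slow emulation chips.
 */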
416static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
417 struct dmae_command *dmae)
418{
419 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
420 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
421 int rc = 0;
422
423
424
425
426
427
428 spin_lock_bh(&bp->dmae_lock);
429
430
431 *wb_comp = 0;
432
433
434 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
435
436
437 udelay(5);
438 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
439
440 if (!cnt ||
441 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
442 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
443 BNX2X_ERR("DMAE timeout!\n");
444 rc = DMAE_TIMEOUT;
445 goto unlock;
446 }
447 cnt--;
448 udelay(50);
449 }
450 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
451 BNX2X_ERR("DMAE PCI error!\n");
452 rc = DMAE_PCI_ERROR;
453 }
454
455unlock:
456 spin_unlock_bh(&bp->dmae_lock);
457 return rc;
458}
459
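/* DMA a host buffer of 'len32' dwords into GRC space at 'dst_addr'.
 * Before DMAE is ready the data is assumed to already sit in the
 * slow-path wb_data scratch area and is pushed out with indirect or
 * string register writes instead.
 *
 * A minimal usage sketch (the destination address is a placeholder):
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 *			 some_grc_dst_addr, 2);
 */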
460void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
461 u32 len32)
462{
463 struct dmae_command dmae;
464
465 if (!bp->dmae_ready) {
466 u32 *data = bnx2x_sp(bp, wb_data[0]);
467
468 if (CHIP_IS_E1(bp))
469 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
470 else
471 bnx2x_init_str_wr(bp, dst_addr, data, len32);
472 return;
473 }
474
475
476 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
477
478
479 dmae.src_addr_lo = U64_LO(dma_addr);
480 dmae.src_addr_hi = U64_HI(dma_addr);
481 dmae.dst_addr_lo = dst_addr >> 2;
482 dmae.dst_addr_hi = 0;
483 dmae.len = len32;
484
485
486 bnx2x_issue_dmae_with_comp(bp, &dmae);
487}
488
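/* Inverse of bnx2x_write_dmae(): copy 'len32' dwords from GRC into the
 * slow-path wb_data buffer, where the caller picks up the result. Falls
 * back to plain (or, on E1, indirect) register reads when DMAE is not
 * ready yet.
 */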
489void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
490{
491 struct dmae_command dmae;
492
493 if (!bp->dmae_ready) {
494 u32 *data = bnx2x_sp(bp, wb_data[0]);
495 int i;
496
497 if (CHIP_IS_E1(bp))
498 for (i = 0; i < len32; i++)
499 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
500 else
501 for (i = 0; i < len32; i++)
502 data[i] = REG_RD(bp, src_addr + i*4);
503
504 return;
505 }
506
507
508 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
509
510
511 dmae.src_addr_lo = src_addr >> 2;
512 dmae.src_addr_hi = 0;
513 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
514 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
515 dmae.len = len32;
516
517
518 bnx2x_issue_dmae_with_comp(bp, &dmae);
519}
520
521static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
522 u32 addr, u32 len)
523{
524 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
525 int offset = 0;
526
527 while (len > dmae_wr_max) {
528 bnx2x_write_dmae(bp, phys_addr + offset,
529 addr + offset, dmae_wr_max);
530 offset += dmae_wr_max * 4;
531 len -= dmae_wr_max;
532 }
533
534 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
535}
536
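/* Scan the XSTORM/TSTORM/CSTORM/USTORM assert lists. Each entry is four
 * dwords; scanning stops at the first entry whose opcode reads as
 * COMMON_ASM_INVALID_ASSERT_OPCODE. Returns the number of asserts found.
 */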
537static int bnx2x_mc_assert(struct bnx2x *bp)
538{
539 char last_idx;
540 int i, rc = 0;
541 u32 row0, row1, row2, row3;
542
543
544 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
545 XSTORM_ASSERT_LIST_INDEX_OFFSET);
546 if (last_idx)
547 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
548
549
550 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
551
552 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
553 XSTORM_ASSERT_LIST_OFFSET(i));
554 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
555 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
556 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
557 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
558 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
559 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
560
561 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
562 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
563 i, row3, row2, row1, row0);
564 rc++;
565 } else {
566 break;
567 }
568 }
569
570
571 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
572 TSTORM_ASSERT_LIST_INDEX_OFFSET);
573 if (last_idx)
574 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
575
576
577 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
578
579 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
580 TSTORM_ASSERT_LIST_OFFSET(i));
581 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
582 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
583 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
584 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
585 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
586 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
587
588 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
589 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
590 i, row3, row2, row1, row0);
591 rc++;
592 } else {
593 break;
594 }
595 }
596
597
598 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
599 CSTORM_ASSERT_LIST_INDEX_OFFSET);
600 if (last_idx)
601 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
602
603
604 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
605
606 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
607 CSTORM_ASSERT_LIST_OFFSET(i));
608 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
609 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
610 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
611 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
612 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
613 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
614
615 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
616 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
617 i, row3, row2, row1, row0);
618 rc++;
619 } else {
620 break;
621 }
622 }
623
624
625 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
626 USTORM_ASSERT_LIST_INDEX_OFFSET);
627 if (last_idx)
628 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
629
630
631 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
632
633 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
634 USTORM_ASSERT_LIST_OFFSET(i));
635 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
636 USTORM_ASSERT_LIST_OFFSET(i) + 4);
637 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
638 USTORM_ASSERT_LIST_OFFSET(i) + 8);
639 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
640 USTORM_ASSERT_LIST_OFFSET(i) + 12);
641
642 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
643 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
644 i, row3, row2, row1, row0);
645 rc++;
646 } else {
647 break;
648 }
649 }
650
651 return rc;
652}
653
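/* Dump the management firmware (MCP) state: bootcode version, the MCP
 * program counter, and the MFW trace buffer that sits 0x800 bytes below
 * the shmem base. The dump is skipped unless the buffer carries
 * MFW_TRACE_SIGNATURE.
 */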
654void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
655{
656 u32 addr, val;
657 u32 mark, offset;
658 __be32 data[9];
659 int word;
660 u32 trace_shmem_base;
661 if (BP_NOMCP(bp)) {
662 BNX2X_ERR("NO MCP - can not dump\n");
663 return;
664 }
665 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
666 (bp->common.bc_ver & 0xff0000) >> 16,
667 (bp->common.bc_ver & 0xff00) >> 8,
668 (bp->common.bc_ver & 0xff));
669
670 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
671 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
672 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
673
674 if (BP_PATH(bp) == 0)
675 trace_shmem_base = bp->common.shmem_base;
676 else
677 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
678 addr = trace_shmem_base - 0x800;
679
680
681 mark = REG_RD(bp, addr);
682 if (mark != MFW_TRACE_SIGNATURE) {
683 BNX2X_ERR("Trace buffer signature is missing.");
684 return ;
685 }
686
687
688 addr += 4;
689 mark = REG_RD(bp, addr);
690 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
691 + ((mark + 0x3) & ~0x3) - 0x08000000;
692 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
693
694 printk("%s", lvl);
695 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
696 for (word = 0; word < 8; word++)
697 data[word] = htonl(REG_RD(bp, offset + 4*word));
698 data[8] = 0x0;
699 pr_cont("%s", (char *)data);
700 }
701 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
702 for (word = 0; word < 8; word++)
703 data[word] = htonl(REG_RD(bp, offset + 4*word));
704 data[8] = 0x0;
705 pr_cont("%s", (char *)data);
706 }
707 printk("%s" "end of fw dump\n", lvl);
708}
709
710static void bnx2x_fw_dump(struct bnx2x *bp)
711{
712 bnx2x_fw_dump_lvl(bp, KERN_ERR);
713}
714
715void bnx2x_panic_dump(struct bnx2x *bp)
716{
717 int i;
718 u16 j;
719 struct hc_sp_status_block_data sp_sb_data;
720 int func = BP_FUNC(bp);
721#ifdef BNX2X_STOP_ON_ERROR
722 u16 start = 0, end = 0;
723 u8 cos;
724#endif
725
726 bp->stats_state = STATS_STATE_DISABLED;
727 bp->eth_stats.unrecoverable_error++;
728 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
729
730 BNX2X_ERR("begin crash dump -----------------\n");
731
732
733
734 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
735 bp->def_idx, bp->def_att_idx, bp->attn_state,
736 bp->spq_prod_idx, bp->stats_counter);
737 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
738 bp->def_status_blk->atten_status_block.attn_bits,
739 bp->def_status_blk->atten_status_block.attn_bits_ack,
740 bp->def_status_blk->atten_status_block.status_block_id,
741 bp->def_status_blk->atten_status_block.attn_bits_index);
742 BNX2X_ERR(" def (");
743 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
744 pr_cont("0x%x%s",
745 bp->def_status_blk->sp_sb.index_values[i],
746 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
747
748 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
749 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
750 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
751 i*sizeof(u32));
752
753 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
754 sp_sb_data.igu_sb_id,
755 sp_sb_data.igu_seg_id,
756 sp_sb_data.p_func.pf_id,
757 sp_sb_data.p_func.vnic_id,
758 sp_sb_data.p_func.vf_id,
759 sp_sb_data.p_func.vf_valid,
760 sp_sb_data.state);
761
762
763 for_each_eth_queue(bp, i) {
764 struct bnx2x_fastpath *fp = &bp->fp[i];
765 int loop;
766 struct hc_status_block_data_e2 sb_data_e2;
767 struct hc_status_block_data_e1x sb_data_e1x;
768 struct hc_status_block_sm *hc_sm_p =
769 CHIP_IS_E1x(bp) ?
770 sb_data_e1x.common.state_machine :
771 sb_data_e2.common.state_machine;
772 struct hc_index_data *hc_index_p =
773 CHIP_IS_E1x(bp) ?
774 sb_data_e1x.index_data :
775 sb_data_e2.index_data;
776 u8 data_size, cos;
777 u32 *sb_data_p;
778 struct bnx2x_fp_txdata txdata;
779
780
781 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
782 i, fp->rx_bd_prod, fp->rx_bd_cons,
783 fp->rx_comp_prod,
784 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
785 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
786 fp->rx_sge_prod, fp->last_max_sge,
787 le16_to_cpu(fp->fp_hc_idx));
788
789
790 for_each_cos_in_tx_queue(fp, cos) {
792 txdata = *fp->txdata_ptr[cos];
793 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
794 i, txdata.tx_pkt_prod,
795 txdata.tx_pkt_cons, txdata.tx_bd_prod,
796 txdata.tx_bd_cons,
797 le16_to_cpu(*txdata.tx_cons_sb));
798 }
799
800 loop = CHIP_IS_E1x(bp) ?
801 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
802
803
804
805 if (IS_FCOE_FP(fp))
806 continue;
807
808 BNX2X_ERR(" run indexes (");
809 for (j = 0; j < HC_SB_MAX_SM; j++)
810 pr_cont("0x%x%s",
811 fp->sb_running_index[j],
812 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
813
814 BNX2X_ERR(" indexes (");
815 for (j = 0; j < loop; j++)
816 pr_cont("0x%x%s",
817 fp->sb_index_values[j],
818 (j == loop - 1) ? ")" : " ");
819
820 data_size = CHIP_IS_E1x(bp) ?
821 sizeof(struct hc_status_block_data_e1x) :
822 sizeof(struct hc_status_block_data_e2);
823 data_size /= sizeof(u32);
824 sb_data_p = CHIP_IS_E1x(bp) ?
825 (u32 *)&sb_data_e1x :
826 (u32 *)&sb_data_e2;
827
828 for (j = 0; j < data_size; j++)
829 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
830 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
831 j * sizeof(u32));
832
833 if (!CHIP_IS_E1x(bp)) {
834 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
835 sb_data_e2.common.p_func.pf_id,
836 sb_data_e2.common.p_func.vf_id,
837 sb_data_e2.common.p_func.vf_valid,
838 sb_data_e2.common.p_func.vnic_id,
839 sb_data_e2.common.same_igu_sb_1b,
840 sb_data_e2.common.state);
841 } else {
842 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
843 sb_data_e1x.common.p_func.pf_id,
844 sb_data_e1x.common.p_func.vf_id,
845 sb_data_e1x.common.p_func.vf_valid,
846 sb_data_e1x.common.p_func.vnic_id,
847 sb_data_e1x.common.same_igu_sb_1b,
848 sb_data_e1x.common.state);
849 }
850
851
852 for (j = 0; j < HC_SB_MAX_SM; j++) {
853 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
854 j, hc_sm_p[j].__flags,
855 hc_sm_p[j].igu_sb_id,
856 hc_sm_p[j].igu_seg_id,
857 hc_sm_p[j].time_to_expire,
858 hc_sm_p[j].timer_value);
859 }
860
861
862 for (j = 0; j < loop; j++) {
863 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
864 hc_index_p[j].flags,
865 hc_index_p[j].timeout);
866 }
867 }
868
869#ifdef BNX2X_STOP_ON_ERROR
870
871
872 for_each_valid_rx_queue(bp, i) {
873 struct bnx2x_fastpath *fp = &bp->fp[i];
874
875 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
876 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
877 for (j = start; j != end; j = RX_BD(j + 1)) {
878 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
879 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
880
881 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
882 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
883 }
884
885 start = RX_SGE(fp->rx_sge_prod);
886 end = RX_SGE(fp->last_max_sge);
887 for (j = start; j != end; j = RX_SGE(j + 1)) {
888 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
889 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
890
891 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
892 i, j, rx_sge[1], rx_sge[0], sw_page->page);
893 }
894
895 start = RCQ_BD(fp->rx_comp_cons - 10);
896 end = RCQ_BD(fp->rx_comp_cons + 503);
897 for (j = start; j != end; j = RCQ_BD(j + 1)) {
898 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
899
900 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
901 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
902 }
903 }
904
905
906 for_each_valid_tx_queue(bp, i) {
907 struct bnx2x_fastpath *fp = &bp->fp[i];
908 for_each_cos_in_tx_queue(fp, cos) {
909 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
910
911 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
912 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
913 for (j = start; j != end; j = TX_BD(j + 1)) {
914 struct sw_tx_bd *sw_bd =
915 &txdata->tx_buf_ring[j];
916
917 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
918 i, cos, j, sw_bd->skb,
919 sw_bd->first_bd);
920 }
921
922 start = TX_BD(txdata->tx_bd_cons - 10);
923 end = TX_BD(txdata->tx_bd_cons + 254);
924 for (j = start; j != end; j = TX_BD(j + 1)) {
925 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
926
927 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
928 i, cos, j, tx_bd[0], tx_bd[1],
929 tx_bd[2], tx_bd[3]);
930 }
931 }
932 }
933#endif
934 bnx2x_fw_dump(bp);
935 bnx2x_mc_assert(bp);
936 BNX2X_ERR("end crash dump -----------------\n");
937}
938
939
940
941
942
943
944
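/* FLR (Function Level Reset) cleanup support. The default poll budget is
 * 10000 usec in 50 usec steps (200 polls); bnx2x_flr_clnup_poll_count()
 * scales it by 120 on FPGA and by 2000 on emulation.
 */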
945#define FLR_WAIT_USEC 10000
946#define FLR_WAIT_INTERVAL 50
947#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL)
948
949struct pbf_pN_buf_regs {
950 int pN;
951 u32 init_crd;
952 u32 crd;
953 u32 crd_freed;
954};
955
956struct pbf_pN_cmd_regs {
957 int pN;
958 u32 lines_occup;
959 u32 lines_freed;
960};
961
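/* Wait (polling every FLR_WAIT_INTERVAL usec) until the PBF reports that
 * all transmit buffer credits, respectively all occupied command lines,
 * have been freed back, or until the poll budget runs out.
 */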
962static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
963 struct pbf_pN_buf_regs *regs,
964 u32 poll_count)
965{
966 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
967 u32 cur_cnt = poll_count;
968
969 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
970 crd = crd_start = REG_RD(bp, regs->crd);
971 init_crd = REG_RD(bp, regs->init_crd);
972
973 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
974 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
975 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
976
977 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
978 (init_crd - crd_start))) {
979 if (cur_cnt--) {
980 udelay(FLR_WAIT_INTERVAL);
981 crd = REG_RD(bp, regs->crd);
982 crd_freed = REG_RD(bp, regs->crd_freed);
983 } else {
984 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
985 regs->pN);
986 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
987 regs->pN, crd);
988 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
989 regs->pN, crd_freed);
990 break;
991 }
992 }
993 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
994 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
995}
996
997static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
998 struct pbf_pN_cmd_regs *regs,
999 u32 poll_count)
1000{
1001 u32 occup, to_free, freed, freed_start;
1002 u32 cur_cnt = poll_count;
1003
1004 occup = to_free = REG_RD(bp, regs->lines_occup);
1005 freed = freed_start = REG_RD(bp, regs->lines_freed);
1006
1007 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
1008 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1009
1010 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1011 if (cur_cnt--) {
1012 udelay(FLR_WAIT_INTERVAL);
1013 occup = REG_RD(bp, regs->lines_occup);
1014 freed = REG_RD(bp, regs->lines_freed);
1015 } else {
1016 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1017 regs->pN);
1018 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
1019 regs->pN, occup);
1020 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1021 regs->pN, freed);
1022 break;
1023 }
1024 }
1025 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1026 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1027}
1028
1029static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1030 u32 expected, u32 poll_count)
1031{
1032 u32 cur_cnt = poll_count;
1033 u32 val;
1034
1035 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1036 udelay(FLR_WAIT_INTERVAL);
1037
1038 return val;
1039}
1040
1041static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1042 char *msg, u32 poll_cnt)
1043{
1044 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1045 if (val != 0) {
1046 BNX2X_ERR("%s usage count=%d\n", msg, val);
1047 return 1;
1048 }
1049 return 0;
1050}
1051
1052static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1053{
1054
1055 if (CHIP_REV_IS_EMUL(bp))
1056 return FLR_POLL_CNT * 2000;
1057
1058 if (CHIP_REV_IS_FPGA(bp))
1059 return FLR_POLL_CNT * 120;
1060
1061 return FLR_POLL_CNT;
1062}
1063
1064static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1065{
1066 struct pbf_pN_cmd_regs cmd_regs[] = {
1067 {0, (CHIP_IS_E3B0(bp)) ?
1068 PBF_REG_TQ_OCCUPANCY_Q0 :
1069 PBF_REG_P0_TQ_OCCUPANCY,
1070 (CHIP_IS_E3B0(bp)) ?
1071 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1072 PBF_REG_P0_TQ_LINES_FREED_CNT},
1073 {1, (CHIP_IS_E3B0(bp)) ?
1074 PBF_REG_TQ_OCCUPANCY_Q1 :
1075 PBF_REG_P1_TQ_OCCUPANCY,
1076 (CHIP_IS_E3B0(bp)) ?
1077 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1078 PBF_REG_P1_TQ_LINES_FREED_CNT},
1079 {4, (CHIP_IS_E3B0(bp)) ?
1080 PBF_REG_TQ_OCCUPANCY_LB_Q :
1081 PBF_REG_P4_TQ_OCCUPANCY,
1082 (CHIP_IS_E3B0(bp)) ?
1083 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1084 PBF_REG_P4_TQ_LINES_FREED_CNT}
1085 };
1086
1087 struct pbf_pN_buf_regs buf_regs[] = {
1088 {0, (CHIP_IS_E3B0(bp)) ?
1089 PBF_REG_INIT_CRD_Q0 :
1090 PBF_REG_P0_INIT_CRD,
1091 (CHIP_IS_E3B0(bp)) ?
1092 PBF_REG_CREDIT_Q0 :
1093 PBF_REG_P0_CREDIT,
1094 (CHIP_IS_E3B0(bp)) ?
1095 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1096 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1097 {1, (CHIP_IS_E3B0(bp)) ?
1098 PBF_REG_INIT_CRD_Q1 :
1099 PBF_REG_P1_INIT_CRD,
1100 (CHIP_IS_E3B0(bp)) ?
1101 PBF_REG_CREDIT_Q1 :
1102 PBF_REG_P1_CREDIT,
1103 (CHIP_IS_E3B0(bp)) ?
1104 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1105 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1106 {4, (CHIP_IS_E3B0(bp)) ?
1107 PBF_REG_INIT_CRD_LB_Q :
1108 PBF_REG_P4_INIT_CRD,
1109 (CHIP_IS_E3B0(bp)) ?
1110 PBF_REG_CREDIT_LB_Q :
1111 PBF_REG_P4_CREDIT,
1112 (CHIP_IS_E3B0(bp)) ?
1113 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1114 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1115 };
1116
1117 int i;
1118
1119
1120 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1121 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1122
1123
1124
1125 for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1126 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1127}
1128
1129#define OP_GEN_PARAM(param) \
1130 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1131
1132#define OP_GEN_TYPE(type) \
1133 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1134
1135#define OP_GEN_AGG_VECT(index) \
1136 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1137
1138
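/* Ask the firmware for a "final cleanup" of this function: build an SDM
 * operation-generator command carrying the FINAL_CLEANUP aggregated
 * interrupt index, fire it through XSDM_REG_OPERATION_GEN and poll the
 * CSTORM completion byte until it reads 1.
 */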
1139static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
1140 u32 poll_cnt)
1141{
1142 struct sdm_op_gen op_gen = {0};
1143
1144 u32 comp_addr = BAR_CSTRORM_INTMEM +
1145 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1146 int ret = 0;
1147
1148 if (REG_RD(bp, comp_addr)) {
1149 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1150 return 1;
1151 }
1152
1153 op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1154 op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1155 op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
1156 op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1157
1158 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1159 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
1160
1161 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1162 BNX2X_ERR("FW final cleanup did not succeed\n");
1163 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1164 (REG_RD(bp, comp_addr)));
1165 ret = 1;
1166 }
1167
1168 REG_WR(bp, comp_addr, 0);
1169
1170 return ret;
1171}
1172
1173static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1174{
1175 u16 status;
1176
1177 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1178 return status & PCI_EXP_DEVSTA_TRPND;
1179}
1180
1181
1182
1183static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1184{
1185
1186
1187 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1188 CFC_REG_NUM_LCIDS_INSIDE_PF,
1189 "CFC PF usage counter timed out",
1190 poll_cnt))
1191 return 1;
1192
1193
1194
1195 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1196 DORQ_REG_PF_USAGE_CNT,
1197 "DQ PF usage counter timed out",
1198 poll_cnt))
1199 return 1;
1200
1201
1202 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1203 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1204 "QM PF usage counter timed out",
1205 poll_cnt))
1206 return 1;
1207
1208
1209 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1210 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1211 "Timers VNIC usage counter timed out",
1212 poll_cnt))
1213 return 1;
1214 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1215 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1216 "Timers NUM_SCANS usage counter timed out",
1217 poll_cnt))
1218 return 1;
1219
1220
1221 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1222 dmae_reg_go_c[INIT_DMAE_C(bp)],
1223 "DMAE dommand register timed out",
1224 poll_cnt))
1225 return 1;
1226
1227 return 0;
1228}
1229
1230static void bnx2x_hw_enable_status(struct bnx2x *bp)
1231{
1232 u32 val;
1233
1234 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1235 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1236
1237 val = REG_RD(bp, PBF_REG_DISABLE_PF);
1238 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1239
1240 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1241 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1242
1243 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1244 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1245
1246 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1247 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1248
1249 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1250 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1251
1252 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1253 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1254
1255 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1256 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1257 val);
1258}
1259
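/* PF FLR cleanup flow: re-enable target reads, wait for the CFC/DQ/QM/
 * Timers/DMAE usage counters to drain, request the FW final cleanup,
 * verify the PBF transmit path is flushed, give outstanding PCIe
 * transactions 100 ms to complete (warning if any are still pending),
 * then report HW enable status and re-open the PF as PCIe master.
 */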
1260static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1261{
1262 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1263
1264 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1265
1266
1267 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1268
1269
1270 DP(BNX2X_MSG_SP, "Polling usage counters\n");
1271 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1272 return -EBUSY;
1273
1274
1275
1276
1277 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1278 return -EBUSY;
1279
1280
1281
1282
1283 bnx2x_tx_hw_flushed(bp, poll_cnt);
1284
1285
1286 msleep(100);
1287
1288
1289 if (bnx2x_is_pcie_pending(bp->pdev))
1290 BNX2X_ERR("PCIE Transactions still pending\n");
1291
1292
1293 bnx2x_hw_enable_status(bp);
1294
1295
1296
1297
1298
1299 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1300
1301 return 0;
1302}
1303
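/* Program the HC (host coalescing) block for the active interrupt mode
 * (MSI-X, single MSI-X, MSI or INTx) and, on non-E1 chips, set up the
 * attention leading/trailing edge masks: per-VN bits in MF mode, with
 * the extra 0x1100 bits for the PMF.
 */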
1304static void bnx2x_hc_int_enable(struct bnx2x *bp)
1305{
1306 int port = BP_PORT(bp);
1307 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1308 u32 val = REG_RD(bp, addr);
1309 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1310 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1311 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1312
1313 if (msix) {
1314 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1315 HC_CONFIG_0_REG_INT_LINE_EN_0);
1316 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1317 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1318 if (single_msix)
1319 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1320 } else if (msi) {
1321 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1322 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1323 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1324 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1325 } else {
1326 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1327 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1328 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1329 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1330
1331 if (!CHIP_IS_E1(bp)) {
1332 DP(NETIF_MSG_IFUP,
1333 "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1334
1335 REG_WR(bp, addr, val);
1336
1337 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1338 }
1339 }
1340
1341 if (CHIP_IS_E1(bp))
1342 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1343
1344 DP(NETIF_MSG_IFUP,
1345 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1346 (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1347
1348 REG_WR(bp, addr, val);
1349
1350
1351
1352 mmiowb();
1353 barrier();
1354
1355 if (!CHIP_IS_E1(bp)) {
1356
1357 if (IS_MF(bp)) {
1358 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1359 if (bp->port.pmf)
1360
1361 val |= 0x1100;
1362 } else
1363 val = 0xffff;
1364
1365 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1366 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1367 }
1368
1369
1370 mmiowb();
1371}
1372
1373static void bnx2x_igu_int_enable(struct bnx2x *bp)
1374{
1375 u32 val;
1376 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1377 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1378 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1379
1380 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1381
1382 if (msix) {
1383 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1384 IGU_PF_CONF_SINGLE_ISR_EN);
1385 val |= (IGU_PF_CONF_FUNC_EN |
1386 IGU_PF_CONF_MSI_MSIX_EN |
1387 IGU_PF_CONF_ATTN_BIT_EN);
1388
1389 if (single_msix)
1390 val |= IGU_PF_CONF_SINGLE_ISR_EN;
1391 } else if (msi) {
1392 val &= ~IGU_PF_CONF_INT_LINE_EN;
1393 val |= (IGU_PF_CONF_FUNC_EN |
1394 IGU_PF_CONF_MSI_MSIX_EN |
1395 IGU_PF_CONF_ATTN_BIT_EN |
1396 IGU_PF_CONF_SINGLE_ISR_EN);
1397 } else {
1398 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1399 val |= (IGU_PF_CONF_FUNC_EN |
1400 IGU_PF_CONF_INT_LINE_EN |
1401 IGU_PF_CONF_ATTN_BIT_EN |
1402 IGU_PF_CONF_SINGLE_ISR_EN);
1403 }
1404
1405 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
1406 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1407
1408 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1409
1410 if (val & IGU_PF_CONF_INT_LINE_EN)
1411 pci_intx(bp->pdev, true);
1412
1413 barrier();
1414
1415
1416 if (IS_MF(bp)) {
1417 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1418 if (bp->port.pmf)
1419
1420 val |= 0x1100;
1421 } else
1422 val = 0xffff;
1423
1424 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1425 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1426
1427
1428 mmiowb();
1429}
1430
1431void bnx2x_int_enable(struct bnx2x *bp)
1432{
1433 if (bp->common.int_block == INT_BLOCK_HC)
1434 bnx2x_hc_int_enable(bp);
1435 else
1436 bnx2x_igu_int_enable(bp);
1437}
1438
1439static void bnx2x_hc_int_disable(struct bnx2x *bp)
1440{
1441 int port = BP_PORT(bp);
1442 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1443 u32 val = REG_RD(bp, addr);
1444
1445
1446
1447
1448
1449
1450 if (CHIP_IS_E1(bp)) {
1451
1452
1453
1454
1455 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1456
1457 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1458 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1459 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1460 } else
1461 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1462 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1463 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1464 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1465
1466 DP(NETIF_MSG_IFDOWN,
1467 "write %x to HC %d (addr 0x%x)\n",
1468 val, port, addr);
1469
1470
1471 mmiowb();
1472
1473 REG_WR(bp, addr, val);
1474 if (REG_RD(bp, addr) != val)
1475 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1476}
1477
1478static void bnx2x_igu_int_disable(struct bnx2x *bp)
1479{
1480 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1481
1482 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1483 IGU_PF_CONF_INT_LINE_EN |
1484 IGU_PF_CONF_ATTN_BIT_EN);
1485
1486 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
1487
1488
1489 mmiowb();
1490
1491 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1492 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1493 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1494}
1495
1496static void bnx2x_int_disable(struct bnx2x *bp)
1497{
1498 if (bp->common.int_block == INT_BLOCK_HC)
1499 bnx2x_hc_int_disable(bp);
1500 else
1501 bnx2x_igu_int_disable(bp);
1502}
1503
1504void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1505{
1506 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1507 int i, offset;
1508
1509 if (disable_hw)
1510
1511 bnx2x_int_disable(bp);
1512
1513
1514 if (msix) {
1515 synchronize_irq(bp->msix_table[0].vector);
1516 offset = 1;
1517 if (CNIC_SUPPORT(bp))
1518 offset++;
1519 for_each_eth_queue(bp, i)
1520 synchronize_irq(bp->msix_table[offset++].vector);
1521 } else
1522 synchronize_irq(bp->pdev->irq);
1523
1524
1525 cancel_delayed_work(&bp->sp_task);
1526 cancel_delayed_work(&bp->period_task);
1527 flush_workqueue(bnx2x_wq);
1528}
1529
1530
1531
1532
1533
1534
1535
1536
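/* Try once to take a HW resource lock: request it by writing the
 * resource bit to 'hw_lock_control_reg + 4' and confirm by reading the
 * lock status back. Returns true if the lock was granted.
 */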
1537static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1538{
1539 u32 lock_status;
1540 u32 resource_bit = (1 << resource);
1541 int func = BP_FUNC(bp);
1542 u32 hw_lock_control_reg;
1543
1544 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1545 "Trying to take a lock on resource %d\n", resource);
1546
1547
1548 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1549 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1550 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1551 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1552 return false;
1553 }
1554
1555 if (func <= 5)
1556 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1557 else
1558 hw_lock_control_reg =
1559 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1560
1561
1562 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1563 lock_status = REG_RD(bp, hw_lock_control_reg);
1564 if (lock_status & resource_bit)
1565 return true;
1566
1567 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1568 "Failed to get a lock on resource %d\n", resource);
1569 return false;
1570}
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1581{
1582 if (BP_PATH(bp))
1583 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1584 else
1585 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1586}
1587
1588
1589
1590
1591
1592
1593
1594
1595static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1596{
1597 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1598}
1599
1600static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1601
1602
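/* Handle a slow-path (ramrod) completion CQE seen on a fastpath ring:
 * translate the RAMROD_CMD_ID_* into the matching BNX2X_Q_CMD_* and
 * complete it on the queue state object, then return a slot to the
 * slow-path completion queue (cq_spq_left). Pending AFEX FCoE queue
 * updates additionally kick the sp_task to ack the MCP.
 */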
1603void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1604{
1605 struct bnx2x *bp = fp->bp;
1606 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1607 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1608 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1609 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1610
1611 DP(BNX2X_MSG_SP,
1612 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1613 fp->index, cid, command, bp->state,
1614 rr_cqe->ramrod_cqe.ramrod_type);
1615
1616 switch (command) {
1617 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1618 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1619 drv_cmd = BNX2X_Q_CMD_UPDATE;
1620 break;
1621
1622 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1623 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1624 drv_cmd = BNX2X_Q_CMD_SETUP;
1625 break;
1626
1627 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1628 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1629 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1630 break;
1631
1632 case (RAMROD_CMD_ID_ETH_HALT):
1633 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1634 drv_cmd = BNX2X_Q_CMD_HALT;
1635 break;
1636
1637 case (RAMROD_CMD_ID_ETH_TERMINATE):
1638 DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid);
1639 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1640 break;
1641
1642 case (RAMROD_CMD_ID_ETH_EMPTY):
1643 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1644 drv_cmd = BNX2X_Q_CMD_EMPTY;
1645 break;
1646
1647 default:
1648 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1649 command, fp->index);
1650 return;
1651 }
1652
1653 if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1654 q_obj->complete_cmd(bp, q_obj, drv_cmd))
1655
1656
1657
1658
1659
1660
1661
1662#ifdef BNX2X_STOP_ON_ERROR
1663 bnx2x_panic();
1664#else
1665 return;
1666#endif
1667
1668 smp_mb__before_atomic_inc();
1669 atomic_inc(&bp->cq_spq_left);
1670
1671 smp_mb__after_atomic_inc();
1672
1673 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1674
1675 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1676 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686 smp_mb__before_clear_bit();
1687 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1688 wmb();
1689 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1690 smp_mb__after_clear_bit();
1691
1692
1693 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1694 }
1695
1696 return;
1697}
1698
1699void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1700 u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
1701{
1702 u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
1703
1704 bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
1705 start);
1706}
1707
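/* INTx/MSI interrupt handler. The acked status word is a bitmask:
 * bit 0 is the slow path, bit 1 the CNIC status block (when supported),
 * and ethernet queue 'i' owns bit (i + 1 + CNIC_SUPPORT(bp)). Each set
 * bit schedules the matching NAPI context or the slow-path work.
 */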
1708irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1709{
1710 struct bnx2x *bp = netdev_priv(dev_instance);
1711 u16 status = bnx2x_ack_int(bp);
1712 u16 mask;
1713 int i;
1714 u8 cos;
1715
1716
1717 if (unlikely(status == 0)) {
1718 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1719 return IRQ_NONE;
1720 }
1721 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1722
1723#ifdef BNX2X_STOP_ON_ERROR
1724 if (unlikely(bp->panic))
1725 return IRQ_HANDLED;
1726#endif
1727
1728 for_each_eth_queue(bp, i) {
1729 struct bnx2x_fastpath *fp = &bp->fp[i];
1730
1731 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1732 if (status & mask) {
1733
1734 prefetch(fp->rx_cons_sb);
1735 for_each_cos_in_tx_queue(fp, cos)
1736 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1737 prefetch(&fp->sb_running_index[SM_RX_ID]);
1738 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1739 status &= ~mask;
1740 }
1741 }
1742
1743 if (CNIC_SUPPORT(bp)) {
1744 mask = 0x2;
1745 if (status & (mask | 0x1)) {
1746 struct cnic_ops *c_ops = NULL;
1747
1748 if (likely(bp->state == BNX2X_STATE_OPEN)) {
1749 rcu_read_lock();
1750 c_ops = rcu_dereference(bp->cnic_ops);
1751 if (c_ops)
1752 c_ops->cnic_handler(bp->cnic_data,
1753 NULL);
1754 rcu_read_unlock();
1755 }
1756
1757 status &= ~mask;
1758 }
1759 }
1760
1761 if (unlikely(status & 0x1)) {
1762 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1763
1764 status &= ~0x1;
1765 if (!status)
1766 return IRQ_HANDLED;
1767 }
1768
1769 if (unlikely(status))
1770 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1771 status);
1772
1773 return IRQ_HANDLED;
1774}
1775
1776
1777
1778
1779
1780
1781
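/* Blocking counterpart of bnx2x_trylock_hw_lock(): retries the lock
 * request up to 1000 times with 5 ms sleeps (about five seconds) before
 * failing with -EAGAIN.
 *
 * The usual pattern, as used by the GPIO helpers below:
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... access MISC_REG_GPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */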
1782int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1783{
1784 u32 lock_status;
1785 u32 resource_bit = (1 << resource);
1786 int func = BP_FUNC(bp);
1787 u32 hw_lock_control_reg;
1788 int cnt;
1789
1790
1791 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794 return -EINVAL;
1795 }
1796
1797 if (func <= 5) {
1798 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799 } else {
1800 hw_lock_control_reg =
1801 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802 }
1803
1804
1805 lock_status = REG_RD(bp, hw_lock_control_reg);
1806 if (lock_status & resource_bit) {
1807 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
1808 lock_status, resource_bit);
1809 return -EEXIST;
1810 }
1811
1812
1813 for (cnt = 0; cnt < 1000; cnt++) {
1814
1815 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1816 lock_status = REG_RD(bp, hw_lock_control_reg);
1817 if (lock_status & resource_bit)
1818 return 0;
1819
1820 msleep(5);
1821 }
1822 BNX2X_ERR("Timeout\n");
1823 return -EAGAIN;
1824}
1825
1826int bnx2x_release_leader_lock(struct bnx2x *bp)
1827{
1828 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1829}
1830
1831int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1832{
1833 u32 lock_status;
1834 u32 resource_bit = (1 << resource);
1835 int func = BP_FUNC(bp);
1836 u32 hw_lock_control_reg;
1837
1838
1839 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1840 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1841 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1842 return -EINVAL;
1843 }
1844
1845 if (func <= 5) {
1846 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1847 } else {
1848 hw_lock_control_reg =
1849 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1850 }
1851
1852
1853 lock_status = REG_RD(bp, hw_lock_control_reg);
1854 if (!(lock_status & resource_bit)) {
1855 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
1856 lock_status, resource_bit);
1857 return -EFAULT;
1858 }
1859
1860 REG_WR(bp, hw_lock_control_reg, resource_bit);
1861 return 0;
1862}
1863
1864
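/* GPIO helpers. The pin's owning port is the XOR of the requested port
 * with the PORT_SWAP/STRAP_OVERRIDE straps, and pins that belong to
 * port 1 live MISC_REGISTERS_GPIO_PORT_SHIFT positions higher in the
 * register.
 */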
1865int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1866{
1867
1868 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1869 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1870 int gpio_shift = gpio_num +
1871 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1872 u32 gpio_mask = (1 << gpio_shift);
1873 u32 gpio_reg;
1874 int value;
1875
1876 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1877 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1878 return -EINVAL;
1879 }
1880
1881
1882 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1883
1884
1885 if ((gpio_reg & gpio_mask) == gpio_mask)
1886 value = 1;
1887 else
1888 value = 0;
1889
1890 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1891
1892 return value;
1893}
1894
1895int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1896{
1897
1898 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1899 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1900 int gpio_shift = gpio_num +
1901 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1902 u32 gpio_mask = (1 << gpio_shift);
1903 u32 gpio_reg;
1904
1905 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1906 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1907 return -EINVAL;
1908 }
1909
1910 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1911
1912 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1913
1914 switch (mode) {
1915 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1916 DP(NETIF_MSG_LINK,
1917 "Set GPIO %d (shift %d) -> output low\n",
1918 gpio_num, gpio_shift);
1919
1920 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1921 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1922 break;
1923
1924 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1925 DP(NETIF_MSG_LINK,
1926 "Set GPIO %d (shift %d) -> output high\n",
1927 gpio_num, gpio_shift);
1928
1929 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1930 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1931 break;
1932
1933 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1934 DP(NETIF_MSG_LINK,
1935 "Set GPIO %d (shift %d) -> input\n",
1936 gpio_num, gpio_shift);
1937
1938 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1939 break;
1940
1941 default:
1942 break;
1943 }
1944
1945 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1946 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1947
1948 return 0;
1949}
1950
1951int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
1952{
1953 u32 gpio_reg = 0;
1954 int rc = 0;
1955
1956
1957
1958 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1959
1960 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1961 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1962 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
1963 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
1964
1965 switch (mode) {
1966 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1967 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
1968
1969 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
1970 break;
1971
1972 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1973 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
1974
1975 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
1976 break;
1977
1978 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1979 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
1980
1981 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1982 break;
1983
1984 default:
1985 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
1986 rc = -EINVAL;
1987 break;
1988 }
1989
1990 if (rc == 0)
1991 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1992
1993 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1994
1995 return rc;
1996}
1997
1998int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1999{
2000
2001 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2002 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2003 int gpio_shift = gpio_num +
2004 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2005 u32 gpio_mask = (1 << gpio_shift);
2006 u32 gpio_reg;
2007
2008 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2009 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2010 return -EINVAL;
2011 }
2012
2013 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2014
2015 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2016
2017 switch (mode) {
2018 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2019 DP(NETIF_MSG_LINK,
2020 "Clear GPIO INT %d (shift %d) -> output low\n",
2021 gpio_num, gpio_shift);
2022
2023 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2024 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2025 break;
2026
2027 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2028 DP(NETIF_MSG_LINK,
2029 "Set GPIO INT %d (shift %d) -> output high\n",
2030 gpio_num, gpio_shift);
2031
2032 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2033 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2034 break;
2035
2036 default:
2037 break;
2038 }
2039
2040 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2041 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2042
2043 return 0;
2044}
2045
2046static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2047{
2048 u32 spio_reg;
2049
2050
2051 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2052 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2053 return -EINVAL;
2054 }
2055
2056 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2057
2058 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2059
2060 switch (mode) {
2061 case MISC_SPIO_OUTPUT_LOW:
2062 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2063
2064 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2065 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2066 break;
2067
2068 case MISC_SPIO_OUTPUT_HIGH:
2069 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2070
2071 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2072 spio_reg |= (spio << MISC_SPIO_SET_POS);
2073 break;
2074
2075 case MISC_SPIO_INPUT_HI_Z:
2076 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2077
2078 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2079 break;
2080
2081 default:
2082 break;
2083 }
2084
2085 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2086 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2087
2088 return 0;
2089}
2090
2091void bnx2x_calc_fc_adv(struct bnx2x *bp)
2092{
2093 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2094 switch (bp->link_vars.ieee_fc &
2095 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2096 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2097 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2098 ADVERTISED_Pause);
2099 break;
2100
2101 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2102 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2103 ADVERTISED_Pause);
2104 break;
2105
2106 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2107 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2108 break;
2109
2110 default:
2111 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2112 ADVERTISED_Pause);
2113 break;
2114 }
2115}
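
/* Summary of the advertised-pause mapping implemented above:
 *
 *	negotiated ieee_fc		ethtool advertising bits
 *	------------------		------------------------
 *	PAUSE_NONE			clear Pause and Asym_Pause
 *	PAUSE_BOTH			set Pause and Asym_Pause
 *	PAUSE_ASYMMETRIC		set Asym_Pause only
 */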
2116
2117static void bnx2x_set_requested_fc(struct bnx2x *bp)
2118{
2119 /* Initialize link parameters structure variables
2120  * It is recommended to turn off RX FC for jumbo frames
2121  * for better performance
2122  */
2123 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2124 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2125 else
2126 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2127}
2128
2129int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2130{
2131 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2132 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2133
2134 if (!BP_NOMCP(bp)) {
2135 bnx2x_set_requested_fc(bp);
2136 bnx2x_acquire_phy_lock(bp);
2137
2138 if (load_mode == LOAD_DIAG) {
2139 struct link_params *lp = &bp->link_params;
2140 lp->loopback_mode = LOOPBACK_XGXS;
2141
2142 if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
2143 if (lp->speed_cap_mask[cfx_idx] &
2144 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2145 lp->req_line_speed[cfx_idx] =
2146 SPEED_10000;
2147 else
2148 lp->req_line_speed[cfx_idx] =
2149 SPEED_1000;
2150 }
2151 }
2152
2153 if (load_mode == LOAD_LOOPBACK_EXT) {
2154 struct link_params *lp = &bp->link_params;
2155 lp->loopback_mode = LOOPBACK_EXT;
2156 }
2157
2158 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2159
2160 bnx2x_release_phy_lock(bp);
2161
2162 bnx2x_calc_fc_adv(bp);
2163
2164 if (bp->link_vars.link_up) {
2165 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2166 bnx2x_link_report(bp);
2167 }
2168 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2169 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2170 return rc;
2171 }
2172 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2173 return -EINVAL;
2174}
2175
2176void bnx2x_link_set(struct bnx2x *bp)
2177{
2178 if (!BP_NOMCP(bp)) {
2179 bnx2x_acquire_phy_lock(bp);
2180 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2181 bnx2x_release_phy_lock(bp);
2182
2183 bnx2x_calc_fc_adv(bp);
2184 } else
2185 BNX2X_ERR("Bootcode is missing - can not set link\n");
2186}
2187
2188static void bnx2x__link_reset(struct bnx2x *bp)
2189{
2190 if (!BP_NOMCP(bp)) {
2191 bnx2x_acquire_phy_lock(bp);
2192 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2193 bnx2x_release_phy_lock(bp);
2194 } else
2195 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2196}
2197
2198void bnx2x_force_link_reset(struct bnx2x *bp)
2199{
2200 bnx2x_acquire_phy_lock(bp);
2201 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2202 bnx2x_release_phy_lock(bp);
2203}
2204
2205u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2206{
2207 u8 rc = 0;
2208
2209 if (!BP_NOMCP(bp)) {
2210 bnx2x_acquire_phy_lock(bp);
2211 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2212 is_serdes);
2213 bnx2x_release_phy_lock(bp);
2214 } else
2215 BNX2X_ERR("Bootcode is missing - can not test link\n");
2216
2217 return rc;
2218}
2219
2220
2221/* Calculates the per-vnic guaranteed (MIN) bandwidth.
2222 *
2223 * Hidden vnics get a MIN rate of 0; a configured value of 0 is bumped
2224 * to DEF_MIN_RATE so the fairness algorithm still has a non-zero
2225 * weight to work with.
2226 *
2227 * If all MIN values are zeroes, or if ETS is enabled, per-port
2228 * fairness is disabled via flags.cmng_enables.
2229 */
2230static void bnx2x_calc_vn_min(struct bnx2x *bp,
2231 struct cmng_init_input *input)
2232{
2233 int all_zero = 1;
2234 int vn;
2235
2236 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2237 u32 vn_cfg = bp->mf_config[vn];
2238 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2239 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2240
2241 /* Skip hidden vns */
2242 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2243 vn_min_rate = 0;
2244
2245 else if (!vn_min_rate)
2246 vn_min_rate = DEF_MIN_RATE;
2247 else
2248 all_zero = 0;
2249
2250 input->vnic_min_rate[vn] = vn_min_rate;
2251 }
2252
2253 /* if ETS is enabled, or all MIN values are zeroes - disable fairness */
2254 if (BNX2X_IS_ETS_ENABLED(bp)) {
2255 input->flags.cmng_enables &=
2256 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2257 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2258 } else if (all_zero) {
2259 input->flags.cmng_enables &=
2260 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2261 DP(NETIF_MSG_IFUP,
2262 "All MIN values are zeroes, fairness will be disabled\n");
2263 } else
2264 input->flags.cmng_enables |=
2265 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2266}
2267
2268static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2269 struct cmng_init_input *input)
2270{
2271 u16 vn_max_rate;
2272 u32 vn_cfg = bp->mf_config[vn];
2273
2274 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2275 vn_max_rate = 0;
2276 else {
2277 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2278
2279 if (IS_MF_SI(bp)) {
2280 /* maxCfg is a percentage of the link speed */
2281 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2282 } else
2283 /* SD modes: maxCfg is in units of 100 Mbps */
2284 vn_max_rate = maxCfg * 100;
2285 }
2286
2287 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2288
2289 input->vnic_max_rate[vn] = vn_max_rate;
2290}
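
/* Worked example for the two branches above with maxCfg == 25:
 * in MF_SI mode maxCfg is a percentage, so on a 10000 Mbps link
 * vn_max_rate = 10000 * 25 / 100 = 2500 Mbps; in the SD modes maxCfg
 * is in 100 Mbps units, so vn_max_rate = 25 * 100 = 2500 Mbps
 * regardless of the link speed.
 */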
2291
2292
2293static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2294{
2295 if (CHIP_REV_IS_SLOW(bp))
2296 return CMNG_FNS_NONE;
2297 if (IS_MF(bp))
2298 return CMNG_FNS_MINMAX;
2299
2300 return CMNG_FNS_NONE;
2301}
2302
2303void bnx2x_read_mf_cfg(struct bnx2x *bp)
2304{
2305 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2306
2307 if (BP_NOMCP(bp))
2308 return;
2309
2310 /* The absolute function number is derived as follows:
2311  *
2312  * 2-port configuration (n == 1, four functions per port):
2313  *	abs_func = 2 * vn + BP_PORT + BP_PATH
2314  *
2315  * 4-port configuration (n == 2, two functions per port):
2316  *	abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2317  */
2320
2321 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2322 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2323
2324 if (func >= E1H_FUNC_MAX)
2325 break;
2326
2327 bp->mf_config[vn] =
2328 MF_CFG_RD(bp, func_mf_config[func].config);
2329 }
2330 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2331 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2332 bp->flags |= MF_FUNC_DIS;
2333 } else {
2334 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2335 bp->flags &= ~MF_FUNC_DIS;
2336 }
2337}
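
/* Worked example of the absolute function number computed above:
 * in 2-port mode (n == 1) with vn == 2, port == 1, path == 0 the
 * result is 1 * (2 * 2 + 1) + 0 = 5; in 4-port mode (n == 2) with
 * vn == 1, port == 1, path == 1 it is 2 * (2 * 1 + 1) + 1 = 7.
 */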
2338
2339static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2340{
2341 struct cmng_init_input input;
2342 memset(&input, 0, sizeof(struct cmng_init_input));
2343
2344 input.port_rate = bp->link_vars.line_speed;
2345
2346 if (cmng_type == CMNG_FNS_MINMAX) {
2347 int vn;
2348
2349 /* read mf conf from shmem */
2350 if (read_cfg)
2351 bnx2x_read_mf_cfg(bp);
2352
2353
2354 bnx2x_calc_vn_min(bp, &input);
2355
2356 /* calculate and set min-max rate for each vn */
2357 if (bp->port.pmf)
2358 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2359 bnx2x_calc_vn_max(bp, vn, &input);
2360
2361 /* always enable rate shaping and fairness */
2362 input.flags.cmng_enables |=
2363 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2364
2365 bnx2x_init_cmng(&input, &bp->cmng);
2366 return;
2367 }
2368
2369
2370 DP(NETIF_MSG_IFUP,
2371 "rate shaping and fairness are disabled\n");
2372}
2373
2374static void storm_memset_cmng(struct bnx2x *bp,
2375 struct cmng_init *cmng,
2376 u8 port)
2377{
2378 int vn;
2379 size_t size = sizeof(struct cmng_struct_per_port);
2380
2381 u32 addr = BAR_XSTRORM_INTMEM +
2382 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2383
2384 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2385
2386 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2387 int func = func_by_vn(bp, vn);
2388
2389 addr = BAR_XSTRORM_INTMEM +
2390 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2391 size = sizeof(struct rate_shaping_vars_per_vn);
2392 __storm_memset_struct(bp, addr, size,
2393 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2394
2395 addr = BAR_XSTRORM_INTMEM +
2396 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2397 size = sizeof(struct fairness_vars_per_vn);
2398 __storm_memset_struct(bp, addr, size,
2399 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2400 }
2401}
2402
2403/* This function is called upon link interrupt */
2404static void bnx2x_link_attn(struct bnx2x *bp)
2405{
2406 /* Make sure that we are synced with the current statistics */
2407 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2408
2409 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2410
2411 if (bp->link_vars.link_up) {
2412
2413 /* dropless flow control */
2414 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2415 int port = BP_PORT(bp);
2416 u32 pause_enabled = 0;
2417
2418 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2419 pause_enabled = 1;
2420
2421 REG_WR(bp, BAR_USTRORM_INTMEM +
2422 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2423 pause_enabled);
2424 }
2425
2426 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2427 struct host_port_stats *pstats;
2428
2429 pstats = bnx2x_sp(bp, port_stats);
2430
2431 memset(&(pstats->mac_stx[0]), 0,
2432 sizeof(struct mac_stx));
2433 }
2434 if (bp->state == BNX2X_STATE_OPEN)
2435 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2436 }
2437
2438 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2439 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2440
2441 if (cmng_fns != CMNG_FNS_NONE) {
2442 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2443 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2444 } else
2445
2446 DP(NETIF_MSG_IFUP,
2447 "single function mode without fairness\n");
2448 }
2449
2450 __bnx2x_link_report(bp);
2451
2452 if (IS_MF(bp))
2453 bnx2x_link_sync_notify(bp);
2454}
2455
2456void bnx2x__link_status_update(struct bnx2x *bp)
2457{
2458 if (bp->state != BNX2X_STATE_OPEN)
2459 return;
2460
2461 /* read updated dcb configuration */
2462 bnx2x_dcbx_pmf_update(bp);
2463
2464 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2465
2466 if (bp->link_vars.link_up)
2467 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2468 else
2469 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2470
2471 /* indicate link status */
2472 bnx2x_link_report(bp);
2473}
2474
2475static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2476 u16 vlan_val, u8 allowed_prio)
2477{
2478 struct bnx2x_func_state_params func_params = {0};
2479 struct bnx2x_func_afex_update_params *f_update_params =
2480 &func_params.params.afex_update;
2481
2482 func_params.f_obj = &bp->func_obj;
2483 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2484
2485 /* no need to wait for RAMROD completion, so don't set the
2486  * RAMROD_COMP_WAIT flag
2487  */
2488 /* Fill the ramrod data with provided parameters */
2489 f_update_params->vif_id = vifid;
2490 f_update_params->afex_default_vlan = vlan_val;
2491 f_update_params->allowed_priorities = allowed_prio;
2492
2493 /* if ramrod can not be sent, respond to MCP immediately */
2494 if (bnx2x_func_state_change(bp, &func_params) < 0)
2495 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2496
2497 return 0;
2498}
2499
2500static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2501 u16 vif_index, u8 func_bit_map)
2502{
2503 struct bnx2x_func_state_params func_params = {0};
2504 struct bnx2x_func_afex_viflists_params *update_params =
2505 &func_params.params.afex_viflists;
2506 int rc;
2507 u32 drv_msg_code;
2508
2509 /* sanity check: only two vif-list commands are supported */
2510 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2511 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2512 cmd_type);
2513
2514 func_params.f_obj = &bp->func_obj;
2515 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2516
2517 /* set parameters according to cmd_type */
2518 update_params->afex_vif_list_command = cmd_type;
2519 update_params->vif_list_index = cpu_to_le16(vif_index);
2520 update_params->func_bit_map =
2521 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2522 update_params->func_to_clear = 0;
2523 drv_msg_code =
2524 (cmd_type == VIF_LIST_RULE_GET) ?
2525 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2526 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2527
2528
2529 /* if ramrod can not be sent, respond to MCP immediately for
2530  * the FW request */
2531 rc = bnx2x_func_state_change(bp, &func_params);
2532 if (rc < 0)
2533 bnx2x_fw_command(bp, drv_msg_code, 0);
2534
2535 return 0;
2536}
2537
2538static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2539{
2540 struct afex_stats afex_stats;
2541 u32 func = BP_ABS_FUNC(bp);
2542 u32 mf_config;
2543 u16 vlan_val;
2544 u32 vlan_prio;
2545 u16 vif_id;
2546 u8 allowed_prio;
2547 u8 vlan_mode;
2548 u32 addr_to_write, vifid, addrs, stats_type, i;
2549
2550 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2551 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2552 DP(BNX2X_MSG_MCP,
2553 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2554 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2555 }
2556
2557 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2558 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2559 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2560 DP(BNX2X_MSG_MCP,
2561 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2562 vifid, addrs);
2563 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2564 addrs);
2565 }
2566
2567 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2568 addr_to_write = SHMEM2_RD(bp,
2569 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2570 stats_type = SHMEM2_RD(bp,
2571 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2572
2573 DP(BNX2X_MSG_MCP,
2574 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2575 addr_to_write);
2576
2577 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2578
2579 /* write response to scratchpad, for MCP */
2580 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2581 REG_WR(bp, addr_to_write + i*sizeof(u32),
2582 *(((u32 *)(&afex_stats))+i));
2583
2584 /* send ack message to MCP */
2585 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2586 }
2587
2588 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2589 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2590 bp->mf_config[BP_VN(bp)] = mf_config;
2591 DP(BNX2X_MSG_MCP,
2592 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2593 mf_config);
2594
2595 /* if VIF_SET is "enabled" */
2596 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2597 /* set rate limit directly to internal RAM */
2598 struct cmng_init_input cmng_input;
2599 struct rate_shaping_vars_per_vn m_rs_vn;
2600 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2601 u32 addr = BAR_XSTRORM_INTMEM +
2602 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2603
2604 bp->mf_config[BP_VN(bp)] = mf_config;
2605
2606 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2607 m_rs_vn.vn_counter.rate =
2608 cmng_input.vnic_max_rate[BP_VN(bp)];
2609 m_rs_vn.vn_counter.quota =
2610 (m_rs_vn.vn_counter.rate *
2611 RS_PERIODIC_TIMEOUT_USEC) / 8;
2612
2613 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2614
2615 /* read relevant values from mf_cfg struct in shmem */
2616 vif_id =
2617 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2618 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2619 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2620 vlan_val =
2621 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2622 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2623 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2624 vlan_prio = (mf_config &
2625 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2626 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2627 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2628 vlan_mode =
2629 (MF_CFG_RD(bp,
2630 func_mf_config[func].afex_config) &
2631 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2632 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2633 allowed_prio =
2634 (MF_CFG_RD(bp,
2635 func_mf_config[func].afex_config) &
2636 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2637 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2638
2639 /* send ramrod to FW, return in case of failure */
2640 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2641 allowed_prio))
2642 return;
2643
2644 bp->afex_def_vlan_tag = vlan_val;
2645 bp->afex_vlan_mode = vlan_mode;
2646 } else {
2647 /* notify link down because BP->flags is disabled */
2648 bnx2x_link_report(bp);
2649
2650 /* send INVALID VIF ramrod to FW */
2651 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2652
2653 /* Reset the default afex VLAN */
2654 bp->afex_def_vlan_tag = -1;
2655 }
2656 }
2657}
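
/* Worked example of the default-VLAN composition above, assuming
 * VLAN_PRIO_SHIFT is 13: with an AFEX vlan_val of 100 (0x064) and
 * vlan_prio == 5, the final tag is 0x064 | (5 << 13) == 0xa064.
 */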
2658
2659static void bnx2x_pmf_update(struct bnx2x *bp)
2660{
2661 int port = BP_PORT(bp);
2662 u32 val;
2663
2664 bp->port.pmf = 1;
2665 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2666
2667 /* We need the mb() to ensure the ordering between the writing to
2668  * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2669  */
2670
2671 smp_mb();
2672
2673 /* queue a periodic task */
2674 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2675
2676 bnx2x_dcbx_pmf_update(bp);
2677
2678 /* enable nig attention */
2679 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2680 if (bp->common.int_block == INT_BLOCK_HC) {
2681 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2682 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2683 } else if (!CHIP_IS_E1x(bp)) {
2684 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2685 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2686 }
2687
2688 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2689}
2690
2691/**
2692 * bnx2x_fw_command - send the MCP a request and block for a reply
2693 *
2694 * @bp:		driver handle
2695 * @command:	request code, placed in the driver mailbox header
2696 * @param:	request parameter
2697 *
2698 * Returns the masked FW response code, or 0 if the FW timed out.
2699 */
2700u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2701{
2702 int mb_idx = BP_FW_MB_IDX(bp);
2703 u32 seq;
2704 u32 rc = 0;
2705 u32 cnt = 1;
2706 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2707
2708 mutex_lock(&bp->fw_mb_mutex);
2709 seq = ++bp->fw_seq;
2710 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2711 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2712
2713 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2714 (command | seq), param);
2715
2716 do {
2717 /* let the FW do its magic ... */
2718 msleep(delay);
2719
2720 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2721
2722 /* Give the FW up to 5 seconds (500*10ms) */
2723 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2724
2725 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2726 cnt*delay, rc, seq);
2727
2728 /* is this a reply to our command? */
2729 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2730 rc &= FW_MSG_CODE_MASK;
2731 else {
2732
2733 BNX2X_ERR("FW failed to respond!\n");
2734 bnx2x_fw_dump(bp);
2735 rc = 0;
2736 }
2737 mutex_unlock(&bp->fw_mb_mutex);
2738
2739 return rc;
2740}
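
/* Illustrative sketch (not part of the driver): a hypothetical caller
 * checking the masked response code returned by bnx2x_fw_command().
 * The request code below is only an example of the calling convention.
 */
#if 0
static bool example_mcp_alive(struct bnx2x *bp)
{
	u32 resp = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);

	/* 0 means the MCP never echoed our sequence number back */
	return resp != 0;
}
#endif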
2741
2742
2743static void storm_memset_func_cfg(struct bnx2x *bp,
2744 struct tstorm_eth_function_common_config *tcfg,
2745 u16 abs_fid)
2746{
2747 size_t size = sizeof(struct tstorm_eth_function_common_config);
2748
2749 u32 addr = BAR_TSTRORM_INTMEM +
2750 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2751
2752 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2753}
2754
2755void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2756{
2757 if (CHIP_IS_E1x(bp)) {
2758 struct tstorm_eth_function_common_config tcfg = {0};
2759
2760 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2761 }
2762
2763 /* enable the function in the FW */
2764 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2765 storm_memset_func_en(bp, p->func_id, 1);
2766
2767 /* set up the slow-path queue (SPQ) */
2768 if (p->func_flgs & FUNC_FLG_SPQ) {
2769 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2770 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2771 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2772 }
2773}
2774
2775/**
2776 * bnx2x_get_common_flags - Return common flags
2777 *
2778 * @bp:		device handle
2779 * @fp:		queue handle
2780 * @zero_stats:	TRUE if statistics zeroing is needed
2781 *
2782 * Return the flags that are common for the Tx-only and not normal connections.
2783 */
2784static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2785 struct bnx2x_fastpath *fp,
2786 bool zero_stats)
2787{
2788 unsigned long flags = 0;
2789
2790 /* PF driver will always initialize the Queue to an ACTIVE state */
2791 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2792
2793 /* tx only connections collect statistics (on the same index as the
2794  * parent connection). The statistics are zeroed when the parent
2795  * connection is initialized.
2796  */
2797
2798 __set_bit(BNX2X_Q_FLG_STATS, &flags);
2799 if (zero_stats)
2800 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2801
2802
2803 return flags;
2804}
2805
2806static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2807 struct bnx2x_fastpath *fp,
2808 bool leading)
2809{
2810 unsigned long flags = 0;
2811
2812 /* calculate other queue flags */
2813 if (IS_MF_SD(bp))
2814 __set_bit(BNX2X_Q_FLG_OV, &flags);
2815
2816 if (IS_FCOE_FP(fp)) {
2817 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
2818 /* For FCoE - force usage of default priority (for afex) */
2819 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
2820 }
2821
2822 if (!fp->disable_tpa) {
2823 __set_bit(BNX2X_Q_FLG_TPA, &flags);
2824 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
2825 if (fp->mode == TPA_MODE_GRO)
2826 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
2827 }
2828
2829 if (leading) {
2830 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
2831 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
2832 }
2833
2834 /* Always set HW VLAN stripping */
2835 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
2836
2837 /* configure silent vlan removal */
2838 if (IS_MF_AFEX(bp))
2839 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
2840
2841
2842 return flags | bnx2x_get_common_flags(bp, fp, true);
2843}
2844
2845static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
2846 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
2847 u8 cos)
2848{
2849 gen_init->stat_id = bnx2x_stats_id(fp);
2850 gen_init->spcl_id = fp->cl_id;
2851
2852 /* always use mini-jumbo MTU for FCoE L2 ring */
2853 if (IS_FCOE_FP(fp))
2854 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2855 else
2856 gen_init->mtu = bp->dev->mtu;
2857
2858 gen_init->cos = cos;
2859}
2860
2861static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2862 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2863 struct bnx2x_rxq_setup_params *rxq_init)
2864{
2865 u8 max_sge = 0;
2866 u16 sge_sz = 0;
2867 u16 tpa_agg_size = 0;
2868
2869 if (!fp->disable_tpa) {
2870 pause->sge_th_lo = SGE_TH_LO(bp);
2871 pause->sge_th_hi = SGE_TH_HI(bp);
2872
2873 /* validate SGE ring has enough to cross high threshold */
2874 WARN_ON(bp->dropless_fc &&
2875 pause->sge_th_hi + FW_PREFETCH_CNT >
2876 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
2877
2878 tpa_agg_size = min_t(u32,
2879 (min_t(u32, 8, MAX_SKB_FRAGS) *
2880 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2881 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2882 SGE_PAGE_SHIFT;
2883 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2884 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2885 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2886 0xffff);
2887 }
2888
2889 /* pause - not for e1 */
2890 if (!CHIP_IS_E1(bp)) {
2891 pause->bd_th_lo = BD_TH_LO(bp);
2892 pause->bd_th_hi = BD_TH_HI(bp);
2893
2894 pause->rcq_th_lo = RCQ_TH_LO(bp);
2895 pause->rcq_th_hi = RCQ_TH_HI(bp);
2896
2897 /* validate rings have enough entries to cross
2898  * high thresholds
2899  */
2900 WARN_ON(bp->dropless_fc &&
2901 pause->bd_th_hi + FW_PREFETCH_CNT >
2902 bp->rx_ring_size);
2903 WARN_ON(bp->dropless_fc &&
2904 pause->rcq_th_hi + FW_PREFETCH_CNT >
2905 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
2906
2907 pause->pri_map = 1;
2908 }
2909
2910
2911 rxq_init->dscr_map = fp->rx_desc_mapping;
2912 rxq_init->sge_map = fp->rx_sge_mapping;
2913 rxq_init->rcq_map = fp->rx_comp_mapping;
2914 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2915
2916 /* This should be a maximum number of data bytes that may be
2917  * placed on the BD (not including paddings).
2918  */
2919 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
2920 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
2921
2922 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2923 rxq_init->tpa_agg_sz = tpa_agg_size;
2924 rxq_init->sge_buf_sz = sge_sz;
2925 rxq_init->max_sges_pkt = max_sge;
2926 rxq_init->rss_engine_id = BP_FUNC(bp);
2927 rxq_init->mcast_engine_id = BP_FUNC(bp);
2928
2929 /* Maximum number of simultaneous TPA aggregations for this queue.
2930  *
2931  * For PF Clients it should be the maximum available number.
2932  * VF driver(s) may want to define it to a smaller value.
2933  */
2934 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
2935
2936 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2937 rxq_init->fw_sb_id = fp->fw_sb_id;
2938
2939 if (IS_FCOE_FP(fp))
2940 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2941 else
2942 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
2943
2944 /* configure silent vlan removal:
2945  * if multi function mode is afex, then mask default vlan */
2946 if (IS_MF_AFEX(bp)) {
2947 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
2948 rxq_init->silent_removal_mask = VLAN_VID_MASK;
2949 }
2950}
2951
2952static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
2953 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
2954 u8 cos)
2955{
2956 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
2957 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
2958 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2959 txq_init->fw_sb_id = fp->fw_sb_id;
2960
2961 /*
2962  * set the tss leading client id for TX classification ==
2963  * leading RSS client id
2964  */
2965 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
2966
2967 if (IS_FCOE_FP(fp)) {
2968 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2969 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2970 }
2971}
2972
2973static void bnx2x_pf_init(struct bnx2x *bp)
2974{
2975 struct bnx2x_func_init_params func_init = {0};
2976 struct event_ring_data eq_data = { {0} };
2977 u16 flags;
2978
2979 if (!CHIP_IS_E1x(bp)) {
2980 /* reset IGU PF statistics: MSIX + ATTN */
2981 /* PF */
2982 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2983 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2984 (CHIP_MODE_IS_4_PORT(bp) ?
2985 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2986 /* ATTN */
2987 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2988 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2989 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2990 (CHIP_MODE_IS_4_PORT(bp) ?
2991 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2992 }
2993
2994 /* function setup flags */
2995 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2996
2997 /* This flag is relevant for E1x only.
2998  * E2 doesn't have a TPA configuration in a function level.
2999  */
3000 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
3001
3002 func_init.func_flgs = flags;
3003 func_init.pf_id = BP_FUNC(bp);
3004 func_init.func_id = BP_FUNC(bp);
3005 func_init.spq_map = bp->spq_mapping;
3006 func_init.spq_prod = bp->spq_prod_idx;
3007
3008 bnx2x_func_init(bp, &func_init);
3009
3010 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3011
3012 /* Congestion management values depend on the link rate.
3013  * There is no active link so the initial link rate is set to 10 Gbps.
3014  * When the link comes up the congestion management values are
3015  * re-calculated according to the actual link rate.
3016  */
3017
3018 bp->link_vars.line_speed = SPEED_10000;
3019 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3020
3021 /* Only the PMF sets the HW */
3022 if (bp->port.pmf)
3023 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3024
3025 /* init Event Queue - PCI bus guarantees correct endianity */
3026 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3027 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3028 eq_data.producer = bp->eq_prod;
3029 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3030 eq_data.sb_id = DEF_SB_ID;
3031 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3032}
3033
3034
3035static void bnx2x_e1h_disable(struct bnx2x *bp)
3036{
3037 int port = BP_PORT(bp);
3038
3039 bnx2x_tx_disable(bp);
3040
3041 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3042}
3043
3044static void bnx2x_e1h_enable(struct bnx2x *bp)
3045{
3046 int port = BP_PORT(bp);
3047
3048 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
3049
3050 /* Tx queue should be only re-enabled */
3051 netif_tx_wake_all_queues(bp->dev);
3052
3053 /*
3054  * Should not call netif_carrier_on since it will be called if the
3055  * link is up when checking for link state
3056  */
3057}
3058
3059#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3060
3061static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3062{
3063 struct eth_stats_info *ether_stat =
3064 &bp->slowpath->drv_info_to_mcp.ether_stat;
3065
3066 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3067 ETH_STAT_INFO_VERSION_LEN);
3068
3069 bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3070 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3071 ether_stat->mac_local);
3072
3073 ether_stat->mtu_size = bp->dev->mtu;
3074
3075 if (bp->dev->features & NETIF_F_RXCSUM)
3076 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3077 if (bp->dev->features & NETIF_F_TSO)
3078 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3079 ether_stat->feature_flags |= bp->common.boot_mode;
3080
3081 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3082
3083 ether_stat->txq_size = bp->tx_ring_size;
3084 ether_stat->rxq_size = bp->rx_ring_size;
3085}
3086
3087static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3088{
3089 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3090 struct fcoe_stats_info *fcoe_stat =
3091 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3092
3093 if (!CNIC_LOADED(bp))
3094 return;
3095
3096 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
3097 bp->fip_mac, ETH_ALEN);
3098
3099 fcoe_stat->qos_priority =
3100 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3101
3102 /* insert FCoE stats from ramrod response */
3103 if (!NO_FCOE(bp)) {
3104 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3105 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3106 tstorm_queue_statistics;
3107
3108 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3109 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3110 xstorm_queue_statistics;
3111
3112 struct fcoe_statistics_params *fw_fcoe_stat =
3113 &bp->fw_stats_data->fcoe;
3114
3115 ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
3116 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3117
3118 ADD_64(fcoe_stat->rx_bytes_hi,
3119 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3120 fcoe_stat->rx_bytes_lo,
3121 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3122
3123 ADD_64(fcoe_stat->rx_bytes_hi,
3124 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3125 fcoe_stat->rx_bytes_lo,
3126 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3127
3128 ADD_64(fcoe_stat->rx_bytes_hi,
3129 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3130 fcoe_stat->rx_bytes_lo,
3131 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3132
3133 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3134 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3135
3136 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3137 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3138
3139 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3140 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3141
3142 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3143 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3144
3145 ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
3146 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3147
3148 ADD_64(fcoe_stat->tx_bytes_hi,
3149 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3150 fcoe_stat->tx_bytes_lo,
3151 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3152
3153 ADD_64(fcoe_stat->tx_bytes_hi,
3154 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3155 fcoe_stat->tx_bytes_lo,
3156 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3157
3158 ADD_64(fcoe_stat->tx_bytes_hi,
3159 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3160 fcoe_stat->tx_bytes_lo,
3161 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3162
3163 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3164 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3165
3166 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3167 fcoe_q_xstorm_stats->ucast_pkts_sent);
3168
3169 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3170 fcoe_q_xstorm_stats->bcast_pkts_sent);
3171
3172 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3173 fcoe_q_xstorm_stats->mcast_pkts_sent);
3174 }
3175
3176 /* ask L5 driver to add data to the struct */
3177 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3178}
3179
3180static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3181{
3182 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3183 struct iscsi_stats_info *iscsi_stat =
3184 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3185
3186 if (!CNIC_LOADED(bp))
3187 return;
3188
3189 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
3190 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
3191
3192 iscsi_stat->qos_priority =
3193 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3194
3195 /* ask L5 driver to add data to the struct */
3196 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3197}
3198
3199/* called due to MCP event (on pmf):
3200 *	reread new bandwidth configuration
3201 *	configure FW
3202 *	notify others function about the change
3203 */
3204static void bnx2x_config_mf_bw(struct bnx2x *bp)
3205{
3206 if (bp->link_vars.link_up) {
3207 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3208 bnx2x_link_sync_notify(bp);
3209 }
3210 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3211}
3212
3213static void bnx2x_set_mf_bw(struct bnx2x *bp)
3214{
3215 bnx2x_config_mf_bw(bp);
3216 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3217}
3218
3219static void bnx2x_handle_eee_event(struct bnx2x *bp)
3220{
3221 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3222 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3223}
3224
3225static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3226{
3227 enum drv_info_opcode op_code;
3228 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3229
3230 /* if drv_info version supported by MFW doesn't match - send NACK */
3231 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3232 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3233 return;
3234 }
3235
3236 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3237 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3238
3239 memset(&bp->slowpath->drv_info_to_mcp, 0,
3240 sizeof(union drv_info_to_mcp));
3241
3242 switch (op_code) {
3243 case ETH_STATS_OPCODE:
3244 bnx2x_drv_info_ether_stat(bp);
3245 break;
3246 case FCOE_STATS_OPCODE:
3247 bnx2x_drv_info_fcoe_stat(bp);
3248 break;
3249 case ISCSI_STATS_OPCODE:
3250 bnx2x_drv_info_iscsi_stat(bp);
3251 break;
3252 default:
3253 /* if op code isn't supported - send NACK */
3254 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3255 return;
3256 }
3257
3258
3259 /* if we got a drv_info attn from MFW then these fields are defined
3260  * in shmem2 for sure */
3261 SHMEM2_WR(bp, drv_info_host_addr_lo,
3262 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3263 SHMEM2_WR(bp, drv_info_host_addr_hi,
3264 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3265
3266 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3267}
3268
3269static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3270{
3271 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
3272
3273 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
3274
3275 /*
3276  * This is the only place besides the function initialization
3277  * where the bp->flags can change so it is done without any
3278  * locks
3279  */
3280 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3281 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3282 bp->flags |= MF_FUNC_DIS;
3283
3284 bnx2x_e1h_disable(bp);
3285 } else {
3286 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3287 bp->flags &= ~MF_FUNC_DIS;
3288
3289 bnx2x_e1h_enable(bp);
3290 }
3291 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3292 }
3293 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
3294 bnx2x_config_mf_bw(bp);
3295 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3296 }
3297
3298 /* Report results to MCP */
3299 if (dcc_event)
3300 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
3301 else
3302 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
3303}
3304
3305/* must be called under the spq lock */
3306static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3307{
3308 struct eth_spe *next_spe = bp->spq_prod_bd;
3309
3310 if (bp->spq_prod_bd == bp->spq_last_bd) {
3311 bp->spq_prod_bd = bp->spq;
3312 bp->spq_prod_idx = 0;
3313 DP(BNX2X_MSG_SP, "end of spq\n");
3314 } else {
3315 bp->spq_prod_bd++;
3316 bp->spq_prod_idx++;
3317 }
3318 return next_spe;
3319}
3320
3321/* must be called under the spq lock */
3322static void bnx2x_sp_prod_update(struct bnx2x *bp)
3323{
3324 int func = BP_FUNC(bp);
3325
3326 /*
3327  * Make sure that BD data is updated before writing the producer:
3328  * BD data is written to the memory, the producer is read from the
3329  * memory, thus we need a full memory barrier to ensure the ordering.
3330  */
3331 mb();
3332
3333 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3334 bp->spq_prod_idx);
3335 mmiowb();
3336}
3337
3338/**
3339 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3340 *
3341 * @cmd:	command to check
3342 * @cmd_type:	command type
3343 */
3344static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3345{
3346 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3347 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3348 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3349 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3350 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3351 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3352 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3353 return true;
3354 else
3355 return false;
3357}
3358
3359/**
3360 * bnx2x_sp_post - place a single command on an SP ring
3361 *
3362 * @bp:		driver handle
3363 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
3364 * @cid:	SW CID the command is related to
3365 * @data_hi:	command private data address (high 32 bits)
3366 * @data_lo:	command private data address (low 32 bits)
3367 * @cmd_type:	command type (e.g. NONE, ETH)
3368 *
3369 * SP data is handled as if it's always an address pair, thus data fields are
3370 * not swapped to little endian in upper functions. Instead this function swaps
3371 * data as if it's two u32 fields.
3372 */
3373
3374int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3375 u32 data_hi, u32 data_lo, int cmd_type)
3376{
3377 struct eth_spe *spe;
3378 u16 type;
3379 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3380
3381#ifdef BNX2X_STOP_ON_ERROR
3382 if (unlikely(bp->panic)) {
3383 BNX2X_ERR("Can't post SP when there is panic\n");
3384 return -EIO;
3385 }
3386#endif
3387
3388 spin_lock_bh(&bp->spq_lock);
3389
3390 if (common) {
3391 if (!atomic_read(&bp->eq_spq_left)) {
3392 BNX2X_ERR("BUG! EQ ring full!\n");
3393 spin_unlock_bh(&bp->spq_lock);
3394 bnx2x_panic();
3395 return -EBUSY;
3396 }
3397 } else if (!atomic_read(&bp->cq_spq_left)) {
3398 BNX2X_ERR("BUG! SPQ ring full!\n");
3399 spin_unlock_bh(&bp->spq_lock);
3400 bnx2x_panic();
3401 return -EBUSY;
3402 }
3403
3404 spe = bnx2x_sp_get_next(bp);
3405
3406 /* CID needs port number to be encoded in it */
3407 spe->hdr.conn_and_cmd_data =
3408 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3409 HW_CID(bp, cid));
3410
3411 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
3412
3413 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3414 SPE_HDR_FUNCTION_ID);
3415
3416 spe->hdr.type = cpu_to_le16(type);
3417
3418 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3419 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3420
3421 /*
3422  * It's ok if the actual decrement is issued towards the memory
3423  * somewhere between the spin_lock and spin_unlock. Thus no
3424  * more explicit memory barrier is needed.
3425  */
3426 if (common)
3427 atomic_dec(&bp->eq_spq_left);
3428 else
3429 atomic_dec(&bp->cq_spq_left);
3430
3431
3432 DP(BNX2X_MSG_SP,
3433 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3434 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3435 (u32)(U64_LO(bp->spq_mapping) +
3436 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3437 HW_CID(bp, cid), data_hi, data_lo, type,
3438 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3439
3440 bnx2x_sp_prod_update(bp);
3441 spin_unlock_bh(&bp->spq_lock);
3442 return 0;
3443}
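
/* Illustrative sketch (not part of the driver): posting one slow-path
 * element whose private data lives at a DMA address obtained elsewhere.
 * The command and cid arguments are placeholders, not a real ramrod.
 */
#if 0
static int example_sp_post(struct bnx2x *bp, int command, int cid,
			   dma_addr_t mapping)
{
	return bnx2x_sp_post(bp, command, cid, U64_HI(mapping),
			     U64_LO(mapping), NONE_CONNECTION_TYPE);
}
#endif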
3444
3445/* acquire split MCP access lock register */
3446static int bnx2x_acquire_alr(struct bnx2x *bp)
3447{
3448 u32 j, val;
3449 int rc = 0;
3450
3451 might_sleep();
3452 for (j = 0; j < 1000; j++) {
3453 val = (1UL << 31);
3454 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3455 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3456 if (val & (1L << 31))
3457 break;
3458
3459 msleep(5);
3460 }
3461 if (!(val & (1L << 31))) {
3462 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3463 rc = -EBUSY;
3464 }
3465
3466 return rc;
3467}
3468
3469/* release split MCP access lock register */
3470static void bnx2x_release_alr(struct bnx2x *bp)
3471{
3472 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
3473}
3474
3475#define BNX2X_DEF_SB_ATT_IDX 0x0001
3476#define BNX2X_DEF_SB_IDX 0x0002
3477
3478static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3479{
3480 struct host_sp_status_block *def_sb = bp->def_status_blk;
3481 u16 rc = 0;
3482
3483 barrier();
3484 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3485 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3486 rc |= BNX2X_DEF_SB_ATT_IDX;
3487 }
3488
3489 if (bp->def_idx != def_sb->sp_sb.running_index) {
3490 bp->def_idx = def_sb->sp_sb.running_index;
3491 rc |= BNX2X_DEF_SB_IDX;
3492 }
3493
3494 /* Do not reorder: indices reading should complete before handling */
3495 barrier();
3496 return rc;
3497}
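
/* Illustrative sketch (not part of the driver): how a caller might act
 * on the bits returned by bnx2x_update_dsb_idx().
 */
#if 0
static void example_service_def_sb(struct bnx2x *bp)
{
	u16 status = bnx2x_update_dsb_idx(bp);

	if (status & BNX2X_DEF_SB_ATT_IDX)
		;	/* attention index advanced - service attentions */
	if (status & BNX2X_DEF_SB_IDX)
		;	/* slow-path index advanced - drain the event queue */
}
#endif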
3498
3499/*
3500 * Handle newly asserted attentions: mask them in the AEU, record them
3501 * in bp->attn_state and acknowledge them towards the HC/IGU.
3502 */
3503static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3504{
3505 int port = BP_PORT(bp);
3506 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3507 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3508 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3509 NIG_REG_MASK_INTERRUPT_PORT0;
3510 u32 aeu_mask;
3511 u32 nig_mask = 0;
3512 u32 reg_addr;
3513
3514 if (bp->attn_state & asserted)
3515 BNX2X_ERR("IGU ERROR\n");
3516
3517 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3518 aeu_mask = REG_RD(bp, aeu_addr);
3519
3520 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3521 aeu_mask, asserted);
3522 aeu_mask &= ~(asserted & 0x3ff);
3523 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3524
3525 REG_WR(bp, aeu_addr, aeu_mask);
3526 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3527
3528 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3529 bp->attn_state |= asserted;
3530 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3531
3532 if (asserted & ATTN_HARD_WIRED_MASK) {
3533 if (asserted & ATTN_NIG_FOR_FUNC) {
3534
3535 bnx2x_acquire_phy_lock(bp);
3536
3537 /* save nig interrupt mask */
3538 nig_mask = REG_RD(bp, nig_int_mask_addr);
3539
3540 /* If nig_mask is not set, no need to call the update
3541  * function.
3542  */
3543 if (nig_mask) {
3544 REG_WR(bp, nig_int_mask_addr, 0);
3545
3546 bnx2x_link_attn(bp);
3547 }
3548
3549
3550 }
3551 if (asserted & ATTN_SW_TIMER_4_FUNC)
3552 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3553
3554 if (asserted & GPIO_2_FUNC)
3555 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3556
3557 if (asserted & GPIO_3_FUNC)
3558 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3559
3560 if (asserted & GPIO_4_FUNC)
3561 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3562
3563 if (port == 0) {
3564 if (asserted & ATTN_GENERAL_ATTN_1) {
3565 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3566 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3567 }
3568 if (asserted & ATTN_GENERAL_ATTN_2) {
3569 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3570 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3571 }
3572 if (asserted & ATTN_GENERAL_ATTN_3) {
3573 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3574 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3575 }
3576 } else {
3577 if (asserted & ATTN_GENERAL_ATTN_4) {
3578 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3579 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3580 }
3581 if (asserted & ATTN_GENERAL_ATTN_5) {
3582 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3583 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3584 }
3585 if (asserted & ATTN_GENERAL_ATTN_6) {
3586 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3587 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3588 }
3589 }
3590
3591 }
3592
3593 if (bp->common.int_block == INT_BLOCK_HC)
3594 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3595 COMMAND_REG_ATTN_BITS_SET);
3596 else
3597 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3598
3599 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3600 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3601 REG_WR(bp, reg_addr, asserted);
3602
3603 /* now set back the mask */
3604 if (asserted & ATTN_NIG_FOR_FUNC) {
3605
3606 /* Verify that IGU ack through BAR was written before restoring
3607  * NIG mask. This loop should exit after 2-3 iterations max. */
3608 if (bp->common.int_block != INT_BLOCK_HC) {
3609 u32 cnt = 0, igu_acked;
3610 do {
3611 igu_acked = REG_RD(bp,
3612 IGU_REG_ATTENTION_ACK_BITS);
3613 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
3614 (++cnt < MAX_IGU_ATTN_ACK_TO));
3615 if (!igu_acked)
3616 DP(NETIF_MSG_HW,
3617 "Failed to verify IGU ack on time\n");
3618 barrier();
3619 }
3620 REG_WR(bp, nig_int_mask_addr, nig_mask);
3621 bnx2x_release_phy_lock(bp);
3622 }
3623}
3624
3625static void bnx2x_fan_failure(struct bnx2x *bp)
3626{
3627 int port = BP_PORT(bp);
3628 u32 ext_phy_config;
3629
3630 ext_phy_config =
3631 SHMEM_RD(bp,
3632 dev_info.port_hw_config[port].external_phy_config);
3633
3634 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3635 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
3636 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
3637 ext_phy_config);
3638
3639 /* log the failure */
3640 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
3641 "Please contact OEM Support for assistance\n");
3642
3643 /*
3644  * Schedule device reset (unload).
3645  * This is due to some boards consuming sufficient power when driver
3646  * is up to overheat if fan fails.
3647  */
3648 smp_mb__before_clear_bit();
3649 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3650 smp_mb__after_clear_bit();
3651 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3652
3653}
3654
3655static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3656{
3657 int port = BP_PORT(bp);
3658 int reg_offset;
3659 u32 val;
3660
3661 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3662 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3663
3664 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
3665 /* fan failure attention */
3666 val = REG_RD(bp, reg_offset);
3667 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3668 REG_WR(bp, reg_offset, val);
3669
3670 BNX2X_ERR("SPIO5 hw attention\n");
3671
3672
3673 bnx2x_hw_reset_phy(&bp->link_params);
3674 bnx2x_fan_failure(bp);
3675 }
3676
3677 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
3678 bnx2x_acquire_phy_lock(bp);
3679 bnx2x_handle_module_detect_int(&bp->link_params);
3680 bnx2x_release_phy_lock(bp);
3681 }
3682
3683 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3684
3685 val = REG_RD(bp, reg_offset);
3686 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3687 REG_WR(bp, reg_offset, val);
3688
3689 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3690 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3691 bnx2x_panic();
3692 }
3693}
3694
3695static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3696{
3697 u32 val;
3698
3699 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3700
3701 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3702 BNX2X_ERR("DB hw attention 0x%x\n", val);
3703
3704 if (val & 0x2)
3705 BNX2X_ERR("FATAL error from DORQ\n");
3706 }
3707
3708 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3709
3710 int port = BP_PORT(bp);
3711 int reg_offset;
3712
3713 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3714 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3715
3716 val = REG_RD(bp, reg_offset);
3717 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3718 REG_WR(bp, reg_offset, val);
3719
3720 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3721 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3722 bnx2x_panic();
3723 }
3724}
3725
3726static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3727{
3728 u32 val;
3729
3730 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3731
3732 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3733 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3734
3735 if (val & 0x2)
3736 BNX2X_ERR("FATAL error from CFC\n");
3737 }
3738
3739 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3740 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3741 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
3742
3743 if (val & 0x18000)
3744 BNX2X_ERR("FATAL error from PXP\n");
3745
3746 if (!CHIP_IS_E1x(bp)) {
3747 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3748 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3749 }
3750 }
3751
3752 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3753
3754 int port = BP_PORT(bp);
3755 int reg_offset;
3756
3757 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3758 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3759
3760 val = REG_RD(bp, reg_offset);
3761 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3762 REG_WR(bp, reg_offset, val);
3763
3764 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3765 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3766 bnx2x_panic();
3767 }
3768}
3769
3770static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3771{
3772 u32 val;
3773
3774 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3775
3776 if (attn & BNX2X_PMF_LINK_ASSERT) {
3777 int func = BP_FUNC(bp);
3778
3779 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3780 bnx2x_read_mf_cfg(bp);
3781 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3782 func_mf_config[BP_ABS_FUNC(bp)].config);
3783 val = SHMEM_RD(bp,
3784 func_mb[BP_FW_MB_IDX(bp)].drv_status);
3785 if (val & DRV_STATUS_DCC_EVENT_MASK)
3786 bnx2x_dcc_event(bp,
3787 (val & DRV_STATUS_DCC_EVENT_MASK));
3788
3789 if (val & DRV_STATUS_SET_MF_BW)
3790 bnx2x_set_mf_bw(bp);
3791
3792 if (val & DRV_STATUS_DRV_INFO_REQ)
3793 bnx2x_handle_drv_info_req(bp);
3794 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3795 bnx2x_pmf_update(bp);
3796
3797 if (bp->port.pmf &&
3798 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3799 bp->dcbx_enabled > 0)
3800 /* start dcbx state machine */
3801 bnx2x_dcbx_set_params(bp,
3802 BNX2X_DCBX_STATE_NEG_RECEIVED);
3803 if (val & DRV_STATUS_AFEX_EVENT_MASK)
3804 bnx2x_handle_afex_cmd(bp,
3805 val & DRV_STATUS_AFEX_EVENT_MASK);
3806 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
3807 bnx2x_handle_eee_event(bp);
3808 if (bp->link_vars.periodic_flags &
3809 PERIODIC_FLAGS_LINK_EVENT) {
3810 /* sync with link */
3811 bnx2x_acquire_phy_lock(bp);
3812 bp->link_vars.periodic_flags &=
3813 ~PERIODIC_FLAGS_LINK_EVENT;
3814 bnx2x_release_phy_lock(bp);
3815 if (IS_MF(bp))
3816 bnx2x_link_sync_notify(bp);
3817 bnx2x_link_report(bp);
3818 }
3819
3820 /* Always call it here: bnx2x_link_report() will
3821  * prevent the link indication duplication. */
3822 bnx2x__link_status_update(bp);
3823 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3824
3825 BNX2X_ERR("MC assert!\n");
3826 bnx2x_mc_assert(bp);
3827 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3828 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3829 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3830 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3831 bnx2x_panic();
3832
3833 } else if (attn & BNX2X_MCP_ASSERT) {
3834
3835 BNX2X_ERR("MCP assert!\n");
3836 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3837 bnx2x_fw_dump(bp);
3838
3839 } else
3840 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3841 }
3842
3843 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3844 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3845 if (attn & BNX2X_GRC_TIMEOUT) {
3846 val = CHIP_IS_E1(bp) ? 0 :
3847 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3848 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3849 }
3850 if (attn & BNX2X_GRC_RSV) {
3851 val = CHIP_IS_E1(bp) ? 0 :
3852 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3853 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3854 }
3855 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3856 }
3857}
3858
3859
3860/*
3861 * Recovery-register bits map:
3862 * 0-7   - Engine0 load counter.
3863 * 8-15  - Engine1 load counter.
3864 * 16    - Engine0 RESET_IN_PROGRESS bit.
3865 * 17    - Engine1 RESET_IN_PROGRESS bit.
3866 * 18    - Global RESET bit: while set, a global reset flow is in
3867 *         progress and every function must wait for it to complete.
3868 */
3869
3873#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
3874
3875#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
3876#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
3877#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
3878#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
3879#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
3880#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
3881#define BNX2X_GLOBAL_RESET_BIT 0x00040000
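
/* Worked example: a register value of 0x00050003 decodes as a path 0
 * load mask of 0x03 (two PFs loaded), path 0 reset in progress (bit 16)
 * and the global reset bit (bit 18) set.
 */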
3882
3883/*
3884 * Set the GLOBAL_RESET bit.
3885 *
3886 * Should be run under rtnl lock
3887 */
3888void bnx2x_set_reset_global(struct bnx2x *bp)
3889{
3890 u32 val;
3891 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3892 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3893 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
3894 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3895}
3896
3897/*
3898 * Clear the GLOBAL_RESET bit.
3899 *
3900 * Should be run under rtnl lock
3901 */
3902static void bnx2x_clear_reset_global(struct bnx2x *bp)
3903{
3904 u32 val;
3905 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3906 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3907 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
3908 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3909}
3910
3911/*
3912 * Checks the GLOBAL_RESET bit.
3913 *
3914 * should be run under rtnl lock
3915 */
3916static bool bnx2x_reset_is_global(struct bnx2x *bp)
3917{
3918 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3919
3920 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3921 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
3922}
3923
3924/*
3925 * Clear RESET_IN_PROGRESS bit for the current engine.
3926 *
3927 * Should be run under rtnl lock
3928 */
3929static void bnx2x_set_reset_done(struct bnx2x *bp)
3930{
3931 u32 val;
3932 u32 bit = BP_PATH(bp) ?
3933 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3934 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3935 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3936
3937 /* Clear the bit */
3938 val &= ~bit;
3939 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3940
3941 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3942}
3943
3944/*
3945 * Set RESET_IN_PROGRESS for the current engine.
3946 *
3947 * should be run under rtnl lock
3948 */
3949void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3950{
3951 u32 val;
3952 u32 bit = BP_PATH(bp) ?
3953 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3954 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3955 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3956
3957 /* Set the bit */
3958 val |= bit;
3959 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3960 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3961}
3962
3963/*
3964 * Checks the RESET_IN_PROGRESS bit for the given engine.
3965 * should be run under rtnl lock
3966 */
3967bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
3968{
3969 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3970 u32 bit = engine ?
3971 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3972
3973 /* return false if bit is set */
3974 return (val & bit) ? false : true;
3975}
3976
3977/*
3978 * set pf load mark for the current pf.
3979 *
3980 * should be run under rtnl lock
3981 */
3982void bnx2x_set_pf_load(struct bnx2x *bp)
3983{
3984 u32 val1, val;
3985 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
3986 BNX2X_PATH0_LOAD_CNT_MASK;
3987 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
3988 BNX2X_PATH0_LOAD_CNT_SHIFT;
3989
3990 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3991 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3992
3993 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
3994
3995 /* get the current counter value */
3996 val1 = (val & mask) >> shift;
3997
3998 /* set bit of that PF */
3999 val1 |= (1 << bp->pf_num);
4000
4001 /* clear the old value */
4002 val &= ~mask;
4003
4004 /* set the new value */
4005 val |= ((val1 << shift) & mask);
4006
4007 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4008 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4009}
4010
4011/**
4012 * bnx2x_clear_pf_load - clear pf load mark
4013 *
4014 * @bp:		driver handle
4015 *
4016 * Should be run under rtnl lock.
4017 * Decrements the load counter for the current engine. Returns
4018 * whether other functions are still loaded
4019 */
4020bool bnx2x_clear_pf_load(struct bnx2x *bp)
4021{
4022 u32 val1, val;
4023 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4024 BNX2X_PATH0_LOAD_CNT_MASK;
4025 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4026 BNX2X_PATH0_LOAD_CNT_SHIFT;
4027
4028 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4029 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4030 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4031
4032 /* get the current counter value */
4033 val1 = (val & mask) >> shift;
4034
4035 /* clear bit of that PF */
4036 val1 &= ~(1 << bp->pf_num);
4037
4038 /* clear the old value */
4039 val &= ~mask;
4040
4041 /* set the new value */
4042 val |= ((val1 << shift) & mask);
4043
4044 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4045 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4046 return val1 != 0;
4047}
4048
4049/*
4050 * bnx2x_get_load_status - get the load status for the specified engine
4051 *
4052 * should be run under rtnl lock
4053 */
4054static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4055{
4056 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4057 BNX2X_PATH0_LOAD_CNT_MASK);
4058 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4059 BNX2X_PATH0_LOAD_CNT_SHIFT);
4060 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4061
4062 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4063
4064 val = (val & mask) >> shift;
4065
4066 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4067 engine, val);
4068
4069 return val != 0;
4070}
4071
4072static void _print_next_block(int idx, const char *blk)
4073{
4074 pr_cont("%s%s", idx ? ", " : "", blk);
4075}
4076
4077static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
4078 bool print)
4079{
4080 int i = 0;
4081 u32 cur_bit = 0;
4082 for (i = 0; sig; i++) {
4083 cur_bit = ((u32)0x1 << i);
4084 if (sig & cur_bit) {
4085 switch (cur_bit) {
4086 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4087 if (print)
4088 _print_next_block(par_num++, "BRB");
4089 break;
4090 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4091 if (print)
4092 _print_next_block(par_num++, "PARSER");
4093 break;
4094 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4095 if (print)
4096 _print_next_block(par_num++, "TSDM");
4097 break;
4098 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4099 if (print)
4100 _print_next_block(par_num++,
4101 "SEARCHER");
4102 break;
4103 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4104 if (print)
4105 _print_next_block(par_num++, "TCM");
4106 break;
4107 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4108 if (print)
4109 _print_next_block(par_num++, "TSEMI");
4110 break;
4111 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4112 if (print)
4113 _print_next_block(par_num++, "XPB");
4114 break;
4115 }
4116
4117 /* Clear the bit */
4118 sig &= ~cur_bit;
4119 }
4120 }
4121
4122 return par_num;
4123}
4124
4125static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
4126 bool *global, bool print)
4127{
4128 int i = 0;
4129 u32 cur_bit = 0;
4130 for (i = 0; sig; i++) {
4131 cur_bit = ((u32)0x1 << i);
4132 if (sig & cur_bit) {
4133 switch (cur_bit) {
4134 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4135 if (print)
4136 _print_next_block(par_num++, "PBF");
4137 break;
4138 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4139 if (print)
4140 _print_next_block(par_num++, "QM");
4141 break;
4142 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4143 if (print)
4144 _print_next_block(par_num++, "TM");
4145 break;
4146 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4147 if (print)
4148 _print_next_block(par_num++, "XSDM");
4149 break;
4150 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4151 if (print)
4152 _print_next_block(par_num++, "XCM");
4153 break;
4154 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4155 if (print)
4156 _print_next_block(par_num++, "XSEMI");
4157 break;
4158 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4159 if (print)
4160 _print_next_block(par_num++,
4161 "DOORBELLQ");
4162 break;
4163 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4164 if (print)
4165 _print_next_block(par_num++, "NIG");
4166 break;
4167 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4168 if (print)
4169 _print_next_block(par_num++,
4170 "VAUX PCI CORE");
4171 *global = true;
4172 break;
4173 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4174 if (print)
4175 _print_next_block(par_num++, "DEBUG");
4176 break;
4177 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4178 if (print)
4179 _print_next_block(par_num++, "USDM");
4180 break;
4181 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4182 if (print)
4183 _print_next_block(par_num++, "UCM");
4184 break;
4185 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4186 if (print)
4187 _print_next_block(par_num++, "USEMI");
4188 break;
4189 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4190 if (print)
4191 _print_next_block(par_num++, "UPB");
4192 break;
4193 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4194 if (print)
4195 _print_next_block(par_num++, "CSDM");
4196 break;
4197 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4198 if (print)
4199 _print_next_block(par_num++, "CCM");
4200 break;
4201 }
4202
4203 /* Clear the bit */
4204 sig &= ~cur_bit;
4205 }
4206 }
4207
4208 return par_num;
4209}
4210
4211static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4212 bool print)
4213{
4214 int i = 0;
4215 u32 cur_bit = 0;
4216 for (i = 0; sig; i++) {
4217 cur_bit = ((u32)0x1 << i);
4218 if (sig & cur_bit) {
4219 switch (cur_bit) {
4220 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4221 if (print)
4222 _print_next_block(par_num++, "CSEMI");
4223 break;
4224 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4225 if (print)
4226 _print_next_block(par_num++, "PXP");
4227 break;
4228 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4229 if (print)
4230 _print_next_block(par_num++,
4231 "PXPPCICLOCKCLIENT");
4232 break;
4233 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4234 if (print)
4235 _print_next_block(par_num++, "CFC");
4236 break;
4237 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4238 if (print)
4239 _print_next_block(par_num++, "CDU");
4240 break;
4241 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4242 if (print)
4243 _print_next_block(par_num++, "DMAE");
4244 break;
4245 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4246 if (print)
4247 _print_next_block(par_num++, "IGU");
4248 break;
4249 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4250 if (print)
4251 _print_next_block(par_num++, "MISC");
4252 break;
4253 }
4254
4255 /* Clear the bit */
4256 sig &= ~cur_bit;
4257 }
4258 }
4259
4260 return par_num;
4261}
4262
4263static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4264 bool *global, bool print)
4265{
4266 int i = 0;
4267 u32 cur_bit = 0;
4268 for (i = 0; sig; i++) {
4269 cur_bit = ((u32)0x1 << i);
4270 if (sig & cur_bit) {
4271 switch (cur_bit) {
4272 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4273 if (print)
4274 _print_next_block(par_num++, "MCP ROM");
4275 *global = true;
4276 break;
4277 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4278 if (print)
4279 _print_next_block(par_num++,
4280 "MCP UMP RX");
4281 *global = true;
4282 break;
4283 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4284 if (print)
4285 _print_next_block(par_num++,
4286 "MCP UMP TX");
4287 *global = true;
4288 break;
4289 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4290 if (print)
4291 _print_next_block(par_num++,
4292 "MCP SCPAD");
4293 *global = true;
4294 break;
4295 }

			/* Clear the bit */
4298 sig &= ~cur_bit;
4299 }
4300 }
4301
4302 return par_num;
4303}
4304
4305static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
4306 bool print)
4307{
4308 int i = 0;
4309 u32 cur_bit = 0;
4310 for (i = 0; sig; i++) {
4311 cur_bit = ((u32)0x1 << i);
4312 if (sig & cur_bit) {
4313 switch (cur_bit) {
4314 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4315 if (print)
4316 _print_next_block(par_num++, "PGLUE_B");
4317 break;
4318 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4319 if (print)
4320 _print_next_block(par_num++, "ATC");
4321 break;
4322 }

			/* Clear the bit */
4325 sig &= ~cur_bit;
4326 }
4327 }
4328
4329 return par_num;
4330}
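
/* All bnx2x_check_blocks_with_parity*() helpers share one idiom: walk the
 * AEU signal word bit by bit, name the HW block that owns each set parity
 * bit, and clear that bit until the word is exhausted. par_num is threaded
 * through the calls so the blocks are numbered consecutively across the
 * whole HW_PRTY_ASSERT set, and *global is raised for attentions that the
 * caller must treat as chip-global rather than per-function.
 */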
4331
4332static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4333 u32 *sig)
4334{
4335 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4336 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4337 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4338 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4339 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4340 int par_num = 0;
4341 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4342 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4343 sig[0] & HW_PRTY_ASSERT_SET_0,
4344 sig[1] & HW_PRTY_ASSERT_SET_1,
4345 sig[2] & HW_PRTY_ASSERT_SET_2,
4346 sig[3] & HW_PRTY_ASSERT_SET_3,
4347 sig[4] & HW_PRTY_ASSERT_SET_4);
4348 if (print)
4349 netdev_err(bp->dev,
4350 "Parity errors detected in blocks: ");
4351 par_num = bnx2x_check_blocks_with_parity0(
4352 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
4353 par_num = bnx2x_check_blocks_with_parity1(
4354 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
4355 par_num = bnx2x_check_blocks_with_parity2(
4356 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
4357 par_num = bnx2x_check_blocks_with_parity3(
4358 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
4359 par_num = bnx2x_check_blocks_with_parity4(
4360 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
4361
4362 if (print)
4363 pr_cont("\n");
4364
4365 return true;
4366 } else
4367 return false;
4368}

/**
 * bnx2x_chk_parity_attn - checks for parity attentions.
 *
 * @bp:		driver handle
 * @global:	true if there was a global attention
 * @print:	show parity attention in syslog
 */
4377bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4378{
4379 struct attn_route attn = { {0} };
4380 int port = BP_PORT(bp);
4381
4382 attn.sig[0] = REG_RD(bp,
4383 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4384 port*4);
4385 attn.sig[1] = REG_RD(bp,
4386 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4387 port*4);
4388 attn.sig[2] = REG_RD(bp,
4389 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4390 port*4);
4391 attn.sig[3] = REG_RD(bp,
4392 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4393 port*4);
4394
4395 if (!CHIP_IS_E1x(bp))
4396 attn.sig[4] = REG_RD(bp,
4397 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4398 port*4);
4399
4400 return bnx2x_parity_attn(bp, global, print, attn.sig);
4401}
4402
4403
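/* Decode and log the "set 4" (E2 and newer) HW attentions: the PGLUE_B and
 * ATC interrupt status causes, plus a fatal message if either block also
 * reports a parity error.
 */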
4404static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4405{
4406 u32 val;
4407 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
4408
4409 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
4410 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
4411 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
4412 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
4413 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
4414 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
4415 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
4416 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
4417 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
4418 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
4419 if (val &
4420 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
4421 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
4422 if (val &
4423 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
4424 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
4425 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
4426 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
4427 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
4428 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
4429 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
4430 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
4431 }
4432 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
4433 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
4434 BNX2X_ERR("ATC hw attention 0x%x\n", val);
4435 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
4436 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4437 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
4438 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
4439 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
4440 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
4441 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
4442 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
4443 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
4444 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4445 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
4446 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
4447 }
4448
4449 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4450 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
4451 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
4452 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4453 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4454 }
4455
4456}
4457
4458static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4459{
4460 struct attn_route attn, *group_mask;
4461 int port = BP_PORT(bp);
4462 int index;
4463 u32 reg_addr;
4464 u32 val;
4465 u32 aeu_mask;
4466 bool global = false;
4467
	/* need to take HW lock because MCP or other port might also
	 * try to handle this event
	 */
4470 bnx2x_acquire_alr(bp);
4471
4472 if (bnx2x_chk_parity_attn(bp, &global, true)) {
4473#ifndef BNX2X_STOP_ON_ERROR
4474 bp->recovery_state = BNX2X_RECOVERY_INIT;
4475 schedule_delayed_work(&bp->sp_rtnl_task, 0);

		/* Disable HW interrupts */
		bnx2x_int_disable(bp);

		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
4481#else
4482 bnx2x_panic();
4483#endif
4484 bnx2x_release_alr(bp);
4485 return;
4486 }
4487
4488 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4489 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4490 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4491 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4492 if (!CHIP_IS_E1x(bp))
4493 attn.sig[4] =
4494 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
4495 else
4496 attn.sig[4] = 0;
4497
4498 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
4499 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
4500
4501 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4502 if (deasserted & (1 << index)) {
4503 group_mask = &bp->attn_group[index];
4504
4505 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
4506 index,
4507 group_mask->sig[0], group_mask->sig[1],
4508 group_mask->sig[2], group_mask->sig[3],
4509 group_mask->sig[4]);
4510
4511 bnx2x_attn_int_deasserted4(bp,
4512 attn.sig[4] & group_mask->sig[4]);
4513 bnx2x_attn_int_deasserted3(bp,
4514 attn.sig[3] & group_mask->sig[3]);
4515 bnx2x_attn_int_deasserted1(bp,
4516 attn.sig[1] & group_mask->sig[1]);
4517 bnx2x_attn_int_deasserted2(bp,
4518 attn.sig[2] & group_mask->sig[2]);
4519 bnx2x_attn_int_deasserted0(bp,
4520 attn.sig[0] & group_mask->sig[0]);
4521 }
4522 }
4523
4524 bnx2x_release_alr(bp);
4525
4526 if (bp->common.int_block == INT_BLOCK_HC)
4527 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4528 COMMAND_REG_ATTN_BITS_CLR);
4529 else
4530 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
4531
4532 val = ~deasserted;
4533 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
4534 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4535 REG_WR(bp, reg_addr, val);
4536
4537 if (~bp->attn_state & deasserted)
4538 BNX2X_ERR("IGU ERROR\n");
4539
4540 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4541 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4542
4543 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4544 aeu_mask = REG_RD(bp, reg_addr);
4545
4546 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
4547 aeu_mask, deasserted);
4548 aeu_mask |= (deasserted & 0x3ff);
4549 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4550
4551 REG_WR(bp, reg_addr, aeu_mask);
4552 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4553
4554 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4555 bp->attn_state &= ~deasserted;
4556 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4557}
4558
4559static void bnx2x_attn_int(struct bnx2x *bp)
4560{
	/* read local copy of bits */
4562 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
4563 attn_bits);
4564 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
4565 attn_bits_ack);
4566 u32 attn_state = bp->attn_state;

	/* look for changed bits */
4569 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4570 u32 deasserted = ~attn_bits & attn_ack & attn_state;
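	/* Example: attn_bits=0101b, attn_ack=0011b, attn_state=0011b gives
	 * asserted   =  0101 & ~0011 & ~0011 = 0100 (line 2 newly raised),
	 * deasserted = ~0101 &  0011 &  0011 = 0010 (line 1 dropped back).
	 */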
4571
4572 DP(NETIF_MSG_HW,
4573 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4574 attn_bits, attn_ack, asserted, deasserted);
4575
4576 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4577 BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
4580 if (asserted)
4581 bnx2x_attn_int_asserted(bp, asserted);
4582
4583 if (deasserted)
4584 bnx2x_attn_int_deasserted(bp, deasserted);
4585}
4586
4587void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4588 u16 index, u8 op, u8 update)
4589{
4590 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
4591
4592 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4593 igu_addr);
4594}
4595
4596static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4597{
	/* No memory barriers */
4599 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
4601}
4602
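/* Returns 0 if the CFC-delete completion belongs to a CNIC CID and was
 * consumed here; returns 1 so the caller handles it as a regular ETH CID.
 */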
4603static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4604 union event_ring_elem *elem)
4605{
4606 u8 err = elem->message.error;
4607
4608 if (!bp->cnic_eth_dev.starting_cid ||
4609 (cid < bp->cnic_eth_dev.starting_cid &&
4610 cid != bp->cnic_eth_dev.iscsi_l2_cid))
4611 return 1;
4612
4613 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
4614
4615 if (unlikely(err)) {
4616
4617 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
4618 cid);
4619 bnx2x_panic_dump(bp);
4620 }
4621 bnx2x_cnic_cfc_comp(bp, cid, err);
4622 return 0;
4623}
4624
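/* Clear the pending state of the last multicast command and, if more
 * commands are queued on the mcast object, send the next one.
 */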
4625static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4626{
4627 struct bnx2x_mcast_ramrod_params rparam;
4628 int rc;
4629
4630 memset(&rparam, 0, sizeof(rparam));
4631
4632 rparam.mcast_obj = &bp->mcast_obj;
4633
4634 netif_addr_lock_bh(bp->dev);

	/* Clear pending state for the last command */
4637 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
4640 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
4641 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
4642 if (rc < 0)
4643 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
4644 rc);
4645 }
4646
4647 netif_addr_unlock_bh(bp->dev);
4648}
4649
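/* Route a classification rules (MAC, and on 57710 also MCAST) completion to
 * the object that issued it and let that object push its next pending command.
 */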
4650static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4651 union event_ring_elem *elem)
4652{
4653 unsigned long ramrod_flags = 0;
4654 int rc = 0;
4655 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
4656 struct bnx2x_vlan_mac_obj *vlan_mac_obj;

	/* Always push next commands out, don't wait here */
4659 __set_bit(RAMROD_CONT, &ramrod_flags);
4660
4661 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
4662 case BNX2X_FILTER_MAC_PENDING:
4663 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4664 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
4665 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4666 else
4667 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4668
4669 break;
4670 case BNX2X_FILTER_MCAST_PENDING:
4671 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
		/* This is only relevant for 57710 where multicast MACs are
		 * configured as unicast MACs using the same ramrod.
		 */
4675 bnx2x_handle_mcast_eqe(bp);
4676 return;
4677 default:
4678 BNX2X_ERR("Unsupported classification command: %d\n",
4679 elem->message.data.eth_event.echo);
4680 return;
4681 }
4682
4683 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
4684
4685 if (rc < 0)
4686 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
4687 else if (rc > 0)
4688 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
4689
4690}
4691
4692static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4693
4694static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4695{
4696 netif_addr_lock_bh(bp->dev);
4697
4698 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

	/* Send rx_mode command again if was requested */
4701 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
4702 bnx2x_set_storm_rx_mode(bp);
4703 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
4704 &bp->sp_state))
4705 bnx2x_set_iscsi_eth_rx_mode(bp, true);
4706 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
4707 &bp->sp_state))
4708 bnx2x_set_iscsi_eth_rx_mode(bp, false);
4709
4710 netif_addr_unlock_bh(bp->dev);
4711}
4712
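/* ACK the MFW for a completed AFEX VIF list GET or SET ramrod */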
4713static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
4714 union event_ring_elem *elem)
4715{
4716 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
4717 DP(BNX2X_MSG_SP,
4718 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
4719 elem->message.data.vif_list_event.func_bit_map);
4720 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
4721 elem->message.data.vif_list_event.func_bit_map);
4722 } else if (elem->message.data.vif_list_event.echo ==
4723 VIF_LIST_RULE_SET) {
4724 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
4725 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
4726 }
4727}
4728
4729
4730static void bnx2x_after_function_update(struct bnx2x *bp)
4731{
4732 int q, rc;
4733 struct bnx2x_fastpath *fp;
4734 struct bnx2x_queue_state_params queue_params = {NULL};
4735 struct bnx2x_queue_update_params *q_update_params =
4736 &queue_params.params.update;

	/* Send Q update command with afex vlan removal values for all Qs */
4739 queue_params.cmd = BNX2X_Q_CMD_UPDATE;

	/* set silent vlan removal values according to vlan mode */
4742 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4743 &q_update_params->update_flags);
4744 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
4745 &q_update_params->update_flags);
4746 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

	/* in access mode mark mask and value are 0 to strip all vlans */
4749 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
4750 q_update_params->silent_removal_value = 0;
4751 q_update_params->silent_removal_mask = 0;
4752 } else {
4753 q_update_params->silent_removal_value =
4754 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
4755 q_update_params->silent_removal_mask = VLAN_VID_MASK;
4756 }
4757
4758 for_each_eth_queue(bp, q) {
		/* Set the appropriate Queue object */
4760 fp = &bp->fp[q];
4761 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* send the ramrod */
4764 rc = bnx2x_queue_state_change(bp, &queue_params);
4765 if (rc < 0)
4766 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4767 q);
4768 }
4769
4770 if (!NO_FCOE(bp)) {
4771 fp = &bp->fp[FCOE_IDX(bp)];
4772 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* clear pending completion bit */
4775 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

		/* mark latest Q bit */
4778 smp_mb__before_clear_bit();
4779 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
4780 smp_mb__after_clear_bit();

		/* send Q update ramrod for FCoE Q */
4783 rc = bnx2x_queue_state_change(bp, &queue_params);
4784 if (rc < 0)
4785 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4786 q);
4787 } else {
		/* If no FCoE ring - ACK MCP now */
4789 bnx2x_link_report(bp);
4790 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4791 }
4792}
4793
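/* Map a CID back to the queue state object that owns it: the FCoE CID gets
 * the dedicated FCoE object, everything else the per-fastpath object.
 */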
4794static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4795 struct bnx2x *bp, u32 cid)
4796{
4797 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4798
4799 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
4800 return &bnx2x_fcoe_sp_obj(bp, q_obj);
4801 else
4802 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
4803}
4804
4805static void bnx2x_eq_int(struct bnx2x *bp)
4806{
4807 u16 hw_cons, sw_cons, sw_prod;
4808 union event_ring_elem *elem;
4809 u8 echo;
4810 u32 cid;
4811 u8 opcode;
4812 int spqe_cnt = 0;
4813 struct bnx2x_queue_sp_obj *q_obj;
4814 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
4815 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
4816
4817 hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1.
	 */
4824 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
4825 hw_cons++;
4826
	/* This function may never run in parallel with itself for a
	 * specific bp and thus there is no need in "paired" read memory
	 * barrier here.
	 */
4831 sw_cons = bp->eq_cons;
4832 sw_prod = bp->eq_prod;
4833
4834 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
4835 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
4836
4837 for (; sw_cons != hw_cons;
4838 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
4839
4840
4841 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
4842
4843 cid = SW_CID(elem->message.data.cfc_del_event.cid);
4844 opcode = elem->message.opcode;

		/* handle eq element */
4848 switch (opcode) {
4849 case EVENT_RING_OPCODE_STAT_QUERY:
4850 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
4851 "got statistics comp event %d\n",
4852 bp->stats_comp++);
			/* nothing to do with stats comp */
4854 goto next_spqe;
4855
4856 case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/* we may want to verify here that the bp state is
			 * HALTING
			 */
4862 DP(BNX2X_MSG_SP,
4863 "got delete ramrod for MULTI[%d]\n", cid);
4864
4865 if (CNIC_LOADED(bp) &&
4866 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
4867 goto next_spqe;
4868
4869 q_obj = bnx2x_cid_to_q_obj(bp, cid);
4870
4871 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
4872 break;
4873
4874
4875
4876 goto next_spqe;
4877
4878 case EVENT_RING_OPCODE_STOP_TRAFFIC:
4879 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
4880 if (f_obj->complete_cmd(bp, f_obj,
4881 BNX2X_F_CMD_TX_STOP))
4882 break;
4883 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
4884 goto next_spqe;
4885
4886 case EVENT_RING_OPCODE_START_TRAFFIC:
4887 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
4888 if (f_obj->complete_cmd(bp, f_obj,
4889 BNX2X_F_CMD_TX_START))
4890 break;
4891 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
4892 goto next_spqe;
4893
4894 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
4895 echo = elem->message.data.function_update_event.echo;
4896 if (echo == SWITCH_UPDATE) {
4897 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4898 "got FUNC_SWITCH_UPDATE ramrod\n");
4899 if (f_obj->complete_cmd(
4900 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
4901 break;
4902
4903 } else {
4904 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
4905 "AFEX: ramrod completed FUNCTION_UPDATE\n");
4906 f_obj->complete_cmd(bp, f_obj,
4907 BNX2X_F_CMD_AFEX_UPDATE);
4908
				/* We will perform the Queues update from
				 * sp_rtnl task as all Queue SP operations
				 * should run under rtnl_lock.
				 */
4913 smp_mb__before_clear_bit();
4914 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
4915 &bp->sp_rtnl_state);
4916 smp_mb__after_clear_bit();
4917
4918 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4919 }
4920
4921 goto next_spqe;
4922
4923 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
4924 f_obj->complete_cmd(bp, f_obj,
4925 BNX2X_F_CMD_AFEX_VIFLISTS);
4926 bnx2x_after_afex_vif_lists(bp, elem);
4927 goto next_spqe;
4928 case EVENT_RING_OPCODE_FUNCTION_START:
4929 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4930 "got FUNC_START ramrod\n");
4931 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
4932 break;
4933
4934 goto next_spqe;
4935
4936 case EVENT_RING_OPCODE_FUNCTION_STOP:
4937 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4938 "got FUNC_STOP ramrod\n");
4939 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
4940 break;
4941
4942 goto next_spqe;
4943 }
4944
4945 switch (opcode | bp->state) {
4946 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
4947 BNX2X_STATE_OPEN):
4948 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
4949 BNX2X_STATE_OPENING_WAIT4_PORT):
4950 cid = elem->message.data.eth_event.echo &
4951 BNX2X_SWCID_MASK;
4952 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
4953 cid);
4954 rss_raw->clear_pending(rss_raw);
4955 break;
4956
4957 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
4958 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
4959 case (EVENT_RING_OPCODE_SET_MAC |
4960 BNX2X_STATE_CLOSING_WAIT4_HALT):
4961 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
4962 BNX2X_STATE_OPEN):
4963 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
4964 BNX2X_STATE_DIAG):
4965 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
4966 BNX2X_STATE_CLOSING_WAIT4_HALT):
4967 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
4968 bnx2x_handle_classification_eqe(bp, elem);
4969 break;
4970
4971 case (EVENT_RING_OPCODE_MULTICAST_RULES |
4972 BNX2X_STATE_OPEN):
4973 case (EVENT_RING_OPCODE_MULTICAST_RULES |
4974 BNX2X_STATE_DIAG):
4975 case (EVENT_RING_OPCODE_MULTICAST_RULES |
4976 BNX2X_STATE_CLOSING_WAIT4_HALT):
4977 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
4978 bnx2x_handle_mcast_eqe(bp);
4979 break;
4980
4981 case (EVENT_RING_OPCODE_FILTERS_RULES |
4982 BNX2X_STATE_OPEN):
4983 case (EVENT_RING_OPCODE_FILTERS_RULES |
4984 BNX2X_STATE_DIAG):
4985 case (EVENT_RING_OPCODE_FILTERS_RULES |
4986 BNX2X_STATE_CLOSING_WAIT4_HALT):
4987 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
4988 bnx2x_handle_rx_mode_eqe(bp);
4989 break;
4990 default:
			/* unknown event log error and continue */
4992 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
4993 elem->message.opcode, bp->state);
4994 }
4995next_spqe:
4996 spqe_cnt++;
4997 }
4998
4999 smp_mb__before_atomic_inc();
5000 atomic_add(spqe_cnt, &bp->eq_spq_left);
5001
5002 bp->eq_cons = sw_cons;
5003 bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
5005 smp_wmb();

	/* update producer */
5008 bnx2x_update_eq_prod(bp, bp->eq_prod);
5009}
5010
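/* Slow-path task: runs from bnx2x_wq on every slow-path interrupt and
 * dispatches HW attentions, EQ completions and FCoE work, then re-enables
 * the default status block interrupt lines.
 */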
5011static void bnx2x_sp_task(struct work_struct *work)
5012{
5013 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5014 u16 status;
5015
5016 status = bnx2x_update_dsb_idx(bp);
5017
5018
5019
5020 DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
5021
	/* HW attentions */
5023 if (status & BNX2X_DEF_SB_ATT_IDX) {
5024 bnx2x_attn_int(bp);
5025 status &= ~BNX2X_DEF_SB_ATT_IDX;
5026 }
5027

	/* SP events: STAT_QUERY and others */
5029 if (status & BNX2X_DEF_SB_IDX) {
5030 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5031
5032 if (FCOE_INIT(bp) &&
5033 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			/* Prevent local bottom-halves from running as
			 * we are about to change the local NAPI list.
			 */
5038 local_bh_disable();
5039 napi_schedule(&bnx2x_fcoe(bp, napi));
5040 local_bh_enable();
5041 }

		/* Handle EQ completions */
5044 bnx2x_eq_int(bp);
5045
5046 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5047 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5048
5049 status &= ~BNX2X_DEF_SB_IDX;
5050 }
5051
5052 if (unlikely(status))
5053 DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
5054 status);
5055
5056 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5057 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);

	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
5060 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5061 &bp->sp_state)) {
5062 bnx2x_link_report(bp);
5063 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5064 }
5065}
5066
5067irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5068{
5069 struct net_device *dev = dev_instance;
5070 struct bnx2x *bp = netdev_priv(dev);
5071
5072 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5073 IGU_INT_DISABLE, 0);
5074
5075#ifdef BNX2X_STOP_ON_ERROR
5076 if (unlikely(bp->panic))
5077 return IRQ_HANDLED;
5078#endif
5079
5080 if (CNIC_LOADED(bp)) {
5081 struct cnic_ops *c_ops;
5082
5083 rcu_read_lock();
5084 c_ops = rcu_dereference(bp->cnic_ops);
5085 if (c_ops)
5086 c_ops->cnic_handler(bp->cnic_data, NULL);
5087 rcu_read_unlock();
5088 }
5089
5090 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
5091
5092 return IRQ_HANDLED;
5093}
5094
/* end of slow path */

/* General service functions */
5097
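/* Write the driver heartbeat sequence to the shmem mailbox so the MCP
 * firmware can tell that the driver is alive (see bnx2x_timer() below).
 */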
5098void bnx2x_drv_pulse(struct bnx2x *bp)
5099{
5100 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5101 bp->fw_drv_pulse_wr_seq);
5102}
5103
5104
5105static void bnx2x_timer(unsigned long data)
5106{
5107 struct bnx2x *bp = (struct bnx2x *) data;
5108
5109 if (!netif_running(bp->dev))
5110 return;
5111
5112 if (!BP_NOMCP(bp)) {
5113 int mb_idx = BP_FW_MB_IDX(bp);
5114 u32 drv_pulse;
5115 u32 mcp_pulse;
5116
5117 ++bp->fw_drv_pulse_wr_seq;
5118 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5119
5120 drv_pulse = bp->fw_drv_pulse_wr_seq;
5121 bnx2x_drv_pulse(bp);
5122
5123 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5124 MCP_PULSE_SEQ_MASK);
5125

		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
5128 if ((drv_pulse != mcp_pulse) &&
5129 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5130
5131 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5132 drv_pulse, mcp_pulse);
5133 }
5134 }
5135
5136 if (bp->state == BNX2X_STATE_OPEN)
5137 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5138
5139 mod_timer(&bp->timer, jiffies + bp->current_interval);
5140}
5141
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */
5149
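/* Fill len bytes at GRC address addr with the given pattern, using 32-bit
 * writes when both addr and len are dword-aligned and byte writes otherwise.
 */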
5150static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5151{
5152 u32 i;
5153 if (!(len%4) && !(addr%4))
5154 for (i = 0; i < len; i += 4)
5155 REG_WR(bp, addr + i, fill);
5156 else
5157 for (i = 0; i < len; i++)
5158 REG_WR8(bp, addr + i, fill);
5159
5160}
5161
5162
5163static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5164 int fw_sb_id,
5165 u32 *sb_data_p,
5166 u32 data_size)
5167{
5168 int index;
5169 for (index = 0; index < data_size; index++)
5170 REG_WR(bp, BAR_CSTRORM_INTMEM +
5171 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5172 sizeof(u32)*index,
5173 *(sb_data_p + index));
5174}
5175
5176static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5177{
5178 u32 *sb_data_p;
5179 u32 data_size = 0;
5180 struct hc_status_block_data_e2 sb_data_e2;
5181 struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
5184 if (!CHIP_IS_E1x(bp)) {
5185 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5186 sb_data_e2.common.state = SB_DISABLED;
5187 sb_data_e2.common.p_func.vf_valid = false;
5188 sb_data_p = (u32 *)&sb_data_e2;
5189 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5190 } else {
5191 memset(&sb_data_e1x, 0,
5192 sizeof(struct hc_status_block_data_e1x));
5193 sb_data_e1x.common.state = SB_DISABLED;
5194 sb_data_e1x.common.p_func.vf_valid = false;
5195 sb_data_p = (u32 *)&sb_data_e1x;
5196 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5197 }
5198 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5199
5200 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5201 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5202 CSTORM_STATUS_BLOCK_SIZE);
5203 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5204 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5205 CSTORM_SYNC_BLOCK_SIZE);
5206}

/* helper: writes SP SB data to FW */
5209static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5210 struct hc_sp_status_block_data *sp_sb_data)
5211{
5212 int func = BP_FUNC(bp);
5213 int i;
5214 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5215 REG_WR(bp, BAR_CSTRORM_INTMEM +
5216 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5217 i*sizeof(u32),
5218 *((u32 *)sp_sb_data + i));
5219}
5220
5221static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5222{
5223 int func = BP_FUNC(bp);
5224 struct hc_sp_status_block_data sp_sb_data;
5225 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5226
5227 sp_sb_data.state = SB_DISABLED;
5228 sp_sb_data.p_func.vf_valid = false;
5229
5230 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5231
5232 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5233 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5234 CSTORM_SP_STATUS_BLOCK_SIZE);
5235 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5236 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5237 CSTORM_SP_SYNC_BLOCK_SIZE);
5238
5239}
5240
5241
5242static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5243 int igu_sb_id, int igu_seg_id)
5244{
5245 hc_sm->igu_sb_id = igu_sb_id;
5246 hc_sm->igu_seg_id = igu_seg_id;
5247 hc_sm->timer_value = 0xFF;
5248 hc_sm->time_to_expire = 0xFFFFFFFF;
5249}
5250
5251
5252
5253static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5254{
	/* zero out state machine indices */
	/* rx indices */
5257 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

	/* tx indices */
5260 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5261 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5262 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5263 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

	/* map indices */

	/* rx indices */
5267 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5268 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;

	/* tx indices */
5271 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5272 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5273 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5274 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5275 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5276 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5277 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5278 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5279}
5280
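/* Program one fastpath status block: zero it, build the chip-specific
 * (E1x vs E2) SB data with the host DMA address and function/VF identity,
 * attach the RX and TX state machines, and write the result to CSTORM.
 */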
5281static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5282 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5283{
5284 int igu_seg_id;
5285
5286 struct hc_status_block_data_e2 sb_data_e2;
5287 struct hc_status_block_data_e1x sb_data_e1x;
5288 struct hc_status_block_sm *hc_sm_p;
5289 int data_size;
5290 u32 *sb_data_p;
5291
5292 if (CHIP_INT_MODE_IS_BC(bp))
5293 igu_seg_id = HC_SEG_ACCESS_NORM;
5294 else
5295 igu_seg_id = IGU_SEG_ACCESS_NORM;
5296
5297 bnx2x_zero_fp_sb(bp, fw_sb_id);
5298
5299 if (!CHIP_IS_E1x(bp)) {
5300 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5301 sb_data_e2.common.state = SB_ENABLED;
5302 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5303 sb_data_e2.common.p_func.vf_id = vfid;
5304 sb_data_e2.common.p_func.vf_valid = vf_valid;
5305 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5306 sb_data_e2.common.same_igu_sb_1b = true;
5307 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5308 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5309 hc_sm_p = sb_data_e2.common.state_machine;
5310 sb_data_p = (u32 *)&sb_data_e2;
5311 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5312 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5313 } else {
5314 memset(&sb_data_e1x, 0,
5315 sizeof(struct hc_status_block_data_e1x));
5316 sb_data_e1x.common.state = SB_ENABLED;
5317 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5318 sb_data_e1x.common.p_func.vf_id = 0xff;
5319 sb_data_e1x.common.p_func.vf_valid = false;
5320 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5321 sb_data_e1x.common.same_igu_sb_1b = true;
5322 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5323 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5324 hc_sm_p = sb_data_e1x.common.state_machine;
5325 sb_data_p = (u32 *)&sb_data_e1x;
5326 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5327 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5328 }
5329
5330 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5331 igu_sb_id, igu_seg_id);
5332 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5333 igu_sb_id, igu_seg_id);
5334
5335 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW - PCI guarantees endianity of regpair */
5338 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5339}
5340
5341static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5342 u16 tx_usec, u16 rx_usec)
5343{
5344 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5345 false, rx_usec);
5346 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5347 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5348 tx_usec);
5349 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5350 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5351 tx_usec);
5352 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5353 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5354 tx_usec);
5355}
5356
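/* Program the default (slow-path) status block: the attention segment with
 * the per-group AEU masks and the SP segment, then enable its IGU interrupt.
 */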
5357static void bnx2x_init_def_sb(struct bnx2x *bp)
5358{
5359 struct host_sp_status_block *def_sb = bp->def_status_blk;
5360 dma_addr_t mapping = bp->def_status_blk_mapping;
5361 int igu_sp_sb_index;
5362 int igu_seg_id;
5363 int port = BP_PORT(bp);
5364 int func = BP_FUNC(bp);
5365 int reg_offset, reg_offset_en5;
5366 u64 section;
5367 int index;
5368 struct hc_sp_status_block_data sp_sb_data;
5369 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5370
5371 if (CHIP_INT_MODE_IS_BC(bp)) {
5372 igu_sp_sb_index = DEF_SB_IGU_ID;
5373 igu_seg_id = HC_SEG_ACCESS_DEF;
5374 } else {
5375 igu_sp_sb_index = bp->igu_dsb_id;
5376 igu_seg_id = IGU_SEG_ACCESS_DEF;
5377 }

	/* ATTN */
5380 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5381 atten_status_block);
5382 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
5383
5384 bp->attn_state = 0;
5385
5386 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5387 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5388 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
5389 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
5390 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5391 int sindex;
5392
5393 for (sindex = 0; sindex < 4; sindex++)
5394 bp->attn_group[index].sig[sindex] =
5395 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
5396
5397 if (!CHIP_IS_E1x(bp))
			/* enable5 is separate from the rest of the
			 * registers, and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
5403 bp->attn_group[index].sig[4] = REG_RD(bp,
5404 reg_offset_en5 + 0x4*index);
5405 else
5406 bp->attn_group[index].sig[4] = 0;
5407 }
5408
5409 if (bp->common.int_block == INT_BLOCK_HC) {
5410 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5411 HC_REG_ATTN_MSG0_ADDR_L);
5412
5413 REG_WR(bp, reg_offset, U64_LO(section));
5414 REG_WR(bp, reg_offset + 4, U64_HI(section));
5415 } else if (!CHIP_IS_E1x(bp)) {
5416 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5417 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5418 }
5419
5420 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5421 sp_sb);
5422
5423 bnx2x_zero_sp_sb(bp);
5424
5425 sp_sb_data.state = SB_ENABLED;
5426 sp_sb_data.host_sb_addr.lo = U64_LO(section);
5427 sp_sb_data.host_sb_addr.hi = U64_HI(section);
5428 sp_sb_data.igu_sb_id = igu_sp_sb_index;
5429 sp_sb_data.igu_seg_id = igu_seg_id;
5430 sp_sb_data.p_func.pf_id = func;
5431 sp_sb_data.p_func.vnic_id = BP_VN(bp);
5432 sp_sb_data.p_func.vf_id = 0xff;
5433
5434 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5435
5436 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
5437}
5438
5439void bnx2x_update_coalesce(struct bnx2x *bp)
5440{
5441 int i;
5442
5443 for_each_eth_queue(bp, i)
5444 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
5445 bp->tx_ticks, bp->rx_ticks);
5446}
5447
5448static void bnx2x_init_sp_ring(struct bnx2x *bp)
5449{
5450 spin_lock_init(&bp->spq_lock);
5451 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
5452
5453 bp->spq_prod_idx = 0;
5454 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5455 bp->spq_prod_bd = bp->spq;
5456 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5457}
5458
5459static void bnx2x_init_eq_ring(struct bnx2x *bp)
5460{
5461 int i;
5462 for (i = 1; i <= NUM_EQ_PAGES; i++) {
5463 union event_ring_elem *elem =
5464 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
5465
5466 elem->next_page.addr.hi =
5467 cpu_to_le32(U64_HI(bp->eq_mapping +
5468 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5469 elem->next_page.addr.lo =
5470 cpu_to_le32(U64_LO(bp->eq_mapping +
5471 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
5472 }
5473 bp->eq_cons = 0;
5474 bp->eq_prod = NUM_EQ_DESC;
5475 bp->eq_cons_sb = BNX2X_EQ_INDEX;
5476
5477 atomic_set(&bp->eq_spq_left,
5478 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
5479}
5480

/* called with netif_addr_lock_bh() */
5483void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5484 unsigned long rx_mode_flags,
5485 unsigned long rx_accept_flags,
5486 unsigned long tx_accept_flags,
5487 unsigned long ramrod_flags)
5488{
5489 struct bnx2x_rx_mode_ramrod_params ramrod_param;
5490 int rc;
5491
5492 memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Prepare ramrod parameters */
5495 ramrod_param.cid = 0;
5496 ramrod_param.cl_id = cl_id;
5497 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5498 ramrod_param.func_id = BP_FUNC(bp);
5499
5500 ramrod_param.pstate = &bp->sp_state;
5501 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5502
5503 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5504 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5505
5506 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5507
5508 ramrod_param.ramrod_flags = ramrod_flags;
5509 ramrod_param.rx_mode_flags = rx_mode_flags;
5510
5511 ramrod_param.rx_accept_flags = rx_accept_flags;
5512 ramrod_param.tx_accept_flags = tx_accept_flags;
5513
5514 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5515 if (rc < 0) {
5516 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5517 return;
5518 }
5519}

/* called with netif_addr_lock_bh() */
5522void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5523{
5524 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5525 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5526
5527 if (!NO_FCOE(bp))

		/* Configure rx_mode of FCoE Queue */
5530 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5531
5532 switch (bp->rx_mode) {
5533 case BNX2X_RX_MODE_NONE:
		/*
		 * 'drop all' supersedes any accept flags that may have been
		 * passed to the function.
		 */
5538 break;
5539 case BNX2X_RX_MODE_NORMAL:
5540 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5541 __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
5542 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);

		/* internal switching mode */
5545 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5546 __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
5547 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5548
5549 break;
5550 case BNX2X_RX_MODE_ALLMULTI:
5551 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5552 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5553 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);

		/* internal switching mode */
5556 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5557 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5558 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5559
5560 break;
5561 case BNX2X_RX_MODE_PROMISC:
		/* According to definition of SI mode, iface in promisc mode
		 * should receive matched and unmatched (in resolution of port)
		 * unicast packets.
		 */
5566 __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
5567 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5568 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5569 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);

		/* internal switching mode */
5572 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5573 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5574
5575 if (IS_MF_SI(bp))
5576 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
5577 else
5578 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5579
5580 break;
5581 default:
5582 BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
5583 return;
5584 }
5585
5586 if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
5587 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
5588 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
5589 }
5590
5591 __set_bit(RAMROD_RX, &ramrod_flags);
5592 __set_bit(RAMROD_TX, &ramrod_flags);
5593
5594 bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
5595 tx_accept_flags, ramrod_flags);
5596}
5597
5598static void bnx2x_init_internal_common(struct bnx2x *bp)
5599{
5600 int i;
5601
5602 if (IS_MF_SI(bp))
		/* In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to NIG LLH.
		 */
5608 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5609 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
	else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
5611 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5612 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);

	/* Zero this manually as its initialization is
	 * currently missing in the initTool.
	 */
5616 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5617 REG_WR(bp, BAR_USTRORM_INTMEM +
5618 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5619 if (!CHIP_IS_E1x(bp)) {
5620 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
5621 CHIP_INT_MODE_IS_BC(bp) ?
5622 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
5623 }
5624}
5625
5626static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5627{
5628 switch (load_code) {
5629 case FW_MSG_CODE_DRV_LOAD_COMMON:
5630 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5631 bnx2x_init_internal_common(bp);
		/* no break */

5634 case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */
		/* no break */

5638 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		 * initialized inside bnx2x_pf_init
		 */
5641 break;
5642
5643 default:
5644 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5645 break;
5646 }
5647}
5648
5649static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5650{
5651 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
5652}
5653
5654static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5655{
5656 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
5657}
5658
5659static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
5660{
5661 if (CHIP_IS_E1x(fp->bp))
5662 return BP_L_ID(fp->bp) + fp->index;
5663 else
5664 return bnx2x_fp_igu_sb_id(fp);
5665}
5666
5667static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5668{
5669 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
5670 u8 cos;
5671 unsigned long q_type = 0;
5672 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
5673 fp->rx_queue = fp_idx;
5674 fp->cid = fp_idx;
5675 fp->cl_id = bnx2x_fp_cl_id(fp);
5676 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
5677 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
5678
5679 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);

	/* qZone id equals to FW (per path) client id */
5682 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

	/* Setup SB indices */
5685 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

	/* Configure Queue State object */
5688 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
5689 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
5690
5691 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);

	/* init tx data */
5694 for_each_cos_in_tx_queue(fp, cos) {
5695 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
5696 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
5697 FP_COS_TO_TXQ(fp, cos, bp),
5698 BNX2X_TX_SB_INDEX_BASE + cos, fp);
5699 cids[cos] = fp->txdata_ptr[cos]->cid;
5700 }
5701
5702 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
5703 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
5704 bnx2x_sp_mapping(bp, q_rdata), q_type);

	/* Configure classification DBs */
5709 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
5710
5711 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
5712 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
5713 fp->igu_sb_id);
5714 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
5715 fp->fw_sb_id, fp->igu_sb_id);
5716
5717 bnx2x_update_fpsb_idx(fp);
5718}
5719
5720static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5721{
5722 int i;
5723
5724 for (i = 1; i <= NUM_TX_RINGS; i++) {
5725 struct eth_tx_next_bd *tx_next_bd =
5726 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5727
5728 tx_next_bd->addr_hi =
5729 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
5730 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5731 tx_next_bd->addr_lo =
5732 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
5733 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5734 }
5735
5736 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
5737 txdata->tx_db.data.zero_fill1 = 0;
5738 txdata->tx_db.data.prod = 0;
5739
5740 txdata->tx_pkt_prod = 0;
5741 txdata->tx_pkt_cons = 0;
5742 txdata->tx_bd_prod = 0;
5743 txdata->tx_bd_cons = 0;
5744 txdata->tx_pkt = 0;
5745}
5746
5747static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
5748{
5749 int i;
5750
5751 for_each_tx_queue_cnic(bp, i)
5752 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
5753}
5754static void bnx2x_init_tx_rings(struct bnx2x *bp)
5755{
5756 int i;
5757 u8 cos;
5758
5759 for_each_eth_queue(bp, i)
5760 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5761 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
5762}
5763
5764void bnx2x_nic_init_cnic(struct bnx2x *bp)
5765{
5766 if (!NO_FCOE(bp))
5767 bnx2x_init_fcoe_fp(bp);
5768
5769 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
5770 BNX2X_VF_ID_INVALID, false,
5771 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

	/* ensure status block indices were read */
5774 rmb();
5775 bnx2x_init_rx_rings_cnic(bp);
5776 bnx2x_init_tx_rings_cnic(bp);

	/* flush all */
5779 mb();
5780 mmiowb();
5781}
5782
5783void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5784{
5785 int i;
5786
5787 for_each_eth_queue(bp, i)
5788 bnx2x_init_eth_fp(bp, i);
5789
5790 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
5791 bp->common.shmem_base, bp->common.shmem2_base,
5792 BP_PORT(bp));

	/* ensure status block indices were read */
5794 rmb();
5795
5796 bnx2x_init_def_sb(bp);
5797 bnx2x_update_dsb_idx(bp);
5798 bnx2x_init_rx_rings(bp);
5799 bnx2x_init_tx_rings(bp);
5800 bnx2x_init_sp_ring(bp);
5801 bnx2x_init_eq_ring(bp);
5802 bnx2x_init_internal(bp, load_code);
5803 bnx2x_pf_init(bp);
5804 bnx2x_stats_init(bp);

	/* flush all before enabling interrupts */
5807 mb();
5808 mmiowb();
5809
5810 bnx2x_int_enable(bp);

	/* Check for SPIO5 */
5813 bnx2x_attn_int_deasserted0(bp,
5814 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5815 AEU_INPUTS_ATTN_BITS_SPIO5);
5816}
5817
/* end of nic init */

/*
 * gzip service functions
 */
5823
5824static int bnx2x_gunzip_init(struct bnx2x *bp)
5825{
5826 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
5827 &bp->gunzip_mapping, GFP_KERNEL);
5828 if (bp->gunzip_buf == NULL)
5829 goto gunzip_nomem1;
5830
5831 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5832 if (bp->strm == NULL)
5833 goto gunzip_nomem2;
5834
5835 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
5836 if (bp->strm->workspace == NULL)
5837 goto gunzip_nomem3;
5838
5839 return 0;
5840
5841gunzip_nomem3:
5842 kfree(bp->strm);
5843 bp->strm = NULL;
5844
5845gunzip_nomem2:
5846 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5847 bp->gunzip_mapping);
5848 bp->gunzip_buf = NULL;
5849
5850gunzip_nomem1:
5851 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
5852 return -ENOMEM;
5853}
5854
5855static void bnx2x_gunzip_end(struct bnx2x *bp)
5856{
5857 if (bp->strm) {
5858 vfree(bp->strm->workspace);
5859 kfree(bp->strm);
5860 bp->strm = NULL;
5861 }
5862
5863 if (bp->gunzip_buf) {
5864 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5865 bp->gunzip_mapping);
5866 bp->gunzip_buf = NULL;
5867 }
5868}
5869
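/* Inflate a gzip-wrapped firmware blob into bp->gunzip_buf: validate the
 * gzip magic, skip the optional FNAME field, then run zlib_inflate() in raw
 * deflate mode. Returns 0 on Z_STREAM_END and a zlib error code otherwise;
 * the output length, in dwords, is left in bp->gunzip_outlen.
 */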
5870static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5871{
5872 int n, rc;

	/* check gzip header */
5875 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5876 BNX2X_ERR("Bad gzip header\n");
5877 return -EINVAL;
5878 }
5879
5880 n = 10;
5881
5882#define FNAME 0x8
	/* skip the NUL-terminated original-file-name field, if present */
5884 if (zbuf[3] & FNAME)
5885 while ((zbuf[n++] != 0) && (n < len));
5886
5887 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5888 bp->strm->avail_in = len - n;
5889 bp->strm->next_out = bp->gunzip_buf;
5890 bp->strm->avail_out = FW_BUF_SIZE;
5891
5892 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5893 if (rc != Z_OK)
5894 return rc;
5895
5896 rc = zlib_inflate(bp->strm, Z_FINISH);
5897 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5898 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5899 bp->strm->msg);
5900
5901 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5902 if (bp->gunzip_outlen & 0x3)
5903 netdev_err(bp->dev,
5904 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5905 bp->gunzip_outlen);
5906 bp->gunzip_outlen >>= 2;
5907
5908 zlib_inflateEnd(bp->strm);
5909
5910 if (rc == Z_STREAM_END)
5911 return 0;
5912
5913 return rc;
5914}
5915
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
5923static void bnx2x_lb_pckt(struct bnx2x *bp)
5924{
5925 u32 wb_write[3];

	/* Ethernet source and destination addresses */
5928 wb_write[0] = 0x55555555;
5929 wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
5931 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
5934 wb_write[0] = 0x09000000;
5935 wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
5937 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5938}
5939
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
5944static int bnx2x_int_mem_test(struct bnx2x *bp)
5945{
5946 int factor;
5947 int count, i;
5948 u32 val = 0;
5949
5950 if (CHIP_REV_IS_FPGA(bp))
5951 factor = 120;
5952 else if (CHIP_REV_IS_EMUL(bp))
5953 factor = 200;
5954 else
5955 factor = 1;

	/* Disable inputs of parser neighbor blocks */
5958 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5959 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5960 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5961 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
5964 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
5967 bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 1 packet of size 0x10 */
5971 count = 1000 * factor;
5972 while (count) {
5973
5974 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5975 val = *bnx2x_sp(bp, wb_data[0]);
5976 if (val == 0x10)
5977 break;
5978
5979 msleep(10);
5980 count--;
5981 }
5982 if (val != 0x10) {
5983 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5984 return -1;
5985 }

	/* Wait until PRS register shows 1 packet */
5988 count = 1000 * factor;
5989 while (count) {
5990 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5991 if (val == 1)
5992 break;
5993
5994 msleep(10);
5995 count--;
5996 }
5997 if (val != 0x1) {
5998 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5999 return -2;
6000 }

	/* Reset and init BRB, PRS */
6003 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6004 msleep(50);
6005 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6006 msleep(50);
6007 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6008 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6009
6010 DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
6013 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6014 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6015 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6016 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
6019 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
6022 for (i = 0; i < 10; i++)
6023 bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	 * packets of size 11*0x10 = 0xb0
	 */
6027 count = 1000 * factor;
6028 while (count) {
6029
6030 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6031 val = *bnx2x_sp(bp, wb_data[0]);
6032 if (val == 0xb0)
6033 break;
6034
6035 msleep(10);
6036 count--;
6037 }
6038 if (val != 0xb0) {
6039 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6040 return -3;
6041 }

	/* Wait until PRS register shows 2 packets */
6044 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6045 if (val != 2)
6046 BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
6049 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
6052 msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
6054 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6055 if (val != 3)
6056 BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
6059 for (i = 0; i < 11; i++)
6060 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6061 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6062 if (val != 1) {
6063 BNX2X_ERR("clear of NIG failed\n");
6064 return -4;
6065 }

	/* Reset and init BRB, PRS, NIG */
6068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6069 msleep(50);
6070 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6071 msleep(50);
6072 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6073 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6074 if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
6076 REG_WR(bp, PRS_REG_NIC_MODE, 1);

	/* Enable inputs of parser neighbor blocks */
6079 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6080 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6081 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6082 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6083
6084 DP(NETIF_MSG_HW, "done\n");
6085
6086 return 0;
6087}
6088
6089static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6090{
6091 u32 val;
6092
6093 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6094 if (!CHIP_IS_E1x(bp))
6095 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6096 else
6097 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6098 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6099 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
6106 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6107 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6108 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6109 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6110 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6111 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6112
6113
6114 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6115 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6116 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6117
6118
6119 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6120 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6121 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6122 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6123
6124
6125
6126 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6127 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6128 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6129 if (!CHIP_IS_E1x(bp))
6130 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6131 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6132 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6133
6134 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6135 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6136 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6137
6138
6139 if (!CHIP_IS_E1x(bp))
		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6141 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6142
6143 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6144 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6145
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
6147}
6148
6149static void bnx2x_reset_common(struct bnx2x *bp)
6150{
6151 u32 val = 0x1400;

	/* reset_common */
6154 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6155 0xd3ffff7f);
6156
6157 if (CHIP_IS_E3(bp)) {
6158 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6159 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6160 }
6161
6162 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6163}
6164
6165static void bnx2x_setup_dmae(struct bnx2x *bp)
6166{
6167 bp->dmae_ready = 0;
6168 spin_lock_init(&bp->dmae_lock);
6169}
6170
6171static void bnx2x_init_pxp(struct bnx2x *bp)
6172{
6173 u16 devctl;
6174 int r_order, w_order;
6175
6176 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6177 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6178 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6179 if (bp->mrrs == -1)
6180 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6181 else {
6182 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6183 r_order = bp->mrrs;
6184 }
6185
6186 bnx2x_init_pxp_arb(bp, r_order, w_order);
6187}
6188
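/* Arm the SPIO5 fan-failure attention when the board configuration in shmem
 * (or, per port, the PHY type) indicates that fan monitoring is required.
 */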
6189static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6190{
6191 int is_required;
6192 u32 val;
6193 int port;
6194
6195 if (BP_NOMCP(bp))
6196 return;
6197
6198 is_required = 0;
6199 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6200 SHARED_HW_CFG_FAN_FAILURE_MASK;
6201
6202 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6203 is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
6210 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6211 for (port = PORT_0; port < PORT_MAX; port++) {
6212 is_required |=
6213 bnx2x_fan_failure_det_req(
6214 bp,
6215 bp->common.shmem_base,
6216 bp->common.shmem2_base,
6217 port);
6218 }
6219
6220 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6221
6222 if (is_required == 0)
6223 return;

	/* Fan failure is indicated by SPIO 5 */
6226 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);

	/* set to active low mode */
6229 val = REG_RD(bp, MISC_REG_SPIO_INT);
6230 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6231 REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
6234 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6235 val |= MISC_SPIO_SPIO5;
6236 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6237}
6238
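/* Make subsequent GRC accesses of this function appear to come from
 * pretend_func_num. The read-back of the pretend register flushes the write
 * so the new identity is in effect on return; callers restore it afterwards
 * with bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)).
 */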
6239static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
6240{
6241 u32 offset = 0;
6242
6243 if (CHIP_IS_E1(bp))
6244 return;
6245 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
6246 return;
6247
6248 switch (BP_ABS_FUNC(bp)) {
6249 case 0:
6250 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
6251 break;
6252 case 1:
6253 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
6254 break;
6255 case 2:
6256 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
6257 break;
6258 case 3:
6259 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
6260 break;
6261 case 4:
6262 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
6263 break;
6264 case 5:
6265 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
6266 break;
6267 case 6:
6268 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
6269 break;
6270 case 7:
6271 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
6272 break;
6273 default:
6274 return;
6275 }
6276
6277 REG_WR(bp, offset, pretend_func_num);
6278 REG_RD(bp, offset);
6279 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
6280}
6281
6282void bnx2x_pf_disable(struct bnx2x *bp)
6283{
6284 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6285 val &= ~IGU_PF_CONF_FUNC_EN;
6286
6287 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6288 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6289 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6290}
6291
6292static void bnx2x__common_init_phy(struct bnx2x *bp)
6293{
6294 u32 shmem_base[2], shmem2_base[2];
6295
6296 if (SHMEM2_RD(bp, size) >
6297 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6298 return;
6299 shmem_base[0] = bp->common.shmem_base;
6300 shmem2_base[0] = bp->common.shmem2_base;
6301 if (!CHIP_IS_E1x(bp)) {
6302 shmem_base[1] =
6303 SHMEM2_RD(bp, other_shmem_base_addr);
6304 shmem2_base[1] =
6305 SHMEM2_RD(bp, other_shmem2_base_addr);
6306 }
6307 bnx2x_acquire_phy_lock(bp);
6308 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6309 bp->common.chip_id);
6310 bnx2x_release_phy_lock(bp);
6311}
6312
6313
/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */
6318static int bnx2x_init_hw_common(struct bnx2x *bp)
6319{
6320 u32 val;
6321
6322 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
6323
	/*
	 * take the UNDI lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
6328 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6329
6330 bnx2x_reset_common(bp);
6331 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6332
6333 val = 0xfffc;
6334 if (CHIP_IS_E3(bp)) {
6335 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6336 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6337 }
6338 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
6339
6340 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6341
6342 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
6343
6344 if (!CHIP_IS_E1x(bp)) {
6345 u8 abs_func_id;
6346
		/*
		 * In 4-port mode or 2-port mode we need to turn off
		 * master-enable for everyone; after that, turn it back on
		 * for self. So, we disregard multi-function or not, and
		 * always disable for all functions on the given path, i.e.
		 * 0,2,4,6 for path 0 and 1,3,5,7 for path 1
		 */
6354 for (abs_func_id = BP_PATH(bp);
6355 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
6356 if (abs_func_id == BP_ABS_FUNC(bp)) {
6357 REG_WR(bp,
6358 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
6359 1);
6360 continue;
6361 }
6362
6363 bnx2x_pretend_func(bp, abs_func_id);
6364
6365 bnx2x_pf_disable(bp);
6366 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6367 }
6368 }
6369
6370 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
6371 if (CHIP_IS_E1(bp)) {
6372
6373
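		/* enable HW interrupt from PXP on USDM overflow
		 * (bug workaround in DMAE handling)
		 */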
6374 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6375 }
6376
6377 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
6378 bnx2x_init_pxp(bp);
6379
6380#ifdef __BIG_ENDIAN
6381 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6382 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6383 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6384 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6385 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6386
6387 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6388
6389
6390 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6391 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6392 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6393 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6394#endif
6395
6396 bnx2x_ilt_init_page_size(bp, INITOP_SET);
6397
6398 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6399 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6400
6401
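	/* let the HW do its magic... */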
6402 msleep(100);
6403
6404 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6405 if (val != 1) {
6406 BNX2X_ERR("PXP2 CFG failed\n");
6407 return -EBUSY;
6408 }
6409 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6410 if (val != 1) {
6411 BNX2X_ERR("PXP2 RD_INIT failed\n");
6412 return -EBUSY;
6413 }
6414
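	/* Timers bug workaround, E2 only.  We need to set the entire ILT to
	 * have entries with value "0" and the valid bit on.  This needs to
	 * be done by the first PF that is loaded in a path (i.e. the common
	 * phase).
	 */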
6420 if (!CHIP_IS_E1x(bp)) {
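		/* In E2 there is a bug in the timers block that can cause
		 * function 6 / 7 (i.e. vnic3) to start even if it is marked
		 * as "scan-off": while a different function is marked
		 * "scan-off", the Timers block may still access the ILT of
		 * the down function, and since that function's ILT is not
		 * valid the resulting translation error is unrecoverable.
		 * The workaround, performed by the first PF loaded on a
		 * path, is to:
		 *   a. clear master-enable / CFC enable of all other
		 *	functions on the path using pretend (done above);
		 *   b. write zero + valid bit to the entire ILT;
		 *   c. point the Timers first/last ILT entries of vnic3 at
		 *	the whole range, to prevent an ILT range error.
		 * Steps b and c are performed below via a dummy TM ILT
		 * client.
		 */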
6483 struct ilt_client_info ilt_cli;
6484 struct bnx2x_ilt ilt;
6485 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6486 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
6487
6488
6489 ilt_cli.start = 0;
6490 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6491 ilt_cli.client_num = ILT_CLIENT_TM;
6492
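		/* Step 1: set zeroes to all ilt page entries with valid bit
		 * on.
		 * Step 2: set the timers first/last ilt entry to point to the
		 * entire range, to prevent an ILT range error for the 3rd/4th
		 * vnic (this code assumes existence of the vnic).
		 *
		 * Both steps are performed by the call to
		 * bnx2x_ilt_client_init_op_ilt() with the dummy TM client.
		 *
		 * We must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its sibling are split registers.
		 */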
6504 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
6505 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
6506 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6507
6508 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
6509 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
6510 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
6511 }
6512
6513
6514 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6515 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6516
6517 if (!CHIP_IS_E1x(bp)) {
6518 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
6519 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
6520 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
6521
6522 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
6523
6524
6525 do {
6526 msleep(200);
6527 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
6528 } while (factor-- && (val != 1));
6529
6530 if (val != 1) {
6531 BNX2X_ERR("ATC_INIT failed\n");
6532 return -EBUSY;
6533 }
6534 }
6535
6536 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
6537
6538
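	/* clean the DMAE memory */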
6539 bp->dmae_ready = 1;
6540 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
6541
6542 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
6543
6544 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
6545
6546 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
6547
6548 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
6549
6550 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6551 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6552 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6553 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6554
6555 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
6556
6557
6558
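	/* QM queues pointers table */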
6559 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6560
6561
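	/* soft reset pulse */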
6562 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6563 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6564
6565 if (CNIC_SUPPORT(bp))
6566 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6567
6568 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6569 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
6570 if (!CHIP_REV_IS_SLOW(bp))
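		/* enable hw interrupt from doorbell Q */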
6572 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6573
6574 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6575
6576 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6577 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6578
6579 if (!CHIP_IS_E1(bp))
6580 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6581
6582 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6583 if (IS_MF_AFEX(bp)) {
6584
6585
6586
6587 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6588 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6589 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6590 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6591 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6592 } else {
6593
6594
6595
6596 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6597 bp->path_has_ovlan ? 7 : 6);
6598 }
6599 }
6600
6601 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6602 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
6603 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
6604 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
6605
6606 if (!CHIP_IS_E1x(bp)) {
6607
6608 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6609 VFC_MEMORIES_RST_REG_CAM_RST |
6610 VFC_MEMORIES_RST_REG_RAM_RST);
6611 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6612 VFC_MEMORIES_RST_REG_CAM_RST |
6613 VFC_MEMORIES_RST_REG_RAM_RST);
6614
6615 msleep(20);
6616 }
6617
6618 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
6619 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
6620 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
6621 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
6622
6623
6624 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6625 0x80000000);
6626 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6627 0x80000000);
6628
6629 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
6630 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6631 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
6632
6633 if (!CHIP_IS_E1x(bp)) {
6634 if (IS_MF_AFEX(bp)) {
6635
6636
6637
6638 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6639 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6640 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
6641 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
6642 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
6643 } else {
6644 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6645 bp->path_has_ovlan ? 7 : 6);
6646 }
6647 }
6648
6649 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6650
6651 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
6652
6653 if (CNIC_SUPPORT(bp)) {
6654 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6655 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6656 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6657 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6658 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6659 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6660 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6661 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6662 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6663 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6664 }
6665 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6666
6667 if (sizeof(union cdu_context) != 1024)
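		/* we currently assume that a context is 1024 bytes */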
6669 dev_alert(&bp->pdev->dev,
6670 "please adjust the size of cdu_context(%ld)\n",
6671 (long)sizeof(union cdu_context));
6672
6673 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
6674 val = (4 << 24) + (0 << 12) + 1024;
6675 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6676
6677 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
6678 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6679
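	/* enable context validation interrupt from CFC */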
6680 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6681
6682
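	/* set the thresholds to prevent CFC/CDU race */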
6683 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6684
6685 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
6686
6687 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
6688 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
6689
6690 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
6691 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
6692
6693
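	/* Reset PCIE errors for debug */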
6694 REG_WR(bp, 0x2814, 0xffffffff);
6695 REG_WR(bp, 0x3820, 0xffffffff);
6696
6697 if (!CHIP_IS_E1x(bp)) {
6698 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
6699 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
6700 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
6701 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
6702 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
6703 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
6704 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
6705 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
6706 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
6707 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
6708 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
6709 }
6710
6711 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
6712 if (!CHIP_IS_E1(bp)) {
6713
6714 if (!CHIP_IS_E3(bp))
6715 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
6716 }
6717 if (CHIP_IS_E1H(bp))
6718
6719 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
6720
6721 if (CHIP_REV_IS_SLOW(bp))
6722 msleep(200);
6723
6724
6725 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6726 if (val != 1) {
6727 BNX2X_ERR("CFC LL_INIT failed\n");
6728 return -EBUSY;
6729 }
6730 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6731 if (val != 1) {
6732 BNX2X_ERR("CFC AC_INIT failed\n");
6733 return -EBUSY;
6734 }
6735 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6736 if (val != 1) {
6737 BNX2X_ERR("CFC CAM_INIT failed\n");
6738 return -EBUSY;
6739 }
6740 REG_WR(bp, CFC_REG_DEBUG0, 0);
6741
6742 if (CHIP_IS_E1(bp)) {
6743
6744
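		/* read NIG statistic to see if this is our first up
		 * since powerup
		 */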
6745 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6746 val = *bnx2x_sp(bp, wb_data[0]);
6747
6748
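		/* do internal memory self test */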
6749 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6750 BNX2X_ERR("internal mem self test failed\n");
6751 return -EBUSY;
6752 }
6753 }
6754
6755 bnx2x_setup_fan_failure_detection(bp);
6756
6757
6758 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6759
6760 bnx2x_enable_blocks_attention(bp);
6761 bnx2x_enable_blocks_parity(bp);
6762
6763 if (!BP_NOMCP(bp)) {
6764 if (CHIP_IS_E1x(bp))
6765 bnx2x__common_init_phy(bp);
6766 } else
6767		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6768
6769 return 0;
6770}
6771
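/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */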
6777static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
6778{
6779 int rc = bnx2x_init_hw_common(bp);
6780
6781 if (rc)
6782 return rc;
6783
6784
6785 if (!BP_NOMCP(bp))
6786 bnx2x__common_init_phy(bp);
6787
6788 return 0;
6789}
6790
6791static int bnx2x_init_hw_port(struct bnx2x *bp)
6792{
6793 int port = BP_PORT(bp);
6794 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
6795 u32 low, high;
6796 u32 val;
6797
6798
6799 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
6800
6801 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6802
6803 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
6804 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
6805 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
6806
6807
6808
6809
6810
6811
6812 if (!CHIP_IS_E1x(bp))
6813 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
6814
6815 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
6816 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
6817 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
6818 bnx2x_init_block(bp, BLOCK_QM, init_phase);
6819
6820 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
6821 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
6822 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
6823 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
6824
6825
6826 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
6827
6828 if (CNIC_SUPPORT(bp)) {
6829 bnx2x_init_block(bp, BLOCK_TM, init_phase);
6830 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6831 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6832 }
6833
6834 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
6835
6836 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
6837
6838 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
6839
6840 if (IS_MF(bp))
6841 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6842 else if (bp->dev->mtu > 4096) {
6843 if (bp->flags & ONE_PORT_FLAG)
6844 low = 160;
6845 else {
6846 val = bp->dev->mtu;
6847
6848 low = 96 + (val/64) +
6849 ((val % 64) ? 1 : 0);
6850 }
6851 } else
6852 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6853 high = low + 56;
6854 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6855 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6856 }
6857
6858 if (CHIP_MODE_IS_4_PORT(bp))
6859 REG_WR(bp, (BP_PORT(bp) ?
6860 BRB1_REG_MAC_GUARANTIED_1 :
6861 BRB1_REG_MAC_GUARANTIED_0), 40);
6862
6863
6864 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
6865 if (CHIP_IS_E3B0(bp)) {
6866 if (IS_MF_AFEX(bp)) {
6867
6868 REG_WR(bp, BP_PORT(bp) ?
6869 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6870 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
6871 REG_WR(bp, BP_PORT(bp) ?
6872 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
6873 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
6874 REG_WR(bp, BP_PORT(bp) ?
6875 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
6876 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
6877 } else {
6878
6879
6880
6881
6882 REG_WR(bp, BP_PORT(bp) ?
6883 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6884 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
6885 (bp->path_has_ovlan ? 7 : 6));
6886 }
6887 }
6888
6889 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
6890 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
6891 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
6892 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
6893
6894 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
6895 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
6896 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
6897 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
6898
6899 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
6900 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
6901
6902 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
6903
6904 if (CHIP_IS_E1x(bp)) {
6905
6906 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6907
6908
6909 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6910
6911 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6912
6913
6914 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6915 udelay(50);
6916 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6917 }
6918
6919 if (CNIC_SUPPORT(bp))
6920 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
6921
6922 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
6923 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
6924
6925 if (CHIP_IS_E1(bp)) {
6926 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6927 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6928 }
6929 bnx2x_init_block(bp, BLOCK_HC, init_phase);
6930
6931 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
6932
6933 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
6934
6935
6936
6937
6938 val = IS_MF(bp) ? 0xF7 : 0x7;
6939
6940 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
6941 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
6942
6943 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
6944
6945 if (!CHIP_IS_E1x(bp)) {
6946
6947
6948
6949 if (IS_MF_AFEX(bp))
6950 REG_WR(bp, BP_PORT(bp) ?
6951 NIG_REG_P1_HDRS_AFTER_BASIC :
6952 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
6953 else
6954 REG_WR(bp, BP_PORT(bp) ?
6955 NIG_REG_P1_HDRS_AFTER_BASIC :
6956 NIG_REG_P0_HDRS_AFTER_BASIC,
6957 IS_MF_SD(bp) ? 7 : 6);
6958
6959 if (CHIP_IS_E3(bp))
6960 REG_WR(bp, BP_PORT(bp) ?
6961 NIG_REG_LLH1_MF_MODE :
6962 NIG_REG_LLH_MF_MODE, IS_MF(bp));
6963 }
6964 if (!CHIP_IS_E3(bp))
6965 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6966
6967 if (!CHIP_IS_E1(bp)) {
6968
6969 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6970 (IS_MF_SD(bp) ? 0x1 : 0x2));
6971
6972 if (!CHIP_IS_E1x(bp)) {
6973 val = 0;
6974 switch (bp->mf_mode) {
6975 case MULTI_FUNCTION_SD:
6976 val = 1;
6977 break;
6978 case MULTI_FUNCTION_SI:
6979 case MULTI_FUNCTION_AFEX:
6980 val = 2;
6981 break;
6982 }
6983
6984 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
6985 NIG_REG_LLH0_CLS_TYPE), val);
6986 }
6987 {
6988 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6989 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6990 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6991 }
6992 }
6993
6994
6995
6996 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6997 if (val & MISC_SPIO_SPIO5) {
6998 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6999 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7000 val = REG_RD(bp, reg_addr);
7001 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7002 REG_WR(bp, reg_addr, val);
7003 }
7004
7005 return 0;
7006}
7007
7008static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7009{
7010 int reg;
7011 u32 wb_write[2];
7012
7013 if (CHIP_IS_E1(bp))
7014 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7015 else
7016 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7017
7018 wb_write[0] = ONCHIP_ADDR1(addr);
7019 wb_write[1] = ONCHIP_ADDR2(addr);
7020 REG_WR_DMAE(bp, reg, wb_write, 2);
7021}
7022
7023static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
7024 u8 idu_sb_id, bool is_Pf)
7025{
7026 u32 data, ctl, cnt = 100;
7027 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7028 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7029 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7030 u32 sb_bit = 1 << (idu_sb_id%32);
7031 u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7032 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7033
7034
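	/* Not supported in BC mode */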
7035 if (CHIP_INT_MODE_IS_BC(bp))
7036 return;
7037
7038 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7039 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7040 IGU_REGULAR_CLEANUP_SET |
7041 IGU_REGULAR_BCLEANUP;
7042
7043 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7044 func_encode << IGU_CTRL_REG_FID_SHIFT |
7045 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7046
7047 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7048 data, igu_addr_data);
7049 REG_WR(bp, igu_addr_data, data);
7050 mmiowb();
7051 barrier();
7052 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7053 ctl, igu_addr_ctl);
7054 REG_WR(bp, igu_addr_ctl, ctl);
7055 mmiowb();
7056 barrier();
7057
7058
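	/* wait for clean up to finish */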
7059 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7060 msleep(20);
7061
7062
7063 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7064 DP(NETIF_MSG_HW,
7065 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7066 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7067 }
7068}
7069
7070static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7071{
7072	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true);
7073}
7074
7075static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7076{
7077 u32 i, base = FUNC_ILT_BASE(func);
7078 for (i = base; i < base + ILT_PER_FUNC; i++)
7079 bnx2x_ilt_wr(bp, i, 0);
7080}
7081
7082
7083static void bnx2x_init_searcher(struct bnx2x *bp)
7084{
7085 int port = BP_PORT(bp);
7086 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7087
7088 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7089}
7090
7091static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7092{
7093 int rc;
7094 struct bnx2x_func_state_params func_params = {NULL};
7095 struct bnx2x_func_switch_update_params *switch_update_params =
7096 &func_params.params.switch_update;
7097
7098
7099 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7100 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7101
7102 func_params.f_obj = &bp->func_obj;
7103 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7104
7105
7106 switch_update_params->suspend = suspend;
7107
7108 rc = bnx2x_func_state_change(bp, &func_params);
7109
7110 return rc;
7111}
7112
7113static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7114{
7115 int rc, i, port = BP_PORT(bp);
7116 int vlan_en = 0, mac_en[NUM_MACS];
7117
7118
7119
7120 if (bp->mf_mode == SINGLE_FUNCTION) {
7121 bnx2x_set_rx_filter(&bp->link_params, 0);
7122 } else {
7123 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7124 NIG_REG_LLH0_FUNC_EN);
7125 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7126 NIG_REG_LLH0_FUNC_EN, 0);
7127 for (i = 0; i < NUM_MACS; i++) {
7128 mac_en[i] = REG_RD(bp, port ?
7129 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7130 4 * i) :
7131 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7132 4 * i));
7133 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7134 4 * i) :
7135 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7136 }
7137 }
7138
7139
7140	REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
7141	       NIG_REG_P0_TX_MNG_HOST_ENABLE, 0);
7142
7143
7144
7145
7146
7147
7148 rc = bnx2x_func_switch_update(bp, 1);
7149 if (rc) {
7150 BNX2X_ERR("Can't suspend tx-switching!\n");
7151 return rc;
7152 }
7153
7154
7155 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7156
7157
7158 if (bp->mf_mode == SINGLE_FUNCTION) {
7159 bnx2x_set_rx_filter(&bp->link_params, 1);
7160 } else {
7161 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7162 NIG_REG_LLH0_FUNC_EN, vlan_en);
7163 for (i = 0; i < NUM_MACS; i++) {
7164 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7165 4 * i) :
7166 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7167 mac_en[i]);
7168 }
7169 }
7170
7171
7172	REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
7173	       NIG_REG_P0_TX_MNG_HOST_ENABLE, 1);
7174
7175
7176 rc = bnx2x_func_switch_update(bp, 0);
7177 if (rc) {
7178 BNX2X_ERR("Can't resume tx-switching!\n");
7179 return rc;
7180 }
7181
7182 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7183 return 0;
7184}
7185
7186int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7187{
7188 int rc;
7189
7190 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7191
7192 if (CONFIGURE_NIC_MODE(bp)) {
7193
7194 bnx2x_init_searcher(bp);
7195
7196
7197 rc = bnx2x_reset_nic_mode(bp);
7198 if (rc)
7199 BNX2X_ERR("Can't change NIC mode!\n");
7200 return rc;
7201 }
7202
7203 return 0;
7204}
7205
7206static int bnx2x_init_hw_func(struct bnx2x *bp)
7207{
7208 int port = BP_PORT(bp);
7209 int func = BP_FUNC(bp);
7210 int init_phase = PHASE_PF0 + func;
7211 struct bnx2x_ilt *ilt = BP_ILT(bp);
7212 u16 cdu_ilt_start;
7213 u32 addr, val;
7214 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7215 int i, main_mem_width, rc;
7216
7217 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7218
7219
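	/* FLR cleanup */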
7220 if (!CHIP_IS_E1x(bp)) {
7221 rc = bnx2x_pf_flr_clnup(bp);
7222 if (rc)
7223 return rc;
7224 }
7225
7226
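	/* set MSI reconfigure capability */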
7227 if (bp->common.int_block == INT_BLOCK_HC) {
7228 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7229 val = REG_RD(bp, addr);
7230 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7231 REG_WR(bp, addr, val);
7232 }
7233
7234 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7235 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7236
7237 ilt = BP_ILT(bp);
7238 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7239
7240 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7241 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7242 ilt->lines[cdu_ilt_start + i].page_mapping =
7243 bp->context[i].cxt_mapping;
7244 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7245 }
7246 bnx2x_ilt_init_op(bp, INITOP_SET);
7247
7248 if (!CONFIGURE_NIC_MODE(bp)) {
7249 bnx2x_init_searcher(bp);
7250 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7251 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7252 } else {
7253
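		/* Set NIC mode */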
7254 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7255		DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7256
7257 }
7258
7259 if (!CHIP_IS_E1x(bp)) {
7260 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7261
7262
7263
7264
7265 if (!(bp->flags & USING_MSIX_FLAG))
7266 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
7267
7268
7269
7270
7271
7272
7273 msleep(20);
7274
7275
7276
7277
7278
7279 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7280
7281 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
7282 }
7283
7284 bp->dmae_ready = 1;
7285
7286 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7287
7288 if (!CHIP_IS_E1x(bp))
7289 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7290
7291 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7292 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7293 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7294 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7295 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7296 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7297 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7298 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7299 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7300 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7301 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7302 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7303 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7304
7305 if (!CHIP_IS_E1x(bp))
7306 REG_WR(bp, QM_REG_PF_EN, 1);
7307
7308 if (!CHIP_IS_E1x(bp)) {
7309 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7310 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7311 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7312 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7313 }
7314 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7315
7316 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7317 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7318 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7319 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7320 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7321 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7322 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7323 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7324 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7325 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7326 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7327 if (!CHIP_IS_E1x(bp))
7328 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
7329
7330 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7331
7332 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7333
7334 if (!CHIP_IS_E1x(bp))
7335 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
7336
7337 if (IS_MF(bp)) {
7338 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7339 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
7340 }
7341
7342 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7343
7344
7345 if (bp->common.int_block == INT_BLOCK_HC) {
7346 if (CHIP_IS_E1H(bp)) {
7347 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7348
7349 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7350 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7351 }
7352 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7353
7354 } else {
7355 int num_segs, sb_idx, prod_offset;
7356
7357 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7358
7359 if (!CHIP_IS_E1x(bp)) {
7360 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7361 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7362 }
7363
7364 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7365
7366 if (!CHIP_IS_E1x(bp)) {
7367 int dsb_idx = 0;
7368
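			/* Producer memory:
			 * E2 mode: addresses 0-135 match the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 (backward compatible) mode: for non-default
			 * SBs each even line in the memory holds the U
			 * producer and each odd line holds the C producer.
			 * The first 128 producers are for NDSBs (PF0 - 0-31;
			 * PF1 - 32-63 and so on).  The last 20 producers are
			 * for the DSB of each PF; each PF has five segments
			 * (ordered PF0; PF1; PF2; PF3 inside each segment):
			 * 128-131 U prods; 132-135 C prods; 136-139 X prods;
			 * 140-143 T prods; 144-147 attn prods.
			 */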
7389 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7390 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
7391 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
7392 prod_offset = (bp->igu_base_sb + sb_idx) *
7393 num_segs;
7394
7395 for (i = 0; i < num_segs; i++) {
7396 addr = IGU_REG_PROD_CONS_MEMORY +
7397 (prod_offset + i) * 4;
7398 REG_WR(bp, addr, 0);
7399 }
7400
7401 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
7402 USTORM_ID, 0, IGU_INT_NOP, 1);
7403 bnx2x_igu_clear_sb(bp,
7404 bp->igu_base_sb + sb_idx);
7405 }
7406
7407
7408 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7409 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
7410
7411 if (CHIP_MODE_IS_4_PORT(bp))
7412 dsb_idx = BP_FUNC(bp);
7413 else
7414 dsb_idx = BP_VN(bp);
7415
7416 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
7417 IGU_BC_BASE_DSB_PROD + dsb_idx :
7418 IGU_NORM_BASE_DSB_PROD + dsb_idx);
7419
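			/* igu prods come in chunks of E1HVN_MAX (4) -
			 * regardless of the current chip mode
			 */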
7424 for (i = 0; i < (num_segs * E1HVN_MAX);
7425 i += E1HVN_MAX) {
7426 addr = IGU_REG_PROD_CONS_MEMORY +
7427 (prod_offset + i)*4;
7428 REG_WR(bp, addr, 0);
7429 }
7430
7431 if (CHIP_INT_MODE_IS_BC(bp)) {
7432 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7433 USTORM_ID, 0, IGU_INT_NOP, 1);
7434 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7435 CSTORM_ID, 0, IGU_INT_NOP, 1);
7436 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7437 XSTORM_ID, 0, IGU_INT_NOP, 1);
7438 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7439 TSTORM_ID, 0, IGU_INT_NOP, 1);
7440 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7441 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7442 } else {
7443 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7444 USTORM_ID, 0, IGU_INT_NOP, 1);
7445 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7446 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7447 }
7448 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
7449
7450
7451
7452 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
7453 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
7454 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
7455 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
7456 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
7457 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
7458 }
7459 }
7460
7461
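	/* Reset PCIE errors for debug */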
7462 REG_WR(bp, 0x2114, 0xffffffff);
7463 REG_WR(bp, 0x2120, 0xffffffff);
7464
7465 if (CHIP_IS_E1x(bp)) {
7466 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
7467 main_mem_base = HC_REG_MAIN_MEMORY +
7468 BP_PORT(bp) * (main_mem_size * 4);
7469 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
7470 main_mem_width = 8;
7471
7472 val = REG_RD(bp, main_mem_prty_clr);
7473 if (val)
7474 DP(NETIF_MSG_HW,
7475 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
7476 val);
7477
7478
7479 for (i = main_mem_base;
7480 i < main_mem_base + main_mem_size * 4;
7481 i += main_mem_width) {
7482 bnx2x_read_dmae(bp, i, main_mem_width / 4);
7483 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
7484 i, main_mem_width / 4);
7485 }
7486
7487 REG_RD(bp, main_mem_prty_clr);
7488 }
7489
7490#ifdef BNX2X_STOP_ON_ERROR
7491
7492 REG_WR8(bp, BAR_USTRORM_INTMEM +
7493 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7494 REG_WR8(bp, BAR_TSTRORM_INTMEM +
7495 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7496 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7497 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7498 REG_WR8(bp, BAR_XSTRORM_INTMEM +
7499 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7500#endif
7501
7502 bnx2x_phy_probe(&bp->link_params);
7503
7504 return 0;
7505}
7506
7507
7508void bnx2x_free_mem_cnic(struct bnx2x *bp)
7509{
7510 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
7511
7512 if (!CHIP_IS_E1x(bp))
7513 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7514 sizeof(struct host_hc_status_block_e2));
7515 else
7516 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7517 sizeof(struct host_hc_status_block_e1x));
7518
7519 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7520}
7521
7522void bnx2x_free_mem(struct bnx2x *bp)
7523{
7524 int i;
7525
7526
7527 bnx2x_free_fp_mem(bp);
7528
7529
7530 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7531 sizeof(struct host_sp_status_block));
7532
7533 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7534 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7535
7536 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7537 sizeof(struct bnx2x_slowpath));
7538
7539 for (i = 0; i < L2_ILT_LINES(bp); i++)
7540 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7541 bp->context[i].size);
7542 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7543
7544 BNX2X_FREE(bp->ilt->lines);
7545
7546 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7547
7548 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
7549 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7550}
7551
7552static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
7553{
7554 int num_groups;
7555 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
7556
7557
7558 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
7559
7560
7561
7562
7563
7564 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
7565
7566
7567
7568
7569
7570
7571
7572 num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
7573 (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
7574
7575 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
7576 num_groups * sizeof(struct stats_query_cmd_group);
7577
7578
7579
7580
7581
7582
7583
7584
7585
7586 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
7587 sizeof(struct per_pf_stats) +
7588 sizeof(struct fcoe_statistics_params) +
7589 sizeof(struct per_queue_stats) * num_queue_stats +
7590 sizeof(struct stats_counter);
7591
7592 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
7593 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7594
7595
7596 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
7597 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
7598
7599 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
7600 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
7601
7602 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
7603 bp->fw_stats_req_sz;
7604 return 0;
7605
7606alloc_mem_err:
7607 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7608 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7609 BNX2X_ERR("Can't allocate memory\n");
7610 return -ENOMEM;
7611}
7612
7613int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7614{
7615 if (!CHIP_IS_E1x(bp))
7616
7617 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
7618 sizeof(struct host_hc_status_block_e2));
7619 else
7620 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
7621 &bp->cnic_sb_mapping,
7622 sizeof(struct
7623 host_hc_status_block_e1x));
7624
7625 if (CONFIGURE_NIC_MODE(bp))
7626
7627 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7628
7629
7630 bp->cnic_eth_dev.addr_drv_info_to_mcp =
7631 &bp->slowpath->drv_info_to_mcp;
7632
7633 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
7634 goto alloc_mem_err;
7635
7636 return 0;
7637
7638alloc_mem_err:
7639 bnx2x_free_mem_cnic(bp);
7640 BNX2X_ERR("Can't allocate memory\n");
7641 return -ENOMEM;
7642}
7643
7644int bnx2x_alloc_mem(struct bnx2x *bp)
7645{
7646 int i, allocated, context_size;
7647
7648 if (!CONFIGURE_NIC_MODE(bp))
7649
7650 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7651
7652 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7653 sizeof(struct host_sp_status_block));
7654
7655 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7656 sizeof(struct bnx2x_slowpath));
7657
7658
7659 if (bnx2x_alloc_fw_stats_mem(bp))
7660 goto alloc_mem_err;
7661
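	/* Allocate memory for CDU context.
	 * This memory is allocated separately and not in the generic ILT
	 * flow because its total size must track BNX2X_L2_CID_COUNT(bp),
	 * with at most CDU_ILT_PAGE_SZ bytes per chunk; the loop below
	 * spreads the total context size over as many pages as needed.
	 */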
7675 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
7676
7677 for (i = 0, allocated = 0; allocated < context_size; i++) {
7678 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
7679 (context_size - allocated));
7680 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
7681 &bp->context[i].cxt_mapping,
7682 bp->context[i].size);
7683 allocated += bp->context[i].size;
7684 }
7685 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
7686
7687 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
7688 goto alloc_mem_err;
7689
7690
7691 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7692
7693
7694 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
7695 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7696
7697
7698
7699
7700
7701
7702 if (bnx2x_alloc_fp_mem(bp))
7703 goto alloc_mem_err;
7704 return 0;
7705
7706alloc_mem_err:
7707 bnx2x_free_mem(bp);
7708 BNX2X_ERR("Can't allocate memory\n");
7709 return -ENOMEM;
7710}
7711
7712
7713
7714
7715
7716int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
7717 struct bnx2x_vlan_mac_obj *obj, bool set,
7718 int mac_type, unsigned long *ramrod_flags)
7719{
7720 int rc;
7721 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
7722
7723 memset(&ramrod_param, 0, sizeof(ramrod_param));
7724
7725
7726 ramrod_param.vlan_mac_obj = obj;
7727 ramrod_param.ramrod_flags = *ramrod_flags;
7728
7729
7730 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
7731 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
7732
7733 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
7734
7735
7736 if (set)
7737 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
7738 else
7739 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
7740 }
7741
7742 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
7743
7744 if (rc == -EEXIST) {
7745 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
7746
7747 rc = 0;
7748 } else if (rc < 0)
7749 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
7750
7751 return rc;
7752}
7753
7754int bnx2x_del_all_macs(struct bnx2x *bp,
7755 struct bnx2x_vlan_mac_obj *mac_obj,
7756 int mac_type, bool wait_for_comp)
7757{
7758 int rc;
7759 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
7760
7761
7762 if (wait_for_comp)
7763 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7764
7765
7766 __set_bit(mac_type, &vlan_mac_flags);
7767
7768 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
7769 if (rc < 0)
7770 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
7771
7772 return rc;
7773}
7774
7775int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7776{
7777 unsigned long ramrod_flags = 0;
7778
7779 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7780 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
7781 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
7782 "Ignoring Zero MAC for STORAGE SD mode\n");
7783 return 0;
7784 }
7785
7786 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
7787
7788 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7789
7790 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
7791 set, BNX2X_ETH_MAC, &ramrod_flags);
7792}
7793
7794int bnx2x_setup_leading(struct bnx2x *bp)
7795{
7796 return bnx2x_setup_queue(bp, &bp->fp[0], 1);
7797}
7798
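/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 */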
7806void bnx2x_set_int_mode(struct bnx2x *bp)
7807{
7808 switch (int_mode) {
7809 case INT_MODE_MSI:
7810 bnx2x_enable_msi(bp);
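		/* falling through... */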
7812 case INT_MODE_INTx:
7813 bp->num_ethernet_queues = 1;
7814 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
7815 BNX2X_DEV_INFO("set number of queues to 1\n");
7816 break;
7817 default:
7818
7819
7820
7821
7822 if (bnx2x_enable_msix(bp) ||
7823 bp->flags & USING_SINGLE_MSIX_FLAG) {
7824
7825 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7826 bp->num_queues,
7827 1 + bp->num_cnic_queues);
7828
7829 bp->num_queues = 1 + bp->num_cnic_queues;
7830
7831
7832 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
7833 !(bp->flags & DISABLE_MSI_FLAG))
7834 bnx2x_enable_msi(bp);
7835 }
7836 break;
7837 }
7838}
7839
7840
7841static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
7842{
7843 return L2_ILT_LINES(bp);
7844}
7845
7846void bnx2x_ilt_set_info(struct bnx2x *bp)
7847{
7848 struct ilt_client_info *ilt_client;
7849 struct bnx2x_ilt *ilt = BP_ILT(bp);
7850 u16 line = 0;
7851
7852 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
7853 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
7854
7855
7856 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
7857 ilt_client->client_num = ILT_CLIENT_CDU;
7858 ilt_client->page_size = CDU_ILT_PAGE_SZ;
7859 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
7860 ilt_client->start = line;
7861 line += bnx2x_cid_ilt_lines(bp);
7862
7863 if (CNIC_SUPPORT(bp))
7864 line += CNIC_ILT_LINES;
7865 ilt_client->end = line - 1;
7866
7867 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7868 ilt_client->start,
7869 ilt_client->end,
7870 ilt_client->page_size,
7871 ilt_client->flags,
7872 ilog2(ilt_client->page_size >> 12));
7873
7874
7875 if (QM_INIT(bp->qm_cid_count)) {
7876 ilt_client = &ilt->clients[ILT_CLIENT_QM];
7877 ilt_client->client_num = ILT_CLIENT_QM;
7878 ilt_client->page_size = QM_ILT_PAGE_SZ;
7879 ilt_client->flags = 0;
7880 ilt_client->start = line;
7881
7882
7883 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
7884 QM_ILT_PAGE_SZ);
7885
7886 ilt_client->end = line - 1;
7887
7888 DP(NETIF_MSG_IFUP,
7889 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7890 ilt_client->start,
7891 ilt_client->end,
7892 ilt_client->page_size,
7893 ilt_client->flags,
7894 ilog2(ilt_client->page_size >> 12));
7895
7896 }
7897
7898 if (CNIC_SUPPORT(bp)) {
7899
7900 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
7901 ilt_client->client_num = ILT_CLIENT_SRC;
7902 ilt_client->page_size = SRC_ILT_PAGE_SZ;
7903 ilt_client->flags = 0;
7904 ilt_client->start = line;
7905 line += SRC_ILT_LINES;
7906 ilt_client->end = line - 1;
7907
7908 DP(NETIF_MSG_IFUP,
7909 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7910 ilt_client->start,
7911 ilt_client->end,
7912 ilt_client->page_size,
7913 ilt_client->flags,
7914 ilog2(ilt_client->page_size >> 12));
7915
7916
7917 ilt_client = &ilt->clients[ILT_CLIENT_TM];
7918 ilt_client->client_num = ILT_CLIENT_TM;
7919 ilt_client->page_size = TM_ILT_PAGE_SZ;
7920 ilt_client->flags = 0;
7921 ilt_client->start = line;
7922 line += TM_ILT_LINES;
7923 ilt_client->end = line - 1;
7924
7925 DP(NETIF_MSG_IFUP,
7926 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7927 ilt_client->start,
7928 ilt_client->end,
7929 ilt_client->page_size,
7930 ilt_client->flags,
7931 ilog2(ilt_client->page_size >> 12));
7932 }
7933
7934 BUG_ON(line > ILT_MAX_LINES);
7935}
7936
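/**
 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @init_params: pointer to parameters structure
 *
 * parameters configured:
 *      - HC configuration
 *      - Queue's CDU context
 */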
7948static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7949 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
7950{
7951
7952 u8 cos;
7953 int cxt_index, cxt_offset;
7954
7955
7956 if (!IS_FCOE_FP(fp)) {
7957 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
7958 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
7959
7960
7961
7962
7963 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
7964 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
7965
7966
7967 init_params->rx.hc_rate = bp->rx_ticks ?
7968 (1000000 / bp->rx_ticks) : 0;
7969 init_params->tx.hc_rate = bp->tx_ticks ?
7970 (1000000 / bp->tx_ticks) : 0;
7971
7972
7973 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
7974 fp->fw_sb_id;
7975
7976
7977
7978
7979
7980 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
7981 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
7982 }
7983
7984
7985 init_params->max_cos = fp->max_cos;
7986
7987 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
7988 fp->index, init_params->max_cos);
7989
7990
7991 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
7992 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
7993 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
7994 ILT_PAGE_CIDS);
7995 init_params->cxts[cos] =
7996 &bp->context[cxt_index].vcxt[cxt_offset].eth;
7997 }
7998}
7999
8000static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8001 struct bnx2x_queue_state_params *q_params,
8002 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8003 int tx_index, bool leading)
8004{
8005 memset(tx_only_params, 0, sizeof(*tx_only_params));
8006
8007
8008 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8009
8010
8011 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8012
8013
8014 tx_only_params->cid_index = tx_index;
8015
8016
8017 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8018
8019
8020 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8021
8022 DP(NETIF_MSG_IFUP,
8023 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8024 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8025 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8026 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8027
8028
8029 return bnx2x_queue_state_change(bp, q_params);
8030}
8031
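/**
 * bnx2x_setup_queue - setup queue
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	is leading or not
 *
 * This function performs 2 steps in a Queue state machine
 *      actually: 1) RESET->INIT 2) INIT->SETUP
 */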
8044int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8045 bool leading)
8046{
8047 struct bnx2x_queue_state_params q_params = {NULL};
8048 struct bnx2x_queue_setup_params *setup_params =
8049 &q_params.params.setup;
8050 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8051 &q_params.params.tx_only;
8052 int rc;
8053 u8 tx_index;
8054
8055 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8056
8057
8058 if (!IS_FCOE_FP(fp))
8059 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8060 IGU_INT_ENABLE, 0);
8061
8062 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8063
8064 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8065
8066
8067 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8068
8069
8070 q_params.cmd = BNX2X_Q_CMD_INIT;
8071
8072
8073 rc = bnx2x_queue_state_change(bp, &q_params);
8074 if (rc) {
8075 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8076 return rc;
8077 }
8078
8079 DP(NETIF_MSG_IFUP, "init complete\n");
8080
8081
8082
8083 memset(setup_params, 0, sizeof(*setup_params));
8084
8085
8086 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8087
8088
8089 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8090 FIRST_TX_COS_INDEX);
8091
8092 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8093 &setup_params->rxq_params);
8094
8095 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8096 FIRST_TX_COS_INDEX);
8097
8098
8099 q_params.cmd = BNX2X_Q_CMD_SETUP;
8100
8101 if (IS_FCOE_FP(fp))
8102 bp->fcoe_init = true;
8103
8104
8105 rc = bnx2x_queue_state_change(bp, &q_params);
8106 if (rc) {
8107 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8108 return rc;
8109 }
8110
8111
8112 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8113 tx_index < fp->max_cos;
8114 tx_index++) {
8115
8116
8117 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8118 tx_only_params, tx_index, leading);
8119 if (rc) {
8120 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8121 fp->index, tx_index);
8122 return rc;
8123 }
8124 }
8125
8126 return rc;
8127}
8128
8129static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8130{
8131 struct bnx2x_fastpath *fp = &bp->fp[index];
8132 struct bnx2x_fp_txdata *txdata;
8133 struct bnx2x_queue_state_params q_params = {NULL};
8134 int rc, tx_index;
8135
8136 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8137
8138 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8139
8140 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8141
8142
8143
8144 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8145 tx_index < fp->max_cos;
8146 tx_index++){
8147
8148
8149 txdata = fp->txdata_ptr[tx_index];
8150
8151 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8152 txdata->txq_index);
8153
8154
8155 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8156 memset(&q_params.params.terminate, 0,
8157 sizeof(q_params.params.terminate));
8158 q_params.params.terminate.cid_index = tx_index;
8159
8160 rc = bnx2x_queue_state_change(bp, &q_params);
8161 if (rc)
8162 return rc;
8163
8164
8165 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8166 memset(&q_params.params.cfc_del, 0,
8167 sizeof(q_params.params.cfc_del));
8168 q_params.params.cfc_del.cid_index = tx_index;
8169 rc = bnx2x_queue_state_change(bp, &q_params);
8170 if (rc)
8171 return rc;
8172 }
8173
8174
8175 q_params.cmd = BNX2X_Q_CMD_HALT;
8176 rc = bnx2x_queue_state_change(bp, &q_params);
8177 if (rc)
8178 return rc;
8179
8180
8181 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8182 memset(&q_params.params.terminate, 0,
8183 sizeof(q_params.params.terminate));
8184 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8185 rc = bnx2x_queue_state_change(bp, &q_params);
8186 if (rc)
8187 return rc;
8188
8189 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8190 memset(&q_params.params.cfc_del, 0,
8191 sizeof(q_params.params.cfc_del));
8192 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8193 return bnx2x_queue_state_change(bp, &q_params);
8194}
8195
8196
8197static void bnx2x_reset_func(struct bnx2x *bp)
8198{
8199 int port = BP_PORT(bp);
8200 int func = BP_FUNC(bp);
8201 int i;
8202
8203
8204 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8205 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8206 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8207 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8208
8209
8210 for_each_eth_queue(bp, i) {
8211 struct bnx2x_fastpath *fp = &bp->fp[i];
8212 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8213 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8214 SB_DISABLED);
8215 }
8216
8217 if (CNIC_LOADED(bp))
8218
8219 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8220 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8221 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8222
8223
8224 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8225 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8226 SB_DISABLED);
8227
8228 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8229 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8230 0);
8231
8232
8233 if (bp->common.int_block == INT_BLOCK_HC) {
8234 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8235 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8236 } else {
8237 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8238 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8239 }
8240
8241 if (CNIC_LOADED(bp)) {
8242
8243 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8244
8245
8246
8247
8248 for (i = 0; i < 200; i++) {
8249 msleep(10);
8250 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8251 break;
8252 }
8253 }
8254
8255 bnx2x_clear_func_ilt(bp, func);
8256
8257
8258
8259
8260 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8261 struct ilt_client_info ilt_cli;
8262
8263 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8264 ilt_cli.start = 0;
8265 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8266 ilt_cli.client_num = ILT_CLIENT_TM;
8267
8268 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8269 }
8270
8271
8272 if (!CHIP_IS_E1x(bp))
8273 bnx2x_pf_disable(bp);
8274
8275 bp->dmae_ready = 0;
8276}
8277
8278static void bnx2x_reset_port(struct bnx2x *bp)
8279{
8280 int port = BP_PORT(bp);
8281 u32 val;
8282
8283
8284 bnx2x__link_reset(bp);
8285
8286 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8287
8288
8289 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8290
8291 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8292 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8293
8294
8295 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8296
8297 msleep(100);
8298
8299 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8300 if (val)
8301 DP(NETIF_MSG_IFDOWN,
8302 "BRB1 is not empty %d blocks are occupied\n", val);
8303
8304
8305}
8306
8307static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
8308{
8309 struct bnx2x_func_state_params func_params = {NULL};
8310
8311
8312 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8313
8314 func_params.f_obj = &bp->func_obj;
8315 func_params.cmd = BNX2X_F_CMD_HW_RESET;
8316
8317 func_params.params.hw_init.load_phase = load_code;
8318
8319 return bnx2x_func_state_change(bp, &func_params);
8320}
8321
8322static int bnx2x_func_stop(struct bnx2x *bp)
8323{
8324 struct bnx2x_func_state_params func_params = {NULL};
8325 int rc;
8326
8327
8328 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8329 func_params.f_obj = &bp->func_obj;
8330 func_params.cmd = BNX2X_F_CMD_STOP;
8331
8332
8333
8334
8335
8336
8337
8338 rc = bnx2x_func_state_change(bp, &func_params);
8339 if (rc) {
8340#ifdef BNX2X_STOP_ON_ERROR
8341 return rc;
8342#else
8343 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
8344 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
8345 return bnx2x_func_state_change(bp, &func_params);
8346#endif
8347 }
8348
8349 return 0;
8350}
8351
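/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */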
8360u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8361{
8362 u32 reset_code = 0;
8363 int port = BP_PORT(bp);
8364
8365
8366 if (unload_mode == UNLOAD_NORMAL)
8367 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8368
8369 else if (bp->flags & NO_WOL_FLAG)
8370 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8371
8372 else if (bp->wol) {
8373 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8374 u8 *mac_addr = bp->dev->dev_addr;
8375 u32 val;
8376 u16 pmc;
8377
8378
8379
8380
8381 u8 entry = (BP_VN(bp) + 1)*8;
8382
8383 val = (mac_addr[0] << 8) | mac_addr[1];
8384 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8385
8386 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8387 (mac_addr[4] << 8) | mac_addr[5];
8388 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8389
8390
8391 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
8392 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8393 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
8394
8395 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8396
8397 } else
8398 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8399
8400
8401 if (!BP_NOMCP(bp))
8402 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8403 else {
8404 int path = BP_PATH(bp);
8405
8406 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
8407 path, load_count[path][0], load_count[path][1],
8408 load_count[path][2]);
8409 load_count[path][0]--;
8410 load_count[path][1 + port]--;
8411 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
8412 path, load_count[path][0], load_count[path][1],
8413 load_count[path][2]);
8414 if (load_count[path][0] == 0)
8415 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8416 else if (load_count[path][1 + port] == 0)
8417 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8418 else
8419 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8420 }
8421
8422 return reset_code;
8423}
8424
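/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */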
8431void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
8432{
8433 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8434
8435
8436 if (!BP_NOMCP(bp))
8437 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
8438}
8439
8440static int bnx2x_func_wait_started(struct bnx2x *bp)
8441{
8442 int tout = 50;
8443 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8444
8445 if (!bp->port.pmf)
8446 return 0;
8447
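	/* (assumption: no attention from the MCP at this stage)
	 * The PMF is probably in the middle of a TX disable/enable
	 * transaction, so:
	 * 1. Sync the IRQ for the default SB
	 * 2. Sync the SP queue - this guarantees that attention handling
	 *    has started
	 * 3. Wait until the TX disable/enable transaction completes
	 *
	 * 1+2 guarantee that if a DCBX attention was scheduled it has
	 * already moved the pending state of the transaction from STARTED
	 * to TX_STOPPED; once completion for the transaction is received
	 * the state is TX_STOPPED, and it returns to STARTED only after
	 * the TX_STOPPED->STARTED transition completes.
	 */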
8463 if (msix)
8464 synchronize_irq(bp->msix_table[0].vector);
8465 else
8466 synchronize_irq(bp->pdev->irq);
8467
8468 flush_workqueue(bnx2x_wq);
8469
8470 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8471 BNX2X_F_STATE_STARTED && tout--)
8472 msleep(20);
8473
8474 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
8475 BNX2X_F_STATE_STARTED) {
8476#ifdef BNX2X_STOP_ON_ERROR
8477 BNX2X_ERR("Wrong function state\n");
8478 return -EBUSY;
8479#else
8480
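		/* Failed to complete the transaction in a "good way".
		 * Force both transactions with the CLR bit.
		 */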
8484 struct bnx2x_func_state_params func_params = {NULL};
8485
8486 DP(NETIF_MSG_IFDOWN,
8487		   "Hmmm... unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
8488
8489 func_params.f_obj = &bp->func_obj;
8490 __set_bit(RAMROD_DRV_CLR_ONLY,
8491 &func_params.ramrod_flags);
8492
8493
8494 func_params.cmd = BNX2X_F_CMD_TX_STOP;
8495 bnx2x_func_state_change(bp, &func_params);
8496
8497
8498 func_params.cmd = BNX2X_F_CMD_TX_START;
8499 return bnx2x_func_state_change(bp, &func_params);
8500#endif
8501 }
8502
8503 return 0;
8504}
8505
8506void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8507{
8508 int port = BP_PORT(bp);
8509 int i, rc = 0;
8510 u8 cos;
8511 struct bnx2x_mcast_ramrod_params rparam = {NULL};
8512 u32 reset_code;
8513
8514
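	/* Wait until tx fastpath tasks complete */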
8515 for_each_tx_queue(bp, i) {
8516 struct bnx2x_fastpath *fp = &bp->fp[i];
8517
8518 for_each_cos_in_tx_queue(fp, cos)
8519 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
8520#ifdef BNX2X_STOP_ON_ERROR
8521 if (rc)
8522 return;
8523#endif
8524 }
8525
8526
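	/* Give HW time to discard old tx messages */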
8527	usleep_range(1000, 2000);
8528
8529
8530 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8531 false);
8532 if (rc < 0)
8533 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8534
8535
8536 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
8537 true);
8538 if (rc < 0)
8539 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
8540 rc);
8541
8542
8543 if (!CHIP_IS_E1(bp))
8544 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8545
8546
8547
8548
8549
8550 netif_addr_lock_bh(bp->dev);
8551
8552 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
8553 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8554 else
8555 bnx2x_set_storm_rx_mode(bp);
8556
8557
8558 rparam.mcast_obj = &bp->mcast_obj;
8559 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
8560 if (rc < 0)
8561 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
8562
8563 netif_addr_unlock_bh(bp->dev);
8564
8565
8566
8567
8568
8569
8570
8571
8572 reset_code = bnx2x_send_unload_req(bp, unload_mode);
8573
8574
8575
8576
8577
8578 rc = bnx2x_func_wait_started(bp);
8579 if (rc) {
8580 BNX2X_ERR("bnx2x_func_wait_started failed\n");
8581#ifdef BNX2X_STOP_ON_ERROR
8582 return;
8583#endif
8584 }
8585
8586
8587
8588
8589 for_each_eth_queue(bp, i)
8590 if (bnx2x_stop_queue(bp, i))
8591#ifdef BNX2X_STOP_ON_ERROR
8592 return;
8593#else
8594 goto unload_error;
8595#endif
8596
8597 if (CNIC_LOADED(bp)) {
8598 for_each_cnic_queue(bp, i)
8599 if (bnx2x_stop_queue(bp, i))
8600#ifdef BNX2X_STOP_ON_ERROR
8601 return;
8602#else
8603 goto unload_error;
8604#endif
8605 }
8606
8607
8608
8609
8610 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
8611 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
8612
8613#ifndef BNX2X_STOP_ON_ERROR
8614unload_error:
8615#endif
8616 rc = bnx2x_func_stop(bp);
8617 if (rc) {
8618 BNX2X_ERR("Function stop failed!\n");
8619#ifdef BNX2X_STOP_ON_ERROR
8620 return;
8621#endif
8622 }
8623
8624
8625 bnx2x_netif_stop(bp, 1);
8626
8627 bnx2x_del_all_napi(bp);
8628 if (CNIC_LOADED(bp))
8629 bnx2x_del_all_napi_cnic(bp);
8630
8631
8632 bnx2x_free_irq(bp);
8633
8634
8635 rc = bnx2x_reset_hw(bp, reset_code);
8636 if (rc)
8637 BNX2X_ERR("HW_RESET failed\n");
8638
8639
8640
8641 bnx2x_send_unload_done(bp, keep_link);
8642}
8643
8644void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8645{
8646 u32 val;
8647
8648 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
8649
8650 if (CHIP_IS_E1(bp)) {
8651 int port = BP_PORT(bp);
8652 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8653 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8654
8655 val = REG_RD(bp, addr);
8656 val &= ~(0x300);
8657 REG_WR(bp, addr, val);
8658 } else {
8659 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8660 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8661 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8662 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8663 }
8664}
8665
8666
8667static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8668{
8669 u32 val;
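
	/* The PXP doorbell and internal-write discard gates exist
	 * for "not E1" chips only
	 */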
8672 if (!CHIP_IS_E1(bp)) {
8673
8674 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
8675
8676 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
8677 }
8678
8679
8680 if (CHIP_IS_E1x(bp)) {
8681
8682 val = REG_RD(bp, HC_REG_CONFIG_1);
8683 REG_WR(bp, HC_REG_CONFIG_1,
8684 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
8685 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
8686
8687 val = REG_RD(bp, HC_REG_CONFIG_0);
8688 REG_WR(bp, HC_REG_CONFIG_0,
8689 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
8690 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
8691 } else {
8692
8693 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8694
8695 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
8696 (!close) ?
8697 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
8698 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
8699 }
8700
8701 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
8702 close ? "closing" : "opening");
8703 mmiowb();
8704}
8705
8706#define SHARED_MF_CLP_MAGIC 0x80000000
8707
8708static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8709{
8710
8711 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8712 *magic_val = val & SHARED_MF_CLP_MAGIC;
8713 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8714}
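
/**
 * bnx2x_clp_reset_done - restore the value of the 'magic' bit.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the 'magic' bit.
 */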
8722static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8723{
8724
8725 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8726 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8727 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8728}
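
/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the 'magic' bit.
 *
 * Takes care of CLP configurations and invalidates the shmem validity map.
 */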
8738static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8739{
8740 u32 shmem;
8741 u32 validity_offset;
8742
8743 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
8744
8745
8746 if (!CHIP_IS_E1(bp))
8747 bnx2x_clp_reset_prep(bp, magic_val);
8748
8749
8750 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8751 validity_offset =
8752 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
8753
8754
8755 if (shmem > 0)
8756 REG_WR(bp, shmem + validity_offset, 0);
8757}
8758
8759#define MCP_TIMEOUT 5000
8760#define MCP_ONE_TIMEOUT 100
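
/**
 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT (x10 on emulation/FPGA)
 *
 * @bp:	driver handle
 */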
8767static void bnx2x_mcp_wait_one(struct bnx2x *bp)
8768{
8769
8770
8771 if (CHIP_REV_IS_SLOW(bp))
8772 msleep(MCP_ONE_TIMEOUT*10);
8773 else
8774 msleep(MCP_ONE_TIMEOUT);
8775}
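
/* Initialize bp->common.shmem_base and wait for the validity signature
 * to appear in shmem.
 */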
8780static int bnx2x_init_shmem(struct bnx2x *bp)
8781{
8782 int cnt = 0;
8783 u32 val = 0;
8784
8785 do {
8786 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8787 if (bp->common.shmem_base) {
8788 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8789 if (val & SHR_MEM_VALIDITY_MB)
8790 return 0;
8791 }
8792
8793 bnx2x_mcp_wait_one(bp);
8794
8795 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
8796
8797 BNX2X_ERR("BAD MCP validity signature\n");
8798
8799 return -ENODEV;
8800}
8801
8802static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8803{
8804 int rc = bnx2x_init_shmem(bp);
8805
8806
8807 if (!CHIP_IS_E1(bp))
8808 bnx2x_clp_reset_done(bp, magic_val);
8809
8810 return rc;
8811}
8812
8813static void bnx2x_pxp_prep(struct bnx2x *bp)
8814{
8815 if (!CHIP_IS_E1(bp)) {
8816 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8817 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8818 mmiowb();
8819 }
8820}
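
/* Reset the whole chip except for the blocks named in the not_reset
 * masks below (PXP, HC, MISC, GRC, RBCN, MDIO, the MAC hard cores)
 * and, unless a global reset is requested, the MCP common blocks.
 */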
8832static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
8833{
8834 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8835 u32 global_bits2, stay_reset2;
8836
8837
8838
8839
8840
8841 global_bits2 =
8842 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
8843 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
8844
8845
8846
8847
8848
8849
8850 not_reset_mask1 =
8851 MISC_REGISTERS_RESET_REG_1_RST_HC |
8852 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8853 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8854
8855 not_reset_mask2 =
8856 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
8857 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8858 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8859 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8860 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8861 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8862 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8863 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
8864 MISC_REGISTERS_RESET_REG_2_RST_ATC |
8865 MISC_REGISTERS_RESET_REG_2_PGLC |
8866 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
8867 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
8868 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
8869 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
8870 MISC_REGISTERS_RESET_REG_2_UMAC0 |
8871 MISC_REGISTERS_RESET_REG_2_UMAC1;
8872
8873
8874
8875
8876
8877 stay_reset2 =
8878 MISC_REGISTERS_RESET_REG_2_XMAC |
8879 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
8880
8881
8882 reset_mask1 = 0xffffffff;
8883
8884 if (CHIP_IS_E1(bp))
8885 reset_mask2 = 0xffff;
8886 else if (CHIP_IS_E1H(bp))
8887 reset_mask2 = 0x1ffff;
8888 else if (CHIP_IS_E2(bp))
8889 reset_mask2 = 0xfffff;
8890 else
8891 reset_mask2 = 0x3ffffff;
8892
8893
8894 if (!global)
8895 reset_mask2 &= ~global_bits2;
8896
8897
8898
8899
8900
8901
8902
8903
8904
8905
8906
8907
8908
8909
8910
8911 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8912 reset_mask2 & (~not_reset_mask2));
8913
8914 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8915 reset_mask1 & (~not_reset_mask1));
8916
8917 barrier();
8918 mmiowb();
8919
8920 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
8921 reset_mask2 & (~stay_reset2));
8922
8923 barrier();
8924 mmiowb();
8925
8926 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8927 mmiowb();
8928}
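
/**
 * bnx2x_er_poll_igu_vq - poll until no IGU pending write requests remain.
 *
 * @bp:	driver handle
 *
 * Returns 0 if the pending-bits status cleared within ~1s, -EBUSY otherwise.
 */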
8939static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
8940{
	int cnt = 1000;
8942 u32 pend_bits = 0;
8943
8944 do {
8945 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
8946
8947 if (pend_bits == 0)
8948 break;
8949
		usleep_range(1000, 2000);
8951 } while (cnt-- > 0);
8952
8953 if (cnt <= 0) {
8954 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
8955 pend_bits);
8956 return -EBUSY;
8957 }
8958
8959 return 0;
8960}
8961
8962static int bnx2x_process_kill(struct bnx2x *bp, bool global)
8963{
8964 int cnt = 1000;
8965 u32 val = 0;
8966 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8967 u32 tags_63_32 = 0;
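
	/* Empty the Tetris buffer, wait for 1s */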
8971 do {
8972 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8973 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8974 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8975 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8976 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8977 if (CHIP_IS_E3(bp))
8978 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
8979
8980 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8981 ((port_is_idle_0 & 0x1) == 0x1) &&
8982 ((port_is_idle_1 & 0x1) == 0x1) &&
8983 (pgl_exp_rom2 == 0xffffffff) &&
8984 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
8985 break;
		usleep_range(1000, 2000);
8987 } while (cnt-- > 0);
8988
8989 if (cnt <= 0) {
		BNX2X_ERR("Tetris buffer didn't empty or there are still outstanding read requests after 1s!\n");
8991 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8992 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8993 pgl_exp_rom2);
8994 return -EAGAIN;
8995 }
8996
8997 barrier();
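
	/* Close gates #2, #3 and #4 */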
9000 bnx2x_set_234_gates(bp, true);
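
	/* Poll for IGU VQs on E2 and newer chips */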
9003 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9004 return -EAGAIN;
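
	/* Clear "unprepared" bit */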
9010 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9011 barrier();
9012
9013
9014 mmiowb();
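
	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */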
	usleep_range(1000, 2000);
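
	/* Prepare to chip reset: */
	/* MCP */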
9023 if (global)
9024 bnx2x_reset_mcp_prep(bp, &val);
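
	/* PXP */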
9027 bnx2x_pxp_prep(bp);
9028 barrier();
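
	/* reset the chip */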
9031 bnx2x_process_kill_chip_reset(bp, global);
9032 barrier();
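
	/* Recover after reset: */
	/* MCP */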
9036 if (global && bnx2x_reset_mcp_comp(bp, val))
9037 return -EAGAIN;
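
	/* Open the gates #2, #3 and #4 */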
9042 bnx2x_set_234_gates(bp, false);
9043
9044
9045
9046
9047 return 0;
9048}
9049
9050static int bnx2x_leader_reset(struct bnx2x *bp)
9051{
9052 int rc = 0;
9053 bool global = bnx2x_reset_is_global(bp);
9054 u32 load_code;
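
	/* If not going to reset the MCP, load a "fake" driver to reset
	 * the HW while this driver is the owner of the HW.
	 */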
9059 if (!global && !BP_NOMCP(bp)) {
9060 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9061 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9062 if (!load_code) {
9063 BNX2X_ERR("MCP response failure, aborting\n");
9064 rc = -EAGAIN;
9065 goto exit_leader_reset;
9066 }
9067 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9068 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9069 BNX2X_ERR("MCP unexpected resp, aborting\n");
9070 rc = -EAGAIN;
9071 goto exit_leader_reset2;
9072 }
9073 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9074 if (!load_code) {
9075 BNX2X_ERR("MCP response failure, aborting\n");
9076 rc = -EAGAIN;
9077 goto exit_leader_reset2;
9078 }
9079 }
9080
9081
9082 if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad happened on engine %d! Aii!\n",
			  BP_PATH(bp));
9085 rc = -EAGAIN;
9086 goto exit_leader_reset2;
9087 }
9088
9089
9090
9091
9092
9093 bnx2x_set_reset_done(bp);
9094 if (global)
9095 bnx2x_clear_reset_global(bp);
9096
9097exit_leader_reset2:
9098
9099 if (!global && !BP_NOMCP(bp)) {
9100 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9101 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9102 }
9103exit_leader_reset:
9104 bp->is_leader = 0;
9105 bnx2x_release_leader_lock(bp);
9106 smp_mb();
9107 return rc;
9108}
9109
9110static void bnx2x_recovery_failed(struct bnx2x *bp)
9111{
9112 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9113
9114
9115 netif_device_detach(bp->dev);
9116
9117
9118
9119
9120
9121 bnx2x_set_reset_in_progress(bp);
9122
9123
9124 bnx2x_set_power_state(bp, PCI_D3hot);
9125
9126 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9127
9128 smp_mb();
9129}
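
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_sp_rtnl_task() ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */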
9136static void bnx2x_parity_recover(struct bnx2x *bp)
9137{
9138 bool global = false;
9139 u32 error_recovered, error_unrecovered;
9140 bool is_parity;
9141
9142 DP(NETIF_MSG_HW, "Handling parity\n");
9143 while (1) {
9144 switch (bp->recovery_state) {
9145 case BNX2X_RECOVERY_INIT:
9146 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9147 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9148 WARN_ON(!is_parity);
9149
9150
9151 if (bnx2x_trylock_leader_lock(bp)) {
9152 bnx2x_set_reset_in_progress(bp);
9153
9154
9155
9156
9157
9158
9159 if (global)
9160 bnx2x_set_reset_global(bp);
9161
9162 bp->is_leader = 1;
9163 }
9164
9165
9166
9167 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9168 return;
9169
9170 bp->recovery_state = BNX2X_RECOVERY_WAIT;
9171
9172
9173
9174
9175
9176 smp_mb();
9177 break;
9178
9179 case BNX2X_RECOVERY_WAIT:
9180 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9181 if (bp->is_leader) {
9182 int other_engine = BP_PATH(bp) ? 0 : 1;
9183 bool other_load_status =
9184 bnx2x_get_load_status(bp, other_engine);
9185 bool load_status =
9186 bnx2x_get_load_status(bp, BP_PATH(bp));
9187 global = bnx2x_reset_is_global(bp);
9188
9189
9190
9191
9192
9193
9194
9195
9196
9197 if (load_status ||
9198 (global && other_load_status)) {
9199
9200
9201
9202 schedule_delayed_work(&bp->sp_rtnl_task,
9203 HZ/10);
9204 return;
9205 } else {
9206
9207
9208
9209
9210
9211 if (bnx2x_leader_reset(bp)) {
9212 bnx2x_recovery_failed(bp);
9213 return;
9214 }
9215
9216
9217
9218
9219
9220
9221 break;
9222 }
9223 } else {
9224 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
9225
9226
9227
9228
9229
9230
9231 if (bnx2x_trylock_leader_lock(bp)) {
9232
9233
9234
9235 bp->is_leader = 1;
9236 break;
9237 }
9238
9239 schedule_delayed_work(&bp->sp_rtnl_task,
9240 HZ/10);
9241 return;
9242
9243 } else {
9244
9245
9246
9247
9248 if (bnx2x_reset_is_global(bp)) {
9249 schedule_delayed_work(
9250 &bp->sp_rtnl_task,
9251 HZ/10);
9252 return;
9253 }
9254
9255 error_recovered =
9256 bp->eth_stats.recoverable_error;
9257 error_unrecovered =
9258 bp->eth_stats.unrecoverable_error;
9259 bp->recovery_state =
9260 BNX2X_RECOVERY_NIC_LOADING;
9261 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
9262 error_unrecovered++;
9263 netdev_err(bp->dev,
9264 "Recovery failed. Power cycle needed\n");
9265
9266 netif_device_detach(bp->dev);
9267
9268 bnx2x_set_power_state(
9269 bp, PCI_D3hot);
9270 smp_mb();
9271 } else {
9272 bp->recovery_state =
9273 BNX2X_RECOVERY_DONE;
9274 error_recovered++;
9275 smp_mb();
9276 }
9277 bp->eth_stats.recoverable_error =
9278 error_recovered;
9279 bp->eth_stats.unrecoverable_error =
9280 error_unrecovered;
9281
9282 return;
9283 }
9284 }
9285 default:
9286 return;
9287 }
9288 }
9289}
9290
9291static int bnx2x_close(struct net_device *dev);
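
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */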
9296static void bnx2x_sp_rtnl_task(struct work_struct *work)
9297{
9298 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
9299
9300 rtnl_lock();
9301
9302 if (!netif_running(bp->dev))
9303 goto sp_rtnl_exit;
9304
9305
9306#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump;\n"
		  "you will need to reboot when done\n");
9309 goto sp_rtnl_not_reset;
9310#endif
9311
9312 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
9313
9314
9315
9316
9317 bp->sp_rtnl_state = 0;
9318 smp_mb();
9319
9320 bnx2x_parity_recover(bp);
9321
9322 goto sp_rtnl_exit;
9323 }
9324
9325 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
9326
9327
9328
9329
9330 bp->sp_rtnl_state = 0;
9331 smp_mb();
9332
9333 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
9334 bnx2x_nic_load(bp, LOAD_NORMAL);
9335
9336 goto sp_rtnl_exit;
9337 }
9338#ifdef BNX2X_STOP_ON_ERROR
9339sp_rtnl_not_reset:
9340#endif
9341 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
9342 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
9343 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9344 bnx2x_after_function_update(bp);
9345
9346
9347
9348
9349
9350 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
9351 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
9352 netif_device_detach(bp->dev);
9353 bnx2x_close(bp->dev);
9354 }
9355
9356sp_rtnl_exit:
9357 rtnl_unlock();
9358}
9359
9360
9361
9362static void bnx2x_period_task(struct work_struct *work)
9363{
9364 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
9365
9366 if (!netif_running(bp->dev))
9367 goto period_task_exit;
9368
9369 if (CHIP_REV_IS_SLOW(bp)) {
9370 BNX2X_ERR("period task called on emulation, ignoring\n");
9371 goto period_task_exit;
9372 }
9373
9374 bnx2x_acquire_phy_lock(bp);
9375
9376
9377
9378
9379
9380 smp_mb();
9381 if (bp->port.pmf) {
9382 bnx2x_period_func(&bp->link_params, &bp->link_vars);
9383
9384
9385 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
9386 }
9387
9388 bnx2x_release_phy_lock(bp);
9389period_task_exit:
9390 return;
9391}
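
/*
 * Init service functions
 */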
9397static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
9398{
9399 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
9400 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
9401 return base + (BP_ABS_FUNC(bp)) * stride;
9402}
9403
9404static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
9405{
9406 u32 reg = bnx2x_get_pretend_reg(bp);
9407
9408
9409 mmiowb();
9410
9411
9412 REG_WR(bp, reg, 0);
9413 REG_RD(bp, reg);
9414
9415
9416 bnx2x_int_disable(bp);
9417
9418
9419 mmiowb();
9420
9421
9422 REG_WR(bp, reg, BP_ABS_FUNC(bp));
9423 REG_RD(bp, reg);
9424}
9425
9426static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
9427{
9428 if (CHIP_IS_E1(bp))
9429 bnx2x_int_disable(bp);
9430 else
9431 bnx2x_undi_int_disable_e1h(bp);
9432}
9433
9434static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
9435 struct bnx2x_mac_vals *vals)
9436{
9437 u32 val, base_addr, offset, mask, reset_reg;
9438 bool mac_stopped = false;
9439 u8 port = BP_PORT(bp);
9440
9441
9442 vals->bmac_addr = 0;
9443 vals->umac_addr = 0;
9444 vals->xmac_addr = 0;
9445 vals->emac_addr = 0;
9446
9447 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
9448
9449 if (!CHIP_IS_E3(bp)) {
9450 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
9451 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
9452 if ((mask & reset_reg) && val) {
9453 u32 wb_data[2];
9454 BNX2X_DEV_INFO("Disable bmac Rx\n");
9455 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
9456 : NIG_REG_INGRESS_BMAC0_MEM;
9457 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
9458 : BIGMAC_REGISTER_BMAC_CONTROL;
9459
9460
9461
9462
9463
9464
9465
9466 wb_data[0] = REG_RD(bp, base_addr + offset);
9467 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
9468 vals->bmac_addr = base_addr + offset;
9469 vals->bmac_val[0] = wb_data[0];
9470 vals->bmac_val[1] = wb_data[1];
9471 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
9472 REG_WR(bp, vals->bmac_addr, wb_data[0]);
9473 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
9474
9475 }
9476 BNX2X_DEV_INFO("Disable emac Rx\n");
9477 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
9478 vals->emac_val = REG_RD(bp, vals->emac_addr);
9479 REG_WR(bp, vals->emac_addr, 0);
9480 mac_stopped = true;
9481 } else {
9482 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
9483 BNX2X_DEV_INFO("Disable xmac Rx\n");
9484 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
9485 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
9486 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9487 val & ~(1 << 1));
9488 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9489 val | (1 << 1));
9490 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
9491 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
9492 REG_WR(bp, vals->xmac_addr, 0);
9493 mac_stopped = true;
9494 }
9495 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
9496 if (mask & reset_reg) {
9497 BNX2X_DEV_INFO("Disable umac Rx\n");
9498 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
9499 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
9500 vals->umac_val = REG_RD(bp, vals->umac_addr);
9501 REG_WR(bp, vals->umac_addr, 0);
9502 mac_stopped = true;
9503 }
9504 }
9505
9506 if (mac_stopped)
9507 msleep(20);
9508
9509}
9510
9511#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9512#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
9513#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
9514#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
9515
9516static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
9517{
9518 u16 rcq, bd;
9519 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
9520
9521 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
9522 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
9523
9524 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
9525 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
9526
9527 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
9528 port, bd, rcq);
9529}
9530
9531static int bnx2x_prev_mcp_done(struct bnx2x *bp)
9532{
9533 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9534 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
9535 if (!rc) {
9536 BNX2X_ERR("MCP response failure, aborting\n");
9537 return -EBUSY;
9538 }
9539
9540 return 0;
9541}
9542
9543static struct bnx2x_prev_path_list *
9544 bnx2x_prev_path_get_entry(struct bnx2x *bp)
9545{
9546 struct bnx2x_prev_path_list *tmp_list;
9547
9548 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
9549 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9550 bp->pdev->bus->number == tmp_list->bus &&
9551 BP_PATH(bp) == tmp_list->path)
9552 return tmp_list;
9553
9554 return NULL;
9555}
9556
9557static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9558{
9559 struct bnx2x_prev_path_list *tmp_list;
	bool rc = false;
9561
9562 if (down_trylock(&bnx2x_prev_sem))
9563 return false;
9564
9565 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
9566 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9567 bp->pdev->bus->number == tmp_list->bus &&
9568 BP_PATH(bp) == tmp_list->path) {
9569 rc = true;
9570 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
9571 BP_PATH(bp));
9572 break;
9573 }
9574 }
9575
9576 up(&bnx2x_prev_sem);
9577
9578 return rc;
9579}
9580
9581static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9582{
9583 struct bnx2x_prev_path_list *tmp_list;
9584 int rc;
9585
9586 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
9587 if (!tmp_list) {
9588 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
9589 return -ENOMEM;
9590 }
9591
9592 tmp_list->bus = bp->pdev->bus->number;
9593 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9594 tmp_list->path = BP_PATH(bp);
9595 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
9596
9597 rc = down_interruptible(&bnx2x_prev_sem);
9598 if (rc) {
		BNX2X_ERR("Received %d when trying to take the lock\n", rc);
9600 kfree(tmp_list);
9601 } else {
9602 BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
9603 BP_PATH(bp));
9604 list_add(&tmp_list->list, &bnx2x_prev_list);
9605 up(&bnx2x_prev_sem);
9606 }
9607
9608 return rc;
9609}
9610
9611static int bnx2x_do_flr(struct bnx2x *bp)
9612{
9613 int i;
9614 u16 status;
9615 struct pci_dev *dev = bp->pdev;
9616
9617
9618 if (CHIP_IS_E1x(bp)) {
9619 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
9620 return -EINVAL;
9621 }
9622
9623
9624 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
9625 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
9626 bp->common.bc_ver);
9627 return -EINVAL;
9628 }
9629
9630
9631 for (i = 0; i < 4; i++) {
9632 if (i)
9633 msleep((1 << (i - 1)) * 100);
9634
9635 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
9636 if (!(status & PCI_EXP_DEVSTA_TRPND))
9637 goto clear;
9638 }
9639
9640 dev_err(&dev->dev,
9641 "transaction is not cleared; proceeding with reset anyway\n");
9642
9643clear:
9644
9645 BNX2X_DEV_INFO("Initiating FLR\n");
9646 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
9647
9648 return 0;
9649}
9650
9651static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
9652{
9653 int rc;
9654
9655 BNX2X_DEV_INFO("Uncommon unload Flow\n");
9656
9657
9658 if (bnx2x_prev_is_path_marked(bp))
9659 return bnx2x_prev_mcp_done(bp);
9660
9661
9662
9663
9664
9665 rc = bnx2x_test_firmware_version(bp, false);
9666
9667 if (!rc) {
9668
9669 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
9670 rc = bnx2x_do_flr(bp);
9671 }
9672
9673 if (!rc) {
9674
9675 BNX2X_DEV_INFO("FLR successful\n");
9676 return 0;
9677 }
9678
9679 BNX2X_DEV_INFO("Could not FLR\n");
9680
9681
9682 rc = bnx2x_prev_mcp_done(bp);
9683 if (!rc)
9684 rc = BNX2X_PREV_WAIT_NEEDED;
9685
9686 return rc;
9687}
9688
9689static int bnx2x_prev_unload_common(struct bnx2x *bp)
9690{
	u32 reset_reg, tmp_reg = 0;
	int rc;
9692 bool prev_undi = false;
9693 struct bnx2x_mac_vals mac_vals;
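
	/* It is possible a previous function received 'common' answer,
	 * but hasn't loaded yet, therefore creating a scenario of
	 * multiple functions receiving 'common' on the same path.
	 */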
9699 BNX2X_DEV_INFO("Common unload Flow\n");
9700
9701 memset(&mac_vals, 0, sizeof(mac_vals));
9702
9703 if (bnx2x_prev_is_path_marked(bp))
9704 return bnx2x_prev_mcp_done(bp);
9705
9706 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
9707
9708
9709 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
9710 u32 timer_count = 1000;
9711
9712
9713 bnx2x_prev_unload_close_mac(bp, &mac_vals);
9714
9715
9716 bnx2x_set_rx_filter(&bp->link_params, 0);
9717
9718
9719
9720
9721 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
9722 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
9723 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9724 if (tmp_reg == 0x7) {
9725 BNX2X_DEV_INFO("UNDI previously loaded\n");
9726 prev_undi = true;
9727
9728 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9729 }
9730 }
9731
9732 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9733 while (timer_count) {
9734 u32 prev_brb = tmp_reg;
9735
9736 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9737 if (!tmp_reg)
9738 break;
9739
9740 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
9741
9742
9743 if (prev_brb > tmp_reg)
9744 timer_count = 1000;
9745 else
9746 timer_count--;
9747
9748
9749 if (prev_undi)
9750 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
9751
9752 udelay(10);
9753 }
9754
9755 if (!timer_count)
9756 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
9757
9758 }
9759
9760
9761 bnx2x_reset_common(bp);
9762
9763 if (mac_vals.xmac_addr)
9764 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
9765 if (mac_vals.umac_addr)
9766 REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
9767 if (mac_vals.emac_addr)
9768 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
9769 if (mac_vals.bmac_addr) {
9770 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
9771 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
9772 }
9773
9774 rc = bnx2x_prev_mark_path(bp, prev_undi);
9775 if (rc) {
9776 bnx2x_prev_mcp_done(bp);
9777 return rc;
9778 }
9779
9780 return bnx2x_prev_mcp_done(bp);
9781}
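
/**
 * bnx2x_prev_interrupted_dmae - Process an interrupted DMAE from a previous driver
 *
 * @bp:	driver handle
 *
 * If the 'was_error' attention bit was left set (e.g. by an interrupted
 * DMAE transaction of a previous driver), clear it for this function.
 */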
9790static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
9791{
9792 if (!CHIP_IS_E1x(bp)) {
9793 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
9794 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
			BNX2X_ERR("'was error' bit was found to be set in pglueb upon startup - clearing\n");
9796 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
9797 1 << BP_FUNC(bp));
9798 }
9799 }
9800}
9801
9802static int bnx2x_prev_unload(struct bnx2x *bp)
9803{
9804 int time_counter = 10;
	int rc;
	u32 fw, hw_lock_reg, hw_lock_val;
9806 struct bnx2x_prev_path_list *prev_list;
9807 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
9808
9809
9810
9811
9812 bnx2x_prev_interrupted_dmae(bp);
9813
9814
9815 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
9816 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
9817 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
9818
9819 hw_lock_val = (REG_RD(bp, hw_lock_reg));
9820 if (hw_lock_val) {
9821 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
9822 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
9823 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9824 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
9825 }
9826
9827 BNX2X_DEV_INFO("Release Previously held hw lock\n");
9828 REG_WR(bp, hw_lock_reg, 0xffffffff);
9829 } else
9830 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
9831
9832 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
9833 BNX2X_DEV_INFO("Release previously held alr\n");
9834 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
9835 }
9836
9837
9838 do {
9839
9840 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
9841 if (!fw) {
9842 BNX2X_ERR("MCP response failure, aborting\n");
9843 rc = -EBUSY;
9844 break;
9845 }
9846
9847 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9848 rc = bnx2x_prev_unload_common(bp);
9849 break;
9850 }
9851
9852
9853 rc = bnx2x_prev_unload_uncommon(bp);
9854 if (rc != BNX2X_PREV_WAIT_NEEDED)
9855 break;
9856
9857 msleep(20);
9858 } while (--time_counter);
9859
9860 if (!time_counter || rc) {
9861 BNX2X_ERR("Failed unloading previous driver, aborting\n");
9862 rc = -EBUSY;
9863 }
9864
9865
9866 prev_list = bnx2x_prev_path_get_entry(bp);
9867 if (prev_list && (prev_list->undi & (1 << BP_PORT(bp))))
9868 bp->link_params.feature_config_flags |=
9869 FEATURE_CONFIG_BOOT_FROM_SAN;
9870
9871 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
9872
9873 return rc;
9874}
9875
9876static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
9877{
9878 u32 val, val2, val3, val4, id, boot_mode;
9879 u16 pmc;
9880
9881
9882
9883 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9884 id = ((val & 0xffff) << 16);
9885 val = REG_RD(bp, MISC_REG_CHIP_REV);
9886 id |= ((val & 0xf) << 12);
9887 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9888 id |= ((val & 0xff) << 4);
9889 val = REG_RD(bp, MISC_REG_BOND_ID);
9890 id |= (val & 0xf);
9891 bp->common.chip_id = id;
9892
9893
9894 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
9895 if (CHIP_IS_57810(bp))
9896 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
9897 (bp->common.chip_id & 0x0000FFFF);
9898 else if (CHIP_IS_57810_MF(bp))
9899 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
9900 (bp->common.chip_id & 0x0000FFFF);
9901 bp->common.chip_id |= 0x1;
9902 }
9903
9904
9905 bp->db_size = (1 << BNX2X_DB_SHIFT);
9906
9907 if (!CHIP_IS_E1x(bp)) {
9908 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
9909 if ((val & 1) == 0)
9910 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
9911 else
9912 val = (val >> 1) & 1;
9913 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
9914 "2_PORT_MODE");
9915 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
9916 CHIP_2_PORT_MODE;
9917
9918 if (CHIP_MODE_IS_4_PORT(bp))
9919 bp->pfid = (bp->pf_num >> 1);
9920 else
9921 bp->pfid = (bp->pf_num & 0x6);
9922 } else {
9923 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
9924 bp->pfid = bp->pf_num;
9925 }
9926
	BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
9928
9929 bp->link_params.chip_id = bp->common.chip_id;
9930 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9931
9932 val = (REG_RD(bp, 0x2874) & 0x55);
9933 if ((bp->common.chip_id & 0x1) ||
9934 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9935 bp->flags |= ONE_PORT_FLAG;
9936 BNX2X_DEV_INFO("single port device\n");
9937 }
9938
9939 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9940 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
9941 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9942 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9943 bp->common.flash_size, bp->common.flash_size);
9944
9945 bnx2x_init_shmem(bp);
9946
9947
9948
9949 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
9950 MISC_REG_GENERIC_CR_1 :
9951 MISC_REG_GENERIC_CR_0));
9952
9953 bp->link_params.shmem_base = bp->common.shmem_base;
9954 bp->link_params.shmem2_base = bp->common.shmem2_base;
9955 if (SHMEM2_RD(bp, size) >
9956 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
9957 bp->link_params.lfa_base =
9958 REG_RD(bp, bp->common.shmem2_base +
9959 (u32)offsetof(struct shmem2_region,
9960 lfa_host_addr[BP_PORT(bp)]));
9961 else
9962 bp->link_params.lfa_base = 0;
9963 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9964 bp->common.shmem_base, bp->common.shmem2_base);
9965
9966 if (!bp->common.shmem_base) {
9967 BNX2X_DEV_INFO("MCP not active\n");
9968 bp->flags |= NO_MCP_FLAG;
9969 return;
9970 }
9971
9972 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9973 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9974
9975 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9976 SHARED_HW_CFG_LED_MODE_MASK) >>
9977 SHARED_HW_CFG_LED_MODE_SHIFT);
9978
9979 bp->link_params.feature_config_flags = 0;
9980 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9981 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9982 bp->link_params.feature_config_flags |=
9983 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9984 else
9985 bp->link_params.feature_config_flags &=
9986 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9987
9988 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9989 bp->common.bc_ver = val;
9990 BNX2X_DEV_INFO("bc_ver %X\n", val);
9991 if (val < BNX2X_BC_VER) {
9992
9993
9994 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
9995 BNX2X_BC_VER, val);
9996 }
9997 bp->link_params.feature_config_flags |=
9998 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
9999 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
10000
10001 bp->link_params.feature_config_flags |=
10002 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
10003 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
10004 bp->link_params.feature_config_flags |=
10005 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
10006 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
10007 bp->link_params.feature_config_flags |=
10008 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
10009 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
10010
10011 bp->link_params.feature_config_flags |=
10012 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
10013 FEATURE_CONFIG_MT_SUPPORT : 0;
10014
10015 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
10016 BC_SUPPORTS_PFC_STATS : 0;
10017
10018 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
10019 BC_SUPPORTS_FCOE_FEATURES : 0;
10020
10021 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10022 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
10023 boot_mode = SHMEM_RD(bp,
10024 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10025 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
10026 switch (boot_mode) {
10027 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
10028 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
10029 break;
10030 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
10031 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
10032 break;
10033 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
10034 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
10035 break;
10036 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
10037 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
10038 break;
10039 }
10040
10041 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
10042 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10043
10044 BNX2X_DEV_INFO("%sWoL capable\n",
10045 (bp->flags & NO_WOL_FLAG) ? "not " : "");
10046
10047 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
10048 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
10049 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
10050 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
10051
10052 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
10053 val, val2, val3, val4);
10054}
10055
10056#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
10057#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
10058
10059static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
10060{
10061 int pfid = BP_FUNC(bp);
10062 int igu_sb_id;
10063 u32 val;
10064 u8 fid, igu_sb_cnt = 0;
10065
10066 bp->igu_base_sb = 0xff;
10067 if (CHIP_INT_MODE_IS_BC(bp)) {
10068 int vn = BP_VN(bp);
10069 igu_sb_cnt = bp->igu_sb_cnt;
10070 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
10071 FP_SB_MAX_E1x;
10072
10073 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
10074 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
10075
10076 return 0;
10077 }
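
	/* IGU in normal mode - read CAM */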
10080 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
10081 igu_sb_id++) {
10082 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
10083 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
10084 continue;
10085 fid = IGU_FID(val);
10086 if ((fid & IGU_FID_ENCODE_IS_PF)) {
10087 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10088 continue;
10089 if (IGU_VEC(val) == 0)
10090
10091 bp->igu_dsb_id = igu_sb_id;
10092 else {
10093 if (bp->igu_base_sb == 0xff)
10094 bp->igu_base_sb = igu_sb_id;
10095 igu_sb_cnt++;
10096 }
10097 }
10098 }
10099
10100#ifdef CONFIG_PCI_MSI
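	/* The number of CAM entries may legitimately differ from the MSI-X
	 * table size advertised in PCI; use the minimum of the two as the
	 * actual status block count.
	 */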
10107 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
10108#endif
10109
10110 if (igu_sb_cnt == 0) {
10111 BNX2X_ERR("CAM configuration error\n");
10112 return -EINVAL;
10113 }
10114
10115 return 0;
10116}
10117
10118static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
10119{
10120 int cfg_size = 0, idx, port = BP_PORT(bp);
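
	/* Aggregation of supported attributes of all external phys */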
10123 bp->port.supported[0] = 0;
10124 bp->port.supported[1] = 0;
10125 switch (bp->link_params.num_phys) {
10126 case 1:
10127 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10128 cfg_size = 1;
10129 break;
10130 case 2:
10131 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10132 cfg_size = 1;
10133 break;
10134 case 3:
10135 if (bp->link_params.multi_phy_config &
10136 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10137 bp->port.supported[1] =
10138 bp->link_params.phy[EXT_PHY1].supported;
10139 bp->port.supported[0] =
10140 bp->link_params.phy[EXT_PHY2].supported;
10141 } else {
10142 bp->port.supported[0] =
10143 bp->link_params.phy[EXT_PHY1].supported;
10144 bp->port.supported[1] =
10145 bp->link_params.phy[EXT_PHY2].supported;
10146 }
10147 cfg_size = 2;
10148 break;
10149 }
10150
10151 if (!(bp->port.supported[0] || bp->port.supported[1])) {
10152 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
10153 SHMEM_RD(bp,
10154 dev_info.port_hw_config[port].external_phy_config),
10155 SHMEM_RD(bp,
10156 dev_info.port_hw_config[port].external_phy_config2));
10157 return;
10158 }
10159
10160 if (CHIP_IS_E3(bp))
10161 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10162 else {
10163 switch (switch_cfg) {
10164 case SWITCH_CFG_1G:
10165 bp->port.phy_addr = REG_RD(
10166 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10167 break;
10168 case SWITCH_CFG_10G:
10169 bp->port.phy_addr = REG_RD(
10170 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10171 break;
10172 default:
10173 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
10174 bp->port.link_config[0]);
10175 return;
10176 }
10177 }
10178 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
10179
10180 for (idx = 0; idx < cfg_size; idx++) {
10181 if (!(bp->link_params.speed_cap_mask[idx] &
10182 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
10183 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
10184
10185 if (!(bp->link_params.speed_cap_mask[idx] &
10186 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
10187 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
10188
10189 if (!(bp->link_params.speed_cap_mask[idx] &
10190 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
10191 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
10192
10193 if (!(bp->link_params.speed_cap_mask[idx] &
10194 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
10195 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
10196
10197 if (!(bp->link_params.speed_cap_mask[idx] &
10198 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
10199 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
10200 SUPPORTED_1000baseT_Full);
10201
10202 if (!(bp->link_params.speed_cap_mask[idx] &
10203 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
10204 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
10205
10206 if (!(bp->link_params.speed_cap_mask[idx] &
10207 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
10208 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
10209
10210 }
10211
10212 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
10213 bp->port.supported[1]);
10214}
10215
10216static void bnx2x_link_settings_requested(struct bnx2x *bp)
10217{
10218 u32 link_config, idx, cfg_size = 0;
10219 bp->port.advertising[0] = 0;
10220 bp->port.advertising[1] = 0;
10221 switch (bp->link_params.num_phys) {
10222 case 1:
10223 case 2:
10224 cfg_size = 1;
10225 break;
10226 case 3:
10227 cfg_size = 2;
10228 break;
10229 }
10230 for (idx = 0; idx < cfg_size; idx++) {
10231 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
10232 link_config = bp->port.link_config[idx];
10233 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
10234 case PORT_FEATURE_LINK_SPEED_AUTO:
10235 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
10236 bp->link_params.req_line_speed[idx] =
10237 SPEED_AUTO_NEG;
10238 bp->port.advertising[idx] |=
10239 bp->port.supported[idx];
10240 if (bp->link_params.phy[EXT_PHY1].type ==
10241 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10242 bp->port.advertising[idx] |=
10243 (SUPPORTED_100baseT_Half |
10244 SUPPORTED_100baseT_Full);
10245 } else {
10246
10247 bp->link_params.req_line_speed[idx] =
10248 SPEED_10000;
10249 bp->port.advertising[idx] |=
10250 (ADVERTISED_10000baseT_Full |
10251 ADVERTISED_FIBRE);
10252 continue;
10253 }
10254 break;
10255
10256 case PORT_FEATURE_LINK_SPEED_10M_FULL:
10257 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
10258 bp->link_params.req_line_speed[idx] =
10259 SPEED_10;
10260 bp->port.advertising[idx] |=
10261 (ADVERTISED_10baseT_Full |
10262 ADVERTISED_TP);
10263 } else {
10264 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10265 link_config,
10266 bp->link_params.speed_cap_mask[idx]);
10267 return;
10268 }
10269 break;
10270
10271 case PORT_FEATURE_LINK_SPEED_10M_HALF:
10272 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
10273 bp->link_params.req_line_speed[idx] =
10274 SPEED_10;
10275 bp->link_params.req_duplex[idx] =
10276 DUPLEX_HALF;
10277 bp->port.advertising[idx] |=
10278 (ADVERTISED_10baseT_Half |
10279 ADVERTISED_TP);
10280 } else {
10281 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10282 link_config,
10283 bp->link_params.speed_cap_mask[idx]);
10284 return;
10285 }
10286 break;
10287
10288 case PORT_FEATURE_LINK_SPEED_100M_FULL:
10289 if (bp->port.supported[idx] &
10290 SUPPORTED_100baseT_Full) {
10291 bp->link_params.req_line_speed[idx] =
10292 SPEED_100;
10293 bp->port.advertising[idx] |=
10294 (ADVERTISED_100baseT_Full |
10295 ADVERTISED_TP);
10296 } else {
10297 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10298 link_config,
10299 bp->link_params.speed_cap_mask[idx]);
10300 return;
10301 }
10302 break;
10303
10304 case PORT_FEATURE_LINK_SPEED_100M_HALF:
10305 if (bp->port.supported[idx] &
10306 SUPPORTED_100baseT_Half) {
10307 bp->link_params.req_line_speed[idx] =
10308 SPEED_100;
10309 bp->link_params.req_duplex[idx] =
10310 DUPLEX_HALF;
10311 bp->port.advertising[idx] |=
10312 (ADVERTISED_100baseT_Half |
10313 ADVERTISED_TP);
10314 } else {
10315 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10316 link_config,
10317 bp->link_params.speed_cap_mask[idx]);
10318 return;
10319 }
10320 break;
10321
10322 case PORT_FEATURE_LINK_SPEED_1G:
10323 if (bp->port.supported[idx] &
10324 SUPPORTED_1000baseT_Full) {
10325 bp->link_params.req_line_speed[idx] =
10326 SPEED_1000;
10327 bp->port.advertising[idx] |=
10328 (ADVERTISED_1000baseT_Full |
10329 ADVERTISED_TP);
10330 } else {
10331 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10332 link_config,
10333 bp->link_params.speed_cap_mask[idx]);
10334 return;
10335 }
10336 break;
10337
10338 case PORT_FEATURE_LINK_SPEED_2_5G:
10339 if (bp->port.supported[idx] &
10340 SUPPORTED_2500baseX_Full) {
10341 bp->link_params.req_line_speed[idx] =
10342 SPEED_2500;
10343 bp->port.advertising[idx] |=
10344 (ADVERTISED_2500baseX_Full |
10345 ADVERTISED_TP);
10346 } else {
10347 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10348 link_config,
10349 bp->link_params.speed_cap_mask[idx]);
10350 return;
10351 }
10352 break;
10353
10354 case PORT_FEATURE_LINK_SPEED_10G_CX4:
10355 if (bp->port.supported[idx] &
10356 SUPPORTED_10000baseT_Full) {
10357 bp->link_params.req_line_speed[idx] =
10358 SPEED_10000;
10359 bp->port.advertising[idx] |=
10360 (ADVERTISED_10000baseT_Full |
10361 ADVERTISED_FIBRE);
10362 } else {
10363 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10364 link_config,
10365 bp->link_params.speed_cap_mask[idx]);
10366 return;
10367 }
10368 break;
10369 case PORT_FEATURE_LINK_SPEED_20G:
10370 bp->link_params.req_line_speed[idx] = SPEED_20000;
10371
10372 break;
10373 default:
10374 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
10375 link_config);
10376 bp->link_params.req_line_speed[idx] =
10377 SPEED_AUTO_NEG;
10378 bp->port.advertising[idx] =
10379 bp->port.supported[idx];
10380 break;
10381 }
10382
10383 bp->link_params.req_flow_ctrl[idx] = (link_config &
10384 PORT_FEATURE_FLOW_CONTROL_MASK);
10385 if (bp->link_params.req_flow_ctrl[idx] ==
10386 BNX2X_FLOW_CTRL_AUTO) {
10387 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
10388 bp->link_params.req_flow_ctrl[idx] =
10389 BNX2X_FLOW_CTRL_NONE;
10390 else
10391 bnx2x_set_requested_fc(bp);
10392 }
10393
10394 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
10395 bp->link_params.req_line_speed[idx],
10396 bp->link_params.req_duplex[idx],
10397 bp->link_params.req_flow_ctrl[idx],
10398 bp->port.advertising[idx]);
10399 }
10400}
10401
10402static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
10403{
10404 mac_hi = cpu_to_be16(mac_hi);
10405 mac_lo = cpu_to_be32(mac_lo);
10406 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
10407 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
10408}
10409
10410static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
10411{
10412 int port = BP_PORT(bp);
10413 u32 config;
10414 u32 ext_phy_type, ext_phy_config, eee_mode;
10415
10416 bp->link_params.bp = bp;
10417 bp->link_params.port = port;
10418
10419 bp->link_params.lane_config =
10420 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
10421
10422 bp->link_params.speed_cap_mask[0] =
10423 SHMEM_RD(bp,
10424 dev_info.port_hw_config[port].speed_capability_mask);
10425 bp->link_params.speed_cap_mask[1] =
10426 SHMEM_RD(bp,
10427 dev_info.port_hw_config[port].speed_capability_mask2);
10428 bp->port.link_config[0] =
10429 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
10430
10431 bp->port.link_config[1] =
10432 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
10433
10434 bp->link_params.multi_phy_config =
10435 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
10436
10437
10438
10439 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
10440 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
10441 (config & PORT_FEATURE_WOL_ENABLED));
10442
10443 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
10444 bp->link_params.lane_config,
10445 bp->link_params.speed_cap_mask[0],
10446 bp->port.link_config[0]);
10447
10448 bp->link_params.switch_cfg = (bp->port.link_config[0] &
10449 PORT_FEATURE_CONNECTED_SWITCH_MASK);
10450 bnx2x_phy_probe(&bp->link_params);
10451 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
10452
10453 bnx2x_link_settings_requested(bp);
10454
10455
10456
10457
10458
10459 ext_phy_config =
10460 SHMEM_RD(bp,
10461 dev_info.port_hw_config[port].external_phy_config);
10462 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
10463 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
10464 bp->mdio.prtad = bp->port.phy_addr;
10465
10466 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
10467 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
10468 bp->mdio.prtad =
10469 XGXS_EXT_PHY_ADDR(ext_phy_config);
10470
10471
10472 eee_mode = (((SHMEM_RD(bp, dev_info.
10473 port_feature_config[port].eee_power_mode)) &
10474 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
10475 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
10476 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
10477 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
10478 EEE_MODE_ENABLE_LPI |
10479 EEE_MODE_OUTPUT_TIME;
10480 } else {
10481 bp->link_params.eee_mode = 0;
10482 }
10483}
10484
10485void bnx2x_get_iscsi_info(struct bnx2x *bp)
10486{
10487 u32 no_flags = NO_ISCSI_FLAG;
10488 int port = BP_PORT(bp);
10489 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10490 drv_lic_key[port].max_iscsi_conn);
10491
10492 if (!CNIC_SUPPORT(bp)) {
10493 bp->flags |= no_flags;
10494 return;
10495 }
10496
10497
10498 bp->cnic_eth_dev.max_iscsi_conn =
10499 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
10500 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
10501
10502 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
10503 bp->cnic_eth_dev.max_iscsi_conn);
10504
10505
10506
10507
10508
10509 if (!bp->cnic_eth_dev.max_iscsi_conn)
10510 bp->flags |= no_flags;
10511
10512}
10513
10514static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10515{
10516
10517 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10518 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
10519 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10520 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
10521
10522
10523 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10524 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
10525 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10526 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10527}

static void bnx2x_get_fcoe_info(struct bnx2x *bp)
10529{
10530 int port = BP_PORT(bp);
10531 int func = BP_ABS_FUNC(bp);
10532 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10533 drv_lic_key[port].max_fcoe_conn);
10534
10535 if (!CNIC_SUPPORT(bp)) {
10536 bp->flags |= NO_FCOE_FLAG;
10537 return;
10538 }
10539
10540
10541 bp->cnic_eth_dev.max_fcoe_conn =
10542 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
10543 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
10544
10545
10546 if (!IS_MF(bp)) {
10547
10548 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10549 SHMEM_RD(bp,
10550 dev_info.port_hw_config[port].
10551 fcoe_wwn_port_name_upper);
10552 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10553 SHMEM_RD(bp,
10554 dev_info.port_hw_config[port].
10555 fcoe_wwn_port_name_lower);
10556
10557
10558 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10559 SHMEM_RD(bp,
10560 dev_info.port_hw_config[port].
10561 fcoe_wwn_node_name_upper);
10562 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10563 SHMEM_RD(bp,
10564 dev_info.port_hw_config[port].
10565 fcoe_wwn_node_name_lower);
10566 } else if (!IS_MF_SD(bp)) {
10567
10568
10569
10570
10571 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
10572 bnx2x_get_ext_wwn_info(bp, func);
10573
10574 } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
10575 bnx2x_get_ext_wwn_info(bp, func);
10576 }
10577
10578 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
10579
10580
10581
10582
10583
10584 if (!bp->cnic_eth_dev.max_fcoe_conn)
10585 bp->flags |= NO_FCOE_FLAG;
10586}
10587
10588static void bnx2x_get_cnic_info(struct bnx2x *bp)
10589{
10590
10591
10592
10593
10594
10595 bnx2x_get_iscsi_info(bp);
10596 bnx2x_get_fcoe_info(bp);
10597}
10598
10599static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
10600{
10601 u32 val, val2;
10602 int func = BP_ABS_FUNC(bp);
10603 int port = BP_PORT(bp);
10604 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
10605 u8 *fip_mac = bp->fip_mac;
10606
10607 if (IS_MF(bp)) {
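		/* iSCSI and FCoE NPAR MACs: if there is no iSCSI or FCoE MAC
		 * the appropriate feature should be disabled. In non-SD mode
		 * the features configuration comes from func_ext_config.
		 */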
10613 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
10614 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
10615 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
10616 val2 = MF_CFG_RD(bp, func_ext_config[func].
10617 iscsi_mac_addr_upper);
10618 val = MF_CFG_RD(bp, func_ext_config[func].
10619 iscsi_mac_addr_lower);
10620 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10621 BNX2X_DEV_INFO
10622 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10623 } else {
10624 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
10625 }
10626
10627 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
10628 val2 = MF_CFG_RD(bp, func_ext_config[func].
10629 fcoe_mac_addr_upper);
10630 val = MF_CFG_RD(bp, func_ext_config[func].
10631 fcoe_mac_addr_lower);
10632 bnx2x_set_mac_buf(fip_mac, val, val2);
10633 BNX2X_DEV_INFO
10634 ("Read FCoE L2 MAC: %pM\n", fip_mac);
10635 } else {
10636 bp->flags |= NO_FCOE_FLAG;
10637 }
10638
10639 bp->mf_ext_config = cfg;
10640
10641 } else {
10642 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
10643
10644 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
10645
10646 BNX2X_DEV_INFO("SD ISCSI MODE\n");
10647 BNX2X_DEV_INFO
10648 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10649 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
10650
10651 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
10652 BNX2X_DEV_INFO("SD FCoE MODE\n");
10653 BNX2X_DEV_INFO
10654 ("Read FIP MAC: %pM\n", fip_mac);
10655 }
10656 }
10657
10658 if (IS_MF_STORAGE_SD(bp))
10659
10660 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10661
10662 if (IS_MF_FCOE_AFEX(bp))
10663
10664 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
10665
10666 } else {
10667 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10668 iscsi_mac_upper);
10669 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
10670 iscsi_mac_lower);
10671 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10672
10673 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10674 fcoe_fip_mac_upper);
10675 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
10676 fcoe_fip_mac_lower);
10677 bnx2x_set_mac_buf(fip_mac, val, val2);
10678 }
10679
10680
10681 if (!is_valid_ether_addr(iscsi_mac)) {
10682 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
10683 memset(iscsi_mac, 0, ETH_ALEN);
10684 }
10685
10686
10687 if (!is_valid_ether_addr(fip_mac)) {
10688 bp->flags |= NO_FCOE_FLAG;
10689 memset(bp->fip_mac, 0, ETH_ALEN);
10690 }
10691}
10692
10693static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
10694{
10695 u32 val, val2;
10696 int func = BP_ABS_FUNC(bp);
10697 int port = BP_PORT(bp);
10698
10699
10700 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10701
10702 if (BP_NOMCP(bp)) {
10703 BNX2X_ERROR("warning: random MAC workaround active\n");
10704 eth_hw_addr_random(bp->dev);
10705 } else if (IS_MF(bp)) {
10706 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
10707 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
10708 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
10709 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
10710 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10711
10712 if (CNIC_SUPPORT(bp))
10713 bnx2x_get_cnic_mac_hwinfo(bp);
10714 } else {
10715
10716 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
10717 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
10718 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10719
10720 if (CNIC_SUPPORT(bp))
10721 bnx2x_get_cnic_mac_hwinfo(bp);
10722 }
10723
10724 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
10725 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
10726
10727 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
10728 dev_err(&bp->pdev->dev,
10729 "bad Ethernet MAC address configuration: %pM\n"
10730 "change it manually before bringing up the appropriate network interface\n",
10731 bp->dev->dev_addr);
10732}
10733
10734static bool bnx2x_get_dropless_info(struct bnx2x *bp)
10735{
10736 int tmp;
10737 u32 cfg;
10738
10739 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
10740
10741 tmp = BP_ABS_FUNC(bp);
10742 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
10743 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
10744 } else {
10745
10746 tmp = BP_PORT(bp);
10747 cfg = SHMEM_RD(bp,
10748 dev_info.port_hw_config[tmp].generic_features);
10749 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
10750 }
10751 return cfg;
10752}
10753
10754static int bnx2x_get_hwinfo(struct bnx2x *bp)
10755{
10756 int func = BP_ABS_FUNC(bp);
10757 int vn;
10758 u32 val = 0;
10759 int rc = 0;
10760
10761 bnx2x_get_common_hwinfo(bp);
10762
10763
10764
10765
10766 if (CHIP_IS_E1x(bp)) {
10767 bp->common.int_block = INT_BLOCK_HC;
10768
10769 bp->igu_dsb_id = DEF_SB_IGU_ID;
10770 bp->igu_base_sb = 0;
10771 } else {
10772 bp->common.int_block = INT_BLOCK_IGU;
10773
10774
10775 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
10776
10777 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
10778
10779 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
10780 int tout = 5000;
10781
10782 BNX2X_DEV_INFO("FORCING Normal Mode\n");
10783
10784 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
10785 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
10786 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
10787
10788 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
10789 tout--;
				usleep_range(1000, 2000);
10791 }
10792
10793 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
10794 dev_err(&bp->pdev->dev,
10795 "FORCING Normal Mode failed!!!\n");
10796 bnx2x_release_hw_lock(bp,
10797 HW_LOCK_RESOURCE_RESET);
10798 return -EPERM;
10799 }
10800 }
10801
10802 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
10803 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
10804 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
10805 } else
10806 BNX2X_DEV_INFO("IGU Normal Mode\n");
10807
10808 rc = bnx2x_get_igu_cam_info(bp);
10809 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
10810 if (rc)
10811 return rc;
10812 }
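
	/* Set the base FW non-default (fast path) status block ID; this value
	 * is used to initialize the fw_sb_id saved on the fp/queue structure
	 * to determine the ID used by the FW.
	 */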
10819 if (CHIP_IS_E1x(bp))
10820 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
10821 else
10822
10823
10824
10825
10826 bp->base_fw_ndsb = bp->igu_base_sb;
10827
10828 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
10829 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
10830 bp->igu_sb_cnt, bp->base_fw_ndsb);
10831
10832
10833
10834
10835
10836 bp->mf_ov = 0;
10837 bp->mf_mode = 0;
10838 vn = BP_VN(bp);
10839
10840 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
10841 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
10842 bp->common.shmem2_base, SHMEM2_RD(bp, size),
10843 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
10844
10845 if (SHMEM2_HAS(bp, mf_cfg_addr))
10846 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
10847 else
10848 bp->common.mf_cfg_base = bp->common.shmem_base +
10849 offsetof(struct shmem_region, func_mb) +
10850 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
10851
		/*
		 * get mf configuration:
		 * 1. existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE determines the owner of the MF configuration
		 */
10859 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
10860
10861 val = SHMEM_RD(bp,
10862 dev_info.shared_feature_config.config);
10863 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
10864
10865 switch (val) {
10866 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
10867 val = MF_CFG_RD(bp, func_mf_config[func].
10868 mac_upper);
				/* check for legal mac (upper bytes) */
10870 if (val != 0xffff) {
10871 bp->mf_mode = MULTI_FUNCTION_SI;
10872 bp->mf_config[vn] = MF_CFG_RD(bp,
10873 func_mf_config[func].config);
10874 } else
10875 BNX2X_DEV_INFO("illegal MAC address for SI\n");
10876 break;
10877 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
10878 if ((!CHIP_IS_E1x(bp)) &&
10879 (MF_CFG_RD(bp, func_mf_config[func].
10880 mac_upper) != 0xffff) &&
10881 (SHMEM2_HAS(bp,
10882 afex_driver_support))) {
10883 bp->mf_mode = MULTI_FUNCTION_AFEX;
10884 bp->mf_config[vn] = MF_CFG_RD(bp,
10885 func_mf_config[func].config);
10886 } else {
10887 BNX2X_DEV_INFO("can not configure afex mode\n");
10888 }
10889 break;
10890 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
10892 val = MF_CFG_RD(bp,
10893 func_mf_config[FUNC_0].e1hov_tag);
10894 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
10895
10896 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
10897 bp->mf_mode = MULTI_FUNCTION_SD;
10898 bp->mf_config[vn] = MF_CFG_RD(bp,
10899 func_mf_config[func].config);
10900 } else
10901 BNX2X_DEV_INFO("illegal OV for SD\n");
10902 break;
10903 default:
				/* Unknown configuration: reset mf_config */
10905 bp->mf_config[vn] = 0;
10906 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
10907 }
10908 }
10909
10910 BNX2X_DEV_INFO("%s function mode\n",
10911 IS_MF(bp) ? "multi" : "single");
10912
10913 switch (bp->mf_mode) {
10914 case MULTI_FUNCTION_SD:
10915 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
10916 FUNC_MF_CFG_E1HOV_TAG_MASK;
10917 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
10918 bp->mf_ov = val;
10919 bp->path_has_ovlan = true;
10920
10921 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
10922 func, bp->mf_ov, bp->mf_ov);
10923 } else {
10924 dev_err(&bp->pdev->dev,
10925 "No valid MF OV for func %d, aborting\n",
10926 func);
10927 return -EPERM;
10928 }
10929 break;
10930 case MULTI_FUNCTION_AFEX:
10931 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
10932 break;
10933 case MULTI_FUNCTION_SI:
10934 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
10935 func);
10936 break;
10937 default:
10938 if (vn) {
10939 dev_err(&bp->pdev->dev,
10940 "VN %d is in a single function mode, aborting\n",
10941 vn);
10942 return -EPERM;
10943 }
10944 break;
10945 }
10946
		/*
		 * Even if this function is in SF mode, another function on
		 * the same 4-port path may run in SD mode with an OV tag;
		 * check the other port's function to decide whether the
		 * path has an OVLAN.
		 */
10952 if (CHIP_MODE_IS_4_PORT(bp) &&
10953 !bp->path_has_ovlan &&
10954 !IS_MF(bp) &&
10955 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
10956 u8 other_port = !BP_PORT(bp);
10957 u8 other_func = BP_PATH(bp) + 2*other_port;
10958 val = MF_CFG_RD(bp,
10959 func_mf_config[other_func].e1hov_tag);
10960 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
10961 bp->path_has_ovlan = true;
10962 }
10963 }
10964
	/* adjust igu_sb_cnt to MF for E1x */
10966 if (CHIP_IS_E1x(bp) && IS_MF(bp))
10967 bp->igu_sb_cnt /= E1HVN_MAX;
10968
	/* port info */
10970 bnx2x_get_port_hwinfo(bp);
10971
	/* Get MAC addresses */
10973 bnx2x_get_mac_hwinfo(bp);
10974
10975 bnx2x_get_cnic_info(bp);
10976
10977 return rc;
10978}
10979
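/* Parse the PCI VPD read-only section; on Dell-branded boards the
 * VENDOR0 keyword carries a firmware/version string which is copied
 * into bp->fw_ver for reporting.
 */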
10980static void bnx2x_read_fwinfo(struct bnx2x *bp)
10981{
10982 int cnt, i, block_end, rodi;
10983 char vpd_start[BNX2X_VPD_LEN+1];
10984 char str_id_reg[VENDOR_ID_LEN+1];
10985 char str_id_cap[VENDOR_ID_LEN+1];
10986 char *vpd_data;
10987 char *vpd_extended_data = NULL;
10988 u8 len;
10989
10990 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
10991 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
10992
10993 if (cnt < BNX2X_VPD_LEN)
10994 goto out_not_found;
10995
	/* VPD RO tag should be first tag after identifier string, hence
	 * we should be able to find it in first BNX2X_VPD_LEN chars
	 */
10999 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
11000 PCI_VPD_LRDT_RO_DATA);
11001 if (i < 0)
11002 goto out_not_found;
11003
11004 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
11005 pci_vpd_lrdt_size(&vpd_start[i]);
11006
11007 i += PCI_VPD_LRDT_TAG_SIZE;
11008
11009 if (block_end > BNX2X_VPD_LEN) {
11010 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
11011 if (vpd_extended_data == NULL)
11012 goto out_not_found;
11013
		/* read rest of vpd image into vpd_extended_data */
11015 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
11016 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
11017 block_end - BNX2X_VPD_LEN,
11018 vpd_extended_data + BNX2X_VPD_LEN);
11019 if (cnt < (block_end - BNX2X_VPD_LEN))
11020 goto out_not_found;
11021 vpd_data = vpd_extended_data;
11022 } else
11023 vpd_data = vpd_start;
11024
11025
	/* look for the Manufacturer ID keyword in the RO section */
11027 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11028 PCI_VPD_RO_KEYWORD_MFR_ID);
11029 if (rodi < 0)
11030 goto out_not_found;
11031
11032 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11033
11034 if (len != VENDOR_ID_LEN)
11035 goto out_not_found;
11036
11037 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11038
	/* vendor specific info */
11040 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
11041 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
11042 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
11043 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
11044
11045 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11046 PCI_VPD_RO_KEYWORD_VENDOR0);
11047 if (rodi >= 0) {
11048 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11049
11050 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11051
11052 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
11053 memcpy(bp->fw_ver, &vpd_data[rodi], len);
11054 bp->fw_ver[len] = ' ';
11055 }
11056 }
11057 kfree(vpd_extended_data);
11058 return;
11059 }
11060out_not_found:
11061 kfree(vpd_extended_data);
11062 return;
11063}
11064
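/* Build the INIT_MODE_FLAGS bitmap (ASIC/FPGA/EMUL, 2/4-port, chip
 * family, MF mode, endianness) that the init code uses to select the
 * register initialization values matching this configuration.
 */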
11065static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
11066{
11067 u32 flags = 0;
11068
11069 if (CHIP_REV_IS_FPGA(bp))
11070 SET_FLAGS(flags, MODE_FPGA);
11071 else if (CHIP_REV_IS_EMUL(bp))
11072 SET_FLAGS(flags, MODE_EMUL);
11073 else
11074 SET_FLAGS(flags, MODE_ASIC);
11075
11076 if (CHIP_MODE_IS_4_PORT(bp))
11077 SET_FLAGS(flags, MODE_PORT4);
11078 else
11079 SET_FLAGS(flags, MODE_PORT2);
11080
11081 if (CHIP_IS_E2(bp))
11082 SET_FLAGS(flags, MODE_E2);
11083 else if (CHIP_IS_E3(bp)) {
11084 SET_FLAGS(flags, MODE_E3);
11085 if (CHIP_REV(bp) == CHIP_REV_Ax)
11086 SET_FLAGS(flags, MODE_E3_A0);
11087 else
11088 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
11089 }
11090
11091 if (IS_MF(bp)) {
11092 SET_FLAGS(flags, MODE_MF);
11093 switch (bp->mf_mode) {
11094 case MULTI_FUNCTION_SD:
11095 SET_FLAGS(flags, MODE_MF_SD);
11096 break;
11097 case MULTI_FUNCTION_SI:
11098 SET_FLAGS(flags, MODE_MF_SI);
11099 break;
11100 case MULTI_FUNCTION_AFEX:
11101 SET_FLAGS(flags, MODE_MF_AFEX);
11102 break;
11103 }
11104 } else
11105 SET_FLAGS(flags, MODE_SF);
11106
11107#if defined(__LITTLE_ENDIAN)
11108 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
11109#else
11110 SET_FLAGS(flags, MODE_BIG_ENDIAN);
11111#endif
11112 INIT_MODE_FLAGS(bp) = flags;
11113}
11114
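/* One-time driver-private initialization called from probe: locks and
 * slow-path work items, HW and firmware info, TPA and interrupt
 * coalescing defaults, DCBX state and the periodic timer.
 */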
11115static int bnx2x_init_bp(struct bnx2x *bp)
11116{
11117 int func;
11118 int rc;
11119
11120 mutex_init(&bp->port.phy_mutex);
11121 mutex_init(&bp->fw_mb_mutex);
11122 spin_lock_init(&bp->stats_lock);
11123
11124
11125 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
11126 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
11127 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
11128 rc = bnx2x_get_hwinfo(bp);
11129 if (rc)
11130 return rc;
11131
11132 bnx2x_set_modes_bitmap(bp);
11133
11134 rc = bnx2x_alloc_mem_bp(bp);
11135 if (rc)
11136 return rc;
11137
11138 bnx2x_read_fwinfo(bp);
11139
11140 func = BP_FUNC(bp);
11141
	/* need to reset chip if undi was active */
11143 if (!BP_NOMCP(bp)) {
		/* init fw_seq */
11145 bp->fw_seq =
11146 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
11147 DRV_MSG_SEQ_NUMBER_MASK;
11148 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11149
11150 bnx2x_prev_unload(bp);
11151 }
11152
11153
11154 if (CHIP_REV_IS_FPGA(bp))
11155 dev_err(&bp->pdev->dev, "FPGA detected\n");
11156
11157 if (BP_NOMCP(bp) && (func == 0))
11158 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
11159
11160 bp->disable_tpa = disable_tpa;
11161 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
11162
	/* Set TPA flags */
11164 if (bp->disable_tpa) {
11165 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11166 bp->dev->features &= ~NETIF_F_LRO;
11167 } else {
11168 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11169 bp->dev->features |= NETIF_F_LRO;
11170 }
11171
11172 if (CHIP_IS_E1(bp))
11173 bp->dropless_fc = 0;
11174 else
11175 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
11176
11177 bp->mrrs = mrrs;
11178
11179 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
11180
	/* make sure that the numbers are in the right granularity */
11182 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
11183 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
11184
11185 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
11186
11187 init_timer(&bp->timer);
11188 bp->timer.expires = jiffies + bp->current_interval;
11189 bp->timer.data = (unsigned long) bp;
11190 bp->timer.function = bnx2x_timer;
11191
11192 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
11193 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
11194 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
11195 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
11196 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
11197 bnx2x_dcbx_init_params(bp);
11198 } else {
11199 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
11200 }
11201
11202 if (CHIP_IS_E1x(bp))
11203 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
11204 else
11205 bp->cnic_base_cl_id = FP_SB_MAX_E2;
11206
	/* Set the maximum number of traffic classes (CoS) the chip supports */
11208 if (CHIP_IS_E1x(bp))
11209 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
11210 if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
11211 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
11212 if (CHIP_IS_E3B0(bp))
11213 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
11214
	/* We need at least one default status block for slow-path events,
	 * second status block for the L2 queue, and a third status block for
	 * CNIC if supported.
	 */
11219 if (CNIC_SUPPORT(bp))
11220 bp->min_msix_vec_cnt = 3;
11221 else
11222 bp->min_msix_vec_cnt = 2;
11223 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
11224
11225 return rc;
11226}
11227
11228
/****************************************************************************
* General service functions
****************************************************************************/

/*
 * net_device service functions
 */
11237
11238static int bnx2x_open(struct net_device *dev)
11239{
11240 struct bnx2x *bp = netdev_priv(dev);
11241 bool global = false;
11242 int other_engine = BP_PATH(bp) ? 0 : 1;
11243 bool other_load_status, load_status;
11244
11245 bp->stats_init = true;
11246
11247 netif_carrier_off(dev);
11248
11249 bnx2x_set_power_state(bp, PCI_D0);
11250
11251 other_load_status = bnx2x_get_load_status(bp, other_engine);
11252 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11253
11254
	/*
	 * If parity occurred during the unload, then attentions and/or
	 * RECOVERY_IN_PROGRESS may still be set. In this case we want the
	 * first function loaded on the current engine to complete the
	 * recovery.
	 */
11260 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
11261 bnx2x_chk_parity_attn(bp, &global, true))
11262 do {
			/* If there are attentions and they are in global
			 * blocks, set the GLOBAL_RESET bit regardless whether
			 * it will be this function that will complete the
			 * recovery or not.
			 */
11269 if (global)
11270 bnx2x_set_reset_global(bp);
11271
11272
			/* Only the first function on the current engine
			 * should try to recover in open. In case of
			 * attentions in global blocks only the first in the
			 * chip should try to recover.
			 */
11278 if ((!load_status &&
11279 (!global || !other_load_status)) &&
11280 bnx2x_trylock_leader_lock(bp) &&
11281 !bnx2x_leader_reset(bp)) {
11282 netdev_info(bp->dev, "Recovered in open\n");
11283 break;
11284 }
11285
			/* recovery has failed... */
11287 bnx2x_set_power_state(bp, PCI_D3hot);
11288 bp->recovery_state = BNX2X_RECOVERY_FAILED;
11289
11290 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
11291 "If you still see this message after a few retries then power cycle is required.\n");
11292
11293 return -EAGAIN;
11294 } while (0);
11295
11296 bp->recovery_state = BNX2X_RECOVERY_DONE;
11297 return bnx2x_nic_load(bp, LOAD_OPEN);
11298}
11299
/* called with rtnl_lock */
11301static int bnx2x_close(struct net_device *dev)
11302{
11303 struct bnx2x *bp = netdev_priv(dev);
11304
	/* Unload the driver, release IRQs */
11306 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
11307
	/* Power off */
11309 bnx2x_set_power_state(bp, PCI_D3hot);
11310
11311 return 0;
11312}
11313
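/* Build a list of all netdev multicast addresses for the mcast ramrod;
 * the caller frees it with bnx2x_free_mcast_macs_list() once the
 * command completes.
 */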
11314static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
11315 struct bnx2x_mcast_ramrod_params *p)
11316{
11317 int mc_count = netdev_mc_count(bp->dev);
11318 struct bnx2x_mcast_list_elem *mc_mac =
11319 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
11320 struct netdev_hw_addr *ha;
11321
11322 if (!mc_mac)
11323 return -ENOMEM;
11324
11325 INIT_LIST_HEAD(&p->mcast_list);
11326
11327 netdev_for_each_mc_addr(ha, bp->dev) {
11328 mc_mac->mac = bnx2x_mc_addr(ha);
11329 list_add_tail(&mc_mac->link, &p->mcast_list);
11330 mc_mac++;
11331 }
11332
11333 p->mcast_list_len = mc_count;
11334
11335 return 0;
11336}
11337
11338static void bnx2x_free_mcast_macs_list(
11339 struct bnx2x_mcast_ramrod_params *p)
11340{
11341 struct bnx2x_mcast_list_elem *mc_mac =
11342 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
11343 link);
11344
11345 WARN_ON(!mc_mac);
11346 kfree(mc_mac);
11347}
11348
11349
/**
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 *
 * We will use zero (0) as a filter type for add/del unicast MACs.
 */
11356static int bnx2x_set_uc_list(struct bnx2x *bp)
11357{
11358 int rc;
11359 struct net_device *dev = bp->dev;
11360 struct netdev_hw_addr *ha;
11361 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
11362 unsigned long ramrod_flags = 0;
11363
	/* first, clear all configured MACs */
11365 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
11366 if (rc < 0) {
11367 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
11368 return rc;
11369 }
11370
11371 netdev_for_each_uc_addr(ha, dev) {
11372 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
11373 BNX2X_UC_LIST_MAC, &ramrod_flags);
11374 if (rc == -EEXIST) {
11375 DP(BNX2X_MSG_SP,
11376 "Failed to schedule ADD operations: %d\n", rc);
			/* do not treat adding same MAC as an error */
11378 rc = 0;
11379
11380 } else if (rc < 0) {
11381
11382 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
11383 rc);
11384 return rc;
11385 }
11386 }
11387
	/* Execute the pending commands */
11389 __set_bit(RAMROD_CONT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
11391 BNX2X_UC_LIST_MAC, &ramrod_flags);
11392}
11393
11394static int bnx2x_set_mc_list(struct bnx2x *bp)
11395{
11396 struct net_device *dev = bp->dev;
11397 struct bnx2x_mcast_ramrod_params rparam = {NULL};
11398 int rc = 0;
11399
11400 rparam.mcast_obj = &bp->mcast_obj;
11401
	/* first, clear all configured multicast MACs */
11403 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
11404 if (rc < 0) {
11405 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
11406 return rc;
11407 }
11408
	/* then, configure a new MACs list */
11410 if (netdev_mc_count(dev)) {
11411 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
11412 if (rc) {
11413 BNX2X_ERR("Failed to create multicast MACs list: %d\n",
11414 rc);
11415 return rc;
11416 }
11417
		/* Now add the new MACs */
11419 rc = bnx2x_config_mcast(bp, &rparam,
11420 BNX2X_MCAST_CMD_ADD);
11421 if (rc < 0)
11422 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
11423 rc);
11424
11425 bnx2x_free_mcast_macs_list(&rparam);
11426 }
11427
11428 return rc;
11429}
11430
11431
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
11433void bnx2x_set_rx_mode(struct net_device *dev)
11434{
11435 struct bnx2x *bp = netdev_priv(dev);
11436 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11437
11438 if (bp->state != BNX2X_STATE_OPEN) {
11439 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11440 return;
11441 }
11442
11443 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
11444
11445 if (dev->flags & IFF_PROMISC)
11446 rx_mode = BNX2X_RX_MODE_PROMISC;
11447 else if ((dev->flags & IFF_ALLMULTI) ||
11448 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11449 CHIP_IS_E1(bp)))
11450 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11451 else {
		/* some multicasts */
11453 if (bnx2x_set_mc_list(bp) < 0)
11454 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11455
11456 if (bnx2x_set_uc_list(bp) < 0)
11457 rx_mode = BNX2X_RX_MODE_PROMISC;
11458 }
11459
11460 bp->rx_mode = rx_mode;
11461
11462 if (IS_MF_ISCSI_SD(bp))
11463 bp->rx_mode = BNX2X_RX_MODE_NONE;
11464
	/* Schedule the rx_mode command */
11466 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
11467 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
11468 return;
11469 }
11470
11471 bnx2x_set_storm_rx_mode(bp);
11472}
11473
/* called with rtnl_lock */
11475static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11476 int devad, u16 addr)
11477{
11478 struct bnx2x *bp = netdev_priv(netdev);
11479 u16 value;
11480 int rc;
11481
11482 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11483 prtad, devad, addr);
11484
	/* The HW expects different devad if CL22 is used */
11486 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11487
11488 bnx2x_acquire_phy_lock(bp);
11489 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
11490 bnx2x_release_phy_lock(bp);
11491 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11492
11493 if (!rc)
11494 rc = value;
11495 return rc;
11496}
11497
/* called with rtnl_lock */
11499static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11500 u16 addr, u16 value)
11501{
11502 struct bnx2x *bp = netdev_priv(netdev);
11503 int rc;
11504
11505 DP(NETIF_MSG_LINK,
11506 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
11507 prtad, devad, addr, value);
11508
	/* The HW expects different devad if CL22 is used */
11510 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11511
11512 bnx2x_acquire_phy_lock(bp);
11513 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
11514 bnx2x_release_phy_lock(bp);
11515 return rc;
11516}
11517
/* called with rtnl_lock */
11519static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11520{
11521 struct bnx2x *bp = netdev_priv(dev);
11522 struct mii_ioctl_data *mdio = if_mii(ifr);
11523
11524 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11525 mdio->phy_id, mdio->reg_num, mdio->val_in);
11526
11527 if (!netif_running(dev))
11528 return -EAGAIN;
11529
11530 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11531}
11532
11533#ifdef CONFIG_NET_POLL_CONTROLLER
11534static void poll_bnx2x(struct net_device *dev)
11535{
11536 struct bnx2x *bp = netdev_priv(dev);
11537 int i;
11538
11539 for_each_eth_queue(bp, i) {
11540 struct bnx2x_fastpath *fp = &bp->fp[i];
11541 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
11542 }
11543}
11544#endif
11545
11546static int bnx2x_validate_addr(struct net_device *dev)
11547{
11548 struct bnx2x *bp = netdev_priv(dev);
11549
11550 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
11551 BNX2X_ERR("Non-valid Ethernet address\n");
11552 return -EADDRNOTAVAIL;
11553 }
11554 return 0;
11555}
11556
11557static const struct net_device_ops bnx2x_netdev_ops = {
11558 .ndo_open = bnx2x_open,
11559 .ndo_stop = bnx2x_close,
11560 .ndo_start_xmit = bnx2x_start_xmit,
11561 .ndo_select_queue = bnx2x_select_queue,
11562 .ndo_set_rx_mode = bnx2x_set_rx_mode,
11563 .ndo_set_mac_address = bnx2x_change_mac_addr,
11564 .ndo_validate_addr = bnx2x_validate_addr,
11565 .ndo_do_ioctl = bnx2x_ioctl,
11566 .ndo_change_mtu = bnx2x_change_mtu,
11567 .ndo_fix_features = bnx2x_fix_features,
11568 .ndo_set_features = bnx2x_set_features,
11569 .ndo_tx_timeout = bnx2x_tx_timeout,
11570#ifdef CONFIG_NET_POLL_CONTROLLER
11571 .ndo_poll_controller = poll_bnx2x,
11572#endif
11573 .ndo_setup_tc = bnx2x_setup_tc,
11574
11575#ifdef NETDEV_FCOE_WWNN
11576 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
11577#endif
11578};
11579
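/* Prefer 64 bit DMA (the streaming and coherent masks must both
 * succeed), otherwise fall back to 32 bit addressing.
 */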
11580static int bnx2x_set_coherency_mask(struct bnx2x *bp)
11581{
11582 struct device *dev = &bp->pdev->dev;
11583
11584 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
11585 bp->flags |= USING_DAC_FLAG;
11586 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
11587 dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
11588 return -EIO;
11589 }
11590 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
11591 dev_err(dev, "System does not support DMA, aborting\n");
11592 return -EIO;
11593 }
11594
11595 return 0;
11596}
11597
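/* Low-level PCI and netdev setup: enable the device, validate its BARs,
 * map the register BAR, resolve the PF number, clean stale indirect
 * addresses and fill in the net_device ops and feature flags.
 */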
11598static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
11599 unsigned long board_type)
11600{
11601 struct bnx2x *bp;
11602 int rc;
11603 u32 pci_cfg_dword;
11604 bool chip_is_e1x = (board_type == BCM57710 ||
11605 board_type == BCM57711 ||
11606 board_type == BCM57711E);
11607
11608 SET_NETDEV_DEV(dev, &pdev->dev);
11609 bp = netdev_priv(dev);
11610
11611 bp->dev = dev;
11612 bp->pdev = pdev;
11613 bp->flags = 0;
11614
11615 rc = pci_enable_device(pdev);
11616 if (rc) {
11617 dev_err(&bp->pdev->dev,
11618 "Cannot enable PCI device, aborting\n");
11619 goto err_out;
11620 }
11621
11622 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11623 dev_err(&bp->pdev->dev,
11624 "Cannot find PCI device base address, aborting\n");
11625 rc = -ENODEV;
11626 goto err_out_disable;
11627 }
11628
11629 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find second PCI device base address, aborting\n");
11632 rc = -ENODEV;
11633 goto err_out_disable;
11634 }
11635
11636 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
11637 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
11638 PCICFG_REVESION_ID_ERROR_VAL) {
11639 pr_err("PCI device error, probably due to fan failure, aborting\n");
11640 rc = -ENODEV;
11641 goto err_out_disable;
11642 }
11643
11644 if (atomic_read(&pdev->enable_cnt) == 1) {
11645 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11646 if (rc) {
11647 dev_err(&bp->pdev->dev,
11648 "Cannot obtain PCI resources, aborting\n");
11649 goto err_out_disable;
11650 }
11651
11652 pci_set_master(pdev);
11653 pci_save_state(pdev);
11654 }
11655
11656 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11657 if (bp->pm_cap == 0) {
11658 dev_err(&bp->pdev->dev,
11659 "Cannot find power management capability, aborting\n");
11660 rc = -EIO;
11661 goto err_out_release;
11662 }
11663
11664 if (!pci_is_pcie(pdev)) {
11665 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
11666 rc = -EIO;
11667 goto err_out_release;
11668 }
11669
11670 rc = bnx2x_set_coherency_mask(bp);
11671 if (rc)
11672 goto err_out_release;
11673
11674 dev->mem_start = pci_resource_start(pdev, 0);
11675 dev->base_addr = dev->mem_start;
11676 dev->mem_end = pci_resource_end(pdev, 0);
11677
11678 dev->irq = pdev->irq;
11679
11680 bp->regview = pci_ioremap_bar(pdev, 0);
11681 if (!bp->regview) {
11682 dev_err(&bp->pdev->dev,
11683 "Cannot map register space, aborting\n");
11684 rc = -ENOMEM;
11685 goto err_out_release;
11686 }
11687
11688
	/* In E1/E1H use pci device function given by kernel.
	 * In E2/E3 read physical function from ME register since these chips
	 * support Physical Device Assignment where kernel BDF may be
	 * arbitrary (depending on hypervisor).
	 */
11693 if (chip_is_e1x)
11694 bp->pf_num = PCI_FUNC(pdev->devfn);
11695 else {
11696 pci_read_config_dword(bp->pdev,
11697 PCICFG_ME_REGISTER, &pci_cfg_dword);
11698 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
11699 ME_REG_ABS_PF_NUM_SHIFT);
11700 }
11701 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
11702
11703 bnx2x_set_power_state(bp, PCI_D0);
11704
	/* clean indirect addresses */
11706 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11707 PCICFG_VENDOR_ID_OFFSET);
11708
	/*
	 * Clean the following indirect addresses for all functions since it
	 * is not used by the driver.
	 */
11712 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
11713 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
11714 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
11715 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
11716
11717 if (chip_is_e1x) {
11718 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
11719 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
11720 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
11721 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
11722 }
11723
	/*
	 * Enable internal target-read (in case we are probed after PF FLR).
	 * Must be done prior to any BAR read access. Only for 57712 and up.
	 */
11728 if (!chip_is_e1x)
11729 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
11730
11731 dev->watchdog_timeo = TX_TIMEOUT;
11732
11733 dev->netdev_ops = &bnx2x_netdev_ops;
11734 bnx2x_set_ethtool_ops(dev);
11735
11736 dev->priv_flags |= IFF_UNICAST_FLT;
11737
11738 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
11739 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
11740 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
11741 NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
11742
11743 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
11744 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
11745
11746 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
11747 if (bp->flags & USING_DAC_FLAG)
11748 dev->features |= NETIF_F_HIGHDMA;
11749
	/* Add Loopback capability to the device */
11751 dev->hw_features |= NETIF_F_LOOPBACK;
11752
11753#ifdef BCM_DCBNL
11754 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
11755#endif
11756
	/* get_port_hwinfo() will set prtad and mmds properly */
11758 bp->mdio.prtad = MDIO_PRTAD_NONE;
11759 bp->mdio.mmds = 0;
11760 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11761 bp->mdio.dev = dev;
11762 bp->mdio.mdio_read = bnx2x_mdio_read;
11763 bp->mdio.mdio_write = bnx2x_mdio_write;
11764
11765 return 0;
11766
11767err_out_release:
11768 if (atomic_read(&pdev->enable_cnt) == 1)
11769 pci_release_regions(pdev);
11770
11771err_out_disable:
11772 pci_disable_device(pdev);
11773 pci_set_drvdata(pdev, NULL);
11774
11775err_out:
11776 return rc;
11777}
11778
11779static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
11780{
11781 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11782
11783 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11784
	/* return value of 1=2.5GHz 2=5GHz */
11786 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11787}
11788
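/* Validate a firmware image: every section must lie within the file,
 * init_ops offsets must index into the init_ops array, and the embedded
 * version must match the one this driver was built against.
 */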
11789static int bnx2x_check_firmware(struct bnx2x *bp)
11790{
11791 const struct firmware *firmware = bp->firmware;
11792 struct bnx2x_fw_file_hdr *fw_hdr;
11793 struct bnx2x_fw_file_section *sections;
11794 u32 offset, len, num_ops;
11795 u16 *ops_offsets;
11796 int i;
11797 const u8 *fw_ver;
11798
11799 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
11800 BNX2X_ERR("Wrong FW size\n");
11801 return -EINVAL;
11802 }
11803
11804 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11805 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11806
	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
11809 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11810 offset = be32_to_cpu(sections[i].offset);
11811 len = be32_to_cpu(sections[i].len);
11812 if (offset + len > firmware->size) {
11813 BNX2X_ERR("Section %d length is out of bounds\n", i);
11814 return -EINVAL;
11815 }
11816 }
11817
	/* Likewise for the init_ops offsets */
11819 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11820 ops_offsets = (u16 *)(firmware->data + offset);
11821 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11822
11823 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11824 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11825 BNX2X_ERR("Section offset %d is out of bounds\n", i);
11826 return -EINVAL;
11827 }
11828 }
11829
	/* Check FW version */
11831 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11832 fw_ver = firmware->data + offset;
11833 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11834 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11835 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11836 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11837 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11838 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
11839 BCM_5710_FW_MAJOR_VERSION,
11840 BCM_5710_FW_MINOR_VERSION,
11841 BCM_5710_FW_REVISION_VERSION,
11842 BCM_5710_FW_ENGINEERING_VERSION);
11843 return -EINVAL;
11844 }
11845
11846 return 0;
11847}
11848
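/* Convert n bytes of big-endian 32 bit words to CPU byte order */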
11849static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11850{
11851 const __be32 *source = (const __be32 *)_source;
11852 u32 *target = (u32 *)_target;
11853 u32 i;
11854
11855 for (i = 0; i < n/4; i++)
11856 target[i] = be32_to_cpu(source[i]);
11857}
11858
11859
/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
11863static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11864{
11865 const __be32 *source = (const __be32 *)_source;
11866 struct raw_op *target = (struct raw_op *)_target;
11867 u32 i, j, tmp;
11868
11869 for (i = 0, j = 0; i < n/8; i++, j += 2) {
11870 tmp = be32_to_cpu(source[j]);
11871 target[i].op = (tmp >> 24) & 0xff;
11872 target[i].offset = tmp & 0xffffff;
11873 target[i].raw_data = be32_to_cpu(source[j + 1]);
11874 }
11875}
11876
/* IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 */
11880static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
11881{
11882 const __be32 *source = (const __be32 *)_source;
11883 struct iro *target = (struct iro *)_target;
11884 u32 i, j, tmp;
11885
11886 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
11887 target[i].base = be32_to_cpu(source[j]);
11888 j++;
11889 tmp = be32_to_cpu(source[j]);
11890 target[i].m1 = (tmp >> 16) & 0xffff;
11891 target[i].m2 = tmp & 0xffff;
11892 j++;
11893 tmp = be32_to_cpu(source[j]);
11894 target[i].m3 = (tmp >> 16) & 0xffff;
11895 target[i].size = tmp & 0xffff;
11896 j++;
11897 }
11898}
11899
11900static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11901{
11902 const __be16 *source = (const __be16 *)_source;
11903 u16 *target = (u16 *)_target;
11904 u32 i;
11905
11906 for (i = 0; i < n/2; i++)
11907 target[i] = be16_to_cpu(source[i]);
11908}
11909
11910#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11911do { \
11912 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11913 bp->arr = kmalloc(len, GFP_KERNEL); \
11914 if (!bp->arr) \
11915 goto lbl; \
11916 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
11917 (u8 *)bp->arr, len); \
11918} while (0)
11919
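/* Load the chip-specific firmware file and unpack its sections (init
 * blob, opcodes, offsets, IRO array and per-STORM data) into CPU-order
 * buffers; bnx2x_release_firmware() undoes this.
 */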
11920static int bnx2x_init_firmware(struct bnx2x *bp)
11921{
11922 const char *fw_file_name;
11923 struct bnx2x_fw_file_hdr *fw_hdr;
11924 int rc;
11925
11926 if (bp->firmware)
11927 return 0;
11928
11929 if (CHIP_IS_E1(bp))
11930 fw_file_name = FW_FILE_NAME_E1;
11931 else if (CHIP_IS_E1H(bp))
11932 fw_file_name = FW_FILE_NAME_E1H;
11933 else if (!CHIP_IS_E1x(bp))
11934 fw_file_name = FW_FILE_NAME_E2;
11935 else {
11936 BNX2X_ERR("Unsupported chip revision\n");
11937 return -EINVAL;
11938 }
11939 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
11940
11941 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
11942 if (rc) {
11943 BNX2X_ERR("Can't load firmware file %s\n",
11944 fw_file_name);
11945 goto request_firmware_exit;
11946 }
11947
11948 rc = bnx2x_check_firmware(bp);
11949 if (rc) {
11950 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
11951 goto request_firmware_exit;
11952 }
11953
11954 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11955
	/* Initialize the pointers to the init arrays */
	/* Blob */
11958 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11959
	/* Opcodes */
11961 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11962
	/* Offsets */
11964 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
11965 be16_to_cpu_n);
11966
	/* STORMs firmware */
11968 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11969 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11970 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
11971 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11972 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11973 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11974 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
11975 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11976 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11977 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11978 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
11979 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11980 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11981 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11982 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
11983 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11984
11985 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
11986
11987 return 0;
11988
11989iro_alloc_err:
11990 kfree(bp->init_ops_offsets);
11991init_offsets_alloc_err:
11992 kfree(bp->init_ops);
11993init_ops_alloc_err:
11994 kfree(bp->init_data);
11995request_firmware_exit:
11996 release_firmware(bp->firmware);
11997 bp->firmware = NULL;
11998
11999 return rc;
12000}
12001
12002static void bnx2x_release_firmware(struct bnx2x *bp)
12003{
12004 kfree(bp->init_ops_offsets);
12005 kfree(bp->init_ops);
12006 kfree(bp->init_data);
12007 release_firmware(bp->firmware);
12008 bp->firmware = NULL;
12009}
12010
12011
12012static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
12013 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
12014 .init_hw_cmn = bnx2x_init_hw_common,
12015 .init_hw_port = bnx2x_init_hw_port,
12016 .init_hw_func = bnx2x_init_hw_func,
12017
12018 .reset_hw_cmn = bnx2x_reset_common,
12019 .reset_hw_port = bnx2x_reset_port,
12020 .reset_hw_func = bnx2x_reset_func,
12021
12022 .gunzip_init = bnx2x_gunzip_init,
12023 .gunzip_end = bnx2x_gunzip_end,
12024
12025 .init_fw = bnx2x_init_firmware,
12026 .release_fw = bnx2x_release_firmware,
12027};
12028
12029void bnx2x__init_func_obj(struct bnx2x *bp)
12030{
	/* Prepare DMAE related driver resources */
12032 bnx2x_setup_dmae(bp);
12033
12034 bnx2x_init_func_obj(bp, &bp->func_obj,
12035 bnx2x_sp(bp, func_rdata),
12036 bnx2x_sp_mapping(bp, func_rdata),
12037 bnx2x_sp(bp, func_afex_rdata),
12038 bnx2x_sp_mapping(bp, func_afex_rdata),
12039 &bnx2x_func_sp_drv);
12040}
12041
/* must be called after sriov-enable */
12043static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
12044{
12045 int cid_count = BNX2X_L2_MAX_CID(bp);
12046
12047 if (CNIC_SUPPORT(bp))
12048 cid_count += CNIC_CID_MAX;
12049 return roundup(cid_count, QM_CID_ROUND);
12050}
12051
12052
/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 *
 * @pdev: pci device
 * @cnic_cnt: number of SBs reserved for CNIC
 */
12058static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
12059 int cnic_cnt)
12060{
12061 int pos;
12062 u16 control;
12063
12064 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
12065
	/*
	 * If MSI-X is not supported - return number of SBs needed to support
	 * one fast path queue: one FP queue + SB for CNIC
	 */
12070 if (!pos)
12071 return 1 + cnic_cnt;
12072
	/*
	 * The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: number of all
	 * SBs without the default SB.
	 */
12079 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
12080 return control & PCI_MSIX_FLAGS_QSIZE;
12081}
12082
12083struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
12084
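/* PCI probe callback: estimate the CoS count from the board type, size
 * the netdev Rx/Tx queues from the MSI-X table, initialize the device
 * and bp, map the doorbell BAR and register the netdev.
 */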
12085static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
12086{
12087 struct net_device *dev = NULL;
12088 struct bnx2x *bp;
12089 int pcie_width, pcie_speed;
12090 int rc, max_non_def_sbs;
12091 int rx_count, tx_count, rss_count, doorbell_size;
12092 int cnic_cnt;
12093
12094
	/*
	 * An estimated maximum supported CoS number according to the chip
	 * version. We will try to roughly estimate the maximum number of
	 * CoSes this chip may support in order to minimize the memory
	 * allocated for Tx netdev_queue's. This number will be accurately
	 * calculated during the initialization of bp->max_cos based on the
	 * chip version AND chip revision in bnx2x_init_bp().
	 */
12102 u8 max_cos_est = 0;
12103
12104 switch (ent->driver_data) {
12105 case BCM57710:
12106 case BCM57711:
12107 case BCM57711E:
12108 max_cos_est = BNX2X_MULTI_TX_COS_E1X;
12109 break;
12110
12111 case BCM57712:
12112 case BCM57712_MF:
12113 max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
12114 break;
12115
12116 case BCM57800:
12117 case BCM57800_MF:
12118 case BCM57810:
12119 case BCM57810_MF:
12120 case BCM57840_O:
12121 case BCM57840_4_10:
12122 case BCM57840_2_20:
12123 case BCM57840_MFO:
12124 case BCM57840_MF:
12125 case BCM57811:
12126 case BCM57811_MF:
12127 max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
12128 break;
12129
12130 default:
12131 pr_err("Unknown board_type (%ld), aborting\n",
12132 ent->driver_data);
12133 return -ENODEV;
12134 }
12135
12136 cnic_cnt = 1;
12137 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
12138
12139 WARN_ON(!max_non_def_sbs);
12140
	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
12142 rss_count = max_non_def_sbs - cnic_cnt;
12143
	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
12145 rx_count = rss_count + cnic_cnt;
12146
	/*
	 * Maximum number of netdev Tx queues:
	 * Maximum TSS queues * max supported number of CoS + FCoE L2
	 */
12151 tx_count = rss_count * max_cos_est + cnic_cnt;
12152
	/* dev zeroed in init_etherdev */
12154 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
12155 if (!dev)
12156 return -ENOMEM;
12157
12158 bp = netdev_priv(dev);
12159
12160 bp->igu_sb_cnt = max_non_def_sbs;
12161 bp->msg_enable = debug;
12162 bp->cnic_support = cnic_cnt;
12163 bp->cnic_probe = bnx2x_cnic_probe;
12164
12165 pci_set_drvdata(pdev, dev);
12166
12167 rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
12168 if (rc < 0) {
12169 free_netdev(dev);
12170 return rc;
12171 }
12172
12173 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
12174 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
12175
12176 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
12177 tx_count, rx_count);
12178
12179 rc = bnx2x_init_bp(bp);
12180 if (rc)
12181 goto init_one_exit;
12182
	/*
	 * Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * L2 connections.
	 */
12187 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
12188 if (doorbell_size > pci_resource_len(pdev, 2)) {
12189 dev_err(&bp->pdev->dev,
12190 "Cannot map doorbells, bar size too small, aborting\n");
12191 rc = -ENOMEM;
12192 goto init_one_exit;
12193 }
12194 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12195 doorbell_size);
12196 if (!bp->doorbells) {
12197 dev_err(&bp->pdev->dev,
12198 "Cannot map doorbell space, aborting\n");
12199 rc = -ENOMEM;
12200 goto init_one_exit;
12201 }
12202
	/* calc qm_cid_count */
12204 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
12205
	/* disable FCoE L2 queue for E1x */
12207 if (CHIP_IS_E1x(bp))
12208 bp->flags |= NO_FCOE_FLAG;
12209
	/* disable FCoE for 57840 device, until FW supports it */
12211 switch (ent->driver_data) {
12212 case BCM57840_O:
12213 case BCM57840_4_10:
12214 case BCM57840_2_20:
12215 case BCM57840_MFO:
12216 case BCM57840_MF:
12217 bp->flags |= NO_FCOE_FLAG;
12218 }
12219
	/* Set bp->num_queues for MSI-X mode */
12221 bnx2x_set_num_queues(bp);
12222
	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed.
	 */
12226 bnx2x_set_int_mode(bp);
12227
12228 rc = register_netdev(dev);
12229 if (rc) {
12230 dev_err(&pdev->dev, "Cannot register net device\n");
12231 goto init_one_exit;
12232 }
12233
12234
12235 if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
12237 rtnl_lock();
12238 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12239 rtnl_unlock();
12240 }
12241
12242 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12243
12244 BNX2X_DEV_INFO(
12245 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12246 board_info[ent->driver_data].name,
12247 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12248 pcie_width,
12249 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
12250 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
12251 "5GHz (Gen2)" : "2.5GHz",
12252 dev->base_addr, bp->pdev->irq, dev->dev_addr);
12253
12254 return 0;
12255
12256init_one_exit:
12257 if (bp->regview)
12258 iounmap(bp->regview);
12259
12260 if (bp->doorbells)
12261 iounmap(bp->doorbells);
12262
12263 free_netdev(dev);
12264
12265 if (atomic_read(&pdev->enable_cnt) == 1)
12266 pci_release_regions(pdev);
12267
12268 pci_disable_device(pdev);
12269 pci_set_drvdata(pdev, NULL);
12270
12271 return rc;
12272}
12273
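/* PCI remove callback: unwind bnx2x_init_one() - unregister the netdev,
 * disable interrupts, unmap BARs and free driver memory.
 */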
12274static void bnx2x_remove_one(struct pci_dev *pdev)
12275{
12276 struct net_device *dev = pci_get_drvdata(pdev);
12277 struct bnx2x *bp;
12278
12279 if (!dev) {
12280 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12281 return;
12282 }
12283 bp = netdev_priv(dev);
12284
	/* Delete storage MAC address */
12286 if (!NO_FCOE(bp)) {
12287 rtnl_lock();
12288 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12289 rtnl_unlock();
12290 }
12291
12292#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
12294 bnx2x_dcbnl_update_applist(bp, true);
12295#endif
12296
12297 unregister_netdev(dev);
12298
	/* Power on: we can't let PCI layer write to us while we are in D3 */
12300 bnx2x_set_power_state(bp, PCI_D0);
12301
	/* Disable MSI/MSI-X */
12303 bnx2x_disable_msi(bp);
12304
	/* Power off */
12306 bnx2x_set_power_state(bp, PCI_D3hot);
12307
	/* Make sure RESET task is not scheduled before continuing */
12309 cancel_delayed_work_sync(&bp->sp_rtnl_task);
12310
12311 if (bp->regview)
12312 iounmap(bp->regview);
12313
12314 if (bp->doorbells)
12315 iounmap(bp->doorbells);
12316
12317 bnx2x_release_firmware(bp);
12318
12319 bnx2x_free_mem_bp(bp);
12320
12321 free_netdev(dev);
12322
12323 if (atomic_read(&pdev->enable_cnt) == 1)
12324 pci_release_regions(pdev);
12325
12326 pci_disable_device(pdev);
12327 pci_set_drvdata(pdev, NULL);
12328}
12329
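/* Minimal unload path used by the PCI error handlers: stop Tx and NAPI,
 * release IRQs and free rings without relying on a responsive device.
 */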
12330static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12331{
12332 int i;
12333
12334 bp->state = BNX2X_STATE_ERROR;
12335
12336 bp->rx_mode = BNX2X_RX_MODE_NONE;
12337
12338 if (CNIC_LOADED(bp))
12339 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
12340
	/* Stop Tx */
12342 bnx2x_tx_disable(bp);
12343
12344 bnx2x_netif_stop(bp, 0);
12345
12346 bnx2x_del_all_napi(bp);
12347 if (CNIC_LOADED(bp))
12348 bnx2x_del_all_napi_cnic(bp);
12349
12350 del_timer_sync(&bp->timer);
12351
12352 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
12353
	/* Release IRQs */
12355 bnx2x_free_irq(bp);
12356
	/* Free SKBs, SGEs, TPA pool and driver internals */
12358 bnx2x_free_skbs(bp);
12359
12360 for_each_rx_queue(bp, i)
12361 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12362
12363 bnx2x_free_mem(bp);
12364
12365 bp->state = BNX2X_STATE_CLOSED;
12366
12367 netif_carrier_off(bp->dev);
12368
12369 return 0;
12370}
12371
12372static void bnx2x_eeh_recover(struct bnx2x *bp)
12373{
12374 u32 val;
12375
12376 mutex_init(&bp->port.phy_mutex);
12377
12378
12379 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12380 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12381 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12382 BNX2X_ERR("BAD MCP validity signature\n");
12383}
12384
12385
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
12393static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12394 pci_channel_state_t state)
12395{
12396 struct net_device *dev = pci_get_drvdata(pdev);
12397 struct bnx2x *bp = netdev_priv(dev);
12398
12399 rtnl_lock();
12400
12401 netif_device_detach(dev);
12402
12403 if (state == pci_channel_io_perm_failure) {
12404 rtnl_unlock();
12405 return PCI_ERS_RESULT_DISCONNECT;
12406 }
12407
12408 if (netif_running(dev))
12409 bnx2x_eeh_nic_unload(bp);
12410
12411 pci_disable_device(pdev);
12412
12413 rtnl_unlock();
12414
	/* Request a slot reset */
12416 return PCI_ERS_RESULT_NEED_RESET;
12417}
12418
12419
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
12425static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12426{
12427 struct net_device *dev = pci_get_drvdata(pdev);
12428 struct bnx2x *bp = netdev_priv(dev);
12429
12430 rtnl_lock();
12431
12432 if (pci_enable_device(pdev)) {
12433 dev_err(&pdev->dev,
12434 "Cannot re-enable PCI device after reset\n");
12435 rtnl_unlock();
12436 return PCI_ERS_RESULT_DISCONNECT;
12437 }
12438
12439 pci_set_master(pdev);
12440 pci_restore_state(pdev);
12441
12442 if (netif_running(dev))
12443 bnx2x_set_power_state(bp, PCI_D0);
12444
12445 rtnl_unlock();
12446
12447 return PCI_ERS_RESULT_RECOVERED;
12448}
12449
12450
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
12457static void bnx2x_io_resume(struct pci_dev *pdev)
12458{
12459 struct net_device *dev = pci_get_drvdata(pdev);
12460 struct bnx2x *bp = netdev_priv(dev);
12461
12462 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12463 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
12464 return;
12465 }
12466
12467 rtnl_lock();
12468
12469 bnx2x_eeh_recover(bp);
12470
12471 if (netif_running(dev))
12472 bnx2x_nic_load(bp, LOAD_NORMAL);
12473
12474 netif_device_attach(dev);
12475
12476 rtnl_unlock();
12477}
12478
12479static const struct pci_error_handlers bnx2x_err_handler = {
12480 .error_detected = bnx2x_io_error_detected,
12481 .slot_reset = bnx2x_io_slot_reset,
12482 .resume = bnx2x_io_resume,
12483};
12484
12485static struct pci_driver bnx2x_pci_driver = {
12486 .name = DRV_MODULE_NAME,
12487 .id_table = bnx2x_pci_tbl,
12488 .probe = bnx2x_init_one,
12489 .remove = bnx2x_remove_one,
12490 .suspend = bnx2x_suspend,
12491 .resume = bnx2x_resume,
12492 .err_handler = &bnx2x_err_handler,
12493};
12494
12495static int __init bnx2x_init(void)
12496{
12497 int ret;
12498
12499 pr_info("%s", version);
12500
12501 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12502 if (bnx2x_wq == NULL) {
12503 pr_err("Cannot create workqueue\n");
12504 return -ENOMEM;
12505 }
12506
12507 ret = pci_register_driver(&bnx2x_pci_driver);
12508 if (ret) {
12509 pr_err("Cannot register driver\n");
12510 destroy_workqueue(bnx2x_wq);
12511 }
12512 return ret;
12513}
12514
12515static void __exit bnx2x_cleanup(void)
12516{
12517 struct list_head *pos, *q;
12518 pci_unregister_driver(&bnx2x_pci_driver);
12519
12520 destroy_workqueue(bnx2x_wq);
12521
	/* Free globally allocated resources */
12523 list_for_each_safe(pos, q, &bnx2x_prev_list) {
12524 struct bnx2x_prev_path_list *tmp =
12525 list_entry(pos, struct bnx2x_prev_path_list, list);
12526 list_del(pos);
12527 kfree(tmp);
12528 }
12529}
12530
12531void bnx2x_notify_link_changed(struct bnx2x *bp)
12532{
12533 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
12534}
12535
12536module_init(bnx2x_init);
12537module_exit(bnx2x_cleanup);
12538
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
12548static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
12549{
12550 unsigned long ramrod_flags = 0;
12551
12552 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12553 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
12554 &bp->iscsi_l2_mac_obj, true,
12555 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
12556}
12557
/* count denotes the number of new completions we have seen */
12559static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12560{
12561 struct eth_spe *spe;
12562 int cxt_index, cxt_offset;
12563
12564#ifdef BNX2X_STOP_ON_ERROR
12565 if (unlikely(bp->panic))
12566 return;
12567#endif
12568
12569 spin_lock_bh(&bp->spq_lock);
12570 BUG_ON(bp->cnic_spq_pending < count);
12571 bp->cnic_spq_pending -= count;
12572
12573
12574 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
12575 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
12576 & SPE_HDR_CONN_TYPE) >>
12577 SPE_HDR_CONN_TYPE_SHIFT;
12578 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
12579 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
12580
		/*
		 * Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
12584 if (type == ETH_CONNECTION_TYPE) {
12585 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
12586 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
12587 ILT_PAGE_CIDS;
12588 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
12589 (cxt_index * ILT_PAGE_CIDS);
12590 bnx2x_set_ctx_validation(bp,
12591 &bp->context[cxt_index].
12592 vcxt[cxt_offset].eth,
12593 BNX2X_ISCSI_ETH_CID(bp));
12594 }
12595 }
12596
		/*
		 * There may be not more than 8 L2, not more than 8 L5 SPEs
		 * in the air. We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
12603 if (type == ETH_CONNECTION_TYPE) {
12604 if (!atomic_read(&bp->cq_spq_left))
12605 break;
12606 else
12607 atomic_dec(&bp->cq_spq_left);
12608 } else if (type == NONE_CONNECTION_TYPE) {
12609 if (!atomic_read(&bp->eq_spq_left))
12610 break;
12611 else
12612 atomic_dec(&bp->eq_spq_left);
12613 } else if ((type == ISCSI_CONNECTION_TYPE) ||
12614 (type == FCOE_CONNECTION_TYPE)) {
12615 if (bp->cnic_spq_pending >=
12616 bp->cnic_eth_dev.max_kwqe_pending)
12617 break;
12618 else
12619 bp->cnic_spq_pending++;
12620 } else {
12621 BNX2X_ERR("Unknown SPE type: %d\n", type);
12622 bnx2x_panic();
12623 break;
12624 }
12625
12626 spe = bnx2x_sp_get_next(bp);
12627 *spe = *bp->cnic_kwq_cons;
12628
12629 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
12630 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12631
12632 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12633 bp->cnic_kwq_cons = bp->cnic_kwq;
12634 else
12635 bp->cnic_kwq_cons++;
12636 }
12637 bnx2x_sp_prod_update(bp);
12638 spin_unlock_bh(&bp->spq_lock);
12639}
12640
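/* CNIC entry point for posting up to @count kwqes; they are buffered on
 * the cnic_kwq ring and forwarded to the slow-path queue as credit
 * allows. Returns the number of kwqes accepted.
 */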
12641static int bnx2x_cnic_sp_queue(struct net_device *dev,
12642 struct kwqe_16 *kwqes[], u32 count)
12643{
12644 struct bnx2x *bp = netdev_priv(dev);
12645 int i;
12646
12647#ifdef BNX2X_STOP_ON_ERROR
12648 if (unlikely(bp->panic)) {
12649 BNX2X_ERR("Can't post to SP queue while panic\n");
12650 return -EIO;
12651 }
12652#endif
12653
12654 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
12655 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
12656 BNX2X_ERR("Handling parity error recovery. Try again later\n");
12657 return -EAGAIN;
12658 }
12659
12660 spin_lock_bh(&bp->spq_lock);
12661
12662 for (i = 0; i < count; i++) {
12663 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12664
12665 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12666 break;
12667
12668 *bp->cnic_kwq_prod = *spe;
12669
12670 bp->cnic_kwq_pending++;
12671
12672 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
12673 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12674 spe->data.update_data_addr.hi,
12675 spe->data.update_data_addr.lo,
12676 bp->cnic_kwq_pending);
12677
12678 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12679 bp->cnic_kwq_prod = bp->cnic_kwq;
12680 else
12681 bp->cnic_kwq_prod++;
12682 }
12683
12684 spin_unlock_bh(&bp->spq_lock);
12685
12686 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12687 bnx2x_cnic_sp_post(bp, 0);
12688
12689 return i;
12690}
12691
12692static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12693{
12694 struct cnic_ops *c_ops;
12695 int rc = 0;
12696
12697 mutex_lock(&bp->cnic_mutex);
12698 c_ops = rcu_dereference_protected(bp->cnic_ops,
12699 lockdep_is_held(&bp->cnic_mutex));
12700 if (c_ops)
12701 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12702 mutex_unlock(&bp->cnic_mutex);
12703
12704 return rc;
12705}
12706
12707static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12708{
12709 struct cnic_ops *c_ops;
12710 int rc = 0;
12711
12712 rcu_read_lock();
12713 c_ops = rcu_dereference(bp->cnic_ops);
12714 if (c_ops)
12715 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12716 rcu_read_unlock();
12717
12718 return rc;
12719}
12720
12721
/*
 * for commands that have no data
 */
12724int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12725{
12726 struct cnic_ctl_info ctl = {0};
12727
12728 ctl.cmd = cmd;
12729
12730 return bnx2x_cnic_ctl_send(bp, &ctl);
12731}
12732
12733static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
12734{
12735 struct cnic_ctl_info ctl = {0};
12736
	/* first we tell CNIC and only then we count this as a completion */
12738 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12739 ctl.data.comp.cid = cid;
12740 ctl.data.comp.error = err;
12741
12742 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12743 bnx2x_cnic_sp_post(bp, 0);
12744}
12745
12746
/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
12752static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
12753{
12754 unsigned long accept_flags = 0, ramrod_flags = 0;
12755 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
12756 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
12757
12758 if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * since this is the only way for the UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
12765 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
12766 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
12767 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
12768 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
12769
		/* Clear STOP_PENDING bit if START is requested */
12771 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
12772
12773 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
12774 } else
		/* Clear START_PENDING bit if STOP is requested */
12776 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
12777
12778 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
12779 set_bit(sched_state, &bp->sp_state);
12780 else {
12781 __set_bit(RAMROD_RX, &ramrod_flags);
12782 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
12783 ramrod_flags);
12784 }
12785}
12786
12787
12788static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12789{
12790 struct bnx2x *bp = netdev_priv(dev);
12791 int rc = 0;
12792
12793 switch (ctl->cmd) {
12794 case DRV_CTL_CTXTBL_WR_CMD: {
12795 u32 index = ctl->data.io.offset;
12796 dma_addr_t addr = ctl->data.io.dma_addr;
12797
12798 bnx2x_ilt_wr(bp, index, addr);
12799 break;
12800 }
12801
12802 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
12803 int count = ctl->data.credit.credit_count;
12804
12805 bnx2x_cnic_sp_post(bp, count);
12806 break;
12807 }
12808
	/* rtnl_lock is held.  */
12810 case DRV_CTL_START_L2_CMD: {
12811 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12812 unsigned long sp_bits = 0;
12813
		/* Configure the iSCSI classification object */
12815 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
12816 cp->iscsi_l2_client_id,
12817 cp->iscsi_l2_cid, BP_FUNC(bp),
12818 bnx2x_sp(bp, mac_rdata),
12819 bnx2x_sp_mapping(bp, mac_rdata),
12820 BNX2X_FILTER_MAC_PENDING,
12821 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
12822 &bp->macs_pool);
12823
		/* Set iSCSI MAC address */
12825 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
12826 if (rc)
12827 break;
12828
12829 mmiowb();
12830 barrier();
12831
12832
		/* Start accepting on iSCSI L2 ring */
12834 netif_addr_lock_bh(dev);
12835 bnx2x_set_iscsi_eth_rx_mode(bp, true);
12836 netif_addr_unlock_bh(dev);
12837
		/* bits to wait on */
12839 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
12840 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
12841
12842 if (!bnx2x_wait_sp_comp(bp, sp_bits))
12843 BNX2X_ERR("rx_mode completion timed out!\n");
12844
12845 break;
12846 }
12847
	/* rtnl_lock is held.  */
12849 case DRV_CTL_STOP_L2_CMD: {
12850 unsigned long sp_bits = 0;
12851
		/* Stop accepting on iSCSI L2 ring */
12853 netif_addr_lock_bh(dev);
12854 bnx2x_set_iscsi_eth_rx_mode(bp, false);
12855 netif_addr_unlock_bh(dev);
12856
		/* bits to wait on */
12858 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
12859 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
12860
12861 if (!bnx2x_wait_sp_comp(bp, sp_bits))
12862 BNX2X_ERR("rx_mode completion timed out!\n");
12863
12864 mmiowb();
12865 barrier();
12866
		/* Unset iSCSI L2 MAC */
12868 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
12869 BNX2X_ISCSI_ETH_MAC, true);
12870 break;
12871 }
12872 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
12873 int count = ctl->data.credit.credit_count;
12874
12875 smp_mb__before_atomic_inc();
12876 atomic_add(count, &bp->cq_spq_left);
12877 smp_mb__after_atomic_inc();
12878 break;
12879 }
12880 case DRV_CTL_ULP_REGISTER_CMD: {
12881 int ulp_type = ctl->data.register_data.ulp_type;
12882
12883 if (CHIP_IS_E3(bp)) {
12884 int idx = BP_FW_MB_IDX(bp);
12885 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
12886 int path = BP_PATH(bp);
12887 int port = BP_PORT(bp);
12888 int i;
12889 u32 scratch_offset;
12890 u32 *host_addr;
12891
			/* first write capability to shmem2 */
12893 if (ulp_type == CNIC_ULP_ISCSI)
12894 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
12895 else if (ulp_type == CNIC_ULP_FCOE)
12896 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
12897 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
12898
12899 if ((ulp_type != CNIC_ULP_FCOE) ||
12900 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
12901 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
12902 break;
12903
			/* if NCSI OEM data is published, copy FCoE
			 * capabilities to the MCP scratchpad
			 */
12905 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
12906 if (!scratch_offset)
12907 break;
12908 scratch_offset += offsetof(struct glob_ncsi_oem_data,
12909 fcoe_features[path][port]);
12910 host_addr = (u32 *) &(ctl->data.register_data.
12911 fcoe_features);
12912 for (i = 0; i < sizeof(struct fcoe_capabilities);
12913 i += 4)
12914 REG_WR(bp, scratch_offset + i,
12915 *(host_addr + i/4));
12916 }
12917 break;
12918 }
12919
12920 case DRV_CTL_ULP_UNREGISTER_CMD: {
12921 int ulp_type = ctl->data.ulp_type;
12922
12923 if (CHIP_IS_E3(bp)) {
12924 int idx = BP_FW_MB_IDX(bp);
12925 u32 cap;
12926
12927 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
12928 if (ulp_type == CNIC_ULP_ISCSI)
12929 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
12930 else if (ulp_type == CNIC_ULP_FCOE)
12931 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
12932 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
12933 }
12934 break;
12935 }
12936
12937 default:
12938 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12939 rc = -EINVAL;
12940 }
12941
12942 return rc;
12943}
12944
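/* Describe to CNIC which interrupt resources to use: a dedicated MSI-X
 * vector and status block when MSI-X is active, plus the default status
 * block for slow-path events.
 */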
12945void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12946{
12947 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12948
12949 if (bp->flags & USING_MSIX_FLAG) {
12950 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12951 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12952 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12953 } else {
12954 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12955 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12956 }
12957 if (!CHIP_IS_E1x(bp))
12958 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
12959 else
12960 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
12961
12962 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
12963 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
12964 cp->irq_arr[1].status_blk = bp->def_status_blk;
12965 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12966 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
12967
12968 cp->num_irq = 2;
12969}
12970
12971void bnx2x_setup_cnic_info(struct bnx2x *bp)
12972{
12973 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12974
12975
12976 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
12977 bnx2x_cid_ilt_lines(bp);
12978 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
12979 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12980 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12981
12982 if (NO_ISCSI_OOO(bp))
12983 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
12984}
12985
12986static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12987 void *data)
12988{
12989 struct bnx2x *bp = netdev_priv(dev);
12990 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12991 int rc;
12992
12993 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
12994
12995 if (ops == NULL) {
12996 BNX2X_ERR("NULL ops received\n");
12997 return -EINVAL;
12998 }
12999
13000 if (!CNIC_SUPPORT(bp)) {
13001 BNX2X_ERR("Can't register CNIC when not supported\n");
13002 return -EOPNOTSUPP;
13003 }
13004
13005 if (!CNIC_LOADED(bp)) {
13006 rc = bnx2x_load_cnic(bp);
13007 if (rc) {
13008 BNX2X_ERR("CNIC-related load failed\n");
13009 return rc;
13010 }
13012 }
13013
13014 bp->cnic_enabled = true;
13015
13016 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13017 if (!bp->cnic_kwq)
13018 return -ENOMEM;
13019
13020 bp->cnic_kwq_cons = bp->cnic_kwq;
13021 bp->cnic_kwq_prod = bp->cnic_kwq;
13022 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13023
13024 bp->cnic_spq_pending = 0;
13025 bp->cnic_kwq_pending = 0;
13026
13027 bp->cnic_data = data;
13028
13029 cp->num_irq = 0;
13030 cp->drv_state |= CNIC_DRV_STATE_REGD;
13031 cp->iro_arr = bp->iro_arr;
13032
13033 bnx2x_setup_cnic_irq_info(bp);
13034
13035 rcu_assign_pointer(bp->cnic_ops, ops);
13036
13037 return 0;
13038}
13039
13040static int bnx2x_unregister_cnic(struct net_device *dev)
13041{
13042 struct bnx2x *bp = netdev_priv(dev);
13043 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13044
13045 mutex_lock(&bp->cnic_mutex);
13046 cp->drv_state = 0;
13047 RCU_INIT_POINTER(bp->cnic_ops, NULL);
13048 mutex_unlock(&bp->cnic_mutex);
13049 synchronize_rcu();
13050 kfree(bp->cnic_kwq);
13051 bp->cnic_kwq = NULL;
13052
13053 return 0;
13054}
13055
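/* Discovery entry point for the cnic module: returns a cnic_eth_dev
 * descriptor with the resources and callbacks CNIC needs, or NULL when
 * both iSCSI and FCoE are disabled on this device.
 */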
13056struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13057{
13058 struct bnx2x *bp = netdev_priv(dev);
13059 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13060
	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate CNIC that it should not try to work
	 * with this device.
	 */
13065 if (NO_ISCSI(bp) && NO_FCOE(bp))
13066 return NULL;
13067
13068 cp->drv_owner = THIS_MODULE;
13069 cp->chip_id = CHIP_ID(bp);
13070 cp->pdev = bp->pdev;
13071 cp->io_base = bp->regview;
13072 cp->io_base2 = bp->doorbells;
13073 cp->max_kwqe_pending = 8;
13074 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
13075 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
13076 bnx2x_cid_ilt_lines(bp);
13077 cp->ctx_tbl_len = CNIC_ILT_LINES;
13078 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
13079 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13080 cp->drv_ctl = bnx2x_drv_ctl;
13081 cp->drv_register_cnic = bnx2x_register_cnic;
13082 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13083 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
13084 cp->iscsi_l2_client_id =
13085 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
13086 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
13087
13088 if (NO_ISCSI_OOO(bp))
13089 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
13090
13091 if (NO_ISCSI(bp))
13092 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
13093
13094 if (NO_FCOE(bp))
13095 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
13096
13097 BNX2X_DEV_INFO(
13098 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
13099 cp->ctx_blk_size,
13100 cp->ctx_tbl_offset,
13101 cp->ctx_tbl_len,
13102 cp->starting_cid);
13103 return cp;
13104}
13105
13106
13107