1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h>
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
40#include <linux/if_vlan.h>
41#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/crc32c.h>
48#include <linux/prefetch.h>
49#include <linux/zlib.h>
50#include <linux/io.h>
51#include <linux/stringify.h>
52
53#define BNX2X_MAIN
54#include "bnx2x.h"
55#include "bnx2x_init.h"
56#include "bnx2x_init_ops.h"
57#include "bnx2x_cmn.h"
58#include "bnx2x_dcb.h"
59
60#include <linux/firmware.h>
61#include "bnx2x_fw_file_hdr.h"
62
63#define FW_FILE_VERSION \
64 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
65 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
66 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
67 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
68#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
69#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71
72
/* netdev watchdog timeout for this driver: 5 seconds */
#define TX_TIMEOUT		(5*HZ)

/* Banner printed once at module load (see version string users below) */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Firmware images requested at runtime; names are built from the
 * BCM_5710_FW_* version macros via FW_FILE_VERSION above. */
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

/* Multi-queue operation: enabled by default */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* Number of RSS queues; 0 means "one per CPU" (decided elsewhere) */
int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

/* Non-zero disables TPA (HW LRO aggregation) */
static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* Force interrupt mode instead of the MSI-X default: 1 = INTx, 2 = MSI */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
				"(1 INT#x; 2 MSI)");

/* Non-zero: generate pause frames when host RX rings are exhausted */
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

/* Debug aid: poll status blocks instead of relying on interrupts */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* Debug aid: force PCIe Max Read Request Size; -1 leaves HW default */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* Initial netif msglevel bitmask */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* Driver-private workqueue for slow-path work items */
static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
/* FCoE "All-ENode" well-known multicast MAC address */
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif
128
/* Board index; used as the driver_data in bnx2x_pci_tbl and as the
 * index into board_info[] below — keep the two in sync. */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* Human-readable board names, indexed by enum bnx2x_board_type */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};
147
/* Fallback device IDs for kernels whose pci_ids.h predates 57712 support */
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

/* PCI probe table; driver_data carries the enum bnx2x_board_type index */
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
165
166
167
168
169
/* Write a 64-bit DMA address into two consecutive 32-bit registers:
 * low dword at 'addr', high dword at 'addr + 4'. */
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}
176
177static inline void __storm_memset_fill(struct bnx2x *bp,
178 u32 addr, size_t size, u32 val)
179{
180 int i;
181 for (i = 0; i < size/4; i++)
182 REG_WR(bp, addr + (i * 4), val);
183}
184
/* Zero the USTORM per-client statistics block for (port, stat_id) */
static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

/* Zero the TSTORM per-client statistics block for (port, stat_id) */
static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

/* Zero the XSTORM per-client statistics block for (port, stat_id) */
static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}
217
218
/* Program the slow-path queue (SPQ) page base address for function
 * 'abs_fid' into XSTORM fast memory. */
static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

/* Set the E1H outer-VLAN (ov) tag for function 'abs_fid' in XSTORM memory */
static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}
232
/* Copy the common TSTORM function configuration for 'abs_fid' into
 * storm internal memory, dword by dword. */
static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

/* Write the statistics indication flags for 'abs_fid' to XSTORM memory */
static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

/* Write the statistics indication flags for 'abs_fid' to TSTORM memory */
static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

/* Write the statistics indication flags for 'abs_fid' to USTORM memory */
static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

/* Write the statistics indication flags for 'abs_fid' to CSTORM memory */
static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}
288
/* Program the DMA address used by XSTORM for stats query results */
static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

/* Program the DMA address used by TSTORM for stats query results */
static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

/* Program the DMA address used by USTORM for stats query results */
static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

/* Program the DMA address used by CSTORM for stats query results */
static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}
324
/* Record the VF-to-PF mapping for 'abs_fid' in all four storm memories */
static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

/* Enable/disable function 'abs_fid' in all four storm memories */
static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
350
/* Copy the event-queue ring descriptor for 'pfid' into CSTORM memory */
static inline void storm_memset_eq_data(struct bnx2x *bp,
				struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

/* Update the event-queue producer index for 'pfid' (16-bit write) */
static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}
368
/* Set the host-coalescing timeout (in 'ticks') for index 'sb_index' of
 * status block 'fw_sb_id'. The index_data[] offset inside the status block
 * data differs between E2 and E1x chips, hence the offsetof() selection. */
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}
/* Enable or disable host coalescing for index 'sb_index' of status block
 * 'fw_sb_id' via a read-modify-write of the per-index flags word. */
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear the HC_ENABLED bit, then set it only when enabling */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}
407
408
409
410
/* Indirect GRC register write through PCI config space: program the GRC
 * address window, write the data, then restore the window to a benign
 * offset so later plain config accesses don't hit device registers. */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

/* Indirect GRC register read; same window protocol as bnx2x_reg_wr_ind() */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
430
/* Format fragments for DMAE debug prints (currently informational only) */
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

/* Debug-print a DMAE command, choosing the format by the source/destination
 * type encoded in the opcode. GRC addresses are stored shifted left by 2,
 * hence the '>> 2' when printing them. */
static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		/* "no destination" variants */
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}
500
/* "GO" doorbell register for each of the 16 DMAE command slots,
 * indexed by command slot number (see bnx2x_post_dmae()). */
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
507
508
/* Copy a DMAE command into command-memory slot 'idx' dword by dword,
 * then ring that slot's GO doorbell to start the transfer. */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
523
524u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
525{
526 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
527 DMAE_CMD_C_ENABLE);
528}
529
530u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
531{
532 return opcode & ~DMAE_CMD_SRC_RESET;
533}
534
/* Build a DMAE opcode for a transfer from 'src_type' to 'dst_type'.
 * Encodes port, VN, error policy and the endianness swap mode, and
 * optionally enables completion of 'comp_type' when 'with_comp' is set. */
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	/* reset both source and destination state machines */
	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

	/* byte-swap mode depends on host endianness */
#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}
559
/* Initialize a DMAE command for a transfer that reports its completion
 * to the slow-path 'wb_comp' word in host memory (PCI completion). */
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* opcode with PCI completion enabled */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* completion is written to wb_comp; DMAE_COMP_VAL marks success */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
575
576
/* Post a prepared DMAE command and busy-wait for its PCI completion.
 * Serializes all DMAE usage via bp->dmae_mutex. Returns 0 on success,
 * DMAE_TIMEOUT if the completion never arrives, or DMAE_PCI_ERROR if
 * the device reported a PCI error in the completion word. */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	/* emulation/FPGA ("slow") chips get a much longer poll budget */
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* only one DMAE transaction in flight at a time */
	mutex_lock(&bp->dmae_mutex);

	/* clear the completion word before posting */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* poll for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}
623
/* DMA 'len32' dwords from host memory at 'dma_addr' to device GRC address
 * 'dst_addr'. Falls back to indirect register writes (from the slow-path
 * wb_data buffer) while DMAE is not yet operational. */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set up a PCI -> GRC command with completion */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* GRC destination address is stored shifted right by 2 */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* post and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
653
/* DMA 'len32' dwords from device GRC address 'src_addr' into the
 * slow-path wb_data buffer. Falls back to indirect register reads while
 * DMAE is not yet operational. */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set up a GRC -> PCI command with completion */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* GRC source address is stored shifted right by 2 */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* post and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
684
685static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
686 u32 addr, u32 len)
687{
688 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
689 int offset = 0;
690
691 while (len > dmae_wr_max) {
692 bnx2x_write_dmae(bp, phys_addr + offset,
693 addr + offset, dmae_wr_max);
694 offset += dmae_wr_max * 4;
695 len -= dmae_wr_max;
696 }
697
698 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
699}
700
701
/* Wide-bus write: write a 64-bit value (hi:lo) to 'reg' via DMAE */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
710
#ifdef USE_WB_RD
/* Wide-bus read: read a 64-bit value from 'reg' via DMAE.
 * Compiled out unless USE_WB_RD is defined (currently unused). */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
721
/* Scan the assert lists of all four storm processors (X, T, C, U) and
 * print any firmware asserts found. Each list entry is four dwords;
 * scanning stops at the first entry whose first dword still holds
 * COMMON_ASM_INVALID_ASSERT_OPCODE (i.e. an unused slot).
 * Returns the total number of asserts found. */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
842
/* Dump the MCP firmware trace buffer to the kernel log. The trace lives
 * in the MCP scratchpad; the word just below shmem holds a marker used
 * to split the circular buffer into its two in-order halves, which are
 * printed marker->end then start->marker. No-op when there is no MCP. */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	/* pick the shmem base of this PCI path (E2 has two paths) */
	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* translate the marker into a GRC scratchpad address
	 * (NOTE(review): the 0x08000000 adjustment appears to convert an
	 * MCP-internal address to a GRC offset — confirm against HW docs) */
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	/* trace text is read 8 dwords at a time; data[8] NUL-terminates it */
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
880
/* Dump driver and device state to the kernel log on a fatal error:
 * default status block, per-queue RX/TX indices and status block data,
 * and (with BNX2X_STOP_ON_ERROR) ring contents around the consumers;
 * ends with the firmware trace and the storm assert lists.
 * Statistics are disabled first so the dump is not raced by stats DMAE. */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* slow-path / default status block indices */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
		       bp->def_status_blk->sp_sb.index_values[i],
		       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	/* read the SP status block data from CSTORM, dword by dword */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		 "pf_id(0x%x)  vnic_id(0x%x)  "
		 "vf_id(0x%x)  vf_valid (0x%x)\n",
	       sp_sb_data.igu_sb_id,
	       sp_sb_data.igu_seg_id,
	       sp_sb_data.p_func.pf_id,
	       sp_sb_data.p_func.vnic_id,
	       sp_sb_data.p_func.vf_id,
	       sp_sb_data.p_func.vf_valid);

	/* per-queue state */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		/* status-block layout differs between E2 and E1x chips */
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* RX ring indices */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* TX ring indices */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* the FCoE queue has no regular status block to dump */
#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
			       fp->sb_running_index[j],
			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
			       fp->sb_index_values[j],
			       (j == loop - 1) ? ")" : " ");

		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* read the queue's status block data from CSTORM */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* state machines */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
			       "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
			       "time_to_expire (0x%x) "
			       "timer_value(0x%x)\n", j,
			       hc_sm_p[j].__flags,
			       hc_sm_p[j].igu_sb_id,
			       hc_sm_p[j].igu_seg_id,
			       hc_sm_p[j].time_to_expire,
			       hc_sm_p[j].timer_value);
		}

		/* per-index coalescing data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
					 "timeout (0x%x)\n", j,
			       hc_index_p[j].flags,
			       hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* dump ring contents around the last known consumer positions */

	/* RX rings */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* TX rings */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
1101
/* Enable interrupts through the HC (host coalescing) block, configuring
 * it for MSI-X, MSI, or INTx according to the flags in bp->flags, and
 * program the attention trailing/leading edge registers. */
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			/* INTx on non-E1: write once with MSI/MSI-X enabled,
			 * then clear that bit for the final write below */
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	/* ensure the config write reached the device before proceeding */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge: in MF mode, own VN bit +
		 * attention bits for the PMF; otherwise all bits */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* flush posted writes */
	mmiowb();
}
1166
/* Enable interrupts in the IGU block.
 *
 * Counterpart of bnx2x_hc_int_enable() for chips whose interrupt block
 * is the IGU (see bnx2x_int_enable()): programs IGU_REG_PF_CONFIGURATION
 * for the current interrupt mode and opens the edge latch masks.
 */
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		/* MSI-X: no INTx line, no single ISR */
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		/* MSI: single ISR with MSI enable, INTx off */
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		/* INTx: single ISR over the interrupt line, no MSI */
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	/* order the configuration write before the edge latch writes */
	barrier();

	/* init leading/trailing edge attention masks (see the matching
	 * logic in bnx2x_hc_int_enable())
	 */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* extra PMF-only bits */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* make sure all writes are posted before returning */
	mmiowb();
}
1217
1218void bnx2x_int_enable(struct bnx2x *bp)
1219{
1220 if (bp->common.int_block == INT_BLOCK_HC)
1221 bnx2x_hc_int_enable(bp);
1222 else
1223 bnx2x_igu_int_enable(bp);
1224}
1225
1226static void bnx2x_hc_int_disable(struct bnx2x *bp)
1227{
1228 int port = BP_PORT(bp);
1229 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1230 u32 val = REG_RD(bp, addr);
1231
1232
1233
1234
1235
1236
1237 if (CHIP_IS_E1(bp)) {
1238
1239
1240
1241
1242 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1243
1244 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1245 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1246 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1247 } else
1248 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1249 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1250 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1251 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1252
1253 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1254 val, port, addr);
1255
1256
1257 mmiowb();
1258
1259 REG_WR(bp, addr, val);
1260 if (REG_RD(bp, addr) != val)
1261 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1262}
1263
/* Disable interrupts at the IGU block and verify the write took effect. */
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	/* clear MSI/MSI-X, INTx line and attention enables */
	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes before masking interrupts */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	/* read back to verify the chip accepted the new value */
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1281
1282static void bnx2x_int_disable(struct bnx2x *bp)
1283{
1284 if (bp->common.int_block == INT_BLOCK_HC)
1285 bnx2x_hc_int_disable(bp);
1286 else
1287 bnx2x_igu_int_disable(bp);
1288}
1289
/* Mask interrupts and wait until every in-flight handler has finished.
 *
 * @disable_hw: when non-zero, also disable interrupt generation at the
 *              chip via bnx2x_int_disable().
 *
 * Raising intr_sem makes bnx2x_interrupt() bail out early; after the
 * synchronize_irq() calls no handler can still be running, and finally
 * any pending slowpath work is cancelled/flushed.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling in the ISRs */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* publish the new intr_sem before syncing the IRQs */

	if (disable_hw)
		/* prevent the HW from generating further interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;	/* entry 0 already synced above */
#ifdef BCM_CNIC
		offset++;	/* one extra vector reserved for CNIC */
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
1319
1320
1321
1322
1323
1324
1325
1326
1327static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1328{
1329 u32 lock_status;
1330 u32 resource_bit = (1 << resource);
1331 int func = BP_FUNC(bp);
1332 u32 hw_lock_control_reg;
1333
1334 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1335
1336
1337 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1338 DP(NETIF_MSG_HW,
1339 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1340 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1341 return false;
1342 }
1343
1344 if (func <= 5)
1345 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1346 else
1347 hw_lock_control_reg =
1348 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1349
1350
1351 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1352 lock_status = REG_RD(bp, hw_lock_control_reg);
1353 if (lock_status & resource_bit)
1354 return true;
1355
1356 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1357 return false;
1358}
1359
1360#ifdef BCM_CNIC
1361static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1362#endif
1363
1364void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1365 union eth_rx_cqe *rr_cqe)
1366{
1367 struct bnx2x *bp = fp->bp;
1368 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1369 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1370
1371 DP(BNX2X_MSG_SP,
1372 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1373 fp->index, cid, command, bp->state,
1374 rr_cqe->ramrod_cqe.ramrod_type);
1375
1376 switch (command | fp->state) {
1377 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1378 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1379 fp->state = BNX2X_FP_STATE_OPEN;
1380 break;
1381
1382 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1383 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1384 fp->state = BNX2X_FP_STATE_HALTED;
1385 break;
1386
1387 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1388 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1389 fp->state = BNX2X_FP_STATE_TERMINATED;
1390 break;
1391
1392 default:
1393 BNX2X_ERR("unexpected MC reply (%d) "
1394 "fp[%d] state is %x\n",
1395 command, fp->index, fp->state);
1396 break;
1397 }
1398
1399 smp_mb__before_atomic_inc();
1400 atomic_inc(&bp->spq_left);
1401
1402 smp_wmb();
1403
1404 return;
1405}
1406
/* Legacy INTx/MSI interrupt service routine (single vector).
 *
 * Acks and reads the aggregated status, schedules NAPI for each eth
 * queue whose status bit is set, forwards the CNIC bit to the CNIC
 * handler and defers slowpath events (bit 0x1) to sp_task.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupts were disabled via
	 * bnx2x_int_disable_sync() (intr_sem raised)
	 */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* each eth queue's status bit, offset past the CNIC slot */
		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* warm up the data NAPI will touch first */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* cnic_ops is RCU-protected; may have been unregistered */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		/* slowpath events are handled in process context */
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1486{
1487 u32 lock_status;
1488 u32 resource_bit = (1 << resource);
1489 int func = BP_FUNC(bp);
1490 u32 hw_lock_control_reg;
1491 int cnt;
1492
1493
1494 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1495 DP(NETIF_MSG_HW,
1496 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1497 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1498 return -EINVAL;
1499 }
1500
1501 if (func <= 5) {
1502 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1503 } else {
1504 hw_lock_control_reg =
1505 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1506 }
1507
1508
1509 lock_status = REG_RD(bp, hw_lock_control_reg);
1510 if (lock_status & resource_bit) {
1511 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1512 lock_status, resource_bit);
1513 return -EEXIST;
1514 }
1515
1516
1517 for (cnt = 0; cnt < 1000; cnt++) {
1518
1519 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1520 lock_status = REG_RD(bp, hw_lock_control_reg);
1521 if (lock_status & resource_bit)
1522 return 0;
1523
1524 msleep(5);
1525 }
1526 DP(NETIF_MSG_HW, "Timeout\n");
1527 return -EAGAIN;
1528}
1529
1530int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1531{
1532 u32 lock_status;
1533 u32 resource_bit = (1 << resource);
1534 int func = BP_FUNC(bp);
1535 u32 hw_lock_control_reg;
1536
1537 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1538
1539
1540 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1541 DP(NETIF_MSG_HW,
1542 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1543 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1544 return -EINVAL;
1545 }
1546
1547 if (func <= 5) {
1548 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1549 } else {
1550 hw_lock_control_reg =
1551 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1552 }
1553
1554
1555 lock_status = REG_RD(bp, hw_lock_control_reg);
1556 if (!(lock_status & resource_bit)) {
1557 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1558 lock_status, resource_bit);
1559 return -EFAULT;
1560 }
1561
1562 REG_WR(bp, hw_lock_control_reg, resource_bit);
1563 return 0;
1564}
1565
1566
1567int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1568{
1569
1570 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1571 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1572 int gpio_shift = gpio_num +
1573 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1574 u32 gpio_mask = (1 << gpio_shift);
1575 u32 gpio_reg;
1576 int value;
1577
1578 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1579 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1580 return -EINVAL;
1581 }
1582
1583
1584 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1585
1586
1587 if ((gpio_reg & gpio_mask) == gpio_mask)
1588 value = 1;
1589 else
1590 value = 0;
1591
1592 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1593
1594 return value;
1595}
1596
1597int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1598{
1599
1600 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1601 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1602 int gpio_shift = gpio_num +
1603 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1604 u32 gpio_mask = (1 << gpio_shift);
1605 u32 gpio_reg;
1606
1607 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1608 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1609 return -EINVAL;
1610 }
1611
1612 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1613
1614 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1615
1616 switch (mode) {
1617 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1618 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1619 gpio_num, gpio_shift);
1620
1621 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1622 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1623 break;
1624
1625 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1626 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1627 gpio_num, gpio_shift);
1628
1629 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1630 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1631 break;
1632
1633 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1634 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1635 gpio_num, gpio_shift);
1636
1637 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1638 break;
1639
1640 default:
1641 break;
1642 }
1643
1644 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1645 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1646
1647 return 0;
1648}
1649
1650int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1651{
1652
1653 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1654 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1655 int gpio_shift = gpio_num +
1656 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1657 u32 gpio_mask = (1 << gpio_shift);
1658 u32 gpio_reg;
1659
1660 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1661 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1662 return -EINVAL;
1663 }
1664
1665 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1666
1667 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1668
1669 switch (mode) {
1670 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1671 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1672 "output low\n", gpio_num, gpio_shift);
1673
1674 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1675 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1676 break;
1677
1678 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1679 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1680 "output high\n", gpio_num, gpio_shift);
1681
1682 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1683 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1684 break;
1685
1686 default:
1687 break;
1688 }
1689
1690 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1691 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1692
1693 return 0;
1694}
1695
1696static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1697{
1698 u32 spio_mask = (1 << spio_num);
1699 u32 spio_reg;
1700
1701 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1702 (spio_num > MISC_REGISTERS_SPIO_7)) {
1703 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1704 return -EINVAL;
1705 }
1706
1707 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1708
1709 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1710
1711 switch (mode) {
1712 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1713 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1714
1715 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1716 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1717 break;
1718
1719 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1720 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1721
1722 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1723 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1724 break;
1725
1726 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1727 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1728
1729 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1730 break;
1731
1732 default:
1733 break;
1734 }
1735
1736 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1737 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1738
1739 return 0;
1740}
1741
1742int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1743{
1744 u32 sel_phy_idx = 0;
1745 if (bp->link_vars.link_up) {
1746 sel_phy_idx = EXT_PHY1;
1747
1748 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1749 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1750 sel_phy_idx = EXT_PHY2;
1751 } else {
1752
1753 switch (bnx2x_phy_selection(&bp->link_params)) {
1754 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1755 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1756 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1757 sel_phy_idx = EXT_PHY1;
1758 break;
1759 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1760 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1761 sel_phy_idx = EXT_PHY2;
1762 break;
1763 }
1764 }
1765
1766
1767
1768
1769
1770
1771 if (bp->link_params.multi_phy_config &
1772 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1773 if (sel_phy_idx == EXT_PHY1)
1774 sel_phy_idx = EXT_PHY2;
1775 else if (sel_phy_idx == EXT_PHY2)
1776 sel_phy_idx = EXT_PHY1;
1777 }
1778 return LINK_CONFIG_IDX(sel_phy_idx);
1779}
1780
1781void bnx2x_calc_fc_adv(struct bnx2x *bp)
1782{
1783 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1784 switch (bp->link_vars.ieee_fc &
1785 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1786 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1787 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1788 ADVERTISED_Pause);
1789 break;
1790
1791 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1792 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1793 ADVERTISED_Pause);
1794 break;
1795
1796 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1797 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1798 break;
1799
1800 default:
1801 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1802 ADVERTISED_Pause);
1803 break;
1804 }
1805}
1806
/* First-time link bring-up during device load.
 *
 * @load_mode: LOAD_* mode; LOAD_DIAG forces XGXS loopback at 10G for the
 *             duration of the init (the requested speed is restored after).
 *
 * Returns the bnx2x_phy_init() result, or -EINVAL when no bootcode (MCP)
 * is present.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];

		/* E1x with a jumbo MTU advertises TX-only flow control,
		 * everything else advertises both directions
		 */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			/* diagnostics: loopback at a fixed 10G speed */
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* restore the speed possibly overridden for LOAD_DIAG */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1844
/* Reconfigure the link: reset the PHY, bring it back up with the current
 * link_params, then recompute the advertised flow control. Requires a
 * working bootcode (MCP).
 */
void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
1857
/* Take the link down via bnx2x_link_reset() under the PHY lock.
 * Requires a working bootcode (MCP).
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
1867
/* Test the link by delegating to bnx2x_test_link() under the PHY lock.
 *
 * @is_serdes: passed through to bnx2x_test_link().
 *
 * Returns the bnx2x_test_link() result, or 0 when bootcode is missing.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}
1882
/* Compute the per-port rate-shaping and fairness timing parameters from
 * the current line speed.
 *
 * NOTE(review): divides by bp->link_vars.line_speed - callers must only
 * invoke this with link up / a non-zero line speed; confirm at call sites.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* periodic timeout converted from usec to HW ticks
	 * (presumably 4 usec per tick - hence the /4 here and below)
	 */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* rate-shaping threshold scaled with the line rate */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* time to arbitrate QM_ARB_BYTES at the current line rate */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;

	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* fairness threshold is a fixed byte budget */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* upper bound on accumulated fairness credit */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;

	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1928{
1929 int all_zero = 1;
1930 int vn;
1931
1932 bp->vn_weight_sum = 0;
1933 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1934 u32 vn_cfg = bp->mf_config[vn];
1935 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1936 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1937
1938
1939 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1940 continue;
1941
1942
1943 if (!vn_min_rate)
1944 vn_min_rate = DEF_MIN_RATE;
1945 else
1946 all_zero = 0;
1947
1948 bp->vn_weight_sum += vn_min_rate;
1949 }
1950
1951
1952 if (all_zero) {
1953 bp->cmng.flags.cmng_enables &=
1954 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1955 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1956 " fairness will be disabled\n");
1957 } else
1958 bp->cmng.flags.cmng_enables |=
1959 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1960}
1961
/* Program the rate-shaping and fairness parameters for one VN into the
 * XSTORM internal memory.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* hidden VNs get zeroed min and max rates */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* when fairness is on (vn_weight_sum != 0) a zero min
		 * still gets the default weight
		 */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;

		if (IS_MF_SI(bp))
			/* maxCfg is a percentage of the line speed here */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		else
			/* maxCfg is presumably in 100 Mbps units here -
			 * NOTE(review): confirm against bnx2x_extract_max_cfg
			 */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* maximal rate for this VN */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* byte quota per rate-shaping period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* fairness credit per period: proportional to this VN's
		 * share of the weight sum, but never below the threshold
		 */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold +
							MIN_ABOVE_THRESH));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* store both structures word-by-word into XSTORM internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2035
2036static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2037{
2038 if (CHIP_REV_IS_SLOW(bp))
2039 return CMNG_FNS_NONE;
2040 if (IS_MF(bp))
2041 return CMNG_FNS_MINMAX;
2042
2043 return CMNG_FNS_NONE;
2044}
2045
/* Refresh the cached per-VN function configuration from the shmem MF
 * config block. The absolute function number depends on the port mode:
 * n == 2 in 4-port mode, n == 1 otherwise, with the path as an offset -
 * NOTE(review): the exact layout is inferred from the arithmetic below;
 * confirm against the shmem/MF-config definition.
 */
static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);

	/* nothing to read without bootcode */
	if (BP_NOMCP(bp))
		return;

	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}
2074
/* Initialize the congestion-management (rate shaping + fairness) state.
 *
 * @read_cfg:  when set, re-read the MF configuration from shmem first.
 * @cmng_type: CMNG_FNS_* selector; anything other than CMNG_FNS_MINMAX
 *             leaves congestion management disabled.
 */
static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{
	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* start from a clean enables mask */
		bp->cmng.flags.cmng_enables = 0;

		/* re-read mf conf from shmem if requested */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* per-port rate shaping and fairness contexts */
		bnx2x_init_port_minmax(bp);

		/* vn_weight_sum; enables fairness when non-zero */
		bnx2x_calc_vn_weight_sum(bp);

		/* only the PMF programs the per-VN min/max into the chip */
		if (bp->port.pmf)
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, vn);

		/* rate shaping is always enabled in this mode */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (!bp->vn_weight_sum)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
				   "  fairness will be disabled\n");
		return;
	}

	/* any other mode: no rate shaping, no fairness */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}
2112
2113static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2114{
2115 int port = BP_PORT(bp);
2116 int func;
2117 int vn;
2118
2119
2120 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2121 if (vn == BP_E1HVN(bp))
2122 continue;
2123
2124 func = ((vn << 1) | port);
2125 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2126 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2127 }
2128}
2129
2130
/* Handle a link attention: refresh link state from the PHY layer,
 * reprogram pause/statistics/congestion management accordingly and
 * report a changed link status.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control: tell the FW whether TX pause is
		 * currently enabled (non-E1 only)
		 */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		/* on BMAC, clear the stale MAC statistics block */
		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);

			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		/* (re)program congestion management for the new speed */
		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}

	/* let the other VNs on this port know */
	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);

	/* only report when the link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);
}
2185
/* Refresh the cached link status (outside the attention path) and report
 * it. Does nothing unless the device is OPEN and the function enabled.
 */
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* refresh the cached MF configuration from shmem */
	bnx2x_read_mf_cfg(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2205
/* Called when this function becomes the port management function (PMF):
 * record it, re-open the attention edge masks with this VN's bit set and
 * kick the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* unmask this VN's attention bit (bit 4 + VN) on top of the base
	 * mask, in whichever interrupt block the chip uses
	 */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
/* Send a command to the MCP firmware through the shmem mailbox and wait
 * (sleeping) for the matching response.
 *
 * A sequence number is folded into the command header and compared with
 * the response so stale mailbox contents are not mistaken for a reply.
 * Serialized by fw_mb_mutex: one outstanding command at a time.
 *
 * Returns the firmware response masked by FW_MSG_CODE_MASK, or 0 when
 * the firmware did not respond within the polling budget.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* poll interval in ms; slower on emulation/FPGA revisions */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* give the FW time to process the command */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* up to 500 polls before giving up */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* sequence mismatch: the FW never answered */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2275
2276static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2277{
2278#ifdef BCM_CNIC
2279 if (IS_FCOE_FP(fp) && IS_MF(bp))
2280 return false;
2281#endif
2282 return true;
2283}
2284
2285
2286static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2287{
2288 u32 mask = (1 << cl_id);
2289
2290
2291 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2292 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2293 u8 unmatched_unicast = 0;
2294
2295 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2296 unmatched_unicast = 1;
2297
2298 if (filters & BNX2X_PROMISCUOUS_MODE) {
2299
2300 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2301 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2302 if (IS_MF_SI(bp)) {
2303
2304
2305
2306
2307 unmatched_unicast = 1;
2308 accp_all_ucast = 0;
2309 }
2310 }
2311 if (filters & BNX2X_ACCEPT_UNICAST) {
2312
2313 drop_all_ucast = 0;
2314 }
2315 if (filters & BNX2X_ACCEPT_MULTICAST)
2316
2317 drop_all_mcast = 0;
2318
2319 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2320
2321 drop_all_ucast = 0;
2322 accp_all_ucast = 1;
2323 }
2324 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2325
2326 drop_all_mcast = 0;
2327 accp_all_mcast = 1;
2328 }
2329 if (filters & BNX2X_ACCEPT_BROADCAST) {
2330
2331 drop_all_bcast = 0;
2332 accp_all_bcast = 1;
2333 }
2334
2335 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2336 bp->mac_filters.ucast_drop_all | mask :
2337 bp->mac_filters.ucast_drop_all & ~mask;
2338
2339 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2340 bp->mac_filters.mcast_drop_all | mask :
2341 bp->mac_filters.mcast_drop_all & ~mask;
2342
2343 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2344 bp->mac_filters.bcast_drop_all | mask :
2345 bp->mac_filters.bcast_drop_all & ~mask;
2346
2347 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2348 bp->mac_filters.ucast_accept_all | mask :
2349 bp->mac_filters.ucast_accept_all & ~mask;
2350
2351 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2352 bp->mac_filters.mcast_accept_all | mask :
2353 bp->mac_filters.mcast_accept_all & ~mask;
2354
2355 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2356 bp->mac_filters.bcast_accept_all | mask :
2357 bp->mac_filters.bcast_accept_all & ~mask;
2358
2359 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2360 bp->mac_filters.unmatched_unicast | mask :
2361 bp->mac_filters.unmatched_unicast & ~mask;
2362}
2363
/* Program a function's common configuration into the storms' internal
 * memories: TPA/RSS flags, VF-to-PF mapping, statistics collection
 * addresses and the slowpath queue (SPQ) location.
 */
static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = {0};
	u16 rss_flgs;

	/* tpa setup */
	if (p->func_flgs & FUNC_FLG_TPA)
		tcfg.config_flags |=
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* set rss mode and capability flags */
	rss_flgs = (p->rss->mode <<
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

	if (p->rss->cap & RSS_IPV4_CAP)
		rss_flgs |= RSS_IPV4_CAP_MASK;
	if (p->rss->cap & RSS_IPV4_TCP_CAP)
		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_CAP)
		rss_flgs |= RSS_IPV6_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_TCP_CAP)
		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

	tcfg.config_flags |= rss_flgs;
	tcfg.rss_result_mask = p->rss->result_mask;

	storm_memset_func_cfg(bp, &tcfg, p->func_id);

	/* enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics setup: same flags and address for all four storms */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq setup: address and initial producer */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
2421
2422static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2423 struct bnx2x_fastpath *fp)
2424{
2425 u16 flags = 0;
2426
2427
2428 flags |= QUEUE_FLG_CACHE_ALIGN;
2429 flags |= QUEUE_FLG_HC;
2430 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2431
2432 flags |= QUEUE_FLG_VLAN;
2433 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2434
2435 if (!fp->disable_tpa)
2436 flags |= QUEUE_FLG_TPA;
2437
2438 flags = stat_counter_valid(bp, fp) ?
2439 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
2440
2441 return flags;
2442}
2443
/* Fill in pause thresholds and the Rx queue init descriptor (rxq_init)
 * for one fastpath client, ahead of passing it to firmware queue setup.
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* Queue flags (HC, VLAN, TPA, stats ...) */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	/* TPA: SGE pause thresholds, max aggregation size (capped at 64K),
	 * number of SGEs per packet (MTU rounded up to whole SGE pages).
	 */
	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
			0xffff);
	}

	/* BD/RCQ pause thresholds for non-E1 chips.
	 * NOTE(review): this also zeroes the SGE thresholds set above for
	 * TPA — presumably intentional (SGE flow control unused on these
	 * chips), but worth confirming against firmware docs.
	 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* Rx queue descriptor: context, ring DMA addresses, sizes and IDs */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	/* "next page" part of the RCQ lives one BCM page into the ring */
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	/* FCoE completions are reported via a slow-path CQ index */
	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	/* Interrupt coalescing rate: rx_ticks is in usec */
	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2508
2509static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2510 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2511{
2512 u16 flags = bnx2x_get_cl_flags(bp, fp);
2513
2514 txq_init->flags = flags;
2515 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2516 txq_init->dscr_map = fp->tx_desc_mapping;
2517 txq_init->stat_id = fp->cl_id;
2518 txq_init->cid = HW_CID(bp, fp->cid);
2519 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2520 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2521 txq_init->fw_sb_id = fp->fw_sb_id;
2522
2523 if (IS_FCOE_FP(fp)) {
2524 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2525 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2526 }
2527
2528 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2529}
2530
/* Per-PF initialization: outer-VLAN tag, IGU statistics counters (E2),
 * function-level firmware configuration (RSS, stats, SPQ), congestion
 * management, initial rx-mode and the event queue (EQ) data.
 */
static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* Program the outer-VLAN tag (multi-function) — not on E1 */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* Zero this PF's IGU "statistic message sent" counters
		 * (one slot past the VF counters, and one more past the
		 * PF counters); indexed by function in 4-port mode,
		 * by VN otherwise.
		 */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);

		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* On E1x TPA follows the driver flag; E2 always enables it */
	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* RSS: all IPv4/IPv6 (+TCP) hash types, mode from module config */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Congestion management: initialized assuming 10G link so values
	 * exist before the first link-change notification arrives.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF writes the shared per-port cmng data */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* Start closed: no rx until the stack opens the device */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Event queue: DMA base, initial producer and status-block index */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2613
2614
2615static void bnx2x_e1h_disable(struct bnx2x *bp)
2616{
2617 int port = BP_PORT(bp);
2618
2619 netif_tx_disable(bp->dev);
2620
2621 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2622
2623 netif_carrier_off(bp->dev);
2624}
2625
2626static void bnx2x_e1h_enable(struct bnx2x *bp)
2627{
2628 int port = BP_PORT(bp);
2629
2630 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2631
2632
2633 netif_tx_wake_all_queues(bp->dev);
2634
2635
2636
2637
2638
2639}
2640
2641
2642
2643
2644
2645
/* Recompute multi-function min/max bandwidth configuration and publish it
 * to the firmware. The cmng values are only recalculated while the link is
 * up (presumably because they depend on the current line speed — TODO
 * confirm); the per-port cmng struct is written out unconditionally.
 */
static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}
2654
/* Handle an MCP "set MF bandwidth" request: apply the new configuration
 * and acknowledge the management firmware.
 */
static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
2660
/* Handle DCC (Dell Client Configuration) events signalled by the MCP.
 * Each recognized event bit is cleared from dcc_event after handling;
 * any remaining (unhandled) bits cause a DCC_FAILURE response instead
 * of DCC_OK.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
		/* The function was enabled or disabled in the mf
		 * configuration — mirror that state in the driver.
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
		bnx2x_config_mf_bw(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results back to the MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2696
2697
2698static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2699{
2700 struct eth_spe *next_spe = bp->spq_prod_bd;
2701
2702 if (bp->spq_prod_bd == bp->spq_last_bd) {
2703 bp->spq_prod_bd = bp->spq;
2704 bp->spq_prod_idx = 0;
2705 DP(NETIF_MSG_TIMER, "end of spq\n");
2706 } else {
2707 bp->spq_prod_bd++;
2708 bp->spq_prod_idx++;
2709 }
2710 return next_spe;
2711}
2712
2713
/* Publish the new SPQ producer index to the XSTORM so firmware picks up
 * the queued slow-path elements.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Ensure the SPQ element contents are visible in memory before the
	 * producer update makes them reachable by firmware.
	 */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	/* Flush the posted MMIO write before the spinlock is released */
	mmiowb();
}
2725
2726
/* Post a slow-path (ramrod) element on the SPQ.
 *
 * @command:  ramrod command id
 * @cid:      connection id the ramrod applies to
 * @data_hi/@data_lo: DMA address of the ramrod data buffer
 * @common:   non-zero for a "common" (non-connection) ramrod
 *
 * Returns 0 on success, -EBUSY if the SPQ is full (also panics), -EIO if
 * the driver already panicked. Serialized by bp->spq_lock.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!atomic_read(&bp->spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded int it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	if (common)
		/* Common ramrods use the NONE connection type */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* STAT_QUERY does not consume an SPQ credit — presumably its
	 * completion is accounted differently (TODO confirm against
	 * the completion path).
	 */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
		atomic_dec(&bp->spq_left);

	DP(BNX2X_MSG_SP,
	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
	   "type(0x%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2794
2795
2796static int bnx2x_acquire_alr(struct bnx2x *bp)
2797{
2798 u32 j, val;
2799 int rc = 0;
2800
2801 might_sleep();
2802 for (j = 0; j < 1000; j++) {
2803 val = (1UL << 31);
2804 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2805 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2806 if (val & (1L << 31))
2807 break;
2808
2809 msleep(5);
2810 }
2811 if (!(val & (1L << 31))) {
2812 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2813 rc = -EBUSY;
2814 }
2815
2816 return rc;
2817}
2818
2819
/* Release the MCP access lock register taken by bnx2x_acquire_alr() */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2824
/* Bits returned by bnx2x_update_dsb_idx() indicating which default
 * status block indices changed since the last poll.
 */
#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002
2827
/* Latch the default status block indices written by hardware and report
 * which of them changed (BNX2X_DEF_SB_ATT_IDX and/or BNX2X_DEF_SB_IDX).
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	/* Read the DMA'd status block only after this point */
	barrier();
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not re-order the index reads with subsequent processing */
	barrier();
	return rc;
}
2848
2849
2850
2851
2852
/* Handle newly asserted attention bits: mask them in the AEU, record the
 * new attention state, service hard-wired attentions (NIG/link, timers,
 * GPIOs, general attentions) and finally acknowledge the bits to the
 * interrupt controller (HC or IGU).
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	/* A bit asserted while already recorded as asserted means the
	 * IGU and driver views of attention state disagree.
	 */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* The AEU mask register is shared — serialize via HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	/* Mask the newly asserted lines until they are deasserted */
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {
			/* PHY access must be serialized; released below
			 * after the NIG interrupt is acknowledged.
			 */
			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* General attentions 1-3 belong to port 0, 4-6 to port 1;
		 * clear each by writing 0 to its AEU register.
		 */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* Acknowledge the attention bits to the interrupt controller */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2953
2954static inline void bnx2x_fan_failure(struct bnx2x *bp)
2955{
2956 int port = BP_PORT(bp);
2957 u32 ext_phy_config;
2958
2959 ext_phy_config =
2960 SHMEM_RD(bp,
2961 dev_info.port_hw_config[port].external_phy_config);
2962
2963 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2964 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2965 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2966 ext_phy_config);
2967
2968
2969 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2970 " the driver to shutdown the card to prevent permanent"
2971 " damage. Please contact OEM Support for assistance\n");
2972}
2973
/* Service deasserted attention group 0: SPIO5 (fan failure), GPIO3
 * (module detect) and fatal HW block attentions.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* Disable this attention — it indicates a fan failure and
		 * will not self-clear.
		 */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Shut the PHY down, then record/log the failure */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* Mask the offending HW block attentions, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
3014
/* Service deasserted attention group 1: doorbell queue interrupts and
 * fatal HW block attentions.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* Reading the status register also clears the interrupt */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);

		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* Mask the offending HW block attentions, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3045
/* Service deasserted attention group 2: CFC and PXP interrupts plus
 * fatal HW block attentions.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* Reading the status register also clears the interrupt */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);

		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);

		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
		/* E2 has a second PXP interrupt status register */
		if (CHIP_IS_E2(bp)) {
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* Mask the offending HW block attentions, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3089
/* Service deasserted attention group 3: general attentions (PMF link
 * events / MCP messages, microcode and MCP asserts) and latched
 * attentions (GRC timeouts etc.).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* Clear the general attention, refresh the MF
			 * config and process the MCP drv_status events.
			 */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			bnx2x__link_status_update(bp);
			/* This function may become the new PMF */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
				bp->dcbx_enabled > 0)
				/* start dcbx state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* Storm microcode assert — clear and panic */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* Management firmware assert — dump its state */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* E1 has no GRC timeout attention register */
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* Clear all latched attentions */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3155
/* Generic register used to track the driver load count and the global
 * reset state across all functions:
 *   bits [15:0]  - number of driver instances currently loaded
 *   bits [31:16] - "reset in progress" flags (non-zero => recovery running)
 */
#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of counter bits */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
3161
3162
3163
3164
/* Clear the "reset in progress" flag (bit RESET_DONE_FLAG_SHIFT) in the
 * shared generic register, marking global recovery as finished.
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	/* Make sure the write has reached the chip before proceeding */
	barrier();
	mmiowb();
}
3173
3174
3175
3176
3177static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3178{
3179 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3180 val |= (1 << 16);
3181 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3182 barrier();
3183 mmiowb();
3184}
3185
3186
3187
3188
3189bool bnx2x_reset_is_done(struct bnx2x *bp)
3190{
3191 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3192 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3193 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3194}
3195
3196
3197
3198
/* Increment the shared driver load counter (low 16 bits of the generic
 * register), preserving the reset-state flags in the upper bits.
 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	/* Make sure the write has reached the chip before proceeding */
	barrier();
	mmiowb();
}
3210
3211
3212
3213
/* Decrement the shared driver load counter and return the new value.
 * The reset-state flags in the upper bits are preserved.
 *
 * NOTE(review): there is no guard against decrementing a zero counter —
 * it would wrap to 0xffff. Presumably callers balance inc/dec; confirm.
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	/* Make sure the write has reached the chip before proceeding */
	barrier();
	mmiowb();

	return val1;
}
3227
3228
3229
3230
3231static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3232{
3233 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3234}
3235
3236static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3237{
3238 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3239 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3240}
3241
/* Continuation-print a block name, separating entries after the first
 * one with ", ".
 */
static inline void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}
3248
3249static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3250{
3251 int i = 0;
3252 u32 cur_bit = 0;
3253 for (i = 0; sig; i++) {
3254 cur_bit = ((u32)0x1 << i);
3255 if (sig & cur_bit) {
3256 switch (cur_bit) {
3257 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3258 _print_next_block(par_num++, "BRB");
3259 break;
3260 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3261 _print_next_block(par_num++, "PARSER");
3262 break;
3263 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3264 _print_next_block(par_num++, "TSDM");
3265 break;
3266 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3267 _print_next_block(par_num++, "SEARCHER");
3268 break;
3269 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3270 _print_next_block(par_num++, "TSEMI");
3271 break;
3272 }
3273
3274
3275 sig &= ~cur_bit;
3276 }
3277 }
3278
3279 return par_num;
3280}
3281
3282static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3283{
3284 int i = 0;
3285 u32 cur_bit = 0;
3286 for (i = 0; sig; i++) {
3287 cur_bit = ((u32)0x1 << i);
3288 if (sig & cur_bit) {
3289 switch (cur_bit) {
3290 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3291 _print_next_block(par_num++, "PBCLIENT");
3292 break;
3293 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3294 _print_next_block(par_num++, "QM");
3295 break;
3296 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3297 _print_next_block(par_num++, "XSDM");
3298 break;
3299 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3300 _print_next_block(par_num++, "XSEMI");
3301 break;
3302 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3303 _print_next_block(par_num++, "DOORBELLQ");
3304 break;
3305 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3306 _print_next_block(par_num++, "VAUX PCI CORE");
3307 break;
3308 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3309 _print_next_block(par_num++, "DEBUG");
3310 break;
3311 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3312 _print_next_block(par_num++, "USDM");
3313 break;
3314 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3315 _print_next_block(par_num++, "USEMI");
3316 break;
3317 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3318 _print_next_block(par_num++, "UPB");
3319 break;
3320 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3321 _print_next_block(par_num++, "CSDM");
3322 break;
3323 }
3324
3325
3326 sig &= ~cur_bit;
3327 }
3328 }
3329
3330 return par_num;
3331}
3332
3333static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3334{
3335 int i = 0;
3336 u32 cur_bit = 0;
3337 for (i = 0; sig; i++) {
3338 cur_bit = ((u32)0x1 << i);
3339 if (sig & cur_bit) {
3340 switch (cur_bit) {
3341 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3342 _print_next_block(par_num++, "CSEMI");
3343 break;
3344 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3345 _print_next_block(par_num++, "PXP");
3346 break;
3347 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3348 _print_next_block(par_num++,
3349 "PXPPCICLOCKCLIENT");
3350 break;
3351 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3352 _print_next_block(par_num++, "CFC");
3353 break;
3354 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3355 _print_next_block(par_num++, "CDU");
3356 break;
3357 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3358 _print_next_block(par_num++, "IGU");
3359 break;
3360 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3361 _print_next_block(par_num++, "MISC");
3362 break;
3363 }
3364
3365
3366 sig &= ~cur_bit;
3367 }
3368 }
3369
3370 return par_num;
3371}
3372
3373static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3374{
3375 int i = 0;
3376 u32 cur_bit = 0;
3377 for (i = 0; sig; i++) {
3378 cur_bit = ((u32)0x1 << i);
3379 if (sig & cur_bit) {
3380 switch (cur_bit) {
3381 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3382 _print_next_block(par_num++, "MCP ROM");
3383 break;
3384 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3385 _print_next_block(par_num++, "MCP UMP RX");
3386 break;
3387 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3388 _print_next_block(par_num++, "MCP UMP TX");
3389 break;
3390 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3391 _print_next_block(par_num++, "MCP SCPAD");
3392 break;
3393 }
3394
3395
3396 sig &= ~cur_bit;
3397 }
3398 }
3399
3400 return par_num;
3401}
3402
/* Check the four attention signatures for parity errors; if any are set,
 * log which HW blocks are affected (one continued printk line) and
 * return true, otherwise return false.
 */
static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
			bp->dev->name);
		/* par_num threads through the printers so block names are
		 * comma-separated on the same continued line.
		 */
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}
3431
3432bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3433{
3434 struct attn_route attn;
3435 int port = BP_PORT(bp);
3436
3437 attn.sig[0] = REG_RD(bp,
3438 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3439 port*4);
3440 attn.sig[1] = REG_RD(bp,
3441 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3442 port*4);
3443 attn.sig[2] = REG_RD(bp,
3444 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3445 port*4);
3446 attn.sig[3] = REG_RD(bp,
3447 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3448 port*4);
3449
3450 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3451 attn.sig[3]);
3452}
3453
3454
/* Service deasserted attention group 4 (E2 only): decode and log PGLUE
 * and ATC interrupt status bits, and report fatal PGLUE/ATC parity.
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		/* Reading the status register also clears the interrupt */
		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		/* Log each individual PGLUE error cause that is set */
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		/* Reading the status register also clears the interrupt */
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		/* Log each individual ATC error cause that is set */
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}
3521
/*
 * Handle newly de-asserted attention bits: read the after-invert AEU
 * signals, run the per-register handlers for every de-asserted group,
 * ack the bits towards HC/IGU and unmask them again in the AEU.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
		/* Fatal parity: kick off the recovery flow and stop
		 * handling attentions on this function. */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other functions get a chance to see them too. */
		return;
	}

	/* Latch the current (after-invert) attention signals */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (CHIP_IS_E2(bp))
		/* E2 has a fifth attention register */
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	/* Dispatch each de-asserted group, masked by its AEU group config */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* Clear the attention bits in the interrupt controller (HC or IGU) */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	/* The de-asserted bits must have been tracked in attn_state */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* Re-enable the de-asserted lines in the AEU mask (under HW lock) */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	/* Drop the handled bits from the software attention state */
	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3619
3620static void bnx2x_attn_int(struct bnx2x *bp)
3621{
3622
3623 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3624 attn_bits);
3625 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3626 attn_bits_ack);
3627 u32 attn_state = bp->attn_state;
3628
3629
3630 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3631 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3632
3633 DP(NETIF_MSG_HW,
3634 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3635 attn_bits, attn_ack, asserted, deasserted);
3636
3637 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3638 BNX2X_ERR("BAD attention state\n");
3639
3640
3641 if (asserted)
3642 bnx2x_attn_int_asserted(bp, asserted);
3643
3644 if (deasserted)
3645 bnx2x_attn_int_deasserted(bp, deasserted);
3646}
3647
/* Publish the event-queue producer to the storm internal RAM for this
 * PF.  The mmiowb() keeps this MMIO write ordered ahead of subsequent
 * writes when the caller releases a lock on another CPU. */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers needed before the write itself */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3654
3655#ifdef BCM_CNIC
/*
 * Handle a CFC-delete completion that may belong to CNIC.
 * Returns 0 when @cid is a CNIC connection and was completed here,
 * 1 when the cid is below the CNIC range so the caller must handle it.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	/* CNIC not started, or cid belongs to an L2 (eth) client */
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		/* FW reported an error on the delete - dump state */
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	/* signal completion towards the CNIC driver */
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
3673#endif
3674
/*
 * Process the slow-path event queue (EQ): walk elements between the
 * software consumer and the hardware consumer taken from the status
 * block, dispatch each by opcode, then return the consumed credits to
 * the SPQ and publish the new producer.
 */
static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 while the sw_cons range is
	 * 0-254, 256: when hw_cons points at the last (next-page)
	 * descriptor of a page, step over it so the walk below
	 * terminates correctly.
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function never runs in parallel with itself for a given
	 * bp, so no paired read memory barrier is needed here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {


		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		/* cid originates from the FW (little-endian) */
		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;

		/* handle eq element by opcode; opcodes that do not
		 * depend on bp->state are dispatched here */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* CFC delete: a connection context was released.
			 * Route by cid - CNIC first, then FCoE, then the
			 * regular eth fastpath rings.
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
			if (cid == BNX2X_FCOE_ETH_CID)
				bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
			else
#endif
				bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;

		case EVENT_RING_OPCODE_STOP_TRAFFIC:
			DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
			goto next_spqe;
		case EVENT_RING_OPCODE_START_TRAFFIC:
			DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
			goto next_spqe;
		}

		/* remaining opcodes are only valid in specific states */
		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event - log and drop it */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	/* return consumed EQ elements as SPQ credits */
	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;

	/* make sure the above memory writes complete before the
	 * producer update reaches the chip */
	smp_wmb();

	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
3794
/*
 * Slow-path work item: processes HW attentions and slow-path (EQ)
 * events signalled through the default status block, then re-enables
 * the attention line in the IGU.  Queued from bnx2x_msix_sp_int().
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status holds the indices that changed in the default SB */
	status = bnx2x_update_dsb_idx(bp);

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: statistics completions, EQ elements, ... */
	if (status & BNX2X_DEF_SB_IDX) {
#ifdef BCM_CNIC
		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

		/* FCoE ring has pending work - schedule its NAPI */
		if ((!NO_FCOE(bp)) &&
		    (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
			napi_schedule(&bnx2x_fcoe(bp, napi));
#endif
		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	/* unexpected bits left over - just report them */
	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* ack and re-enable the attention line */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3843
/*
 * Slow-path MSI-X interrupt handler: disables the slow-path interrupt
 * line, forwards the event to CNIC (if registered) and defers the real
 * work to bnx2x_sp_task() on the bnx2x workqueue.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* mask the line until the work item re-enables it */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* let the CNIC driver peek at the event too */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3878
3879
3880
3881static void bnx2x_timer(unsigned long data)
3882{
3883 struct bnx2x *bp = (struct bnx2x *) data;
3884
3885 if (!netif_running(bp->dev))
3886 return;
3887
3888 if (atomic_read(&bp->intr_sem) != 0)
3889 goto timer_restart;
3890
3891 if (poll) {
3892 struct bnx2x_fastpath *fp = &bp->fp[0];
3893 int rc;
3894
3895 bnx2x_tx_int(fp);
3896 rc = bnx2x_rx_int(fp, 1000);
3897 }
3898
3899 if (!BP_NOMCP(bp)) {
3900 int mb_idx = BP_FW_MB_IDX(bp);
3901 u32 drv_pulse;
3902 u32 mcp_pulse;
3903
3904 ++bp->fw_drv_pulse_wr_seq;
3905 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3906
3907 drv_pulse = bp->fw_drv_pulse_wr_seq;
3908 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3909
3910 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3911 MCP_PULSE_SEQ_MASK);
3912
3913
3914
3915 if ((drv_pulse != mcp_pulse) &&
3916 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3917
3918 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3919 drv_pulse, mcp_pulse);
3920 }
3921 }
3922
3923 if (bp->state == BNX2X_STATE_OPEN)
3924 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3925
3926timer_restart:
3927 mod_timer(&bp->timer, jiffies + bp->current_interval);
3928}
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3939{
3940 u32 i;
3941 if (!(len%4) && !(addr%4))
3942 for (i = 0; i < len; i += 4)
3943 REG_WR(bp, addr + i, fill);
3944 else
3945 for (i = 0; i < len; i++)
3946 REG_WR8(bp, addr + i, fill);
3947
3948}
3949
3950
3951static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3952 int fw_sb_id,
3953 u32 *sb_data_p,
3954 u32 data_size)
3955{
3956 int index;
3957 for (index = 0; index < data_size; index++)
3958 REG_WR(bp, BAR_CSTRORM_INTMEM +
3959 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3960 sizeof(u32)*index,
3961 *(sb_data_p + index));
3962}
3963
/*
 * Disable and zero a fastpath status block: write a "function disabled"
 * data record (E2 or E1x layout depending on the chip) and clear the
 * status block and its sync lines in CSTORM internal memory.
 */
static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function in the SB data record */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	/* clear the status block body and sync lines */
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}
3997
3998
3999static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
4000 struct hc_sp_status_block_data *sp_sb_data)
4001{
4002 int func = BP_FUNC(bp);
4003 int i;
4004 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4005 REG_WR(bp, BAR_CSTRORM_INTMEM +
4006 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4007 i*sizeof(u32),
4008 *((u32 *)sp_sb_data + i));
4009}
4010
4011static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4012{
4013 int func = BP_FUNC(bp);
4014 struct hc_sp_status_block_data sp_sb_data;
4015 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4016
4017 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4018 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4019 sp_sb_data.p_func.vf_valid = false;
4020
4021 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4022
4023 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4024 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4025 CSTORM_SP_STATUS_BLOCK_SIZE);
4026 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4027 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4028 CSTORM_SP_SYNC_BLOCK_SIZE);
4029
4030}
4031
4032
4033static inline
4034void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4035 int igu_sb_id, int igu_seg_id)
4036{
4037 hc_sm->igu_sb_id = igu_sb_id;
4038 hc_sm->igu_seg_id = igu_seg_id;
4039 hc_sm->timer_value = 0xFF;
4040 hc_sm->time_to_expire = 0xFFFFFFFF;
4041}
4042
/*
 * Initialize one fastpath status block in the FW/HW: zero it first,
 * build the chip-specific (E2/E1x) SB data record pointing at the host
 * DMA buffer @mapping, set up the RX and TX state machines and write
 * the record into CSTORM internal memory.
 */
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm  *hc_sm_p;
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	/* in backward-compatible mode the HC segment is used */
	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		hc_index_p = sb_data_e2.index_data;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		hc_index_p = sb_data_e1x.index_data;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	/* RX and TX state machines both attach to the same IGU SB */
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
4101
4102static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4103 u8 sb_index, u8 disable, u16 usec)
4104{
4105 int port = BP_PORT(bp);
4106 u8 ticks = usec / BNX2X_BTR;
4107
4108 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4109
4110 disable = disable ? 1 : (usec ? 0 : 1);
4111 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4112}
4113
/* Apply RX and TX interrupt coalescing timeouts (in usec) to the RX-CQ
 * and TX-CQ indices of one fastpath status block. */
static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
				       false, tx_usec);
}
4122
/*
 * Initialize the default (slow-path) status block: program the
 * attention section (group masks read from the AEU, attention message
 * address in HC/IGU) and the SP section, then enable the SB in the IGU.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN section */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* cache the per-group AEU enable masks for attention dispatch */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the
			 * registers, and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	/* point the interrupt controller at the attention message buffer */
	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	/* SP section */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
	sp_sb_data.igu_seg_id		= igu_seg_id;
	sp_sb_data.p_func.pf_id		= func;
	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
	sp_sb_data.p_func.vf_id		= 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4204
/* Apply the currently configured rx/tx coalescing values to every
 * ethernet queue's status block. */
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}
4213
4214static void bnx2x_init_sp_ring(struct bnx2x *bp)
4215{
4216 spin_lock_init(&bp->spq_lock);
4217 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4218
4219 bp->spq_prod_idx = 0;
4220 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4221 bp->spq_prod_bd = bp->spq;
4222 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4223}
4224
/*
 * Initialize the event queue ring: chain the pages together by writing
 * a next-page pointer into the last element of each page (the last
 * page links back to the first), and reset the consumer/producer state.
 */
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		/* last element of page i-1 points at page (i % NUM_EQ_PAGES) */
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
}
4243
/*
 * Program the RSS indirection table in TSTORM internal memory, mapping
 * each table entry round-robin onto the ethernet client ids (the
 * non-eth clients at the end of num_queues are excluded).
 */
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	/* nothing to do when RSS is off */
	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % (bp->num_queues -
				NONE_ETH_CONTEXT_USE)));
}
4260
/*
 * Translate bp->rx_mode into MAC filter settings for the default queue
 * (and, with CNIC, the FCoE L2 client), program the NIG LLH drop mask
 * and write the resulting filter set to the storms.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	int port = BP_PORT(bp);
	u16 cl_id;
	u32 def_q_filters = 0;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx at all */
		def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		break;

	case BNX2X_RX_MODE_NORMAL:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST |
						  BNX2X_ACCEPT_MULTICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
		/*
		 * Prevent duplication of multicast packets by configuring
		 * the FCoE L2 Client to receive only matched unicast frames.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_PROMISC:
		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
		/*
		 * The default queue already receives everything in
		 * promiscuous mode, so the FCoE L2 Client gets nothing to
		 * avoid duplicated frames.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	cl_id = BP_L_ID(bp);
	bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
		       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
		"unmatched_ucast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all,
		bp->mac_filters.unmatched_unicast
	);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}
4358
/*
 * One-time (per chip) storm internal memory initialization performed
 * by the function loaded with the COMMON load code.
 */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* tell all storms which multi-function mode we are in */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	if (IS_MF_SI(bp))
		/*
		 * In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate
		 * match mac addresses aren't written to NIG LLH
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		/* tell CSTORM whether the IGU runs in backward-compatible
		 * or normal mode */
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}
4397
/* Per-port storm internal memory initialization. */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port-level PFC (priority flow control) setup for DCB */
	bnx2x_dcb_init_intmem_pfc(bp);
}
4403
/*
 * Run the storm internal memory initialization stages appropriate for
 * the MCP load response: COMMON implies PORT implies FUNCTION, hence
 * the intentional switch fallthrough.
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break: fall through to port init */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break: fall through to function init */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is initialized inside
		 * bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
4426
/*
 * Initialize the software state and status block of one ethernet
 * fastpath queue: assign its client/SB ids and qzone, compute the
 * USTORM rx producers offset, and program the SB in the HW.
 */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	/* fw/igu SB ids leave room for the CNIC context */
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;

	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id  = fp->cl_id +
			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut to the rx producers in USTORM internal memory */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
				   "cl_id %d fw_sb %d igu_sb %d\n",
		   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
		   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
4458
/*
 * Top-level NIC initialization after firmware load: set up all status
 * blocks, rings, internal memory and the indirection table, then
 * enable interrupts and check the initial SPIO5 attention.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	/* CNIC gets its own status block */
	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read before continuing */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
4503
4504
4505
4506
4507
4508
4509
/*
 * Allocate the resources needed to decompress firmware blobs: a DMA
 * output buffer, a zlib stream and its inflate workspace.
 * Returns 0 on success or -ENOMEM, releasing anything already
 * allocated via the goto-cleanup chain.
 */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf  == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm  == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
	       " un-compression\n");
	return -ENOMEM;
}
4542
4543static void bnx2x_gunzip_end(struct bnx2x *bp)
4544{
4545 kfree(bp->strm->workspace);
4546 kfree(bp->strm);
4547 bp->strm = NULL;
4548
4549 if (bp->gunzip_buf) {
4550 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4551 bp->gunzip_mapping);
4552 bp->gunzip_buf = NULL;
4553 }
4554}
4555
4556static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4557{
4558 int n, rc;
4559
4560
4561 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4562 BNX2X_ERR("Bad gzip header\n");
4563 return -EINVAL;
4564 }
4565
4566 n = 10;
4567
4568#define FNAME 0x8
4569
4570 if (zbuf[3] & FNAME)
4571 while ((zbuf[n++] != 0) && (n < len));
4572
4573 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4574 bp->strm->avail_in = len - n;
4575 bp->strm->next_out = bp->gunzip_buf;
4576 bp->strm->avail_out = FW_BUF_SIZE;
4577
4578 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4579 if (rc != Z_OK)
4580 return rc;
4581
4582 rc = zlib_inflate(bp->strm, Z_FINISH);
4583 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4584 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4585 bp->strm->msg);
4586
4587 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4588 if (bp->gunzip_outlen & 0x3)
4589 netdev_err(bp->dev, "Firmware decompression error:"
4590 " gunzip_outlen (%d) not aligned\n",
4591 bp->gunzip_outlen);
4592 bp->gunzip_outlen >>= 2;
4593
4594 zlib_inflateEnd(bp->strm);
4595
4596 if (rc == Z_STREAM_END)
4597 return 0;
4598
4599 return rc;
4600}
4601
4602
4603
4604
4605
4606
4607
4608
/* Send a loopback debug packet through the NIG (used by the internal
 * memory self-test). */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
4625
4626
4627
4628
4629
/*
 * Internal memory self-test: push loopback packets through BRB/PRS
 * with the parser's neighbours disabled and verify the NIG and PRS
 * statistics at each stage.  Returns 0 on success, a negative stage
 * number on failure.  Timeouts are scaled up on FPGA/emulation.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4775
/* Unmask attention interrupts in the individual HW blocks.
 * Writing 0 to a *_INT_MASK register unmasks all of that block's
 * attention sources; non-zero values deliberately keep selected
 * bits masked (see per-write notes below). */
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (CHIP_IS_E2(bp))
		/* bit 6 stays masked on E2 - TODO confirm which source */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in BRB for the parser:
	 * the parsing unit reads a fixed length and truncated packets
	 * can legally cause read length errors, so those bits
	 * (0xFC00) must stay masked.
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);

	/* U-storm side */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);

	/* C-storm side */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);

	/* PXP2: keep chip-specific noisy sources masked */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);

	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
	/* NOTE(review): 0x18 keeps two PBF attention bits masked -
	 * presumably expected flow-control events; confirm vs. HW spec. */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);
}
4832
/* Assert reset on most HW blocks via the MISC reset registers.
 * Writing a bit to the *_CLEAR register puts the corresponding block
 * into reset; the zero bits in the masks (0xd3ffff7f / 0x1403 written)
 * deliberately leave some blocks out of reset - TODO confirm exact
 * bit meaning against the reset register layout. */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
4840
4841static void bnx2x_init_pxp(struct bnx2x *bp)
4842{
4843 u16 devctl;
4844 int r_order, w_order;
4845
4846 pci_read_config_word(bp->pdev,
4847 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4848 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4849 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4850 if (bp->mrrs == -1)
4851 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4852 else {
4853 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4854 r_order = bp->mrrs;
4855 }
4856
4857 bnx2x_init_pxp_arb(bp, r_order, w_order);
4858}
4859
4860static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4861{
4862 int is_required;
4863 u32 val;
4864 int port;
4865
4866 if (BP_NOMCP(bp))
4867 return;
4868
4869 is_required = 0;
4870 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4871 SHARED_HW_CFG_FAN_FAILURE_MASK;
4872
4873 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4874 is_required = 1;
4875
4876
4877
4878
4879
4880
4881 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4882 for (port = PORT_0; port < PORT_MAX; port++) {
4883 is_required |=
4884 bnx2x_fan_failure_det_req(
4885 bp,
4886 bp->common.shmem_base,
4887 bp->common.shmem2_base,
4888 port);
4889 }
4890
4891 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4892
4893 if (is_required == 0)
4894 return;
4895
4896
4897 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4898 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4899
4900
4901 val = REG_RD(bp, MISC_REG_SPIO_INT);
4902 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4903 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4904 REG_WR(bp, MISC_REG_SPIO_INT, val);
4905
4906
4907 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4908 val |= (1 << MISC_REGISTERS_SPIO_5);
4909 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4910}
4911
4912static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4913{
4914 u32 offset = 0;
4915
4916 if (CHIP_IS_E1(bp))
4917 return;
4918 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4919 return;
4920
4921 switch (BP_ABS_FUNC(bp)) {
4922 case 0:
4923 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4924 break;
4925 case 1:
4926 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4927 break;
4928 case 2:
4929 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4930 break;
4931 case 3:
4932 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4933 break;
4934 case 4:
4935 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4936 break;
4937 case 5:
4938 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4939 break;
4940 case 6:
4941 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4942 break;
4943 case 7:
4944 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4945 break;
4946 default:
4947 return;
4948 }
4949
4950 REG_WR(bp, offset, pretend_func_num);
4951 REG_RD(bp, offset);
4952 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4953}
4954
4955static void bnx2x_pf_disable(struct bnx2x *bp)
4956{
4957 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4958 val &= ~IGU_PF_CONF_FUNC_EN;
4959
4960 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4961 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4962 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4963}
4964
/* Common (chip-wide) HW initialization, executed once by the first
 * driver instance to load (as told by the MCP load_code).  Brings all
 * shared blocks out of reset and initializes them in the HW-mandated
 * order.  Returns 0 on success or -EBUSY if a block fails to report
 * init-done. */
static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));

	bnx2x_reset_common(bp);
	/* take all blocks out of reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		u8 fid;

		/*
		 * Turn off master-enable for every function on this path
		 * except ourselves, then turn it back on for self.  The
		 * absolute function ids on a path are 0,2,4,6 for path 0
		 * and 1,3,5,7 for path 1, hence the stride of 2.
		 * bnx2x_pretend_func() lets us write the other functions'
		 * registers from this PF.
		 */
		for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
			if (fid == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, fid);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* configure PXP2 request/read paths for big-endian hosts */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

	/* swap-mode for the read path */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround (E2 only): the entire ILT must contain
	 * entries with value 0 and the valid bit set.  This must be done
	 * by the first PF loaded on a path, i.e. in the common phase.
	 * Pretend as the neighbouring path's PF (BP_PATH + 6) since the
	 * PXP2 FIRST/LAST ILT registers are split per path.
	 */
	if (CHIP_IS_E2(bp)) {
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client covering the whole ILT */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: zero all ILT page entries with valid bit on.
		 * Step 2: point the timers first/last ILT entries at the
		 * entire range (prevents an ILT range error for the
		 * 3rd/4th vnic).  Both steps are performed by the single
		 * INITOP_CLEAR call below with the dummy TM client.
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	/* re-enable PXP inputs after init completed */
	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (CHIP_IS_E2(bp)) {
		/* emulation/FPGA need many more polls of ATC init-done */
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);

		bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);

		/* let the HW do its magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* DMAE is up - clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	/* sanity DMAE reads of the SEM passive buffers */
	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);

	/* QM queues pointer table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
	}

	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 headers may appear after
		   the basic Ethernet header */
		int has_ovlan = IS_MF_SD(bp);
		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* clear the storms' internal fast memory */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);

	/* sync semi rtc (pulse bit 31) */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp)) {
		int has_ovlan = IS_MF_SD(bp);
		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	/* randomize the RSS keys while the searcher is in soft reset */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());

	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	/* fixed searcher hash keys for iSCSI/FCoE */
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* the CDU is programmed below for 1024-byte contexts */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	/* CDU global params: num_of_cid_wnd=4, cid_wnd_base=0, ctx size 1024 */
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the CFC debug thresholds */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (CHIP_IS_E2(bp)) {
		/* clear latched unsupported-request errors per function */
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
	}
	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 headers may appear after
		   the basic Ethernet header */
		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic to see if this is
		   our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test on first bring-up only */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	if (CHIP_PARITY_ENABLED(bp))
		bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		/* In E2 2-port mode the same ext PHY serves both paths,
		 * so the PHY common init runs for COMMON_CHIP or on E1x */
		if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
		    CHIP_IS_E1x(bp)) {
			u32 shmem_base[2], shmem2_base[2];
			shmem_base[0] = bp->common.shmem_base;
			shmem2_base[0] = bp->common.shmem2_base;
			if (CHIP_IS_E2(bp)) {
				shmem_base[1] =
					SHMEM2_RD(bp, other_shmem_base_addr);
				shmem2_base[1] =
					SHMEM2_RD(bp, other_shmem2_base_addr);
			}
			bnx2x_acquire_phy_lock(bp);
			bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
					      bp->common.chip_id);
			bnx2x_release_phy_lock(bp);
		}
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
5351
/* Per-port HW initialization, executed once per physical port (after
 * the common phase).  Initializes each block at the port stage and
 * programs port-specific thresholds and masks.  Always returns 0. */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	/* mask this port's NIG interrupt while initializing */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Re-enable master-enable for this PF - the common phase may
	 * have cleared it while disabling all functions on the path
	 * (see bnx2x_init_hw_common) */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			/* BRB pause thresholds depend on MF mode, port
			 * count and MTU */
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256, i.e. scale
					   with the MTU in 64-byte units */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56;	/* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
					  BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked (only attn bits 0-2 + GPIO used)
	 *  - MF mode: bit 3 is masked; bits 4-7 are in use as in SF,
	 *    bits 0-2 per vn */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}
			/* LLH classification type per port */
			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		/* route the SPIO5 fan-failure attention to this port */
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
5534
5535static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5536{
5537 int reg;
5538
5539 if (CHIP_IS_E1(bp))
5540 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5541 else
5542 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5543
5544 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5545}
5546
/* Clear an IGU status block as a PF (the 'true' argument selects the
 * PF flavour of the generic helper). */
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /* PF */);
}
5551
5552static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5553{
5554 u32 i, base = FUNC_ILT_BASE(func);
5555 for (i = base; i < base + ILT_PER_FUNC; i++)
5556 bnx2x_ilt_wr(bp, i, 0);
5557}
5558
/* Per-function HW initialization: ILT/CDU context mapping, IGU or HC
 * interrupt controller setup for this PF, and function-stage init of
 * all blocks.  Runs after the common and port phases.  Always returns 0. */
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	/* point the CDU ILT lines at this function's context memory */
	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);


	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if the driver does not
		 * use MSI-X (one interrupt line for the whole function) */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;

		/* Wait for clean-up of pending PCIe transactions
		 * before enabling master and the IGU - TODO confirm
		 * whether a flush-check belongs here instead of the
		 * fixed delay */
		msleep(20);

		/* master-enable must precede any register access that may
		 * trigger DMA from the chip */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		/* tell the storms which path this function belongs to */
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		/* IGU in normal mode - init per function */
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;

			/* Producer memory layout:
			 * In backward-compatible (BC) mode each SB has
			 * multiple producer segments; in normal mode one.
			 * The per-SB producers of this function's
			 * non-default status blocks are cleared first,
			 * then each SB is acked and cleared in the IGU.
			 * (NOTE(review): reconstructed summary of the
			 * stripped original comment - verify against the
			 * IGU documentation.)
			 */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default status block: clear producers and ack */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
					  "block during "
					  "function init (0x%x)!\n", val);

		/* Clear "false" parity errors in MSI-X table by
		 * reading and re-writing the HC main memory */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}
5845
/* Top-level HW init dispatcher.  The MCP's load_code selects how much
 * init this instance must perform: COMMON implies PORT implies FUNCTION,
 * hence the deliberate switch fallthroughs below.  Returns 0 on success
 * or the error code from the failed phase. */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */
		/* fall through - common load also does port init */
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */
		/* fall through - port load also does function init */
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		/* seed the driver pulse sequence from the current FW value */
		bp->fw_drv_pulse_wr_seq =
			       (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
5898
5899void bnx2x_free_mem(struct bnx2x *bp)
5900{
5901
5902#define BNX2X_PCI_FREE(x, y, size) \
5903 do { \
5904 if (x) { \
5905 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5906 x = NULL; \
5907 y = 0; \
5908 } \
5909 } while (0)
5910
5911#define BNX2X_FREE(x) \
5912 do { \
5913 if (x) { \
5914 kfree((void *)x); \
5915 x = NULL; \
5916 } \
5917 } while (0)
5918
5919 int i;
5920
5921
5922
5923 for_each_queue(bp, i) {
5924#ifdef BCM_CNIC
5925
5926 if (IS_FCOE_IDX(i)) {
5927 union host_hc_status_block *sb =
5928 &bnx2x_fp(bp, i, status_blk);
5929 memset(sb, 0, sizeof(union host_hc_status_block));
5930 bnx2x_fp(bp, i, status_blk_mapping) = 0;
5931 } else {
5932#endif
5933
5934 if (CHIP_IS_E2(bp))
5935 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5936 bnx2x_fp(bp, i, status_blk_mapping),
5937 sizeof(struct host_hc_status_block_e2));
5938 else
5939 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5940 bnx2x_fp(bp, i, status_blk_mapping),
5941 sizeof(struct host_hc_status_block_e1x));
5942#ifdef BCM_CNIC
5943 }
5944#endif
5945 }
5946
5947 for_each_rx_queue(bp, i) {
5948
5949
5950 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5951 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5952 bnx2x_fp(bp, i, rx_desc_mapping),
5953 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5954
5955 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5956 bnx2x_fp(bp, i, rx_comp_mapping),
5957 sizeof(struct eth_fast_path_rx_cqe) *
5958 NUM_RCQ_BD);
5959
5960
5961 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5962 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5963 bnx2x_fp(bp, i, rx_sge_mapping),
5964 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5965 }
5966
5967 for_each_tx_queue(bp, i) {
5968
5969
5970 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5971 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5972 bnx2x_fp(bp, i, tx_desc_mapping),
5973 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5974 }
5975
5976
5977 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5978 sizeof(struct host_sp_status_block));
5979
5980 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5981 sizeof(struct bnx2x_slowpath));
5982
5983 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5984 bp->context.size);
5985
5986 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5987
5988 BNX2X_FREE(bp->ilt->lines);
5989
5990#ifdef BCM_CNIC
5991 if (CHIP_IS_E2(bp))
5992 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5993 sizeof(struct host_hc_status_block_e2));
5994 else
5995 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5996 sizeof(struct host_hc_status_block_e1x));
5997
5998 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5999#endif
6000
6001 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6002
6003 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6004 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6005
6006#undef BNX2X_PCI_FREE
6007#undef BNX2X_KFREE
6008}
6009
6010static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
6011{
6012 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
6013 if (CHIP_IS_E2(bp)) {
6014 bnx2x_fp(bp, index, sb_index_values) =
6015 (__le16 *)status_blk.e2_sb->sb.index_values;
6016 bnx2x_fp(bp, index, sb_running_index) =
6017 (__le16 *)status_blk.e2_sb->sb.running_index;
6018 } else {
6019 bnx2x_fp(bp, index, sb_index_values) =
6020 (__le16 *)status_blk.e1x_sb->sb.index_values;
6021 bnx2x_fp(bp, index, sb_running_index) =
6022 (__le16 *)status_blk.e1x_sb->sb.running_index;
6023 }
6024}
6025
/* bnx2x_alloc_mem - allocate all host memory the device needs.
 *
 * Allocates per-queue status blocks, rx/tx rings, the CNIC status block,
 * default (slowpath) status block, slowpath area, CDU context, ILT lines
 * and the SPQ/EQ rings.  On any failure, everything allocated so far is
 * released via bnx2x_free_mem() and -ENOMEM is returned.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{
/* Zeroed DMA-coherent allocation; jumps to alloc_mem_err on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* Zeroed kmalloc; jumps to alloc_mem_err on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;

	/* fastpath */
	/* Common: status blocks */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;

#ifdef BCM_CNIC
		/* The FCoE queue shares the CNIC status block (allocated
		 * further below), so it is skipped here.
		 */
		if (!IS_FCOE_IDX(i)) {
#endif
			/* status blocks */
			if (CHIP_IS_E2(bp))
				BNX2X_PCI_ALLOC(sb->e2_sb,
					&bnx2x_fp(bp, i, status_blk_mapping),
					sizeof(struct host_hc_status_block_e2));
			else
				BNX2X_PCI_ALLOC(sb->e1x_sb,
					&bnx2x_fp(bp, i, status_blk_mapping),
					sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
		}
#endif
		set_sb_shortcuts(bp, i);
	}

	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}

	/* end of fastpath */
#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif


	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* one CDU context per L2 connection */
	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	/* unwind everything allocated so far */
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6145
6146
6147
6148
6149static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6150 int *state_p, int flags);
6151
6152int bnx2x_func_start(struct bnx2x *bp)
6153{
6154 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
6155
6156
6157 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6158 WAIT_RAMROD_COMMON);
6159}
6160
6161static int bnx2x_func_stop(struct bnx2x *bp)
6162{
6163 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6164
6165
6166 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6167 0, &(bp->state), WAIT_RAMROD_COMMON);
6168}
6169
6170
6171
6172
6173
6174
6175
6176
6177
6178
6179
/* bnx2x_set_mac_addr_gen - program or invalidate a single MAC CAM entry.
 *
 * Builds a one-entry mac_configuration_cmd in the slowpath buffer,
 * posts a SET_MAC ramrod and waits for its completion.
 *
 * @set:        non-zero to install the entry, zero to invalidate it
 * @mac:        6-byte MAC address
 * @cl_bit_vec: bit vector of client IDs this MAC is steered to
 * @cam_offset: CAM line to write
 * @is_bcast:   mark the entry as a broadcast address
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	/* pending flag must be visible before the ramrod is posted */
	bp->set_mac_pending = 1;
	smp_wmb();

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC: FW expects each 16-bit half byte-swapped */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion (it clears set_mac_pending) */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
6233
/* bnx2x_wait_ramrod - wait for *state_p to reach @state.
 *
 * Waits up to ~5 seconds (5000 x 1ms) for a ramrod completion to update
 * the state variable.  With WAIT_RAMROD_POLL the completion queues are
 * serviced manually on each iteration (used when interrupts are not
 * available); WAIT_RAMROD_COMMON selects the event queue instead of the
 * fastpath rings for that polling.
 *
 * Returns 0 on success, -EIO if the driver paniced, -EBUSY on timeout.
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is != 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		/* order read of *state_p against the completion's write */
		mb();
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6284
6285static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6286{
6287 if (CHIP_IS_E1H(bp))
6288 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6289 else if (CHIP_MODE_IS_4_PORT(bp))
6290 return BP_FUNC(bp) * 32 + rel_offset;
6291 else
6292 return BP_VN(bp) * 32 + rel_offset;
6293}
6294
6295
6296
6297
6298
6299
6300
6301
6302
6303
6304
/* NIG LLH CAM line assignment: line 0 carries the iSCSI MAC, line 1 the
 * primary ETH MAC.  LLH_CAM_MAX_PF_LINE bounds the per-PF lines
 * (taken from NIG_REG_LLH1_FUNC_MEM_SIZE).
 */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
};
6310
/* bnx2x_set_mac_in_nig - program or disable a MAC in the NIG LLH table.
 *
 * Only relevant in switch-independent multi-function mode, where the NIG
 * filters incoming traffic per MAC; no-op otherwise or when @index is
 * outside the per-PF line range.
 *
 * @set:      non-zero writes @dev_addr and enables the line, zero only
 *            disables it
 * @dev_addr: 6-byte MAC address
 * @index:    LLH CAM line (see LLH_CAM_* enum above)
 */
static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
				 int set,
				 unsigned char *dev_addr,
				 int index)
{
	u32 wb_data[2];
	u32 mem_offset, ena_offset, mem_index;
	/* NOTE(review): the boundary check uses '>' so index ==
	 * LLH_CAM_MAX_PF_LINE is accepted - confirm this is the intended
	 * inclusive upper bound.
	 */
	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	/* Each port keeps its LLH entries in two register files; lines
	 * beyond NIG_LLH_FUNC_MEM_MAX_OFFSET live in the MEM2 file.
	 */
	if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
		mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
				NIG_REG_LLH0_FUNC_MEM;
		ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				NIG_REG_LLH0_FUNC_MEM_ENABLE;
		mem_index = index;
	} else {
		mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
				NIG_REG_P0_LLH_FUNC_MEM2;
		ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
				NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
		mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
	}

	if (set) {
		/* LLH_FUNC_MEM is a u64 WB register: 8 bytes per line */
		mem_offset += 8*mem_index;

		/* low word: last 4 MAC bytes, high word: first 2 */
		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, mem_offset, wb_data, 2);
	}

	/* enable/disable the entry (4 bytes per enable bit line) */
	REG_WR(bp, ena_offset + 4*mem_index, set);

}
6358
6359void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6360{
6361 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6362 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6363
6364
6365 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6366 (1 << bp->fp->cl_id), cam_offset , 0);
6367
6368 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6369
6370 if (CHIP_IS_E1(bp)) {
6371
6372 static const u8 bcast[ETH_ALEN] = {
6373 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6374 };
6375 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6376 }
6377}
/* bnx2x_set_e1_mc_list - write the E1 multicast CAM table.
 *
 * Fills the slowpath mcast_config command with one SET entry per address
 * on the netdev's multicast list, invalidates any leftover entries from a
 * previously longer list, then posts the SET_MAC ramrod.  Does not wait
 * for completion (set_mac_pending is armed by the caller's flow).
 *
 * @offset: CAM offset at which this function's MC entries start
 */
static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac: FW expects each 16-bit half byte-swapped */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	/* invalidate entries left over from the previous, longer list;
	 * hdr.length still holds the previous command's entry count
	 */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	/* pending flag must be visible before the ramrod is posted */
	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
6437static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6438{
6439 int i;
6440 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6441 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6442 int ramrod_flags = WAIT_RAMROD_COMMON;
6443
6444 bp->set_mac_pending = 1;
6445 smp_wmb();
6446
6447 for (i = 0; i < config_cmd->hdr.length; i++)
6448 SET_FLAG(config_cmd->config_table[i].flags,
6449 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6450 T_ETH_MAC_COMMAND_INVALIDATE);
6451
6452 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6453 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6454
6455
6456 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6457 ramrod_flags);
6458
6459}
6460
6461#ifdef BCM_CNIC
6462
6463
6464
6465
6466
6467
6468
6469
6470
6471
/* bnx2x_set_iscsi_eth_mac_addr - install/remove the iSCSI L2 MAC.
 *
 * Programs bp->iscsi_mac into its dedicated CAM line (and the NIG LLH
 * iSCSI line), steered to the iSCSI L2 client.
 *
 * @set: non-zero to install, zero to remove
 * Returns 0 (always succeeds from the caller's perspective).
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	/* E1 keeps iSCSI at fixed per-port CAM offset + 2; later chips use
	 * the relative-line translation
	 */
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);

	bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);

	return 0;
}
6488
6489
6490
6491
6492
6493
6494
6495
6496
6497
6498
/* bnx2x_set_fip_eth_mac_addr - install/remove the FCoE FIP MAC.
 *
 * Programs bp->fip_mac into the CAM line reserved for FIP traffic,
 * steered to the FCoE client.
 *
 * @set: non-zero to install, zero to remove
 * Returns 0 (always succeeds from the caller's perspective).
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
			       cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);

	return 0;
}
6514
/* bnx2x_set_all_enode_macs - install/remove the All-ENode-MACs multicast
 * filter used by FIP discovery, steered to the FCoE client.
 *
 * @set: non-zero to install, zero to remove
 * Returns 0 (always succeeds from the caller's perspective).
 */
int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
			       bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);

	return 0;
}
6531#endif
6532
/* bnx2x_fill_cl_init_data - translate the driver's client-init parameters
 * into the FW client_init_ramrod_data layout.
 *
 * @params:   driver-side rx/tx queue and ramrod parameters
 * @activate: whether the client starts in the active state
 * @data:     FW ramrod data buffer to fill (zeroed first)
 */
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.is_fcoe_flg =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}
6620
/* Write the CDU validation words into an L2 connection context so the
 * HW accepts it as a valid ETH connection for the given cid.
 */
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}
6632
/* bnx2x_setup_fw_client - bring up a FW L2 client.
 *
 * Configures tx/rx interrupt coalescing for the client's status block
 * indices, validates the connection context, zeroes the client's storm
 * statistics, fills the client-init ramrod data, posts a CLIENT_SETUP
 * ramrod and waits for the state transition requested by the caller.
 *
 * @params:       driver-side client parameters (queues, ramrod state)
 * @activate:     whether the client starts active
 * @data:         DMA-visible ramrod data buffer
 * @data_mapping: DMA address of @data
 *
 * Returns the bnx2x_wait_ramrod() result (0 on success).
 */
static int bnx2x_setup_fw_client(struct bnx2x *bp,
				 struct bnx2x_client_init_params *params,
				 u8 activate,
				 struct client_init_ramrod_data *data,
				 dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC: tx coalescing configuration (hc_rate is in Hz) */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	/* HC: rx coalescing configuration */
	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * It's important to rely on the write ordering here: the ramrod
	 * data must be written to DMA memory before the ramrod itself is
	 * posted; mmiowb() flushes any pending writes.
	 */
	mmiowb();

	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
				 params->ramrod_params.index,
				 params->ramrod_params.pstate,
				 ramrod_flags);
	return rc;
}
6699
6700
6701
6702
6703
6704
6705
6706
6707
/* bnx2x_set_int_mode - choose the interrupt mode and the queue count.
 *
 * Honours the user-requested bp->int_mode: MSI or INTx force a single
 * ETH queue; the default path tries multi-queue MSI-X and degrades to
 * single-queue MSI (unless disabled) or INTx on failure.
 *
 * Returns the MSI-X enable result for the default path, 0 otherwise.
 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falls through: MSI also runs with a single queue */
	case INT_MODE_INTx:
		bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
					  "Multi requested but failed to "
					  "enable MSI-X (%d), "
					  "set number of queues to %d\n",
				   bp->num_queues,
				   1 + NONE_ETH_CONTEXT_USE);
			bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;

			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}
6752
6753
/* Number of ILT lines needed for this function's L2 connection IDs */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}
6758
/* bnx2x_ilt_set_info - lay out this function's ILT (internal lookup
 * table) address space across the CDU, QM, SRC and TM clients.
 *
 * Each client gets a contiguous run of lines starting at the function's
 * ILT base; SRC and TM are only populated when CNIC support is built in,
 * otherwise they are marked skip-init/skip-mem.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU - connection contexts (memory owned elsewhere: SKIP_MEM) */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM - queue manager, only if it has anything to map */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
							 QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
						 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC - searcher (CNIC only) */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM - timers (CNIC only) */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}
6855
6856int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6857 int is_leading)
6858{
6859 struct bnx2x_client_init_params params = { {0} };
6860 int rc;
6861
6862
6863 if (!IS_FCOE_FP(fp))
6864 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6865 IGU_INT_ENABLE, 0);
6866
6867 params.ramrod_params.pstate = &fp->state;
6868 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6869 params.ramrod_params.index = fp->index;
6870 params.ramrod_params.cid = fp->cid;
6871
6872#ifdef BCM_CNIC
6873 if (IS_FCOE_FP(fp))
6874 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6875
6876#endif
6877
6878 if (is_leading)
6879 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6880
6881 bnx2x_pf_rx_cl_prep(bp, fp, ¶ms.pause, ¶ms.rxq_params);
6882
6883 bnx2x_pf_tx_cl_prep(bp, fp, ¶ms.txq_params);
6884
6885 rc = bnx2x_setup_fw_client(bp, ¶ms, 1,
6886 bnx2x_sp(bp, client_init_data),
6887 bnx2x_sp_mapping(bp, client_init_data));
6888 return rc;
6889}
6890
/* bnx2x_stop_fw_client - tear down a FW L2 client.
 *
 * Runs the three-step shutdown sequence, waiting for each state
 * transition: HALT -> TERMINATE -> CFC DELETE.  With p->poll set the
 * waits service the completion rings manually (WAIT_RAMROD_POLL).
 *
 * Returns 0 on success or the first failing wait's error code.
 */
static int bnx2x_stop_fw_client(struct bnx2x *bp,
				struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
						  p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
						       p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}
6927
6928static int bnx2x_stop_client(struct bnx2x *bp, int index)
6929{
6930 struct bnx2x_client_ramrod_params client_stop = {0};
6931 struct bnx2x_fastpath *fp = &bp->fp[index];
6932
6933 client_stop.index = index;
6934 client_stop.cid = fp->cid;
6935 client_stop.cl_id = fp->cl_id;
6936 client_stop.pstate = &(fp->state);
6937 client_stop.poll = 0;
6938
6939 return bnx2x_stop_fw_client(bp, &client_stop);
6940}
6941
6942
6943static void bnx2x_reset_func(struct bnx2x *bp)
6944{
6945 int port = BP_PORT(bp);
6946 int func = BP_FUNC(bp);
6947 int i;
6948 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6949 (CHIP_IS_E2(bp) ?
6950 offsetof(struct hc_status_block_data_e2, common) :
6951 offsetof(struct hc_status_block_data_e1x, common));
6952 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6953 int pfid_offset = offsetof(struct pci_entity, pf_id);
6954
6955
6956 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6957 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6958 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6959 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6960
6961
6962 for_each_eth_queue(bp, i) {
6963 struct bnx2x_fastpath *fp = &bp->fp[i];
6964 REG_WR8(bp,
6965 BAR_CSTRORM_INTMEM +
6966 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6967 + pfunc_offset_fp + pfid_offset,
6968 HC_FUNCTION_DISABLED);
6969 }
6970
6971
6972 REG_WR8(bp,
6973 BAR_CSTRORM_INTMEM +
6974 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6975 pfunc_offset_sp + pfid_offset,
6976 HC_FUNCTION_DISABLED);
6977
6978
6979 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6980 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6981 0);
6982
6983
6984 if (bp->common.int_block == INT_BLOCK_HC) {
6985 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6986 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6987 } else {
6988 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6989 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6990 }
6991
6992#ifdef BCM_CNIC
6993
6994 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6995
6996
6997
6998
6999 for (i = 0; i < 200; i++) {
7000 msleep(10);
7001 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7002 break;
7003 }
7004#endif
7005
7006 bnx2x_clear_func_ilt(bp, func);
7007
7008
7009
7010
7011 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
7012 struct ilt_client_info ilt_cli;
7013
7014 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7015 ilt_cli.start = 0;
7016 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7017 ilt_cli.client_num = ILT_CLIENT_TM;
7018
7019 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7020 }
7021
7022
7023 if (CHIP_IS_E2(bp))
7024 bnx2x_pf_disable(bp);
7025
7026 bp->dmae_ready = 0;
7027}
7028
/* bnx2x_reset_port - quiesce this port's HW path.
 *
 * Masks the port's NIG and AEU interrupts, closes the NIG-to-BRB gates,
 * then gives in-flight packets time to drain and warns if the BRB still
 * holds blocks afterwards.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* let in-flight packets drain out of the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7054
7055static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7056{
7057 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7058 BP_ABS_FUNC(bp), reset_code);
7059
7060 switch (reset_code) {
7061 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7062 bnx2x_reset_port(bp);
7063 bnx2x_reset_func(bp);
7064 bnx2x_reset_common(bp);
7065 break;
7066
7067 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7068 bnx2x_reset_port(bp);
7069 bnx2x_reset_func(bp);
7070 break;
7071
7072 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7073 bnx2x_reset_func(bp);
7074 break;
7075
7076 default:
7077 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7078 break;
7079 }
7080}
7081
7082#ifdef BCM_CNIC
7083static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7084{
7085 if (bp->flags & FCOE_MACS_SET) {
7086 if (!IS_MF_SD(bp))
7087 bnx2x_set_fip_eth_mac_addr(bp, 0);
7088
7089 bnx2x_set_all_enode_macs(bp, 0);
7090
7091 bp->flags &= ~FCOE_MACS_SET;
7092 }
7093}
7094#endif
7095
7096void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7097{
7098 int port = BP_PORT(bp);
7099 u32 reset_code = 0;
7100 int i, cnt, rc;
7101
7102
7103 for_each_tx_queue(bp, i) {
7104 struct bnx2x_fastpath *fp = &bp->fp[i];
7105
7106 cnt = 1000;
7107 while (bnx2x_has_tx_work_unload(fp)) {
7108
7109 if (!cnt) {
7110 BNX2X_ERR("timeout waiting for queue[%d]\n",
7111 i);
7112#ifdef BNX2X_STOP_ON_ERROR
7113 bnx2x_panic();
7114 return -EBUSY;
7115#else
7116 break;
7117#endif
7118 }
7119 cnt--;
7120 msleep(1);
7121 }
7122 }
7123
7124 msleep(1);
7125
7126 if (CHIP_IS_E1(bp)) {
7127
7128
7129
7130 bnx2x_invlidate_e1_mc_list(bp);
7131 bnx2x_set_eth_mac(bp, 0);
7132
7133 } else {
7134 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7135
7136 bnx2x_set_eth_mac(bp, 0);
7137
7138 for (i = 0; i < MC_HASH_SIZE; i++)
7139 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7140 }
7141
7142#ifdef BCM_CNIC
7143 bnx2x_del_fcoe_eth_macs(bp);
7144#endif
7145
7146 if (unload_mode == UNLOAD_NORMAL)
7147 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7148
7149 else if (bp->flags & NO_WOL_FLAG)
7150 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7151
7152 else if (bp->wol) {
7153 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7154 u8 *mac_addr = bp->dev->dev_addr;
7155 u32 val;
7156
7157
7158 u8 entry = (BP_E1HVN(bp) + 1)*8;
7159
7160 val = (mac_addr[0] << 8) | mac_addr[1];
7161 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7162
7163 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7164 (mac_addr[4] << 8) | mac_addr[5];
7165 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7166
7167 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7168
7169 } else
7170 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7171
7172
7173
7174 for_each_queue(bp, i)
7175
7176 if (bnx2x_stop_client(bp, i))
7177#ifdef BNX2X_STOP_ON_ERROR
7178 return;
7179#else
7180 goto unload_error;
7181#endif
7182
7183 rc = bnx2x_func_stop(bp);
7184 if (rc) {
7185 BNX2X_ERR("Function stop failed!\n");
7186#ifdef BNX2X_STOP_ON_ERROR
7187 return;
7188#else
7189 goto unload_error;
7190#endif
7191 }
7192#ifndef BNX2X_STOP_ON_ERROR
7193unload_error:
7194#endif
7195 if (!BP_NOMCP(bp))
7196 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7197 else {
7198 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7199 "%d, %d, %d\n", BP_PATH(bp),
7200 load_count[BP_PATH(bp)][0],
7201 load_count[BP_PATH(bp)][1],
7202 load_count[BP_PATH(bp)][2]);
7203 load_count[BP_PATH(bp)][0]--;
7204 load_count[BP_PATH(bp)][1 + port]--;
7205 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7206 "%d, %d, %d\n", BP_PATH(bp),
7207 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7208 load_count[BP_PATH(bp)][2]);
7209 if (load_count[BP_PATH(bp)][0] == 0)
7210 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7211 else if (load_count[BP_PATH(bp)][1 + port] == 0)
7212 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7213 else
7214 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7215 }
7216
7217 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7218 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7219 bnx2x__link_reset(bp);
7220
7221
7222 bnx2x_netif_stop(bp, 1);
7223
7224
7225 bnx2x_free_irq(bp);
7226
7227
7228 bnx2x_reset_chip(bp, reset_code);
7229
7230
7231 if (!BP_NOMCP(bp))
7232 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7233
7234}
7235
7236void bnx2x_disable_close_the_gate(struct bnx2x *bp)
7237{
7238 u32 val;
7239
7240 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7241
7242 if (CHIP_IS_E1(bp)) {
7243 int port = BP_PORT(bp);
7244 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7245 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7246
7247 val = REG_RD(bp, addr);
7248 val &= ~(0x300);
7249 REG_WR(bp, addr, val);
7250 } else if (CHIP_IS_E1H(bp)) {
7251 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7252 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7253 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7254 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7255 }
7256}
7257
7258
/*
 * bnx2x_set_234_gates - close or open the HW gates (#2, #3 and #4) that
 * isolate the chip from host activity during process-kill recovery.
 * @close: true closes the gates, false reopens them.
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* PXP discard gates exist only on newer-than-E1 chips */
	if (!CHIP_IS_E1(bp)) {
		/* While closed, host doorbells are discarded */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* While closed, internal host writes are discarded */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* HC config: bit 0 set when open, cleared when closed (note the
	 * inverted sense relative to the discard bits above).
	 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	/* Flush posted MMIO writes before the caller proceeds */
	mmiowb();
}
7284
7285#define SHARED_MF_CLP_MAGIC 0x80000000
7286
/*
 * bnx2x_clp_reset_prep - set the CLP magic bit ahead of an MCP reset.
 * @magic_val: out; receives the previous state of the magic bit so
 *	       bnx2x_clp_reset_done() can restore it afterwards.
 */
static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Save the current magic bit and then force it on */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}
7294
7295
7296
7297
7298
7299
/*
 * bnx2x_clp_reset_done - restore the CLP magic bit saved by
 * bnx2x_clp_reset_prep().
 * @magic_val: the bit value previously returned by the prep call.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Clear the magic bit and OR back the saved state */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
7307
7308
7309
7310
7311
7312
7313
/*
 * bnx2x_reset_mcp_prep - prepare the MCP for a reset.
 * @magic_val: out; old CLP magic state for bnx2x_clp_reset_done().
 *
 * Sets the CLP magic bit (non-E1 chips) and zeroes the shmem validity
 * map so that MCP readiness can later be detected by the signature
 * reappearing (see bnx2x_reset_mcp_comp()).
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get the shmem base from HW rather than trusting cached state */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Invalidate the shmem signature (skip if no shmem is mapped) */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
7333
7334#define MCP_TIMEOUT 5000
7335#define MCP_ONE_TIMEOUT 100
7336
7337
7338
7339
7340
7341
7342static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7343{
7344
7345
7346 if (CHIP_REV_IS_SLOW(bp))
7347 msleep(MCP_ONE_TIMEOUT*10);
7348 else
7349 msleep(MCP_ONE_TIMEOUT);
7350}
7351
/*
 * bnx2x_reset_mcp_comp - wait for the MCP to come back after a reset.
 * @magic_val: saved CLP magic state to restore on exit.
 *
 * Polls the shmem validity map (cleared by bnx2x_reset_mcp_prep())
 * until the DEV_INFO|MB signature reappears or MCP_TIMEOUT expires.
 *
 * Returns 0 on success, -ENOTTY if shmem is unmapped or the signature
 * never shows up (MCP did not come up).
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* Give the MCP a head start before polling */
	msleep(100);

	/* Re-read the shmem base; zero means no shmem is available */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Poll up to MCP_TIMEOUT total, one interval at a time */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {

		/* The MCP rewrites the validity signature once it has
		 * finished re-initializing shmem.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* Both DEV_INFO and MB bits must be present */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Re-check in case the loop ended by timeout rather than break */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the CLP magic bit saved before the reset */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
7403
/*
 * bnx2x_pxp_prep - clear the PXP2 initialization-done indications so
 * the block can be re-initialized after the coming reset (no-op on E1).
 */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		/* Make sure the writes are posted before proceeding */
		mmiowb();
	}
}
7413
7414
7415
7416
7417
7418
7419
7420
7421
7422
7423
/*
 * bnx2x_process_kill_chip_reset - pulse a near-global chip reset as
 * part of the process-kill recovery flow.
 *
 * Builds "do not touch" masks of blocks that must stay alive across
 * the reset (HC, PXP, MDIO, EMACs, MISC core, RBCN, GRC, MCP) and then
 * writes the RESET_REG_*_CLEAR registers with everything else, followed
 * by RESET_REG_*_SET with the full masks.
 *
 * NOTE(review): per bnx2x convention the _CLEAR write asserts reset and
 * the _SET write de-asserts it - confirm against the register spec.
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	/* Blocks in reset register 1 that must survive the reset */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	/* Blocks in reset register 2 that must survive the reset */
	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	/* Register 2 is wider on post-E1 chips */
	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	/* Order the CLEAR writes before the SET writes */
	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
7462
/*
 * bnx2x_process_kill - forcefully reset the chip as part of parity
 * recovery ("process kill" flow).
 *
 * Waits up to ~1s for the PXP2 read path to go idle, closes the
 * isolation gates, prepares the MCP and PXP for reset, pulses the chip
 * reset, waits for the MCP to come back and reopens the gates.
 *
 * Returns 0 on success, -EAGAIN if the chip never went idle or the MCP
 * did not recover.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

	/* Poll until the "Tetris" buffer is empty and there are no
	 * outstanding PXP2 read requests (expected idle values below).
	 */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still"
			  " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 to isolate the chip from the host */
	bnx2x_set_234_gates(bp, true);

	/* Susceptibility window starts here; keep it as short as
	 * possible until the reset below completes.
	 */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Flush the posted write before waiting */
	mmiowb();

	/* Let the UNPREPARED indication settle in HW */
	msleep(1);

	/* Invalidate shmem and save the CLP magic into val */
	bnx2x_reset_mcp_prep(bp, &val);

	/* Clear PXP2 init-done flags ahead of the reset */
	bnx2x_pxp_prep(bp);
	barrier();

	/* Pulse the chip-wide reset (MCP and a few blocks excluded) */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Wait for the MCP to come back; restores the CLP magic too */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* Clear PXP2 indications again after the reset */
	bnx2x_pxp_prep(bp);

	/* Reopen the gates; host traffic may flow again */
	bnx2x_set_234_gates(bp, false);

	/* Caller is expected to re-initialize the chip afterwards */
	return 0;
}
7544
/*
 * bnx2x_leader_reset - perform the chip reset as the recovery leader.
 *
 * Runs the process-kill flow; on success marks the global reset as done
 * and moves the recovery state machine to DONE.  In all cases the
 * leadership flag is dropped and the leader HW lock is released.
 *
 * Returns 0 on success, -EAGAIN if the process kill failed.
 */
static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;

	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Signal other functions that the global reset has completed */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	/* Publish is_leader/recovery_state to other CPUs */
	smp_wmb();
	return rc;
}
7566
7567
7568
7569
7570
/*
 * bnx2x_parity_recover - parity-error recovery state machine.
 *
 * Called from the reset task.  Exactly one function becomes the
 * "leader" (via HW_LOCK_RESOURCE_RESERVED_08) and performs the global
 * chip reset once every function has unloaded; the others wait for the
 * reset to complete and then reload.  This function either completes a
 * state transition and loops, or returns (possibly after rescheduling
 * itself via bp->reset_task).
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to become the recovery leader */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Unload this function; bail out if the interface
			 * is already gone.
			 */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;

			/* Publish is_leader/recovery_state before other
			 * functions observe them.
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Other functions are still loaded:
					 * poll again in 100ms.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* All functions are down: the leader
					 * performs the reset and reloads.
					 * Any failure here is unrecoverable.
					 */
					if (bnx2x_leader_reset(bp) ||
					bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Detach and power down so
						 * the device cannot do more
						 * harm; leave "in progress"
						 * set to block other drivers.
						 */
						netif_device_detach(bp->dev);

						bnx2x_set_reset_in_progress(bp);

						bnx2x_set_power_state(bp,
								      PCI_D3hot);
						return;
					}

					return;
				}
			} else {
				if (!bnx2x_reset_is_done(bp)) {
					/* Reset not finished; the original
					 * leader may have died - try to take
					 * over leadership ourselves.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* We took leadership: re-run
						 * the WAIT state as leader.
						 */
						bp->is_leader = 1;
						break;
					}
					/* Still a follower: poll again in
					 * 100ms.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else {
					/* Leader finished the reset: reload
					 * and mark recovery done.
					 */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
7670
7671
7672
7673
/*
 * bnx2x_reset_task - delayed-work handler that resets the NIC.
 *
 * Either drives the parity recovery state machine (when a recovery is
 * pending) or performs a plain unload/load cycle.  Under
 * BNX2X_STOP_ON_ERROR the reset is skipped so debug state is preserved
 * for a dump.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	/* rtnl lock serializes against concurrent ifup/ifdown */
	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		/* Normal TX-timeout style recovery: full reload */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}
7700
7701
7702
7703
7704
7705
7706
7707static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7708{
7709 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7710 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7711 return base + (BP_ABS_FUNC(bp)) * stride;
7712}
7713
/*
 * bnx2x_undi_int_disable_e1h - disable interrupts while pretending to
 * be function 0 (E1H and newer).
 *
 * Writes 0 to the PGL pretend register so subsequent accesses are
 * issued as function 0, disables the HC, then restores the pretend
 * register to this function's own number.  Each pretend write is
 * followed by a read-back to make sure it has taken effect.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush any posted writes before changing the pretend value */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);
}
7735
/*
 * bnx2x_undi_int_disable - chip-appropriate interrupt disable for the
 * UNDI-unload path.
 *
 * E1 can disable the HC directly; later chips must first pretend to be
 * function 0 (see bnx2x_undi_int_disable_e1h()).
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp))
		bnx2x_undi_int_disable_e1h(bp);
	else
		bnx2x_int_disable(bp);
}
7743
/*
 * bnx2x_undi_unload - clean up after a pre-boot UNDI (PXE) driver.
 *
 * If the chip reports "unprepared" and the DORQ CID offset shows the
 * UNDI signature (0x7), the boot driver left the device active.  This
 * performs an unload handshake with the MCP on behalf of function 0
 * (and function 1 if needed), disables interrupts, blocks BRB input and
 * pulses a HW reset while preserving the port-swap straps, then
 * restores this function's identity.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* Save our pf_num so it can be restored at the end */
			int orig_pf_num = bp->pf_num;
			u32 swap_en;
			u32 swap_val;

			/* Clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* Try unload on behalf of function 0 first */
			bp->pf_num = 0;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* If UNLOAD_COMMON was not granted, complete func 0
			 * and retry as function 1.
			 */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
				/* Close the fn0 unload first */
				bnx2x_fw_command(bp,
						 DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* Repeat the handshake as function 1 */
				bp->pf_num = 1;
				bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* Done with the MCP handshake; drop the UNDI lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp);

			/* Close the input to the BRB so no more packets
			 * enter while we reset.
			 */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);

			/* Block non-MCP traffic too */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* Mask AEU attentions for this port */
			REG_WR(bp,
			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					      MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* Save NIG port-swap configuration before the reset
			 * so it can be re-applied afterwards.
			 */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

			/* Reset the device (masks chosen to spare the MCP
			 * and a few always-on blocks).
			 */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* Take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* Send UNLOAD_DONE for the function we ended up as */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* Restore our own function number and fw sequence */
			bp->pf_num = orig_pf_num;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
7842
/*
 * bnx2x_get_common_hwinfo - read chip-wide HW information at probe time.
 *
 * Fills bp->common (chip id, port mode, flash size, shmem bases,
 * bootcode version), derives pfid/base_fw_ndsb, detects single-port
 * devices and WoL capability, and prints the board part number.
 * Sets NO_MCP_FLAG and returns early if shmem is absent.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Assemble the 32-bit chip id:
	 * num[31:16] | rev[15:12] | metal[11:4] | bond[3:0]
	 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Doorbell area size per function */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		/* Port mode: the override register wins if its valid bit
		 * (bit 0) is set, otherwise read the strap register.
		 */
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		/* pfid derivation depends on the port mode */
		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/* First non-default status block index for this PF; the per-port
	 * stride differs between E1x and E2.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Detect single-port boards (chip id bit or strap register) */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	/* NVRAM flash size from the NVM config register */
	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	/* shmem/shmem2 bases; shmem2 register depends on the PCI path */
	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* No shmem means no MCP: nothing more to read */
	if (!bp->common.shmem_base) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Sanity-check the shmem validity signature */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	/* Pre-emphasis override feature flag from shared config */
	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	/* Bootcode version; warn when older than the driver requires */
	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* Old bootcode: keep going, but ask for an upgrade */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	/* WoL capability: only vn0 can read the PM capability; other
	 * vnics get NO_WOL_FLAG unconditionally.
	 */
	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* Board part number: four consecutive 32-bit words from shmem */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
7980
7981#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7982#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7983
/*
 * bnx2x_get_igu_cam_info - discover this PF's IGU status-block layout.
 *
 * In backward-compatible interrupt mode the layout is computed from
 * fixed per-port/per-vn strides; otherwise the IGU mapping CAM is
 * scanned for entries belonging to this PF.  Sets igu_base_sb,
 * igu_sb_cnt and igu_dsb_id; logs an error if no SBs were found.
 */
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int vn = BP_E1HVN(bp);
	int igu_sb_id;
	u32 val;
	u8 fid;

	bp->igu_base_sb = 0xff;
	bp->igu_sb_cnt = 0;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		/* BC mode: fixed layout, no CAM scan needed */
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));

		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return;
	}

	/* IGU in normal mode - read CAM for this PF's entries */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block for this PF */
				bp->igu_dsb_id = igu_sb_id;
			else {
				/* first non-default SB becomes the base */
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				bp->igu_sb_cnt++;
			}
		}
	}
	/* Never claim more SBs than the L2 CIDs actually require */
	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
			       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	if (bp->igu_sb_cnt == 0)
		BNX2X_ERR("CAM configuration error\n");
}
8032
/*
 * bnx2x_link_settings_supported - derive the supported link modes.
 * @switch_cfg: SWITCH_CFG_1G or SWITCH_CFG_10G; selects where the PHY
 *		address is read from.
 *
 * Fills bp->port.supported[0..1] from the probed PHYs (the mapping of
 * external PHYs to config indices honors the phy-swap NVRAM setting),
 * reads the PHY MDIO address and masks out speeds that the NVRAM
 * speed_cap_mask does not allow.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregate supported capabilities per link configuration */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		/* Two external PHYs: honor the NVRAM phy-swap setting */
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config."
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
			return;
	}

	/* Read the PHY MDIO address from the NIG strap registers */
	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}

	/* Mask out speeds not permitted by the NVRAM capability mask */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
			       PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
			       PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
8131
/*
 * bnx2x_link_settings_requested - translate NVRAM link configuration
 * into requested link parameters.
 *
 * For each configuration index, decodes the NVRAM link_config speed
 * selection into bp->link_params.req_line_speed/req_duplex and
 * bp->port.advertising, verifying each request against the supported
 * mask computed by bnx2x_link_settings_supported().  An unsupported
 * fixed speed is reported as an NVRAM config error and aborts the scan;
 * an unknown selection falls back to autoneg.  Also derives the
 * requested flow control, downgrading AUTO to NONE when autoneg is not
 * supported.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	/* One config per external PHY pair; two only with dual ext PHYs */
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* No autoneg: force 10G as the default */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
								SPEED_100;
				bp->link_params.req_duplex[idx] =
								DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					  bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			/* Unknown selection: fall back to autoneg with
			 * everything supported advertised.
			 */
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
					  link_config);
				bp->link_params.req_line_speed[idx] =
							SPEED_AUTO_NEG;
				bp->port.advertising[idx] =
						bp->port.supported[idx];
			break;
		}

		/* Flow-control request; AUTO is meaningless without
		 * autoneg support, so downgrade it to NONE.
		 */
		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
						BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
8325
8326static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8327{
8328 mac_hi = cpu_to_be16(mac_hi);
8329 mac_lo = cpu_to_be32(mac_lo);
8330 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8331 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8332}
8333
/* Read the per-port HW/feature configuration from shmem into
 * bp->link_params and bp->port, probe the PHYs, derive the requested
 * link settings and pick the MDIO PHY address.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);

	/* WoL is enabled only when the board supports it (no NO_WOL_FLAG)
	 * AND the port feature config asks for it
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/* MDIO address: the internal PHY address for a direct connection,
	 * otherwise the external PHY address from shmem (unless the
	 * external PHY is marked failed / not connected)
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	/* The HW lock is unconditionally required in multi-function mode;
	 * otherwise the link layer decides based on the shmem bases
	 */
	if (IS_MF(bp))
		bp->port.need_hw_lock = 1;
	else
		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
					bp->common.shmem_base,
					bp->common.shmem2_base);
}
8407
/* Derive the primary MAC address (and, with CNIC, the iSCSI/FIP MACs)
 * for this function: random when no MCP is present, from the MF config
 * in multi-function mode, otherwise from the per-port shmem area.
 */
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);

	if (BP_NOMCP(bp)) {
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
	} else if (IS_MF(bp)) {
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		/* accept the MF-configured MAC only when both halves are
		 * programmed (i.e. neither is the "default" sentinel) */
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		/* iSCSI MAC from the extended function config, only when
		 * iSCSI offload is enabled for this function (SI mode) */
		if (IS_MF_SI(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						iscsi_mac_addr_lower);
				bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
			}
		}
#endif
	} else {
		/* single-function: MACs come from the port's shmem area */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_lower);
		bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
	}

	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* FIP (FCoE) MAC, non-E1x chips only: reuse the primary MAC in
	 * SD mode, otherwise reuse the iSCSI MAC */
	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_SD(bp))
			memcpy(bp->fip_mac, bp->dev->dev_addr,
			       sizeof(bp->fip_mac));
		else
			memcpy(bp->fip_mac, bp->iscsi_mac,
			       sizeof(bp->fip_mac));
	}
#endif
}
8467
8468static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8469{
8470 int func = BP_ABS_FUNC(bp);
8471 int vn, port;
8472 u32 val = 0;
8473 int rc = 0;
8474
8475 bnx2x_get_common_hwinfo(bp);
8476
8477 if (CHIP_IS_E1x(bp)) {
8478 bp->common.int_block = INT_BLOCK_HC;
8479
8480 bp->igu_dsb_id = DEF_SB_IGU_ID;
8481 bp->igu_base_sb = 0;
8482 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8483 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8484 } else {
8485 bp->common.int_block = INT_BLOCK_IGU;
8486 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8487 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8488 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8489 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8490 } else
8491 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8492
8493 bnx2x_get_igu_cam_info(bp);
8494
8495 }
8496 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8497 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8498
8499
8500
8501
8502
8503 bp->mf_ov = 0;
8504 bp->mf_mode = 0;
8505 vn = BP_E1HVN(bp);
8506 port = BP_PORT(bp);
8507
8508 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8509 DP(NETIF_MSG_PROBE,
8510 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8511 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8512 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8513 if (SHMEM2_HAS(bp, mf_cfg_addr))
8514 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8515 else
8516 bp->common.mf_cfg_base = bp->common.shmem_base +
8517 offsetof(struct shmem_region, func_mb) +
8518 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8519
8520
8521
8522
8523
8524
8525
8526
8527 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8528
8529 val = SHMEM_RD(bp,
8530 dev_info.shared_feature_config.config);
8531 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8532
8533 switch (val) {
8534 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8535 val = MF_CFG_RD(bp, func_mf_config[func].
8536 mac_upper);
8537
8538 if (val != 0xffff) {
8539 bp->mf_mode = MULTI_FUNCTION_SI;
8540 bp->mf_config[vn] = MF_CFG_RD(bp,
8541 func_mf_config[func].config);
8542 } else
8543 DP(NETIF_MSG_PROBE, "illegal MAC "
8544 "address for SI\n");
8545 break;
8546 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8547
8548 val = MF_CFG_RD(bp,
8549 func_mf_config[FUNC_0].e1hov_tag);
8550 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8551
8552 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8553 bp->mf_mode = MULTI_FUNCTION_SD;
8554 bp->mf_config[vn] = MF_CFG_RD(bp,
8555 func_mf_config[func].config);
8556 } else
8557 DP(NETIF_MSG_PROBE, "illegal OV for "
8558 "SD\n");
8559 break;
8560 default:
8561
8562 bp->mf_config[vn] = 0;
8563 DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
8564 val);
8565 }
8566 }
8567
8568 BNX2X_DEV_INFO("%s function mode\n",
8569 IS_MF(bp) ? "multi" : "single");
8570
8571 switch (bp->mf_mode) {
8572 case MULTI_FUNCTION_SD:
8573 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8574 FUNC_MF_CFG_E1HOV_TAG_MASK;
8575 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8576 bp->mf_ov = val;
8577 BNX2X_DEV_INFO("MF OV for func %d is %d"
8578 " (0x%04x)\n", func,
8579 bp->mf_ov, bp->mf_ov);
8580 } else {
8581 BNX2X_ERR("No valid MF OV for func %d,"
8582 " aborting\n", func);
8583 rc = -EPERM;
8584 }
8585 break;
8586 case MULTI_FUNCTION_SI:
8587 BNX2X_DEV_INFO("func %d is in MF "
8588 "switch-independent mode\n", func);
8589 break;
8590 default:
8591 if (vn) {
8592 BNX2X_ERR("VN %d in single function mode,"
8593 " aborting\n", vn);
8594 rc = -EPERM;
8595 }
8596 break;
8597 }
8598
8599 }
8600
8601
8602 if (CHIP_IS_E1x(bp) && IS_MF(bp))
8603 bp->igu_sb_cnt /= E1HVN_MAX;
8604
8605
8606
8607
8608
8609#define MAX_L2_CLIENTS 16
8610 if (CHIP_IS_E2(bp))
8611 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8612 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8613
8614 if (!BP_NOMCP(bp)) {
8615 bnx2x_get_port_hwinfo(bp);
8616
8617 bp->fw_seq =
8618 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8619 DRV_MSG_SEQ_NUMBER_MASK);
8620 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8621 }
8622
8623
8624 bnx2x_get_mac_hwinfo(bp);
8625
8626 return rc;
8627}
8628
/* Extract a vendor-specific firmware version string from the PCI VPD
 * into bp->fw_ver (only for boards whose VPD manufacturer ID matches
 * PCI_VENDOR_ID_DELL). Leaves bp->fw_ver zeroed when nothing is found.
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* a short (or negative, i.e. failed) read means no usable VPD */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* locate the read-only VPD section */
	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	/* end of the RO section, checked below against the buffer size */
	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	/* find the manufacturer ID keyword and require the exact length */
	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* compare against the Dell vendor ID in both hex-digit cases */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		/* vendor-specific field 0 holds the FW version string */
		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* copy at most 31 chars; fw_ver was zeroed above,
			 * so appending ' ' still leaves it NUL-terminated */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
8692
/* One-time software initialization of the bnx2x instance: locks and
 * work items, HW info discovery, module-parameter derived settings,
 * feature flags and the periodic timer.
 *
 * Returns 0, or a negative errno propagated from bnx2x_get_hwinfo() /
 * bnx2x_alloc_mem_bp().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb();	/* Ensure intr_sem is visible before any IRQ runs */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* clean up any state a pre-OS (UNDI) driver may have left behind */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags according to the disable_tpa module parameter */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* default interrupt coalescing ticks, rounded to BNX2X_BTR units */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	/* slow (emulation/FPGA) chips get a much longer timer period */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
	bnx2x_dcbx_init_params(bp);

	return rc;
}
8776
8777
8778
8779
8780
8781
8782
/* net_device_ops .ndo_open: power the device up and, if a previous
 * fatal error left the recovery flow incomplete, try to become the
 * recovery leader and finish it before loading the NIC.
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset the MCP mailbox sequence while recovery
			 * is in progress */
			bp->fw_seq = 0;

			/* If we are the first function to load and can take
			 * the leadership lock (RESERVED_08), run the leader
			 * reset; on success continue with a normal load
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* recovery failed: power back down and tell the
			 * user to retry */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
8827
8828
8829static int bnx2x_close(struct net_device *dev)
8830{
8831 struct bnx2x *bp = netdev_priv(dev);
8832
8833
8834 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8835 bnx2x_set_power_state(bp, PCI_D3hot);
8836
8837 return 0;
8838}
8839
8840
8841void bnx2x_set_rx_mode(struct net_device *dev)
8842{
8843 struct bnx2x *bp = netdev_priv(dev);
8844 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8845 int port = BP_PORT(bp);
8846
8847 if (bp->state != BNX2X_STATE_OPEN) {
8848 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8849 return;
8850 }
8851
8852 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8853
8854 if (dev->flags & IFF_PROMISC)
8855 rx_mode = BNX2X_RX_MODE_PROMISC;
8856 else if ((dev->flags & IFF_ALLMULTI) ||
8857 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8858 CHIP_IS_E1(bp)))
8859 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8860 else {
8861 if (CHIP_IS_E1(bp)) {
8862
8863
8864
8865
8866
8867 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8868 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8869 BNX2X_MAX_MULTICAST*(1 + port));
8870
8871 bnx2x_set_e1_mc_list(bp, offset);
8872 } else {
8873
8874 struct netdev_hw_addr *ha;
8875 u32 mc_filter[MC_HASH_SIZE];
8876 u32 crc, bit, regidx;
8877 int i;
8878
8879 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8880
8881 netdev_for_each_mc_addr(ha, dev) {
8882 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8883 bnx2x_mc_addr(ha));
8884
8885 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8886 ETH_ALEN);
8887 bit = (crc >> 24) & 0xff;
8888 regidx = bit >> 5;
8889 bit &= 0x1f;
8890 mc_filter[regidx] |= (1 << bit);
8891 }
8892
8893 for (i = 0; i < MC_HASH_SIZE; i++)
8894 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8895 mc_filter[i]);
8896 }
8897 }
8898
8899 bp->rx_mode = rx_mode;
8900 bnx2x_set_storm_rx_mode(bp);
8901}
8902
8903
/* mdio_if_info .mdio_read callback: read a PHY register through the
 * driver's PHY access layer under the PHY lock. Returns the 16-bit
 * register value on success or a non-zero error code from
 * bnx2x_phy_read().
 */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects a different devad when clause-22 is emulated */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	/* on success the read value itself is the return value */
	if (!rc)
		rc = value;
	return rc;
}
8926
8927
/* mdio_if_info .mdio_write callback: write a PHY register through the
 * driver's PHY access layer under the PHY lock. Returns the status of
 * bnx2x_phy_write().
 */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects a different devad when clause-22 is emulated */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}
8945
8946
/* net_device_ops .ndo_do_ioctl: forward MII ioctls to the generic
 * mdio helper; only valid while the interface is running.
 */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
8960
8961#ifdef CONFIG_NET_POLL_CONTROLLER
8962static void poll_bnx2x(struct net_device *dev)
8963{
8964 struct bnx2x *bp = netdev_priv(dev);
8965
8966 disable_irq(bp->pdev->irq);
8967 bnx2x_interrupt(bp->pdev->irq, dev);
8968 enable_irq(bp->pdev->irq);
8969}
8970#endif
8971
/* net_device callbacks for the bnx2x driver (handlers defined above) */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
8987
/* Probe-time PCI/netdev setup: enable the device, map BAR0 (registers)
 * and BAR2 (doorbells), configure the DMA masks, set the netdev feature
 * flags and fill in the mdio info. On failure everything acquired so
 * far is released via the goto-cleanup chain; returns 0 or a negative
 * errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (register space) must be a memory resource */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* BAR2 (doorbell space) must be a memory resource too */
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* request regions only on the first enable of this device */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA (sets USING_DAC_FLAG), fall back to 32-bit */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map only as much doorbell space as the chip needs */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean the PXP2 indirect address registers for this port */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

#ifdef BCM_DCB
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* prtad is set for real by bnx2x_get_port_hwinfo() later */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
9158
9159static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9160 int *width, int *speed)
9161{
9162 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9163
9164 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9165
9166
9167 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9168}
9169
9170static int bnx2x_check_firmware(struct bnx2x *bp)
9171{
9172 const struct firmware *firmware = bp->firmware;
9173 struct bnx2x_fw_file_hdr *fw_hdr;
9174 struct bnx2x_fw_file_section *sections;
9175 u32 offset, len, num_ops;
9176 u16 *ops_offsets;
9177 int i;
9178 const u8 *fw_ver;
9179
9180 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9181 return -EINVAL;
9182
9183 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9184 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9185
9186
9187
9188 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9189 offset = be32_to_cpu(sections[i].offset);
9190 len = be32_to_cpu(sections[i].len);
9191 if (offset + len > firmware->size) {
9192 dev_err(&bp->pdev->dev,
9193 "Section %d length is out of bounds\n", i);
9194 return -EINVAL;
9195 }
9196 }
9197
9198
9199 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9200 ops_offsets = (u16 *)(firmware->data + offset);
9201 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9202
9203 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9204 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
9205 dev_err(&bp->pdev->dev,
9206 "Section offset %d is out of bounds\n", i);
9207 return -EINVAL;
9208 }
9209 }
9210
9211
9212 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9213 fw_ver = firmware->data + offset;
9214 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9215 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9216 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9217 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
9218 dev_err(&bp->pdev->dev,
9219 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
9220 fw_ver[0], fw_ver[1], fw_ver[2],
9221 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9222 BCM_5710_FW_MINOR_VERSION,
9223 BCM_5710_FW_REVISION_VERSION,
9224 BCM_5710_FW_ENGINEERING_VERSION);
9225 return -EINVAL;
9226 }
9227
9228 return 0;
9229}
9230
9231static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9232{
9233 const __be32 *source = (const __be32 *)_source;
9234 u32 *target = (u32 *)_target;
9235 u32 i;
9236
9237 for (i = 0; i < n/4; i++)
9238 target[i] = be32_to_cpu(source[i]);
9239}
9240
9241
9242
9243
9244
9245static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
9246{
9247 const __be32 *source = (const __be32 *)_source;
9248 struct raw_op *target = (struct raw_op *)_target;
9249 u32 i, j, tmp;
9250
9251 for (i = 0, j = 0; i < n/8; i++, j += 2) {
9252 tmp = be32_to_cpu(source[j]);
9253 target[i].op = (tmp >> 24) & 0xff;
9254 target[i].offset = tmp & 0xffffff;
9255 target[i].raw_data = be32_to_cpu(source[j + 1]);
9256 }
9257}
9258
9259
9260
9261
9262
9263static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9264{
9265 const __be32 *source = (const __be32 *)_source;
9266 struct iro *target = (struct iro *)_target;
9267 u32 i, j, tmp;
9268
9269 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9270 target[i].base = be32_to_cpu(source[j]);
9271 j++;
9272 tmp = be32_to_cpu(source[j]);
9273 target[i].m1 = (tmp >> 16) & 0xffff;
9274 target[i].m2 = tmp & 0xffff;
9275 j++;
9276 tmp = be32_to_cpu(source[j]);
9277 target[i].m3 = (tmp >> 16) & 0xffff;
9278 target[i].size = tmp & 0xffff;
9279 j++;
9280 }
9281}
9282
9283static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9284{
9285 const __be16 *source = (const __be16 *)_source;
9286 u16 *target = (u16 *)_target;
9287 u32 i;
9288
9289 for (i = 0; i < n/2; i++)
9290 target[i] = be16_to_cpu(source[i]);
9291}
9292
/* Allocate bp->arr (length taken from the matching firmware-header
 * section) and fill it by running "func" over that section of the
 * firmware blob; jumps to "lbl" on allocation failure. Expects fw_hdr
 * and bp in scope at the expansion site.
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
9304
/* Request the chip-specific firmware file, validate it and unpack the
 * init data/ops/offsets and IRO tables (converting from big-endian)
 * into freshly allocated buffers hung off bp. Returns 0 or a negative
 * errno; on failure everything allocated so far is freed via the label
 * chain and the firmware is released.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* pick the firmware image matching the chip family */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORM firmware: int tables and pram are referenced in place
	 * inside the firmware blob (no copy) */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	/* IRO tables */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
9382
/* Compute the QM CID count: the L2 fastpath CIDs (plus the CNIC CIDs
 * when BCM_CNIC is built in), rounded up to the QM granularity.
 */
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}
9392
/* PCI probe entry point: size the CID count for the board type,
 * allocate the multiqueue netdev, initialize the PCI/netdev and driver
 * state, register the netdev and print the board summary. Returns 0 or
 * a negative errno; partial setup is undone on failure.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	/* fastpath status-block count depends on the chip generation */
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

#ifdef BCM_CNIC
	/* disable FCOE L2 queue for E1x */
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

#endif

	/* Configure the interrupt mode (MSI-X/MSI/INTx selection) */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width,
	       ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
	       "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
9509
/* PCI remove entry point: tear down everything bnx2x_init_one() set
 * up, in reverse order.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

#ifdef BCM_CNIC
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Power on: the chip must be in D0 before we touch it */
	bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure the RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
9563
/*
 * bnx2x_eeh_nic_unload - minimal NIC teardown after a fatal PCI error.
 *
 * Unlike the normal unload path, no device access beyond stopping the
 * netif is attempted: only host-side resources (timer, IRQs, skbs,
 * SGE pages, driver memory) are released, and the state machine is
 * moved ERROR -> CLOSED.
 *
 * Always returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs and SGE ranges for every Rx queue */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
9594
/*
 * bnx2x_eeh_recover - re-read shared-memory state after an EEH reset.
 *
 * Re-discovers the shmem base from MISC_REG_SHARED_MEM_ADDR, validates
 * the MCP signature words and reloads the firmware mailbox sequence
 * number.  If the shmem base is zero or outside [0xA0000, 0xC0000) the
 * MCP is considered inactive and NO_MCP_FLAG is set.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* Anything outside the expected shmem window means no MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Both DEV_INFO and MB validity bits must be set for this port */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* Resync the driver/MCP mailbox sequence number */
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
9625
9626
9627
9628
9629
9630
9631
9632
9633
9634static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9635 pci_channel_state_t state)
9636{
9637 struct net_device *dev = pci_get_drvdata(pdev);
9638 struct bnx2x *bp = netdev_priv(dev);
9639
9640 rtnl_lock();
9641
9642 netif_device_detach(dev);
9643
9644 if (state == pci_channel_io_perm_failure) {
9645 rtnl_unlock();
9646 return PCI_ERS_RESULT_DISCONNECT;
9647 }
9648
9649 if (netif_running(dev))
9650 bnx2x_eeh_nic_unload(bp);
9651
9652 pci_disable_device(pdev);
9653
9654 rtnl_unlock();
9655
9656
9657 return PCI_ERS_RESULT_NEED_RESET;
9658}
9659
9660
9661
9662
9663
9664
9665
9666static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9667{
9668 struct net_device *dev = pci_get_drvdata(pdev);
9669 struct bnx2x *bp = netdev_priv(dev);
9670
9671 rtnl_lock();
9672
9673 if (pci_enable_device(pdev)) {
9674 dev_err(&pdev->dev,
9675 "Cannot re-enable PCI device after reset\n");
9676 rtnl_unlock();
9677 return PCI_ERS_RESULT_DISCONNECT;
9678 }
9679
9680 pci_set_master(pdev);
9681 pci_restore_state(pdev);
9682
9683 if (netif_running(dev))
9684 bnx2x_set_power_state(bp, PCI_D0);
9685
9686 rtnl_unlock();
9687
9688 return PCI_ERS_RESULT_RECOVERED;
9689}
9690
9691
9692
9693
9694
9695
9696
9697
9698static void bnx2x_io_resume(struct pci_dev *pdev)
9699{
9700 struct net_device *dev = pci_get_drvdata(pdev);
9701 struct bnx2x *bp = netdev_priv(dev);
9702
9703 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9704 printk(KERN_ERR "Handling parity error recovery. "
9705 "Try again later\n");
9706 return;
9707 }
9708
9709 rtnl_lock();
9710
9711 bnx2x_eeh_recover(bp);
9712
9713 if (netif_running(dev))
9714 bnx2x_nic_load(bp, LOAD_NORMAL);
9715
9716 netif_device_attach(dev);
9717
9718 rtnl_unlock();
9719}
9720
/* PCI error recovery (AER/EEH) callbacks */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
9726
/* PCI driver registration table: probe/remove, PM hooks and the
 * error-recovery handlers above */
static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
9736
9737static int __init bnx2x_init(void)
9738{
9739 int ret;
9740
9741 pr_info("%s", version);
9742
9743 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9744 if (bnx2x_wq == NULL) {
9745 pr_err("Cannot create workqueue\n");
9746 return -ENOMEM;
9747 }
9748
9749 ret = pci_register_driver(&bnx2x_pci_driver);
9750 if (ret) {
9751 pr_err("Cannot register driver\n");
9752 destroy_workqueue(bnx2x_wq);
9753 }
9754 return ret;
9755}
9756
/*
 * bnx2x_cleanup - module exit point.
 *
 * Unregisters the PCI driver first (so no device can queue new work),
 * then destroys the slow-path workqueue.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
9763
9764module_init(bnx2x_init);
9765module_exit(bnx2x_cleanup);
9766
9767#ifdef BCM_CNIC
9768
9769
/*
 * bnx2x_cnic_sp_post - move pending CNIC kwqes onto the slow-path queue.
 *
 * @count: number of slow-path completions to credit back to
 *         cnic_spq_pending before draining.
 *
 * Under spq_lock, copies as many queued CNIC kwqes from the cnic_kwq
 * ring onto the hardware SPQ as the credits allow: NONE/ETH type
 * entries consume spq_left, ISCSI/FCOE entries are bounded by
 * max_kwqe_pending via cnic_spq_pending.  Unknown types panic the
 * driver.  Finishes by publishing the new SPQ producer.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set context validation for the iSCSI L2 client before
		 * sending its CLIENT_SETUP ramrod */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* NONE/ETH SPEs are limited by spq_left; L5 (iSCSI/FCoE)
		 * SPEs by cnic_eth_dev.max_kwqe_pending.  Stop draining
		 * when the relevant credit is exhausted. */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		/* Copy the kwqe into the next SPQ slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the consumer, wrapping at the end of the ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
9839
/*
 * bnx2x_cnic_sp_queue - queue CNIC kwqes for later slow-path posting.
 *
 * @kwqes: array of kwqe pointers handed over by the CNIC driver
 * @count: number of entries in @kwqes
 *
 * Copies kwqes into the driver-internal cnic_kwq ring (capacity
 * MAX_SP_DESC_CNT) under spq_lock, then kicks bnx2x_cnic_sp_post() if
 * the SPQ has room.  Returns the number of kwqes actually queued
 * (may be less than @count when the ring fills), or -EIO when the
 * driver has paniced.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full - return a short count to the caller */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping at the end of the ring */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* Drain immediately if the SPQ still has credit for L5 entries */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
9882
9883static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9884{
9885 struct cnic_ops *c_ops;
9886 int rc = 0;
9887
9888 mutex_lock(&bp->cnic_mutex);
9889 c_ops = bp->cnic_ops;
9890 if (c_ops)
9891 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9892 mutex_unlock(&bp->cnic_mutex);
9893
9894 return rc;
9895}
9896
9897static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9898{
9899 struct cnic_ops *c_ops;
9900 int rc = 0;
9901
9902 rcu_read_lock();
9903 c_ops = rcu_dereference(bp->cnic_ops);
9904 if (c_ops)
9905 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9906 rcu_read_unlock();
9907
9908 return rc;
9909}
9910
9911
9912
9913
9914int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9915{
9916 struct cnic_ctl_info ctl = {0};
9917
9918 ctl.cmd = cmd;
9919
9920 return bnx2x_cnic_ctl_send(bp, &ctl);
9921}
9922
9923static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9924{
9925 struct cnic_ctl_info ctl;
9926
9927
9928 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9929 ctl.data.comp.cid = cid;
9930
9931 bnx2x_cnic_ctl_send_bh(bp, &ctl);
9932 bnx2x_cnic_sp_post(bp, 0);
9933}
9934
/*
 * bnx2x_drv_ctl - control entry point exposed to the CNIC driver.
 *
 * Dispatches CNIC control requests: ILT context-table writes, L5/L2
 * SPQ credit returns, and starting/stopping the CNIC L2 (iSCSI) ring.
 * Returns 0 on success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	/* Write one ILT entry at the given offset */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* Return L5 SPQ credits; a zero-count post just drains kwqes */
	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Clear FCoE FIP/EthNode MACs before (re)configuring */
		bnx2x_del_fcoe_eth_macs(bp);

		/* Set the iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Open the client's Rx filters: unicast, broadcast and
		 * all multicast */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Close the client's Rx filters first... */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* ...then remove the iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	/* Return L2 SPQ credits to spq_left, with full barriers around
	 * the atomic add */
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
10015
/*
 * bnx2x_setup_cnic_irq_info - describe this device's IRQ/status-block
 * layout to the CNIC driver.
 *
 * irq_arr[0] carries the CNIC status block (using MSI-X vector 1 when
 * MSI-X is active, and the chip-appropriate e2/e1x SB format);
 * irq_arr[1] carries the default status block.
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	/* Status block format differs between E2 and E1x chips */
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}
10041
/*
 * bnx2x_register_cnic - attach a CNIC driver to this device.
 *
 * @ops:  CNIC callback table; must be non-NULL
 * @data: opaque cookie passed back in cnic_ctl callbacks
 *
 * Allocates one page for the CNIC kwqe ring, resets its pointers and
 * counters, publishes the IRQ layout, and finally makes @ops visible
 * via RCU.  Returns -EINVAL for NULL @ops, -EBUSY while interrupts
 * are gated (intr_sem non-zero), -ENOMEM on allocation failure,
 * 0 on success.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty ring: cons == prod, last marks the wrap point */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	/* Publish ops last so RCU readers only ever see a fully
	 * initialized registration */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
10077
/*
 * bnx2x_unregister_cnic - detach the CNIC driver from this device.
 *
 * Clears the ops pointer under cnic_mutex, waits for any in-flight
 * RCU readers (bnx2x_cnic_ctl_send_bh) to finish, then frees the kwqe
 * ring.  Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Ensure no reader still holds the old ops before freeing */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
10093
/*
 * bnx2x_cnic_probe - export this device's CNIC capabilities.
 *
 * Fills and returns the per-device cnic_eth_dev descriptor the CNIC
 * driver uses: register/doorbell windows, ILT context-table geometry,
 * iSCSI/FCoE CIDs and client id, and the driver callbacks
 * (queue/ctl/register/unregister).
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	/* CNIC context table starts right after the L2 CID lines */
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
10128
10129#endif
10130
10131