/* bnx2x_main.c: Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet driver. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_VERSION \
	__stringify(BCM_5710_FW_MAJOR_VERSION) "." \
	__stringify(BCM_5710_FW_MINOR_VERSION) "." \
	__stringify(BCM_5710_FW_REVISION_VERSION) "." \
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

#define TX_TIMEOUT		(5*HZ)

static char version[] =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
		 "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");

struct workqueue_struct *bnx2x_wq;

struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr;
	u32 umac_val;
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

static struct {
	char *name;
} board_info[] = {
	[BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	[BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	[BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
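
/* Storm memory access helpers: a 64-bit DMA address is written into storm
 * internal memory as two consecutive 32-bit words, low word first.
 */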
static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}
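
/* Indirect register access through the GRC address/data window in PCI
 * config space.  The window is pointed back at the vendor-id offset
 * afterwards, presumably so a stray config-space access cannot hit an
 * arbitrary GRC address.
 */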
355static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
356{
357 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
358 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
359 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
360 PCICFG_VENDOR_ID_OFFSET);
361}
362
363static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
364{
365 u32 val;
366
367 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
368 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
369 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
370 PCICFG_VENDOR_ID_OFFSET);
371
372 return val;
373}
374
375#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
376#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
377#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
378#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
379#define DMAE_DP_DST_NONE "dst_addr [none]"
380
381static void bnx2x_dp_dmae(struct bnx2x *bp,
382 struct dmae_command *dmae, int msglvl)
383{
384 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
385 int i;
386
387 switch (dmae->opcode & DMAE_COMMAND_DST) {
388 case DMAE_CMD_DST_PCI:
389 if (src_type == DMAE_CMD_SRC_PCI)
390 DP(msglvl, "DMAE: opcode 0x%08x\n"
391 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
392 "comp_addr [%x:%08x], comp_val 0x%08x\n",
393 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
394 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
395 dmae->comp_addr_hi, dmae->comp_addr_lo,
396 dmae->comp_val);
397 else
398 DP(msglvl, "DMAE: opcode 0x%08x\n"
399 "src [%08x], len [%d*4], dst [%x:%08x]\n"
400 "comp_addr [%x:%08x], comp_val 0x%08x\n",
401 dmae->opcode, dmae->src_addr_lo >> 2,
402 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
403 dmae->comp_addr_hi, dmae->comp_addr_lo,
404 dmae->comp_val);
405 break;
406 case DMAE_CMD_DST_GRC:
407 if (src_type == DMAE_CMD_SRC_PCI)
408 DP(msglvl, "DMAE: opcode 0x%08x\n"
409 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
410 "comp_addr [%x:%08x], comp_val 0x%08x\n",
411 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
412 dmae->len, dmae->dst_addr_lo >> 2,
413 dmae->comp_addr_hi, dmae->comp_addr_lo,
414 dmae->comp_val);
415 else
416 DP(msglvl, "DMAE: opcode 0x%08x\n"
417 "src [%08x], len [%d*4], dst [%08x]\n"
418 "comp_addr [%x:%08x], comp_val 0x%08x\n",
419 dmae->opcode, dmae->src_addr_lo >> 2,
420 dmae->len, dmae->dst_addr_lo >> 2,
421 dmae->comp_addr_hi, dmae->comp_addr_lo,
422 dmae->comp_val);
423 break;
424 default:
425 if (src_type == DMAE_CMD_SRC_PCI)
426 DP(msglvl, "DMAE: opcode 0x%08x\n"
427 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
428 "comp_addr [%x:%08x] comp_val 0x%08x\n",
429 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
430 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
431 dmae->comp_val);
432 else
433 DP(msglvl, "DMAE: opcode 0x%08x\n"
434 "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
435 "comp_addr [%x:%08x] comp_val 0x%08x\n",
436 dmae->opcode, dmae->src_addr_lo >> 2,
437 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
438 dmae->comp_val);
439 break;
440 }
441
442 for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
443 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
444 i, *(((u32 *)dmae) + i));
445}
446
447
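
/* Copy a DMAE command into the DMAE command memory and kick the matching
 * GO register so the hardware executes it.  Callers are expected to
 * serialize access to the channel (see bnx2x_issue_dmae_with_comp()).
 */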
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
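
/* Post a command prepared with bnx2x_prep_dmae_with_comp() and busy-wait for
 * the completion value to appear in host memory.  A minimal usage sketch
 * (error handling omitted; see bnx2x_write_dmae() below for the real flow):
 *
 *	struct dmae_command dmae;
 *
 *	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *	dmae.src_addr_lo = U64_LO(dma_addr);
 *	dmae.src_addr_hi = U64_HI(dma_addr);
 *	dmae.dst_addr_lo = dst_addr >> 2;
 *	dmae.len = len32;
 *	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 */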
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the DMAE channel.  Disable BHs to prevent a dead-lock,
	 * since this code is reached both from syscall context and from
	 * flows that may run in BH context (e.g. ndo_set_rx_mode()).
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}
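
/* Copy len32 dwords from host memory (dma_addr) into GRC space (dst_addr).
 * If DMAE is not ready yet, the data is assumed to already sit in the
 * slowpath wb_data scratch buffer and is written out register by register
 * instead.
 */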
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}
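
/* Write a buffer that may exceed the per-command DMAE length limit by
 * splitting it into DMAE_LEN32_WR_MAX(bp)-dword chunks.
 */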
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
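
/* Scan the assert lists of the four storm processors (X, T, C and U) and
 * print every entry whose opcode is valid.  Returns the number of asserts
 * found.
 */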
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
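
/* Dump the management firmware (MCP) trace buffer, which sits just below
 * shmem in the MCP scratchpad.  The buffer is cyclic: everything from the
 * current mark up to shmem is printed first, then the wrapped-around part.
 */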
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	/* sanity check: the trace buffer must lie inside the MCP scratchpad */
	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

	/* validate the trace buffer signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.\n");
		return;
	}

	/* read the cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* On E1 the MSI/MSI-X enable bit must stay set in the HC block, so
	 * interrupts are silenced through the mask register instead.
	 */
	if (CHIP_IS_E1(bp)) {
		/* mask all sources so the HC stops sending interrupts after
		 * we leave this function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}
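
/* Dump driver and chip state for post-mortem analysis: slowpath indices,
 * per-queue status block data and (when BNX2X_STOP_ON_ERROR is set) the
 * rings themselves, followed by the firmware trace and storm asserts.
 */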
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif
	if (disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos) {
			txdata = *fp->txdata_ptr[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");

		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* event queue */
	BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
	for (i = 0; i < NUM_EQ_DESC; i++) {
		u32 *data = (u32 *)&bp->eq_ring[i].message.data;

		BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
			  i, bp->eq_ring[i].message.opcode,
			  bp->eq_ring[i].message.error);
		BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
	}

	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

#define FLR_WAIT_USEC		10000
#define FLR_WAIT_INTERVAL	50
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL)

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};
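
/* FLR helpers: poll the PBF credit/occupancy counters until the hardware
 * reports that all previously posted tx data has actually been flushed,
 * or until the poll count expires.
 */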
static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout for emulation/FPGA platforms */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
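
/* Send the firmware "final cleanup" aggregated interrupt command for the
 * given function and wait for the completion flag in CSTORM internal
 * memory to be set by the firmware.
 */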
int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}

	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timers PF usage-counters to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for DMAE command register to clear */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}
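
/* Cleanup after a PF function-level reset: re-enable target reads, wait
 * for all HW usage counters to drop to zero, issue the FW final cleanup,
 * verify that no PCIe transactions are pending, then re-enable the PF as
 * master.
 */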
static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/* Master enable - the register is re-initialized as part of the
	 * regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	/* ensure that HC_CONFIG is written before leading/trailing edge
	 * config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	/* clean previous status - need to configure igu prior to ack */
	if ((!msix) || single_msix) {
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
		bnx2x_ack_int(bp);
	}

	val |= IGU_PF_CONF_FUNC_EN;

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}
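
/* Try, without waiting, to grab one of the HW resource locks implemented
 * by the MISC block.  Returns true if the lock was acquired.
 */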
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

/* Returns the recovery leader lock resource for this engine (path). */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

/* Tries to take the per-engine recovery leader lock; true if acquired. */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
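
/* Mark that a slowpath interrupt has occurred and kick the sp_task work
 * item; the write to interrupt_occurred must be visible before the work
 * runs, hence the barrier in the function body.
 */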
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* mark the occurrence of the interrupt */
	atomic_set(&bp->interrupt_occurred, 1);

	/* the sp_task must execute only after this write (above) is done */
	smp_wmb();

	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slowpath object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command the first
		 * place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	bnx2x_iov_sp_event(bp, cid, true);

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic_inc();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end.
		 *
		 * mark pending ACK to MCP bit; prevent the case that both
		 * bits are cleared.  At the end of load/unload the driver
		 * checks that sp_state is cleared, and this order prevents
		 * races.
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* schedule the sp task as mcp ack is required */
		bnx2x_schedule_sp_task(bp);
	}

	return;
}
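
/* Legacy INTx/MSI interrupt handler: ack the IGU/HC, schedule NAPI for
 * every fastpath queue whose bit is set in the status word, hand CNIC
 * related bits to the cnic driver and defer slowpath work to sp_task.
 */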
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops && (bp->cnic_eth_dev.drv_state &
				      CNIC_DRV_STATE_HANDLES_IRQ))
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		/* schedule sp task to perform default status block work, ack
		 * attentions and enable interrupts
		 */
		bnx2x_schedule_sp_task(bp);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		usleep_range(5000, 10000);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
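
/* GPIO accessors.  The pin number is shifted by the port when the two
 * ports are swapped by the NIG strap settings.
 */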
2028int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2029{
2030	/* The GPIO should be swapped if swap register is set and active */
2031 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2032 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2033 int gpio_shift = gpio_num +
2034 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2035 u32 gpio_mask = (1 << gpio_shift);
2036 u32 gpio_reg;
2037 int value;
2038
2039 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2040 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2041 return -EINVAL;
2042 }
2043
2044	/* read GPIO value */
2045 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2046
2047	/* get the requested pin value */
2048 if ((gpio_reg & gpio_mask) == gpio_mask)
2049 value = 1;
2050 else
2051 value = 0;
2052
2053 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2054
2055 return value;
2056}
2057
2058int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2059{
2060	/* The GPIO should be swapped if swap register is set and active */
2061 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2062 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2063 int gpio_shift = gpio_num +
2064 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2065 u32 gpio_mask = (1 << gpio_shift);
2066 u32 gpio_reg;
2067
2068 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2069 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2070 return -EINVAL;
2071 }
2072
2073 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2074	/* read GPIO and mask except the float bits */
2075 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2076
2077 switch (mode) {
2078 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2079 DP(NETIF_MSG_LINK,
2080 "Set GPIO %d (shift %d) -> output low\n",
2081 gpio_num, gpio_shift);
2082		/* clear FLOAT and set CLR */
2083 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2084 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2085 break;
2086
2087 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2088 DP(NETIF_MSG_LINK,
2089 "Set GPIO %d (shift %d) -> output high\n",
2090 gpio_num, gpio_shift);
2091		/* clear FLOAT and set SET */
2092 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2093 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2094 break;
2095
2096 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2097 DP(NETIF_MSG_LINK,
2098 "Set GPIO %d (shift %d) -> input\n",
2099 gpio_num, gpio_shift);
2100		/* set FLOAT */
2101 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2102 break;
2103
2104 default:
2105 break;
2106 }
2107
2108 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2109 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2110
2111 return 0;
2112}
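/* MISC_REG_GPIO, as used above, is organized as write-1 command banks at
 * the FLOAT/CLR/SET *_POS offsets plus the readable pin-value bits used by
 * bnx2x_get_gpio(). Clearing a pin's FLOAT bit drives it as an output,
 * CLR/SET then choose the level, and setting FLOAT returns the pin to a
 * high-impedance input.
 */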
2113
2114int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2115{
2116 u32 gpio_reg = 0;
2117 int rc = 0;
2118
2119	/* Any port swapping should be handled by caller. */
2120
2121 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2122	/* read GPIO and clear the pins' previous FLOAT/CLR/SET command bits */
2123 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2124 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2125 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2126 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2127
2128 switch (mode) {
2129 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2130 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2131		/* set CLR */
2132 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2133 break;
2134
2135 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2136 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2137		/* set SET */
2138 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2139 break;
2140
2141 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2142 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2143		/* set FLOAT */
2144 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2145 break;
2146
2147 default:
2148 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2149 rc = -EINVAL;
2150 break;
2151 }
2152
2153 if (rc == 0)
2154 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2155
2156 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2157
2158 return rc;
2159}
2160
2161int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2162{
2163	/* The GPIO should be swapped if swap register is set and active */
2164 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2165 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2166 int gpio_shift = gpio_num +
2167 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2168 u32 gpio_mask = (1 << gpio_shift);
2169 u32 gpio_reg;
2170
2171 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2172 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2173 return -EINVAL;
2174 }
2175
2176 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2177
2178 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2179
2180 switch (mode) {
2181 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2182 DP(NETIF_MSG_LINK,
2183 "Clear GPIO INT %d (shift %d) -> output low\n",
2184 gpio_num, gpio_shift);
2185		/* clear SET and set CLR */
2186 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2187 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2188 break;
2189
2190 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2191 DP(NETIF_MSG_LINK,
2192 "Set GPIO INT %d (shift %d) -> output high\n",
2193 gpio_num, gpio_shift);
2194		/* clear CLR and set SET */
2195 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2196 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2197 break;
2198
2199 default:
2200 break;
2201 }
2202
2203 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2204 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2205
2206 return 0;
2207}
2208
2209static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2210{
2211 u32 spio_reg;
2212
2213	/* Only 2 SPIOs are configurable */
2214 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2215 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2216 return -EINVAL;
2217 }
2218
2219 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2220	/* read SPIO and mask except the float bits */
2221 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2222
2223 switch (mode) {
2224 case MISC_SPIO_OUTPUT_LOW:
2225 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2226		/* clear FLOAT and set CLR */
2227 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2228 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2229 break;
2230
2231 case MISC_SPIO_OUTPUT_HIGH:
2232 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2233		/* clear FLOAT and set SET */
2234 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2235 spio_reg |= (spio << MISC_SPIO_SET_POS);
2236 break;
2237
2238 case MISC_SPIO_INPUT_HI_Z:
2239 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2240		/* set FLOAT */
2241 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2242 break;
2243
2244 default:
2245 break;
2246 }
2247
2248 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2249 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2250
2251 return 0;
2252}
2253
2254void bnx2x_calc_fc_adv(struct bnx2x *bp)
2255{
2256 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2257 switch (bp->link_vars.ieee_fc &
2258 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2259 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2260 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2261 ADVERTISED_Pause);
2262 break;
2263
2264 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2265 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2266 ADVERTISED_Pause);
2267 break;
2268
2269 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2270 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2271 break;
2272
2273 default:
2274 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2275 ADVERTISED_Pause);
2276 break;
2277 }
2278}
2279
2280static void bnx2x_set_requested_fc(struct bnx2x *bp)
2281{
2282	/* Initialize link parameters structure variables
2283	 * It is recommended to turn off RX FC for jumbo frames
2284	 * for better performance
2285	 */
2286 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2287 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2288 else
2289 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2290}
2291
2292static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2293{
2294 u32 pause_enabled = 0;
2295
2296 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2297 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2298 pause_enabled = 1;
2299
2300 REG_WR(bp, BAR_USTRORM_INTMEM +
2301 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2302 pause_enabled);
2303 }
2304
2305 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2306 pause_enabled ? "enabled" : "disabled");
2307}
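/* Dropless flow control in brief: the pause_enabled flag written to USTORM
 * above arms FW-generated pause frames keyed to host RX ring occupancy
 * (instead of dropping packets). It is therefore only set while TX flow
 * control was negotiated and the link is up, and is skipped entirely on E1.
 */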
2308
2309int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2310{
2311 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2312 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2313
2314 if (!BP_NOMCP(bp)) {
2315 bnx2x_set_requested_fc(bp);
2316 bnx2x_acquire_phy_lock(bp);
2317
2318 if (load_mode == LOAD_DIAG) {
2319 struct link_params *lp = &bp->link_params;
2320 lp->loopback_mode = LOOPBACK_XGXS;
2321			/* do PHY loopback at 10G speed, if possible */
2322 if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
2323 if (lp->speed_cap_mask[cfx_idx] &
2324 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2325 lp->req_line_speed[cfx_idx] =
2326 SPEED_10000;
2327 else
2328 lp->req_line_speed[cfx_idx] =
2329 SPEED_1000;
2330 }
2331 }
2332
2333 if (load_mode == LOAD_LOOPBACK_EXT) {
2334 struct link_params *lp = &bp->link_params;
2335 lp->loopback_mode = LOOPBACK_EXT;
2336 }
2337
2338 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2339
2340 bnx2x_release_phy_lock(bp);
2341
2342 bnx2x_init_dropless_fc(bp);
2343
2344 bnx2x_calc_fc_adv(bp);
2345
2346 if (bp->link_vars.link_up) {
2347 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2348 bnx2x_link_report(bp);
2349 }
2350 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2351 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2352 return rc;
2353 }
2354 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2355 return -EINVAL;
2356}
2357
2358void bnx2x_link_set(struct bnx2x *bp)
2359{
2360 if (!BP_NOMCP(bp)) {
2361 bnx2x_acquire_phy_lock(bp);
2362 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2363 bnx2x_release_phy_lock(bp);
2364
2365 bnx2x_init_dropless_fc(bp);
2366
2367 bnx2x_calc_fc_adv(bp);
2368 } else
2369 BNX2X_ERR("Bootcode is missing - can not set link\n");
2370}
2371
2372static void bnx2x__link_reset(struct bnx2x *bp)
2373{
2374 if (!BP_NOMCP(bp)) {
2375 bnx2x_acquire_phy_lock(bp);
2376 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2377 bnx2x_release_phy_lock(bp);
2378 } else
2379 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2380}
2381
2382void bnx2x_force_link_reset(struct bnx2x *bp)
2383{
2384 bnx2x_acquire_phy_lock(bp);
2385 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2386 bnx2x_release_phy_lock(bp);
2387}
2388
2389u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2390{
2391 u8 rc = 0;
2392
2393 if (!BP_NOMCP(bp)) {
2394 bnx2x_acquire_phy_lock(bp);
2395 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2396 is_serdes);
2397 bnx2x_release_phy_lock(bp);
2398 } else
2399 BNX2X_ERR("Bootcode is missing - can not test link\n");
2400
2401 return rc;
2402}
2403
2404/* Calculates the sum of vn_min_rates.
2405 * It's needed for further normalizing of the min_rates.
2406 * Returns:
2407 *   sum of vn_min_rates, or
2408 *   0 - if all the min_rates are 0.
2409 * In the latter case the fairness algorithm should be deactivated.
2410 * If not all min_rates are zero then those that are zeroes will be set
2411 * to DEF_MIN_RATE.
2412 */
2413static void bnx2x_calc_vn_min(struct bnx2x *bp,
2414 struct cmng_init_input *input)
2415{
2416 int all_zero = 1;
2417 int vn;
2418
2419 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2420 u32 vn_cfg = bp->mf_config[vn];
2421 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2422 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2423
2424		/* Skip hidden vns */
2425 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2426 vn_min_rate = 0;
2427		/* If min rate is zero - set it to default */
2428 else if (!vn_min_rate)
2429 vn_min_rate = DEF_MIN_RATE;
2430 else
2431 all_zero = 0;
2432
2433 input->vnic_min_rate[vn] = vn_min_rate;
2434 }
2435
2436	/* if ETS is enabled, the fairness algorithm is disabled */
2437 if (BNX2X_IS_ETS_ENABLED(bp)) {
2438 input->flags.cmng_enables &=
2439 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2440 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2441 } else if (all_zero) {
2442 input->flags.cmng_enables &=
2443 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2444 DP(NETIF_MSG_IFUP,
2445 "All MIN values are zeroes fairness will be disabled\n");
2446 } else
2447 input->flags.cmng_enables |=
2448 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2449}
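/* Worked example for the function above: three visible VNs with min-BW
 * fields 0/25/40 yield vnic_min_rate[] = {DEF_MIN_RATE, 2500, 4000} (the
 * configured values are scaled by 100) and fairness stays enabled; only an
 * all-zero configuration, or ETS being enabled, clears
 * CMNG_FLAGS_PER_PORT_FAIRNESS_VN.
 */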
2450
2451static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2452 struct cmng_init_input *input)
2453{
2454 u16 vn_max_rate;
2455 u32 vn_cfg = bp->mf_config[vn];
2456
2457 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2458 vn_max_rate = 0;
2459 else {
2460 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2461
2462 if (IS_MF_SI(bp)) {
2463			/* maxCfg in percents of linkspeed */
2464 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2465 } else
2466			/* maxCfg is absolute in 100Mb units */
2467 vn_max_rate = maxCfg * 100;
2468 }
2469
2470 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2471
2472 input->vnic_max_rate[vn] = vn_max_rate;
2473}
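/* Note the asymmetric units above: in switch-independent (SI) mode maxCfg
 * is a percentage of the current line speed, while in the other MF modes
 * it is an absolute value in 100Mb units, e.g. maxCfg = 50 means 5Gb/s
 * regardless of the link rate.
 */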
2474
2475static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2476{
2477 if (CHIP_REV_IS_SLOW(bp))
2478 return CMNG_FNS_NONE;
2479 if (IS_MF(bp))
2480 return CMNG_FNS_MINMAX;
2481
2482 return CMNG_FNS_NONE;
2483}
2484
2485void bnx2x_read_mf_cfg(struct bnx2x *bp)
2486{
2487 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2488
2489 if (BP_NOMCP(bp))
2490
2491	/* For 2 port configuration the absolute function number formula
2492	 * is:
2493	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
2494	 *
2495	 *      and there are 4 functions per port
2496	 *
2497	 * For 4 port configuration it is
2498	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2499	 *
2500	 *      and there are 2 functions per port
2501	 */
2502
2503 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2504 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2505
2506 if (func >= E1H_FUNC_MAX)
2507 break;
2508
2509 bp->mf_config[vn] =
2510 MF_CFG_RD(bp, func_mf_config[func].config);
2511 }
2512 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2513 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2514 bp->flags |= MF_FUNC_DIS;
2515 } else {
2516 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2517 bp->flags &= ~MF_FUNC_DIS;
2518 }
2519}
2520
2521static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2522{
2523 struct cmng_init_input input;
2524 memset(&input, 0, sizeof(struct cmng_init_input));
2525
2526 input.port_rate = bp->link_vars.line_speed;
2527
2528 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2529 int vn;
2530
2531		/* read mf conf from shmem */
2532 if (read_cfg)
2533 bnx2x_read_mf_cfg(bp);
2534
2535		/* vn_weight_sum and enable fairness if not 0 */
2536 bnx2x_calc_vn_min(bp, &input);
2537
2538		/* calculate and set min-max rate for each vn */
2539 if (bp->port.pmf)
2540 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2541 bnx2x_calc_vn_max(bp, vn, &input);
2542
2543		/* always enable rate shaping and fairness */
2544 input.flags.cmng_enables |=
2545 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2546
2547 bnx2x_init_cmng(&input, &bp->cmng);
2548 return;
2549 }
2550
2551	/* rate shaping and fairness are disabled */
2552 DP(NETIF_MSG_IFUP,
2553 "rate shaping and fairness are disabled\n");
2554}
2555
2556static void storm_memset_cmng(struct bnx2x *bp,
2557 struct cmng_init *cmng,
2558 u8 port)
2559{
2560 int vn;
2561 size_t size = sizeof(struct cmng_struct_per_port);
2562
2563 u32 addr = BAR_XSTRORM_INTMEM +
2564 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2565
2566 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2567
2568 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2569 int func = func_by_vn(bp, vn);
2570
2571 addr = BAR_XSTRORM_INTMEM +
2572 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2573 size = sizeof(struct rate_shaping_vars_per_vn);
2574 __storm_memset_struct(bp, addr, size,
2575 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2576
2577 addr = BAR_XSTRORM_INTMEM +
2578 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2579 size = sizeof(struct fairness_vars_per_vn);
2580 __storm_memset_struct(bp, addr, size,
2581 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2582 }
2583}
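/* Layout written by storm_memset_cmng(): one cmng_struct_per_port blob per
 * port followed by per-VN rate-shaping and fairness blocks addressed by
 * absolute function number, all placed in XSTORM internal memory where the
 * FW scheduler consumes them.
 */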
2584
2585/* init cmng mode in HW according to local configuration */
2586void bnx2x_set_local_cmng(struct bnx2x *bp)
2587{
2588 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2589
2590 if (cmng_fns != CMNG_FNS_NONE) {
2591 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2592 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2593 } else {
2594		/* rate shaping and fairness are disabled */
2595 DP(NETIF_MSG_IFUP,
2596 "single function mode without fairness\n");
2597 }
2598}
2599
2600/* This function is called upon link interrupt */
2601static void bnx2x_link_attn(struct bnx2x *bp)
2602{
2603	/* Make sure that we are synced with the current statistics */
2604 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2605
2606 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2607
2608 bnx2x_init_dropless_fc(bp);
2609
2610 if (bp->link_vars.link_up) {
2611
2612 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2613 struct host_port_stats *pstats;
2614
2615 pstats = bnx2x_sp(bp, port_stats);
2616
2617 memset(&(pstats->mac_stx[0]), 0,
2618 sizeof(struct mac_stx));
2619 }
2620 if (bp->state == BNX2X_STATE_OPEN)
2621 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2622 }
2623
2624 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2625 bnx2x_set_local_cmng(bp);
2626
2627 __bnx2x_link_report(bp);
2628
2629 if (IS_MF(bp))
2630 bnx2x_link_sync_notify(bp);
2631}
2632
2633void bnx2x__link_status_update(struct bnx2x *bp)
2634{
2635 if (bp->state != BNX2X_STATE_OPEN)
2636 return;
2637
2638	/* read updated dcb configuration */
2639 if (IS_PF(bp)) {
2640 bnx2x_dcbx_pmf_update(bp);
2641 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2642 if (bp->link_vars.link_up)
2643 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2644 else
2645 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2646
2647 bnx2x_link_report(bp);
2648
2649 } else {
2650 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2651 SUPPORTED_10baseT_Full |
2652 SUPPORTED_100baseT_Half |
2653 SUPPORTED_100baseT_Full |
2654 SUPPORTED_1000baseT_Full |
2655 SUPPORTED_2500baseX_Full |
2656 SUPPORTED_10000baseT_Full |
2657 SUPPORTED_TP |
2658 SUPPORTED_FIBRE |
2659 SUPPORTED_Autoneg |
2660 SUPPORTED_Pause |
2661 SUPPORTED_Asym_Pause);
2662 bp->port.advertising[0] = bp->port.supported[0];
2663
2664 bp->link_params.bp = bp;
2665 bp->link_params.port = BP_PORT(bp);
2666 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2667 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2668 bp->link_params.req_line_speed[0] = SPEED_10000;
2669 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2670 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2671 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2672 bp->link_vars.line_speed = SPEED_10000;
2673 bp->link_vars.link_status =
2674 (LINK_STATUS_LINK_UP |
2675 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2676 bp->link_vars.link_up = 1;
2677 bp->link_vars.duplex = DUPLEX_FULL;
2678 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2679 __bnx2x_link_report(bp);
2680 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2681 }
2682}
2683
2684static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2685 u16 vlan_val, u8 allowed_prio)
2686{
2687 struct bnx2x_func_state_params func_params = {NULL};
2688 struct bnx2x_func_afex_update_params *f_update_params =
2689 &func_params.params.afex_update;
2690
2691 func_params.f_obj = &bp->func_obj;
2692 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2693
2694	/* no need to wait for RAMROD completion, so don't
2695	 * set RAMROD_COMP_WAIT flag
2696	 */
2698 f_update_params->vif_id = vifid;
2699 f_update_params->afex_default_vlan = vlan_val;
2700 f_update_params->allowed_priorities = allowed_prio;
2701
2702
2703 if (bnx2x_func_state_change(bp, &func_params) < 0)
2704 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2705
2706 return 0;
2707}
2708
2709static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2710 u16 vif_index, u8 func_bit_map)
2711{
2712 struct bnx2x_func_state_params func_params = {NULL};
2713 struct bnx2x_func_afex_viflists_params *update_params =
2714 &func_params.params.afex_viflists;
2715 int rc;
2716 u32 drv_msg_code;
2717
2718	/* validate only LIST_SET and LIST_GET are received from switch */
2719 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2720 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2721 cmd_type);
2722
2723 func_params.f_obj = &bp->func_obj;
2724 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2725
2726	/* set parameters according to cmd_type */
2727 update_params->afex_vif_list_command = cmd_type;
2728 update_params->vif_list_index = vif_index;
2729 update_params->func_bit_map =
2730 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2731 update_params->func_to_clear = 0;
2732 drv_msg_code =
2733 (cmd_type == VIF_LIST_RULE_GET) ?
2734 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2735 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2736
2737	/* if ramrod can not be sent, respond to MCP immediately for
2738	 * SET and GET requests (others are not triggered from MCP)
2739	 */
2740 rc = bnx2x_func_state_change(bp, &func_params);
2741 if (rc < 0)
2742 bnx2x_fw_command(bp, drv_msg_code, 0);
2743
2744 return 0;
2745}
2746
2747static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2748{
2749 struct afex_stats afex_stats;
2750 u32 func = BP_ABS_FUNC(bp);
2751 u32 mf_config;
2752 u16 vlan_val;
2753 u32 vlan_prio;
2754 u16 vif_id;
2755 u8 allowed_prio;
2756 u8 vlan_mode;
2757 u32 addr_to_write, vifid, addrs, stats_type, i;
2758
2759 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2760 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2761 DP(BNX2X_MSG_MCP,
2762 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2763 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2764 }
2765
2766 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2767 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2768 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2769 DP(BNX2X_MSG_MCP,
2770 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2771 vifid, addrs);
2772 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2773 addrs);
2774 }
2775
2776 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2777 addr_to_write = SHMEM2_RD(bp,
2778 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2779 stats_type = SHMEM2_RD(bp,
2780 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2781
2782 DP(BNX2X_MSG_MCP,
2783 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2784 addr_to_write);
2785
2786 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2787
2788		/* write response to scratchpad, for MCP */
2789 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2790 REG_WR(bp, addr_to_write + i*sizeof(u32),
2791 *(((u32 *)(&afex_stats))+i));
2792
2793		/* send ack message to MCP */
2794 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2795 }
2796
2797 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2798 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2799 bp->mf_config[BP_VN(bp)] = mf_config;
2800 DP(BNX2X_MSG_MCP,
2801 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2802 mf_config);
2803
2804		/* if VIF_SET is "enabled" */
2805 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2806			/* set rate limit directly to internal RAM */
2807 struct cmng_init_input cmng_input;
2808 struct rate_shaping_vars_per_vn m_rs_vn;
2809 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2810 u32 addr = BAR_XSTRORM_INTMEM +
2811 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2812
2813 bp->mf_config[BP_VN(bp)] = mf_config;
2814
2815 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2816 m_rs_vn.vn_counter.rate =
2817 cmng_input.vnic_max_rate[BP_VN(bp)];
2818 m_rs_vn.vn_counter.quota =
2819 (m_rs_vn.vn_counter.rate *
2820 RS_PERIODIC_TIMEOUT_USEC) / 8;
2821
2822 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2823
2824			/* read relevant values from mf_cfg struct in shmem */
2825 vif_id =
2826 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2827 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2828 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2829 vlan_val =
2830 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2831 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2832 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2833 vlan_prio = (mf_config &
2834 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2835 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2836 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2837 vlan_mode =
2838 (MF_CFG_RD(bp,
2839 func_mf_config[func].afex_config) &
2840 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2841 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2842 allowed_prio =
2843 (MF_CFG_RD(bp,
2844 func_mf_config[func].afex_config) &
2845 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2846 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2847
2848			/* send ramrod to FW, return in case of failure */
2849 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2850 allowed_prio))
2851 return;
2852
2853 bp->afex_def_vlan_tag = vlan_val;
2854 bp->afex_vlan_mode = vlan_mode;
2855 } else {
2856			/* notify link down because function is disabled */
2857 bnx2x_link_report(bp);
2858
2859			/* send INVALID VIF ramrod to FW */
2860 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2861
2862			/* Reset the default afex VLAN */
2863 bp->afex_def_vlan_tag = -1;
2864 }
2865 }
2866}
2867
2868static void bnx2x_pmf_update(struct bnx2x *bp)
2869{
2870 int port = BP_PORT(bp);
2871 u32 val;
2872
2873 bp->port.pmf = 1;
2874 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2875
2876	/*
2877	 * We need the mb() to ensure the ordering between the writing to
2878	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2879	 */
2880 smp_mb();
2881
2882	/* queue a periodic task */
2883 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2884
2885 bnx2x_dcbx_pmf_update(bp);
2886
2887	/* enable nig attention */
2888 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2889 if (bp->common.int_block == INT_BLOCK_HC) {
2890 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2891 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2892 } else if (!CHIP_IS_E1x(bp)) {
2893 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2894 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2895 }
2896
2897 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2898}
2899
2900/**
2901 * bnx2x_fw_command - send the MCP a request
2902 *
2903 * @bp:		driver handle
2904 * @command:	request
2905 * @param:	request's parameter
2906 *
2907 * Blocks until there is a reply. Returns the FW response code, or 0 if
2908 * the FW failed to respond.
2909 */
2909u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2910{
2911 int mb_idx = BP_FW_MB_IDX(bp);
2912 u32 seq;
2913 u32 rc = 0;
2914 u32 cnt = 1;
2915 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2916
2917 mutex_lock(&bp->fw_mb_mutex);
2918 seq = ++bp->fw_seq;
2919 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2920 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2921
2922 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2923 (command | seq), param);
2924
2925 do {
2926		/* let the FW do its magic ... */
2927 msleep(delay);
2928
2929 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2930
2931		/* Give the FW up to 5 seconds (500*10ms) */
2932 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2933
2934 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2935 cnt*delay, rc, seq);
2936
2937	/* is this a reply to our command? */
2938 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2939 rc &= FW_MSG_CODE_MASK;
2940 else {
2941		/* FW BUG! */
2942 BNX2X_ERR("FW failed to respond!\n");
2943 bnx2x_fw_dump(bp);
2944 rc = 0;
2945 }
2946 mutex_unlock(&bp->fw_mb_mutex);
2947
2948 return rc;
2949}
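/* The mailbox handshake above is sequence-number based: the driver bumps
 * fw_seq, writes (command | seq) into drv_mb_header and then polls
 * fw_mb_header until the MCP echoes the same sequence number back (up to
 * roughly 5 seconds). A missing or stale echo is treated as "no response"
 * and makes the function return 0.
 */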
2950
2951static void storm_memset_func_cfg(struct bnx2x *bp,
2952 struct tstorm_eth_function_common_config *tcfg,
2953 u16 abs_fid)
2954{
2955 size_t size = sizeof(struct tstorm_eth_function_common_config);
2956
2957 u32 addr = BAR_TSTRORM_INTMEM +
2958 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2959
2960 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2961}
2962
2963void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2964{
2965 if (CHIP_IS_E1x(bp)) {
2966 struct tstorm_eth_function_common_config tcfg = {0};
2967
2968 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2969 }
2970
2971	/* Enable the function in the FW */
2972 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2973 storm_memset_func_en(bp, p->func_id, 1);
2974
2975	/* spq */
2976 if (p->func_flgs & FUNC_FLG_SPQ) {
2977 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2978 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2979 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2980 }
2981}
2982
2983
2984/**
2985 * bnx2x_get_common_flags - return the flags that are common for the Tx-only
2986 * and not normal connections.
2987 *
2988 * @bp:		device handle
2989 * @fp:		queue handle
2990 * @zero_stats:	TRUE if statistics zeroing is needed
2991 */
2992static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2993 struct bnx2x_fastpath *fp,
2994 bool zero_stats)
2995{
2996 unsigned long flags = 0;
2997
2998	/* PF driver will always initialize the Queue to an ACTIVE state */
2999 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3000
3001	/* tx only connections collect statistics (on the same index as the
3002	 * parent connection). The statistics are zeroed when the parent
3003	 * connection is initialized.
3004	 */
3005
3006 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3007 if (zero_stats)
3008 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3009
3010 if (bp->flags & TX_SWITCHING)
3011 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3012
3013 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3014 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3015
3016#ifdef BNX2X_STOP_ON_ERROR
3017 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3018#endif
3019
3020 return flags;
3021}
3022
3023static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3024 struct bnx2x_fastpath *fp,
3025 bool leading)
3026{
3027 unsigned long flags = 0;
3028
3029	/* calculate other queue flags */
3030 if (IS_MF_SD(bp))
3031 __set_bit(BNX2X_Q_FLG_OV, &flags);
3032
3033 if (IS_FCOE_FP(fp)) {
3034 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3035		/* For FCoE - force usage of default priority (for afex) */
3036 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3037 }
3038
3039 if (!fp->disable_tpa) {
3040 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3041 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3042 if (fp->mode == TPA_MODE_GRO)
3043 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3044 }
3045
3046 if (leading) {
3047 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3048 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3049 }
3050
3051	/* Always set HW VLAN stripping */
3052 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3053
3054	/* configure silent vlan removal */
3055 if (IS_MF_AFEX(bp))
3056 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3057
3058 return flags | bnx2x_get_common_flags(bp, fp, true);
3059}
3060
3061static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3062 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3063 u8 cos)
3064{
3065 gen_init->stat_id = bnx2x_stats_id(fp);
3066 gen_init->spcl_id = fp->cl_id;
3067
3068	/* Always use mini-jumbo MTU for FCoE L2 ring */
3069 if (IS_FCOE_FP(fp))
3070 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3071 else
3072 gen_init->mtu = bp->dev->mtu;
3073
3074 gen_init->cos = cos;
3075}
3076
3077static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3078 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3079 struct bnx2x_rxq_setup_params *rxq_init)
3080{
3081 u8 max_sge = 0;
3082 u16 sge_sz = 0;
3083 u16 tpa_agg_size = 0;
3084
3085 if (!fp->disable_tpa) {
3086 pause->sge_th_lo = SGE_TH_LO(bp);
3087 pause->sge_th_hi = SGE_TH_HI(bp);
3088
3089		/* validate SGE ring has enough to cross high threshold */
3090 WARN_ON(bp->dropless_fc &&
3091 pause->sge_th_hi + FW_PREFETCH_CNT >
3092 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3093
3094 tpa_agg_size = TPA_AGG_SIZE;
3095 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3096 SGE_PAGE_SHIFT;
3097 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3098 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3099 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3100 }
3101
3102	/* pause - not for e1 */
3103 if (!CHIP_IS_E1(bp)) {
3104 pause->bd_th_lo = BD_TH_LO(bp);
3105 pause->bd_th_hi = BD_TH_HI(bp);
3106
3107 pause->rcq_th_lo = RCQ_TH_LO(bp);
3108 pause->rcq_th_hi = RCQ_TH_HI(bp);
3109
3110		/* validate rings have enough entries to cross
3111		 * high thresholds
3112		 */
3113 WARN_ON(bp->dropless_fc &&
3114 pause->bd_th_hi + FW_PREFETCH_CNT >
3115 bp->rx_ring_size);
3116 WARN_ON(bp->dropless_fc &&
3117 pause->rcq_th_hi + FW_PREFETCH_CNT >
3118 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3119
3120 pause->pri_map = 1;
3121 }
3122
3123	/* Rx queue setup parameters */
3124 rxq_init->dscr_map = fp->rx_desc_mapping;
3125 rxq_init->sge_map = fp->rx_sge_mapping;
3126 rxq_init->rcq_map = fp->rx_comp_mapping;
3127 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3128
3129	/* This should be a maximum number of data bytes that may be
3130	 * placed on the BD (not including paddings).
3131	 */
3132 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3133 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3134
3135 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3136 rxq_init->tpa_agg_sz = tpa_agg_size;
3137 rxq_init->sge_buf_sz = sge_sz;
3138 rxq_init->max_sges_pkt = max_sge;
3139 rxq_init->rss_engine_id = BP_FUNC(bp);
3140 rxq_init->mcast_engine_id = BP_FUNC(bp);
3141
3142	/* Maximum number of simultaneous TPA aggregations for this Queue.
3143	 *
3144	 * For PF Clients it should be the maximum available number.
3145	 * VF driver(s) may want to define it to a smaller value.
3146	 */
3147 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3148
3149 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3150 rxq_init->fw_sb_id = fp->fw_sb_id;
3151
3152 if (IS_FCOE_FP(fp))
3153 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3154 else
3155 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3156
3157	/* configure silent vlan removal:
3158	 * if multi function mode is afex, then mask default vlan */
3159 if (IS_MF_AFEX(bp)) {
3160 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3161 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3162 }
3163}
3164
3165static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3166 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3167 u8 cos)
3168{
3169 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3170 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3171 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3172 txq_init->fw_sb_id = fp->fw_sb_id;
3173
3174	/*
3175	 * set the tss leading client id for TX classification ==
3176	 * leading RSS client id
3177	 */
3178 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3179
3180 if (IS_FCOE_FP(fp)) {
3181 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3182 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3183 }
3184}
3185
3186static void bnx2x_pf_init(struct bnx2x *bp)
3187{
3188 struct bnx2x_func_init_params func_init = {0};
3189 struct event_ring_data eq_data = { {0} };
3190 u16 flags;
3191
3192 if (!CHIP_IS_E1x(bp)) {
3193		/* reset IGU PF statistics: MSIX + ATTN */
3194		/* PF */
3195 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3196 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3197 (CHIP_MODE_IS_4_PORT(bp) ?
3198 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3199		/* ATTN */
3200 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3201 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3202 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3203 (CHIP_MODE_IS_4_PORT(bp) ?
3204 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3205 }
3206
3207	/* function setup flags */
3208 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
3209
3210	/* This flag is relevant for E1x only.
3211	 * E2 doesn't have a TPA configuration in a function level.
3212	 */
3213 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
3214
3215 func_init.func_flgs = flags;
3216 func_init.pf_id = BP_FUNC(bp);
3217 func_init.func_id = BP_FUNC(bp);
3218 func_init.spq_map = bp->spq_mapping;
3219 func_init.spq_prod = bp->spq_prod_idx;
3220
3221 bnx2x_func_init(bp, &func_init);
3222
3223 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3224
3225	/*
3226	 * Congestion management values depend on the link rate.
3227	 * There is no active link so initial link rate is set to 10 Gbps.
3228	 * When the link comes up, the congestion management values are
3229	 * re-calculated according to the actual link rate.
3230	 */
3231 bp->link_vars.line_speed = SPEED_10000;
3232 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3233
3234	/* Only the PMF sets the HW */
3235 if (bp->port.pmf)
3236 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3237
3238	/* init Event Queue - PCI bus guarantees correct endianity */
3239 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3240 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3241 eq_data.producer = bp->eq_prod;
3242 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3243 eq_data.sb_id = DEF_SB_ID;
3244 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3245}
3246
3247static void bnx2x_e1h_disable(struct bnx2x *bp)
3248{
3249 int port = BP_PORT(bp);
3250
3251 bnx2x_tx_disable(bp);
3252
3253 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3254}
3255
3256static void bnx2x_e1h_enable(struct bnx2x *bp)
3257{
3258 int port = BP_PORT(bp);
3259
3260 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
3261
3262
3263 netif_tx_wake_all_queues(bp->dev);
3264
3265	/*
3266	 * Should not call netif_carrier_on since it will be called if the link
3267	 * is up when checking for link state
3268	 */
3269}
3270
3271#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3272
3273static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3274{
3275 struct eth_stats_info *ether_stat =
3276 &bp->slowpath->drv_info_to_mcp.ether_stat;
3277 struct bnx2x_vlan_mac_obj *mac_obj =
3278 &bp->sp_objs->mac_obj;
3279 int i;
3280
3281 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3282 ETH_STAT_INFO_VERSION_LEN);
3283
3284	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3285	 * mac_local field in ether_stat struct. The base address is offset by 2
3286	 * bytes to account for the field being 8 bytes but a mac address is
3287	 * only 6 bytes. Likewise, the stride for the get_n_elements function is
3288	 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
3289	 * allocated by the ether_stat struct, so the macs will land in their
3290	 * proper positions.
3291	 */
3292 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3293 memset(ether_stat->mac_local + i, 0,
3294 sizeof(ether_stat->mac_local[0]));
3295 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3296 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3297 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3298 ETH_ALEN);
3299 ether_stat->mtu_size = bp->dev->mtu;
3300 if (bp->dev->features & NETIF_F_RXCSUM)
3301 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3302 if (bp->dev->features & NETIF_F_TSO)
3303 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3304 ether_stat->feature_flags |= bp->common.boot_mode;
3305
3306 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3307
3308 ether_stat->txq_size = bp->tx_ring_size;
3309 ether_stat->rxq_size = bp->rx_ring_size;
3310
3311#ifdef CONFIG_BNX2X_SRIOV
3312 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3313#endif
3314}
3315
3316static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3317{
3318 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3319 struct fcoe_stats_info *fcoe_stat =
3320 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3321
3322 if (!CNIC_LOADED(bp))
3323 return;
3324
3325 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3326
3327 fcoe_stat->qos_priority =
3328 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3329
3330	/* insert FCoE stats from ramrod response */
3331 if (!NO_FCOE(bp)) {
3332 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3333 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3334 tstorm_queue_statistics;
3335
3336 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3337 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3338 xstorm_queue_statistics;
3339
3340 struct fcoe_statistics_params *fw_fcoe_stat =
3341 &bp->fw_stats_data->fcoe;
3342
3343 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3344 fcoe_stat->rx_bytes_lo,
3345 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3346
3347 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3348 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3349 fcoe_stat->rx_bytes_lo,
3350 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3351
3352 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3353 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3354 fcoe_stat->rx_bytes_lo,
3355 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3356
3357 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3358 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3359 fcoe_stat->rx_bytes_lo,
3360 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3361
3362 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3363 fcoe_stat->rx_frames_lo,
3364 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3365
3366 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3367 fcoe_stat->rx_frames_lo,
3368 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3369
3370 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3371 fcoe_stat->rx_frames_lo,
3372 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3373
3374 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3375 fcoe_stat->rx_frames_lo,
3376 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3377
3378 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3379 fcoe_stat->tx_bytes_lo,
3380 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3381
3382 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3383 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3384 fcoe_stat->tx_bytes_lo,
3385 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3386
3387 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3388 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3389 fcoe_stat->tx_bytes_lo,
3390 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3391
3392 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3393 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3394 fcoe_stat->tx_bytes_lo,
3395 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3396
3397 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3398 fcoe_stat->tx_frames_lo,
3399 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3400
3401 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3402 fcoe_stat->tx_frames_lo,
3403 fcoe_q_xstorm_stats->ucast_pkts_sent);
3404
3405 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3406 fcoe_stat->tx_frames_lo,
3407 fcoe_q_xstorm_stats->bcast_pkts_sent);
3408
3409 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3410 fcoe_stat->tx_frames_lo,
3411 fcoe_q_xstorm_stats->mcast_pkts_sent);
3412 }
3413
3414	/* ask L5 driver to add data to the struct */
3415 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3416}
3417
3418static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3419{
3420 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3421 struct iscsi_stats_info *iscsi_stat =
3422 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3423
3424 if (!CNIC_LOADED(bp))
3425 return;
3426
3427 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3428 ETH_ALEN);
3429
3430 iscsi_stat->qos_priority =
3431 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3432
3433	/* ask L5 driver to add data to the struct */
3434 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3435}
3436
3437/* called due to MCP event (on pmf):
3438 *	reread new bandwidth configuration
3439 *	configure FW
3440 *	notify others function about the change
3441 */
3442static void bnx2x_config_mf_bw(struct bnx2x *bp)
3443{
3444 if (bp->link_vars.link_up) {
3445 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3446 bnx2x_link_sync_notify(bp);
3447 }
3448 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3449}
3450
3451static void bnx2x_set_mf_bw(struct bnx2x *bp)
3452{
3453 bnx2x_config_mf_bw(bp);
3454 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3455}
3456
3457static void bnx2x_handle_eee_event(struct bnx2x *bp)
3458{
3459 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3460 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3461}
3462
3463static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3464{
3465 enum drv_info_opcode op_code;
3466 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3467
3468	/* if drv_info version supported by MFW doesn't match - send NACK */
3469 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3470 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3471 return;
3472 }
3473
3474 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3475 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3476
3477 memset(&bp->slowpath->drv_info_to_mcp, 0,
3478 sizeof(union drv_info_to_mcp));
3479
3480 switch (op_code) {
3481 case ETH_STATS_OPCODE:
3482 bnx2x_drv_info_ether_stat(bp);
3483 break;
3484 case FCOE_STATS_OPCODE:
3485 bnx2x_drv_info_fcoe_stat(bp);
3486 break;
3487 case ISCSI_STATS_OPCODE:
3488 bnx2x_drv_info_iscsi_stat(bp);
3489 break;
3490 default:
3491		/* if op code isn't supported - send NACK */
3492 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3493 return;
3494 }
3495
3496	/* if we got drv_info attn from MFW then these fields are defined in
3497	 * shmem2 for sure
3498	 */
3499 SHMEM2_WR(bp, drv_info_host_addr_lo,
3500 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3501 SHMEM2_WR(bp, drv_info_host_addr_hi,
3502 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3503
3504 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3505}
3506
3507static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3508{
3509 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
3510
3511 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
3512
3513		/* This is the only place besides the function initialization
3514		 * where the bp->flags can change so it is done without any
3515		 * locks
3516		 */
3518 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3519 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3520 bp->flags |= MF_FUNC_DIS;
3521
3522 bnx2x_e1h_disable(bp);
3523 } else {
3524 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3525 bp->flags &= ~MF_FUNC_DIS;
3526
3527 bnx2x_e1h_enable(bp);
3528 }
3529 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3530 }
3531 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
3532 bnx2x_config_mf_bw(bp);
3533 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3534 }
3535
3536	/* Report results to MCP */
3537 if (dcc_event)
3538 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
3539 else
3540 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
3541}
3542
3543/* must be called under the spq lock */
3544static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3545{
3546 struct eth_spe *next_spe = bp->spq_prod_bd;
3547
3548 if (bp->spq_prod_bd == bp->spq_last_bd) {
3549 bp->spq_prod_bd = bp->spq;
3550 bp->spq_prod_idx = 0;
3551 DP(BNX2X_MSG_SP, "end of spq\n");
3552 } else {
3553 bp->spq_prod_bd++;
3554 bp->spq_prod_idx++;
3555 }
3556 return next_spe;
3557}
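/* The SPQ is a simple linear ring: the producer advances one BD at a time
 * and, on reaching spq_last_bd, wraps both the BD pointer and the producer
 * index back to the start of the page.
 */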
3558
3559/* must be called under the spq lock */
3560static void bnx2x_sp_prod_update(struct bnx2x *bp)
3561{
3562 int func = BP_FUNC(bp);
3563
3564	/*
3565	 * Make sure that BD data is updated before writing the producer:
3566	 * BD data is written to the memory, the producer is read from the
3567	 * memory, thus we need a full memory barrier to ensure the ordering.
3568	 */
3569 mb();
3570
3571 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3572 bp->spq_prod_idx);
3573 mmiowb();
3574}
3575
3576/**
3577 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3578 *
3579 * @cmd:	command to check
3580 * @cmd_type:	command type
3581 */
3582static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3583{
3584 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3585 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3586 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3587 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3588 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3589 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3590 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3591 return true;
3592 else
3593 return false;
3594}
3595
3596/**
3597 * bnx2x_sp_post - place a single command on an SP ring
3598 *
3599 * @bp:		driver handle
3600 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
3601 * @cid:	SW CID the command is related to
3602 * @data_hi:	command private data address (high 32 bits)
3603 * @data_lo:	command private data address (low 32 bits)
3604 * @cmd_type:	command type (e.g. NONE, ETH)
3605 *
3606 * SP data is handled as if it's always an address pair, thus data fields are
3607 * not swapped to little endian in upper functions. Instead this function swaps
3608 * data as if it's two u32 fields.
3609 */
3610int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3611 u32 data_hi, u32 data_lo, int cmd_type)
3612{
3613 struct eth_spe *spe;
3614 u16 type;
3615 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3616
3617#ifdef BNX2X_STOP_ON_ERROR
3618 if (unlikely(bp->panic)) {
3619 BNX2X_ERR("Can't post SP when there is panic\n");
3620 return -EIO;
3621 }
3622#endif
3623
3624 spin_lock_bh(&bp->spq_lock);
3625
3626 if (common) {
3627 if (!atomic_read(&bp->eq_spq_left)) {
3628 BNX2X_ERR("BUG! EQ ring full!\n");
3629 spin_unlock_bh(&bp->spq_lock);
3630 bnx2x_panic();
3631 return -EBUSY;
3632 }
3633 } else if (!atomic_read(&bp->cq_spq_left)) {
3634 BNX2X_ERR("BUG! SPQ ring full!\n");
3635 spin_unlock_bh(&bp->spq_lock);
3636 bnx2x_panic();
3637 return -EBUSY;
3638 }
3639
3640 spe = bnx2x_sp_get_next(bp);
3641
3642	/* CID needs port number to be encoded in it */
3643 spe->hdr.conn_and_cmd_data =
3644 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3645 HW_CID(bp, cid));
3646
3647 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
3648
3649 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3650 SPE_HDR_FUNCTION_ID);
3651
3652 spe->hdr.type = cpu_to_le16(type);
3653
3654 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3655 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3656
3657	/*
3658	 * It's ok if the actual decrement is issued towards the memory
3659	 * somewhere between the spin_lock and spin_unlock. Thus no more explicit
3660	 * memory barrier is needed.
3661	 */
3662 if (common)
3663 atomic_dec(&bp->eq_spq_left);
3664 else
3665 atomic_dec(&bp->cq_spq_left);
3666
3667 DP(BNX2X_MSG_SP,
3668 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3669 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3670 (u32)(U64_LO(bp->spq_mapping) +
3671 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3672 HW_CID(bp, cid), data_hi, data_lo, type,
3673 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3674
3675 bnx2x_sp_prod_update(bp);
3676 spin_unlock_bh(&bp->spq_lock);
3677 return 0;
3678}
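/* Credit accounting sketch for bnx2x_sp_post(): "contextless" ramrods
 * complete on the event queue and consume eq_spq_left, while
 * connection-based ones complete on the completion queue and consume
 * cq_spq_left; the credits are returned when the corresponding completions
 * are processed elsewhere in the driver.
 */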
3679
3680/* acquire split MCP access lock register */
3681static int bnx2x_acquire_alr(struct bnx2x *bp)
3682{
3683 u32 j, val;
3684 int rc = 0;
3685
3686 might_sleep();
3687 for (j = 0; j < 1000; j++) {
3688 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3689 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3690 if (val & MCPR_ACCESS_LOCK_LOCK)
3691 break;
3692
3693 usleep_range(5000, 10000);
3694 }
3695 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3696 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3697 rc = -EBUSY;
3698 }
3699
3700 return rc;
3701}
3702
3703/* release split MCP access lock register */
3704static void bnx2x_release_alr(struct bnx2x *bp)
3705{
3706 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3707}
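/* MCPR_ACCESS_LOCK is a single lock arbitrating GRC access between the
 * driver and the MCP: acquisition is write-then-read-back (the HW keeps
 * the LOCK bit for one owner only), retried above for roughly 5-10
 * seconds, and release is a plain write of 0.
 */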
3708
3709#define BNX2X_DEF_SB_ATT_IDX 0x0001
3710#define BNX2X_DEF_SB_IDX 0x0002
3711
3712static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3713{
3714 struct host_sp_status_block *def_sb = bp->def_status_blk;
3715 u16 rc = 0;
3716
3717	barrier(); /* status block is written to by the chip */
3718 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3719 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3720 rc |= BNX2X_DEF_SB_ATT_IDX;
3721 }
3722
3723 if (bp->def_idx != def_sb->sp_sb.running_index) {
3724 bp->def_idx = def_sb->sp_sb.running_index;
3725 rc |= BNX2X_DEF_SB_IDX;
3726 }
3727
3728	/* Do not reorder: indices reading should complete before handling */
3729 barrier();
3730 return rc;
3731}
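/* The barrier() calls in bnx2x_update_dsb_idx() bracket reads of indices
 * that the chip DMAs into the default status block; the function compares
 * them against the driver's cached copies and returns a mask telling the
 * caller whether attention bits, slow path events, or both need handling.
 */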
3732
3733/*
3734 * slow path service functions
3735 */
3736
3737static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3738{
3739 int port = BP_PORT(bp);
3740 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3741 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3742 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3743 NIG_REG_MASK_INTERRUPT_PORT0;
3744 u32 aeu_mask;
3745 u32 nig_mask = 0;
3746 u32 reg_addr;
3747
3748 if (bp->attn_state & asserted)
3749 BNX2X_ERR("IGU ERROR\n");
3750
3751 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3752 aeu_mask = REG_RD(bp, aeu_addr);
3753
3754 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3755 aeu_mask, asserted);
3756 aeu_mask &= ~(asserted & 0x3ff);
3757 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3758
3759 REG_WR(bp, aeu_addr, aeu_mask);
3760 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3761
3762 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3763 bp->attn_state |= asserted;
3764 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3765
3766 if (asserted & ATTN_HARD_WIRED_MASK) {
3767 if (asserted & ATTN_NIG_FOR_FUNC) {
3768
3769 bnx2x_acquire_phy_lock(bp);
3770
3771			/* save nig interrupt mask */
3772 nig_mask = REG_RD(bp, nig_int_mask_addr);
3773
3774			/* If nig_mask is not set, no need to call the update
3775			 * function.
3776			 */
3777 if (nig_mask) {
3778 REG_WR(bp, nig_int_mask_addr, 0);
3779
3780 bnx2x_link_attn(bp);
3781 }
3782
3783			/* handle unicore attn? */
3784 }
3785 if (asserted & ATTN_SW_TIMER_4_FUNC)
3786 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3787
3788 if (asserted & GPIO_2_FUNC)
3789 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3790
3791 if (asserted & GPIO_3_FUNC)
3792 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3793
3794 if (asserted & GPIO_4_FUNC)
3795 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3796
3797 if (port == 0) {
3798 if (asserted & ATTN_GENERAL_ATTN_1) {
3799 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3800 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3801 }
3802 if (asserted & ATTN_GENERAL_ATTN_2) {
3803 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3804 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3805 }
3806 if (asserted & ATTN_GENERAL_ATTN_3) {
3807 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3808 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3809 }
3810 } else {
3811 if (asserted & ATTN_GENERAL_ATTN_4) {
3812 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3813 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3814 }
3815 if (asserted & ATTN_GENERAL_ATTN_5) {
3816 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3817 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3818 }
3819 if (asserted & ATTN_GENERAL_ATTN_6) {
3820 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3821 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3822 }
3823 }
3824
3825 }
3826
3827 if (bp->common.int_block == INT_BLOCK_HC)
3828 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3829 COMMAND_REG_ATTN_BITS_SET);
3830 else
3831 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3832
3833 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3834 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3835 REG_WR(bp, reg_addr, asserted);
3836
3837	/* now set back the mask */
3838 if (asserted & ATTN_NIG_FOR_FUNC) {
3839		/* Verify that IGU ack through BAR was written before restoring
3840		 * NIG mask. This loop should exit after 2-3 iterations max.
3841		 */
3842 if (bp->common.int_block != INT_BLOCK_HC) {
3843 u32 cnt = 0, igu_acked;
3844 do {
3845 igu_acked = REG_RD(bp,
3846 IGU_REG_ATTENTION_ACK_BITS);
3847 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
3848 (++cnt < MAX_IGU_ATTN_ACK_TO));
3849 if (!igu_acked)
3850 DP(NETIF_MSG_HW,
3851 "Failed to verify IGU ack on time\n");
3852 barrier();
3853 }
3854 REG_WR(bp, nig_int_mask_addr, nig_mask);
3855 bnx2x_release_phy_lock(bp);
3856 }
3857}
3858
3859static void bnx2x_fan_failure(struct bnx2x *bp)
3860{
3861 int port = BP_PORT(bp);
3862 u32 ext_phy_config;
3863
3864 ext_phy_config =
3865 SHMEM_RD(bp,
3866 dev_info.port_hw_config[port].external_phy_config);
3867
3868 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3869 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
3870 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
3871 ext_phy_config);
3872
3873	/* log the failure */
3874 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
3875 "Please contact OEM Support for assistance\n");
3876
3877	/* Schedule device reset (unload)
3878	 * This is due to some boards consuming sufficient power when driver is
3879	 * up to overheat if fan fails.
3880	 */
3881 smp_mb__before_clear_bit();
3882 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3883 smp_mb__after_clear_bit();
3884 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3885}
3886
3887static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3888{
3889 int port = BP_PORT(bp);
3890 int reg_offset;
3891 u32 val;
3892
3893 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3894 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3895
3896 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
3897
3898 val = REG_RD(bp, reg_offset);
3899 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3900 REG_WR(bp, reg_offset, val);
3901
3902 BNX2X_ERR("SPIO5 hw attention\n");
3903
3904		/* Fan failure attention */
3905 bnx2x_hw_reset_phy(&bp->link_params);
3906 bnx2x_fan_failure(bp);
3907 }
3908
3909 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
3910 bnx2x_acquire_phy_lock(bp);
3911 bnx2x_handle_module_detect_int(&bp->link_params);
3912 bnx2x_release_phy_lock(bp);
3913 }
3914
3915 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3916
3917 val = REG_RD(bp, reg_offset);
3918 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3919 REG_WR(bp, reg_offset, val);
3920
3921 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3922 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3923 bnx2x_panic();
3924 }
3925}
3926
3927static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3928{
3929 u32 val;
3930
3931 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3932
3933 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3934 BNX2X_ERR("DB hw attention 0x%x\n", val);
3935		/* DORQ discard attention */
3936 if (val & 0x2)
3937 BNX2X_ERR("FATAL error from DORQ\n");
3938 }
3939
3940 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3941
3942 int port = BP_PORT(bp);
3943 int reg_offset;
3944
3945 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3946 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3947
3948 val = REG_RD(bp, reg_offset);
3949 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3950 REG_WR(bp, reg_offset, val);
3951
3952 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3953 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3954 bnx2x_panic();
3955 }
3956}
3957
3958static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3959{
3960 u32 val;
3961
3962 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3963
3964 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3965 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3966		/* CFC error attention */
3967 if (val & 0x2)
3968 BNX2X_ERR("FATAL error from CFC\n");
3969 }
3970
3971 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3972 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3973 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
3974		/* RQ_USDMDP_FIFO_OVERFLOW */
3975 if (val & 0x18000)
3976 BNX2X_ERR("FATAL error from PXP\n");
3977
3978 if (!CHIP_IS_E1x(bp)) {
3979 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3980 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3981 }
3982 }
3983
3984 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3985
3986 int port = BP_PORT(bp);
3987 int reg_offset;
3988
3989 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3990 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3991
3992 val = REG_RD(bp, reg_offset);
3993 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3994 REG_WR(bp, reg_offset, val);
3995
3996 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3997 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3998 bnx2x_panic();
3999 }
4000}
4001
4002static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4003{
4004 u32 val;
4005
4006 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4007
4008 if (attn & BNX2X_PMF_LINK_ASSERT) {
4009 int func = BP_FUNC(bp);
4010
4011 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4012 bnx2x_read_mf_cfg(bp);
4013 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4014 func_mf_config[BP_ABS_FUNC(bp)].config);
4015 val = SHMEM_RD(bp,
4016 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4017 if (val & DRV_STATUS_DCC_EVENT_MASK)
4018 bnx2x_dcc_event(bp,
4019 (val & DRV_STATUS_DCC_EVENT_MASK));
4020
4021 if (val & DRV_STATUS_SET_MF_BW)
4022 bnx2x_set_mf_bw(bp);
4023
4024 if (val & DRV_STATUS_DRV_INFO_REQ)
4025 bnx2x_handle_drv_info_req(bp);
4026
4027 if (val & DRV_STATUS_VF_DISABLED)
4028 bnx2x_vf_handle_flr_event(bp);
4029
4030 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4031 bnx2x_pmf_update(bp);
4032
4033 if (bp->port.pmf &&
4034 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4035 bp->dcbx_enabled > 0)
4036				/* start dcbx state machine */
4037 bnx2x_dcbx_set_params(bp,
4038 BNX2X_DCBX_STATE_NEG_RECEIVED);
4039 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4040 bnx2x_handle_afex_cmd(bp,
4041 val & DRV_STATUS_AFEX_EVENT_MASK);
4042 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4043 bnx2x_handle_eee_event(bp);
4044 if (bp->link_vars.periodic_flags &
4045 PERIODIC_FLAGS_LINK_EVENT) {
4046				/* sync with link */
4047 bnx2x_acquire_phy_lock(bp);
4048 bp->link_vars.periodic_flags &=
4049 ~PERIODIC_FLAGS_LINK_EVENT;
4050 bnx2x_release_phy_lock(bp);
4051 if (IS_MF(bp))
4052 bnx2x_link_sync_notify(bp);
4053 bnx2x_link_report(bp);
4054 }
4055
4056			/* Always call it here: bnx2x_link_report() will
4057			 * prevent the link indication duplication. */
4058 bnx2x__link_status_update(bp);
4059 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4060
4061 BNX2X_ERR("MC assert!\n");
4062 bnx2x_mc_assert(bp);
4063 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4064 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4067 bnx2x_panic();
4068
4069 } else if (attn & BNX2X_MCP_ASSERT) {
4070
4071 BNX2X_ERR("MCP assert!\n");
4072 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4073 bnx2x_fw_dump(bp);
4074
4075 } else
4076 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4077 }
4078
4079 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4080 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4081 if (attn & BNX2X_GRC_TIMEOUT) {
4082 val = CHIP_IS_E1(bp) ? 0 :
4083 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4084 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4085 }
4086 if (attn & BNX2X_GRC_RSV) {
4087 val = CHIP_IS_E1(bp) ? 0 :
4088 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4089 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4090 }
4091 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4092 }
4093}
4094
4095/*
4096 * Bits map:
4097 * 0-7   - Engine0 load counter.
4098 * 8-15  - Engine1 load counter.
4099 * 16    - Engine0 RESET_IN_PROGRESS bit.
4100 * 17    - Engine1 RESET_IN_PROGRESS bit.
4101 * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active
4102 *         function on the engine
4103 * 19    - Engine1 ONE_IS_LOADED.
4104 * 20    - Chip reset flow bit. When set, a non-leader must wait for both
4105 *         engines' leaders to complete (check for both RESET_IN_PROGRESS
4106 *         bits and not just for the one belonging to its engine).
4107 */
4108
4109#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4110
4111#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4112#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4113#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4114#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4115#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4116#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4117#define BNX2X_GLOBAL_RESET_BIT 0x00040000
4118
4119/*
4120 * Set the GLOBAL_RESET bit.
4121 *
4122 * Should be run under rtnl lock
4123 */
4124void bnx2x_set_reset_global(struct bnx2x *bp)
4125{
4126 u32 val;
4127 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4128 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4129 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4130 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4131}
4132
4133/*
4134 * Clear the GLOBAL_RESET bit.
4135 *
4136 * Should be run under rtnl lock
4137 */
4138static void bnx2x_clear_reset_global(struct bnx2x *bp)
4139{
4140 u32 val;
4141 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4142 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4143 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4144 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4145}
4146
4147/*
4148 * Checks the GLOBAL_RESET bit.
4149 *
4150 * Should be run under rtnl lock
4151 */
4152static bool bnx2x_reset_is_global(struct bnx2x *bp)
4153{
4154 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4155
4156 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4157 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4158}
4159
4160/*
4161 * Clear RESET_IN_PROGRESS bit for the current engine.
4162 *
4163 * Should be run under rtnl lock
4164 */
4165static void bnx2x_set_reset_done(struct bnx2x *bp)
4166{
4167 u32 val;
4168 u32 bit = BP_PATH(bp) ?
4169 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4170 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4171 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4172
4173	/* Clear the bit */
4174 val &= ~bit;
4175 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4176
4177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4178}
4179
4180/*
4181 * Set RESET_IN_PROGRESS for the current engine.
4182 *
4183 * Should be run under rtnl lock
4184 */
4185void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4186{
4187 u32 val;
4188 u32 bit = BP_PATH(bp) ?
4189 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4190 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4191 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4192
4193	/* Set the bit */
4194 val |= bit;
4195 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4196 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4197}
4198
4199
4200
4201
4202
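
/*
 * Checks the RESET_IN_PROGRESS bit for the given engine.
 * Should be run under rtnl lock.
 */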
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	u32 bit = engine ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;

	/* return false if RESET_IN_PROGRESS bit is set */
	return (val & bit) ? false : true;
}
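
/*
 * Set pf load mark for the current pf.
 *
 * Should be run under rtnl lock.
 */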
void bnx2x_set_pf_load(struct bnx2x *bp)
{
	u32 val1, val;
	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK;
	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* set bit of that PF */
	val1 |= (1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);

	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
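
/**
 * bnx2x_clear_pf_load - clear pf load mark
 *
 * @bp:		driver handle
 *
 * Should be run under rtnl lock.
 * Decrements the load counter for the current engine. Returns
 * whether other functions are still loaded.
 */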
bool bnx2x_clear_pf_load(struct bnx2x *bp)
{
	u32 val1, val;
	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK;
	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* clear bit of that PF */
	val1 &= ~(1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);

	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	return val1 != 0;
}
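
/*
 * Read the load status for the given engine.
 *
 * Should be run under rtnl lock.
 */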
static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
{
	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK);
	u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT);
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);

	val = (val & mask) >> shift;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
	   engine, val);

	return val != 0;
}

static void _print_parity(struct bnx2x *bp, u32 reg)
{
	pr_cont(" [0x%08x] ", REG_RD(bp, reg));
}

static void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}

static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
					    int *par_num, bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res |= true; /* Each bit is real error! */

			if (print) {
				switch (cur_bit) {
				case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
					_print_next_block((*par_num)++, "BRB");
					_print_parity(bp,
						      BRB1_REG_BRB1_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "PARSER");
					_print_parity(bp, PRS_REG_PRS_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
					_print_next_block((*par_num)++, "TSDM");
					_print_parity(bp,
						      TSDM_REG_TSDM_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "SEARCHER");
					_print_parity(bp, SRC_REG_SRC_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
					_print_next_block((*par_num)++, "TCM");
					_print_parity(bp, TCM_REG_TCM_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "TSEMI");
					_print_parity(bp,
						      TSEM_REG_TSEM_PRTY_STS_0);
					_print_parity(bp,
						      TSEM_REG_TSEM_PRTY_STS_1);
					break;
				case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
					_print_next_block((*par_num)++, "XPB");
					_print_parity(bp, GRCBASE_XPB +
							  PB_REG_PB_PRTY_STS);
					break;
				}
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
					    int *par_num, bool *global,
					    bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res |= true; /* Each bit is real error! */
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "PBF");
					_print_parity(bp, PBF_REG_PBF_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "QM");
					_print_parity(bp, QM_REG_QM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "TM");
					_print_parity(bp, TM_REG_TM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "XSDM");
					_print_parity(bp,
						      XSDM_REG_XSDM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "XCM");
					_print_parity(bp, XCM_REG_XCM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "XSEMI");
					_print_parity(bp,
						      XSEM_REG_XSEM_PRTY_STS_0);
					_print_parity(bp,
						      XSEM_REG_XSEM_PRTY_STS_1);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "DOORBELLQ");
					_print_parity(bp,
						      DORQ_REG_DORQ_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "NIG");
					if (CHIP_IS_E1x(bp)) {
						_print_parity(bp,
							NIG_REG_NIG_PRTY_STS);
					} else {
						_print_parity(bp,
							NIG_REG_NIG_PRTY_STS_0);
						_print_parity(bp,
							NIG_REG_NIG_PRTY_STS_1);
					}
				}
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				if (print)
					_print_next_block((*par_num)++,
							  "VAUX PCI CORE");
				*global = true;
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "DEBUG");
					_print_parity(bp, DBG_REG_DBG_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "USDM");
					_print_parity(bp,
						      USDM_REG_USDM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "UCM");
					_print_parity(bp, UCM_REG_UCM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "USEMI");
					_print_parity(bp,
						      USEM_REG_USEM_PRTY_STS_0);
					_print_parity(bp,
						      USEM_REG_USEM_PRTY_STS_1);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "UPB");
					_print_parity(bp, GRCBASE_UPB +
							  PB_REG_PB_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "CSDM");
					_print_parity(bp,
						      CSDM_REG_CSDM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "CCM");
					_print_parity(bp, CCM_REG_CCM_PRTY_STS);
				}
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
					    int *par_num, bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res |= true; /* Each bit is real error! */
			if (print) {
				switch (cur_bit) {
				case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "CSEMI");
					_print_parity(bp,
						      CSEM_REG_CSEM_PRTY_STS_0);
					_print_parity(bp,
						      CSEM_REG_CSEM_PRTY_STS_1);
					break;
				case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
					_print_next_block((*par_num)++, "PXP");
					_print_parity(bp, PXP_REG_PXP_PRTY_STS);
					_print_parity(bp,
						      PXP2_REG_PXP2_PRTY_STS_0);
					_print_parity(bp,
						      PXP2_REG_PXP2_PRTY_STS_1);
					break;
				case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "PXPPCICLOCKCLIENT");
					break;
				case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
					_print_next_block((*par_num)++, "CFC");
					_print_parity(bp,
						      CFC_REG_CFC_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
					_print_next_block((*par_num)++, "CDU");
					_print_parity(bp, CDU_REG_CDU_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
					_print_next_block((*par_num)++, "DMAE");
					_print_parity(bp,
						      DMAE_REG_DMAE_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
					_print_next_block((*par_num)++, "IGU");
					if (CHIP_IS_E1x(bp))
						_print_parity(bp,
							      HC_REG_HC_PRTY_STS);
					else
						_print_parity(bp,
							      IGU_REG_IGU_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
					_print_next_block((*par_num)++, "MISC");
					_print_parity(bp,
						      MISC_REG_MISC_PRTY_STS);
					break;
				}
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
					    int *par_num, bool *global,
					    bool print)
{
	bool res = false;
	u32 cur_bit;
	int i;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP ROM");
				*global = true;
				res |= true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP UMP RX");
				*global = true;
				res |= true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP UMP TX");
				*global = true;
				res |= true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP SCPAD");
				/* clear latched SCPAD PARITY from MCP */
				REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
				       1UL << 10);
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
					    int *par_num, bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res |= true; /* Each bit is real error! */
			if (print) {
				switch (cur_bit) {
				case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "PGLUE_B");
					_print_parity(bp,
						      PGLUE_B_REG_PGLUE_B_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
					_print_next_block((*par_num)++, "ATC");
					_print_parity(bp,
						      ATC_REG_ATC_PRTY_STS);
					break;
				}
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
			      u32 *sig)
{
	bool res = false;

	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||
	    (sig[2] & HW_PRTY_ASSERT_SET_2) ||
	    (sig[3] & HW_PRTY_ASSERT_SET_3) ||
	    (sig[4] & HW_PRTY_ASSERT_SET_4)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
				 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
		   sig[0] & HW_PRTY_ASSERT_SET_0,
		   sig[1] & HW_PRTY_ASSERT_SET_1,
		   sig[2] & HW_PRTY_ASSERT_SET_2,
		   sig[3] & HW_PRTY_ASSERT_SET_3,
		   sig[4] & HW_PRTY_ASSERT_SET_4);
		if (print)
			netdev_err(bp->dev,
				   "Parity errors detected in blocks: ");
		res |= bnx2x_check_blocks_with_parity0(bp,
			sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
		res |= bnx2x_check_blocks_with_parity1(bp,
			sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
		res |= bnx2x_check_blocks_with_parity2(bp,
			sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
		res |= bnx2x_check_blocks_with_parity3(bp,
			sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
		res |= bnx2x_check_blocks_with_parity4(bp,
			sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);

		if (print)
			pr_cont("\n");
	}

	return res;
}
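
/**
 * bnx2x_chk_parity_attn - checks for parity attentions.
 *
 * @bp:		driver handle
 * @global:	true if there was a global attention
 * @print:	show parity attention in syslog
 */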
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
{
	struct attn_route attn = { {0} };
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
		port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
		port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
		port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
		port*4);

	/* Since MCP attentions can't be disabled inside the block, we need to
	 * read AEU registers to see whether they're currently disabled
	 */
	attn.sig[3] &= ((REG_RD(bp,
				!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
				      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
			 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
			~MISC_AEU_ENABLE_MCP_PRTY_BITS);

	if (!CHIP_IS_E1x(bp))
		attn.sig[4] = REG_RD(bp,
			MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
			port*4);

	return bnx2x_parity_attn(bp, global, print, attn.sig);
}

static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
			  (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
					AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;
	bool global = false;

	/* need to take HW lock because MCP or other port might also
	 * try to handle this event
	 */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp, &global, true)) {
#ifndef BNX2X_STOP_ON_ERROR
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
#else
		bnx2x_panic();
#endif
		bnx2x_release_alr(bp);
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (!CHIP_IS_E1x(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local def status block */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
				    attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
				   attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update)
{
	u32 igu_addr = bp->igu_base_addr;
	igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
			     igu_addr);
}

static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}

static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	u8 err = elem->message.error;

	if (!bp->cnic_eth_dev.starting_cid ||
	    (cid < bp->cnic_eth_dev.starting_cid &&
	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(err)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp, false);
	}
	bnx2x_cnic_cfc_comp(bp, cid, err);
	return 0;
}

static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
{
	struct bnx2x_mcast_ramrod_params rparam;
	int rc;

	memset(&rparam, 0, sizeof(rparam));

	rparam.mcast_obj = &bp->mcast_obj;

	netif_addr_lock_bh(bp->dev);

	/* Clear pending state for the last command */
	bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}

	netif_addr_unlock_bh(bp->dev);
}

static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
					    union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;
	u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
	struct bnx2x_vlan_mac_obj *vlan_mac_obj;

	/* Always push next commands out, don't wait here */
	__set_bit(RAMROD_CONT, &ramrod_flags);

	switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
		>> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
		if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
		else
			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;

		break;
	case BNX2X_FILTER_MCAST_PENDING:
		DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
		/* This is only relevant for 57710 where multicast MACs are
		 * configured as unicast MACs using the same ramrod.
		 */
		bnx2x_handle_mcast_eqe(bp);
		return;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}

	rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);

	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
}

static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);

static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
{
	netif_addr_lock_bh(bp->dev);

	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

	/* Send rx_mode command again if was requested */
	if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
		bnx2x_set_storm_rx_mode(bp);
	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
				    &bp->sp_state))
		bnx2x_set_iscsi_eth_rx_mode(bp, true);
	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
				    &bp->sp_state))
		bnx2x_set_iscsi_eth_rx_mode(bp, false);

	netif_addr_unlock_bh(bp->dev);
}

static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
				       union event_ring_elem *elem)
{
	if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
		DP(BNX2X_MSG_SP,
		   "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
		   elem->message.data.vif_list_event.func_bit_map);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
				 elem->message.data.vif_list_event.func_bit_map);
	} else if (elem->message.data.vif_list_event.echo ==
		   VIF_LIST_RULE_SET) {
		DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
	}
}

static void bnx2x_after_function_update(struct bnx2x *bp)
{
	int q, rc;
	struct bnx2x_fastpath *fp;
	struct bnx2x_queue_state_params queue_params = {NULL};
	struct bnx2x_queue_update_params *q_update_params =
		&queue_params.params.update;

	/* Send Q update command with afex vlan removal values for all Qs */
	queue_params.cmd = BNX2X_Q_CMD_UPDATE;

	/* set silent vlan removal values according to vlan mode */
	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
		  &q_update_params->update_flags);
	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
		  &q_update_params->update_flags);
	__set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

	/* in access mode mark mask and value are 0 to strip all vlans */
	if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
		q_update_params->silent_removal_value = 0;
		q_update_params->silent_removal_mask = 0;
	} else {
		q_update_params->silent_removal_value =
			(bp->afex_def_vlan_tag & VLAN_VID_MASK);
		q_update_params->silent_removal_mask = VLAN_VID_MASK;
	}

	for_each_eth_queue(bp, q) {
		/* Set the appropriate Queue object */
		fp = &bp->fp[q];
		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* send the ramrod */
		rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  q);
	}

	if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
		fp = &bp->fp[FCOE_IDX(bp)];
		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* clear pending completion bit */
		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

		/* mark latest Q bit */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* send Q update ramrod for FCoE Q */
		rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  FCOE_IDX(bp));
	} else {
		/* If no FCoE ring - ACK MCP now */
		bnx2x_link_report(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
	}
}

static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
	struct bnx2x *bp, u32 cid)
{
	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);

	if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
		return &bnx2x_fcoe_sp_obj(bp, q_obj);
	else
		return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}

static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u8 echo;
	u32 cid;
	u8 opcode;
	int rc, spqe_cnt = 0;
	struct bnx2x_queue_sp_obj *q_obj;
	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp and thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u  bp->eq_spq_left %x\n",
	   hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		rc = bnx2x_iov_eq_sp_event(bp, elem);
		if (!rc) {
			DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
			   rc);
			goto next_spqe;
		}

		/* elem CID originates from FW; actually LE */
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;

		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_VF_PF_CHANNEL:
			DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
			bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
			continue;

		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
			   "got statistics comp event %d\n",
			   bp->stats_comp++);
			/* nothing to do with stats comp */
			goto next_spqe;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/* we may want to verify here that the bp state is
			 * OPEN
			 */
			DP(BNX2X_MSG_SP,
			   "got delete ramrod for MULTI[%d]\n", cid);

			if (CNIC_LOADED(bp) &&
			    !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;

			q_obj = bnx2x_cid_to_q_obj(bp, cid);

			if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
				break;

			goto next_spqe;

		case EVENT_RING_OPCODE_STOP_TRAFFIC:
			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
			if (f_obj->complete_cmd(bp, f_obj,
						BNX2X_F_CMD_TX_STOP))
				break;
			goto next_spqe;

		case EVENT_RING_OPCODE_START_TRAFFIC:
			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
			if (f_obj->complete_cmd(bp, f_obj,
						BNX2X_F_CMD_TX_START))
				break;
			goto next_spqe;

		case EVENT_RING_OPCODE_FUNCTION_UPDATE:
			echo = elem->message.data.function_update_event.echo;
			if (echo == SWITCH_UPDATE) {
				DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
				   "got FUNC_SWITCH_UPDATE ramrod\n");
				if (f_obj->complete_cmd(
					bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
					break;

			} else {
				DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
				   "AFEX: ramrod completed FUNCTION_UPDATE\n");
				f_obj->complete_cmd(bp, f_obj,
						    BNX2X_F_CMD_AFEX_UPDATE);

				/* We will perform the Queues update from
				 * sp_rtnl task as all Queue SP operations
				 * should run under rtnl_lock.
				 */
				smp_mb__before_clear_bit();
				set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
					&bp->sp_rtnl_state);
				smp_mb__after_clear_bit();

				schedule_delayed_work(&bp->sp_rtnl_task, 0);
			}

			goto next_spqe;

		case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
			f_obj->complete_cmd(bp, f_obj,
					    BNX2X_F_CMD_AFEX_VIFLISTS);
			bnx2x_after_afex_vif_lists(bp, elem);
			goto next_spqe;
		case EVENT_RING_OPCODE_FUNCTION_START:
			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
			   "got FUNC_START ramrod\n");
			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
				break;

			goto next_spqe;

		case EVENT_RING_OPCODE_FUNCTION_STOP:
			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
			   "got FUNC_STOP ramrod\n");
			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
				break;

			goto next_spqe;
		}

		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			cid = elem->message.data.eth_event.echo &
				BNX2X_SWCID_MASK;
			DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
			   cid);
			rss_raw->clear_pending(rss_raw);
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
			bnx2x_handle_classification_eqe(bp, elem);
			break;

		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got mcast ramrod\n");
			bnx2x_handle_mcast_eqe(bp);
			break;

		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
			bnx2x_handle_rx_mode_eqe(bp);
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
				  elem->message.opcode, bp->state);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->eq_spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);

	DP(BNX2X_MSG_SP, "sp task invoked\n");

	/* make sure the atomic interrupt_occurred has been written */
	smp_rmb();
	if (atomic_read(&bp->interrupt_occurred)) {

		/* what work needs to be performed? */
		u16 status = bnx2x_update_dsb_idx(bp);

		DP(BNX2X_MSG_SP, "status %x\n", status);
		DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
		atomic_set(&bp->interrupt_occurred, 0);

		/* HW attentions */
		if (status & BNX2X_DEF_SB_ATT_IDX) {
			bnx2x_attn_int(bp);
			status &= ~BNX2X_DEF_SB_ATT_IDX;
		}

		/* SP events: STAT_QUERY and others */
		if (status & BNX2X_DEF_SB_IDX) {
			struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

			if (FCOE_INIT(bp) &&
			    (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				/* Prevent local bottom-halves from running as
				 * we are going to change the local NAPI list.
				 */
				local_bh_disable();
				napi_schedule(&bnx2x_fcoe(bp, napi));
				local_bh_enable();
			}

			/* Handle EQ completions */
			bnx2x_eq_int(bp);
			bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
				     le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

			status &= ~BNX2X_DEF_SB_IDX;
		}

		if (unlikely(status))
			DP(BNX2X_MSG_SP,
			   "got an unknown interrupt! (status 0x%x)\n", status);

		/* ack status block only if something was actually handled */
		bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
			     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
	}

	/* must be called after the EQ processing (since eq leads to sriov
	 * ramrod completion flows).
	 * This flow may have been scheduled by the arrival of a ramrod
	 * completion, or by the sriov code rescheduling itself.
	 */
	bnx2x_iov_sp_task(bp);

	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
			       &bp->sp_state)) {
		bnx2x_link_report(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
	}
}

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	if (CNIC_LOADED(bp)) {
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}

	/* schedule sp task to perform default status block work, ack
	 * attention interrupts and eq interrupts
	 */
	bnx2x_schedule_sp_task(bp);

	return IRQ_HANDLED;
}
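
/* end of slow path */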
void bnx2x_drv_pulse(struct bnx2x *bp)
{
	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 bp->fw_drv_pulse_wr_seq);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (IS_PF(bp) &&
	    !BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u16 drv_pulse;
		u16 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		bnx2x_drv_pulse(bp);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);

		/* The delta between driver pulse and mcp response should not
		 * get too big; if the MFW is more than 5 pulses behind, it
		 * is most likely not responding.
		 */
		if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
			BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

	/* sample pf vf bulletin board for new posts from pf */
	if (IS_VF(bp))
		bnx2x_timer_sriov(bp);

	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
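
/*
 * nic init service functions
 */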
static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
	u32 i;
	if (!(len%4) && !(addr%4))
		for (i = 0; i < len; i += 4)
			REG_WR(bp, addr + i, fill);
	else
		for (i = 0; i < len; i++)
			REG_WR8(bp, addr + i, fill);
}
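
/* helper: writes FP SB data to FW - data_size in dwords */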
static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
				int fw_sb_id,
				u32 *sb_data_p,
				u32 data_size)
{
	int index;
	for (index = 0; index < data_size; index++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		       sizeof(u32)*index,
		       *(sb_data_p + index));
}

static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_SYNC_BLOCK_SIZE);
}
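
/* helper: writes SP SB data to FW */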
static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
				struct hc_sp_status_block_data *sp_sb_data)
{
	int func = BP_FUNC(bp);
	int i;
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		       i*sizeof(u32),
		       *((u32 *)sp_sb_data + i));
}

static void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.state = SB_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_SYNC_BLOCK_SIZE);
}

static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
					   int igu_sb_id, int igu_seg_id)
{
	hc_sm->igu_sb_id = igu_sb_id;
	hc_sm->igu_seg_id = igu_seg_id;
	hc_sm->timer_value = 0xFF;
	hc_sm->time_to_expire = 0xFFFFFFFF;
}

/* allocates state machine ids */
static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
{
	/* zero out state machine indices */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

	/* map indices */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}

void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
		   u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_ENABLED;
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_ENABLED;
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW - PCI guarantees endianity of regpairs */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
				       tx_usec);
}

static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset, reg_offset_en5;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
				 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;

		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
				REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (!CHIP_IS_E1x(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset_en5 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	/* SP SB */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	/* PCI guarantees endianity of regpairs */
	sp_sb_data.state = SB_ENABLED;
	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->tx_ticks, bp->rx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;

	atomic_set(&bp->eq_spq_left,
		   min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
5868
5869static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5870 unsigned long rx_mode_flags,
5871 unsigned long rx_accept_flags,
5872 unsigned long tx_accept_flags,
5873 unsigned long ramrod_flags)
5874{
5875 struct bnx2x_rx_mode_ramrod_params ramrod_param;
5876 int rc;
5877
5878 memset(&ramrod_param, 0, sizeof(ramrod_param));
5879
5880
5881 ramrod_param.cid = 0;
5882 ramrod_param.cl_id = cl_id;
5883 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5884 ramrod_param.func_id = BP_FUNC(bp);
5885
5886 ramrod_param.pstate = &bp->sp_state;
5887 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5888
5889 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5890 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5891
5892 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5893
5894 ramrod_param.ramrod_flags = ramrod_flags;
5895 ramrod_param.rx_mode_flags = rx_mode_flags;
5896
5897 ramrod_param.rx_accept_flags = rx_accept_flags;
5898 ramrod_param.tx_accept_flags = tx_accept_flags;
5899
5900 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5901 if (rc < 0) {
5902 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5903 return rc;
5904 }
5905
5906 return 0;
5907}
5908

static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
				   unsigned long *rx_accept_flags,
				   unsigned long *tx_accept_flags)
{
	/* Clear the flags first */
	*rx_accept_flags = 0;
	*tx_accept_flags = 0;

	switch (rx_mode) {
	case BNX2X_RX_MODE_NONE:
		/* 'drop all' supersedes any accept flags that may have been
		 * passed to the function.
		 */
		break;
	case BNX2X_RX_MODE_NORMAL:
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		break;
	case BNX2X_RX_MODE_ALLMULTI:
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		break;
	case BNX2X_RX_MODE_PROMISC:
		/* According to definition of SI mode, iface in promisc mode
		 * should receive matched and unmatched (in resolution of port)
		 * unicast packets.
		 */
		__set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		if (IS_MF_SI(bp))
			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
		else
			__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);

		break;
	default:
		BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
		return -EINVAL;
	}

	/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
	if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
	}

	return 0;
}
5980
5981static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5982{
5983 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5984 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5985 int rc;
5986
5987 if (!NO_FCOE(bp))
5988
5989 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5990
5991 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
5992 &tx_accept_flags);
5993 if (rc)
5994 return rc;
5995
5996 __set_bit(RAMROD_RX, &ramrod_flags);
5997 __set_bit(RAMROD_TX, &ramrod_flags);
5998
5999 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6000 rx_accept_flags, tx_accept_flags,
6001 ramrod_flags);
6002}
6003

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (IS_MF_SI(bp))
		/* In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to NIG LLH.
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
	else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);

	/* Zero this manually as its initialization is
	 * currently missing in the initTool.
	 */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (!CHIP_IS_E1x(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		 * initialized inside bnx2x_pf_init
		 */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
}

static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
}

static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return BP_L_ID(fp->bp) + fp->index;
	else
		return bnx2x_fp_igu_sb_id(fp);
}

static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
	u8 cos;
	unsigned long q_type = 0;
	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
	fp->rx_queue = fp_idx;
	fp->cid = fp_idx;
	fp->cl_id = bnx2x_fp_cl_id(fp);
	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);

	/* init shortcut */
	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);

	/* init tx data */
	for_each_cos_in_tx_queue(fp, cos) {
		bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
				  FP_COS_TO_TXQ(fp, cos, bp),
				  BNX2X_TX_SB_INDEX_BASE + cos, fp);
		cids[cos] = fp->txdata_ptr[cos]->cid;
	}

	/* nothing more for a VF to do here */
	if (IS_VF(bp))
		return;

	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_update_fpsb_idx(fp);
	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
			     fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	/* Configure classification DBs */
	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	*txdata->tx_cons_sb = cpu_to_le16(0);

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i)
		bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
}

static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_eth_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}

static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
			  fp);

	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings_cnic(bp);
	bnx2x_init_tx_rings_cnic(bp);

	/* flush all */
	mb();
	mmiowb();
}

void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
	int i;

	/* Setup NIC internals and enable interrupts */
	for_each_eth_queue(bp, i)
		bnx2x_init_eth_fp(bp, i);

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);

	if (IS_PF(bp)) {
		/* Initialize MOD_ABS interrupts */
		bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
				       bp->common.shmem_base,
				       bp->common.shmem2_base, BP_PORT(bp));

		/* initialize the default status block and sp ring */
		bnx2x_init_def_sb(bp);
		bnx2x_update_dsb_idx(bp);
		bnx2x_init_sp_ring(bp);
	} else {
		bnx2x_memset_stats(bp);
	}
}
6265void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6266{
6267 bnx2x_init_eq_ring(bp);
6268 bnx2x_init_internal(bp, load_code);
6269 bnx2x_pf_init(bp);
6270 bnx2x_stats_init(bp);
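	/* flush all before enabling interrupts */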
6273 mb();
6274 mmiowb();
6275
6276 bnx2x_int_enable(bp);
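	/* Check for SPIO5 */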
6279 bnx2x_attn_int_deasserted0(bp,
6280 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6281 AEU_INPUTS_ATTN_BITS_SPIO5);
6282}
6283
6284
6285static int bnx2x_gunzip_init(struct bnx2x *bp)
6286{
6287 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6288 &bp->gunzip_mapping, GFP_KERNEL);
6289 if (bp->gunzip_buf == NULL)
6290 goto gunzip_nomem1;
6291
6292 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6293 if (bp->strm == NULL)
6294 goto gunzip_nomem2;
6295
6296 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6297 if (bp->strm->workspace == NULL)
6298 goto gunzip_nomem3;
6299
6300 return 0;
6301
6302gunzip_nomem3:
6303 kfree(bp->strm);
6304 bp->strm = NULL;
6305
6306gunzip_nomem2:
6307 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6308 bp->gunzip_mapping);
6309 bp->gunzip_buf = NULL;
6310
6311gunzip_nomem1:
6312 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
6313 return -ENOMEM;
6314}
6315
6316static void bnx2x_gunzip_end(struct bnx2x *bp)
6317{
6318 if (bp->strm) {
6319 vfree(bp->strm->workspace);
6320 kfree(bp->strm);
6321 bp->strm = NULL;
6322 }
6323
6324 if (bp->gunzip_buf) {
6325 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6326 bp->gunzip_mapping);
6327 bp->gunzip_buf = NULL;
6328 }
6329}
6330
6331static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6332{
6333 int n, rc;
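	/* check gzip header */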
6336 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6337 BNX2X_ERR("Bad gzip header\n");
6338 return -EINVAL;
6339 }
6340
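	/* the fixed gzip header is 10 bytes; an optional null-terminated
	 * file name may follow it
	 */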
6341 n = 10;
6342
6343#define FNAME 0x8
6344
6345 if (zbuf[3] & FNAME)
6346 while ((zbuf[n++] != 0) && (n < len));
6347
6348 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6349 bp->strm->avail_in = len - n;
6350 bp->strm->next_out = bp->gunzip_buf;
6351 bp->strm->avail_out = FW_BUF_SIZE;
6352
6353 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6354 if (rc != Z_OK)
6355 return rc;
6356
6357 rc = zlib_inflate(bp->strm, Z_FINISH);
6358 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6359 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6360 bp->strm->msg);
6361
6362 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6363 if (bp->gunzip_outlen & 0x3)
6364 netdev_err(bp->dev,
6365 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6366 bp->gunzip_outlen);
6367 bp->gunzip_outlen >>= 2;
6368
6369 zlib_inflateEnd(bp->strm);
6370
6371 if (rc == Z_STREAM_END)
6372 return 0;
6373
6374 return rc;
6375}
6376
6384static void bnx2x_lb_pckt(struct bnx2x *bp)
6385{
6386 u32 wb_write[3];
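	/* Ethernet source and destination addresses */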
6389 wb_write[0] = 0x55555555;
6390 wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
6392 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
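	/* NON-IP protocol */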
6395 wb_write[0] = 0x09000000;
6396 wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
6398 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6399}
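/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */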
6405static int bnx2x_int_mem_test(struct bnx2x *bp)
6406{
6407 int factor;
6408 int count, i;
6409 u32 val = 0;
6410
6411 if (CHIP_REV_IS_FPGA(bp))
6412 factor = 120;
6413 else if (CHIP_REV_IS_EMUL(bp))
6414 factor = 200;
6415 else
6416 factor = 1;
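	/* disable inputs of parser neighbor blocks */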
6419 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6420 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6421 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6422 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6423
6424
6425 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6426
6427
6428 bnx2x_lb_pckt(bp);
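	/* wait until NIG register shows 1 packet of size 0x10 */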
6432 count = 1000 * factor;
6433 while (count) {
6434
6435 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6436 val = *bnx2x_sp(bp, wb_data[0]);
6437 if (val == 0x10)
6438 break;
6439
6440 usleep_range(10000, 20000);
6441 count--;
6442 }
6443 if (val != 0x10) {
6444 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6445 return -1;
6446 }
6447
6448
6449 count = 1000 * factor;
6450 while (count) {
6451 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6452 if (val == 1)
6453 break;
6454
6455 usleep_range(10000, 20000);
6456 count--;
6457 }
6458 if (val != 0x1) {
6459 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6460 return -2;
6461 }
6462
6463
6464 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6465 msleep(50);
6466 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6467 msleep(50);
6468 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6469 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6470
6471 DP(NETIF_MSG_HW, "part2\n");
6472
6473
6474 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6475 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6476 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6477 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6478
6479
6480 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6481
6482
6483 for (i = 0; i < 10; i++)
6484 bnx2x_lb_pckt(bp);
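	/* wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */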
6488 count = 1000 * factor;
6489 while (count) {
6490
6491 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6492 val = *bnx2x_sp(bp, wb_data[0]);
6493 if (val == 0xb0)
6494 break;
6495
6496 usleep_range(10000, 20000);
6497 count--;
6498 }
6499 if (val != 0xb0) {
6500 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6501 return -3;
6502 }
6503
6504
6505 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6506 if (val != 2)
6507 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6508
6509
6510 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6511
6512
6513 msleep(10 * factor);
6514
6515 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6516 if (val != 3)
6517 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6518
6519
6520 for (i = 0; i < 11; i++)
6521 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6522 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6523 if (val != 1) {
6524 BNX2X_ERR("clear of NIG failed\n");
6525 return -4;
6526 }
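	/* Reset and init BRB, PRS, NIG */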
6529 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6530 msleep(50);
6531 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6532 msleep(50);
6533 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6534 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);
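	/* Enable inputs of parser neighbor blocks */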
6540 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6541 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6542 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6543 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6544
6545 DP(NETIF_MSG_HW, "done\n");
6546
6547 return 0;
6548}
6549
6550static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6551{
6552 u32 val;
6553
6554 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6555 if (!CHIP_IS_E1x(bp))
6556 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6557 else
6558 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6559 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6560 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6561
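	/* mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */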
6567 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6568 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6569 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6570 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6571 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6572 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6573
6574
6575 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6576 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6577 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6578
6579
6580 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6581 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6582 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6583 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6584
6585
6586
6587 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6588 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6589 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6590 if (!CHIP_IS_E1x(bp))
6591 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6592 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6593 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6594
6595 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6596 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6597 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6598
6599
	if (!CHIP_IS_E1x(bp))
		/* enable VFC attentions: bits 11 and 12 */
		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6603
6604 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6605 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6606
6607 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);
6608}
6609
6610static void bnx2x_reset_common(struct bnx2x *bp)
6611{
6612 u32 val = 0x1400;
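	/* reset_common */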
6615 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6616 0xd3ffff7f);
6617
6618 if (CHIP_IS_E3(bp)) {
6619 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6620 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6621 }
6622
6623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6624}
6625
6626static void bnx2x_setup_dmae(struct bnx2x *bp)
6627{
6628 bp->dmae_ready = 0;
6629 spin_lock_init(&bp->dmae_lock);
6630}
6631
6632static void bnx2x_init_pxp(struct bnx2x *bp)
6633{
6634 u16 devctl;
6635 int r_order, w_order;
6636
6637 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6638 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6639 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6640 if (bp->mrrs == -1)
6641 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6642 else {
6643 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6644 r_order = bp->mrrs;
6645 }
6646
6647 bnx2x_init_pxp_arb(bp, r_order, w_order);
6648}
6649
6650static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6651{
6652 int is_required;
6653 u32 val;
6654 int port;
6655
6656 if (BP_NOMCP(bp))
6657 return;
6658
6659 is_required = 0;
6660 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6661 SHARED_HW_CFG_FAN_FAILURE_MASK;
6662
6663 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6664 is_required = 1;
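	/* The fan failure mechanism is usually related to the PHY type,
	 * since the power consumption of the board is affected by the PHY.
	 * A fan is required for most designs with SFX7101, BCM8727 and
	 * BCM8481 PHYs.
	 */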
6671 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6672 for (port = PORT_0; port < PORT_MAX; port++) {
6673 is_required |=
6674 bnx2x_fan_failure_det_req(
6675 bp,
6676 bp->common.shmem_base,
6677 bp->common.shmem2_base,
6678 port);
6679 }
6680
6681 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6682
6683 if (is_required == 0)
6684 return;
6685
6686
6687 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6688
6689
6690 val = REG_RD(bp, MISC_REG_SPIO_INT);
6691 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6692 REG_WR(bp, MISC_REG_SPIO_INT, val);
6693
6694
6695 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6696 val |= MISC_SPIO_SPIO5;
6697 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6698}
6699
6700void bnx2x_pf_disable(struct bnx2x *bp)
6701{
6702 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6703 val &= ~IGU_PF_CONF_FUNC_EN;
6704
6705 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6706 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6707 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6708}
6709
6710static void bnx2x__common_init_phy(struct bnx2x *bp)
6711{
6712 u32 shmem_base[2], shmem2_base[2];
6713
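	/* Avoid common init in case MFW supports LFA */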
6714 if (SHMEM2_RD(bp, size) >
6715 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6716 return;
6717 shmem_base[0] = bp->common.shmem_base;
6718 shmem2_base[0] = bp->common.shmem2_base;
6719 if (!CHIP_IS_E1x(bp)) {
6720 shmem_base[1] =
6721 SHMEM2_RD(bp, other_shmem_base_addr);
6722 shmem2_base[1] =
6723 SHMEM2_RD(bp, other_shmem2_base_addr);
6724 }
6725 bnx2x_acquire_phy_lock(bp);
6726 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6727 bp->common.chip_id);
6728 bnx2x_release_phy_lock(bp);
6729}
6730
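/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */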
6736static int bnx2x_init_hw_common(struct bnx2x *bp)
6737{
6738 u32 val;
6739
6740 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
6741
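	/* take the RESET lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */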
6746 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6747
6748 bnx2x_reset_common(bp);
6749 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6750
6751 val = 0xfffc;
6752 if (CHIP_IS_E3(bp)) {
6753 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6754 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6755 }
6756 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
6757
6758 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6759
6760 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
6761
6762 if (!CHIP_IS_E1x(bp)) {
6763 u8 abs_func_id;
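		/* In 4-port or 2-port mode we need to turn off master-enable
		 * for everyone; after that we turn it back on for ourselves.
		 * So, we disregard multi-function or not and always disable
		 * all functions on the given path, meaning 0,2,4,6 for
		 * path 0 and 1,3,5,7 for path 1.
		 */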
6772 for (abs_func_id = BP_PATH(bp);
6773 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
6774 if (abs_func_id == BP_ABS_FUNC(bp)) {
6775 REG_WR(bp,
6776 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
6777 1);
6778 continue;
6779 }
6780
6781 bnx2x_pretend_func(bp, abs_func_id);
6782
6783 bnx2x_pf_disable(bp);
6784 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6785 }
6786 }
6787
6788 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
6789 if (CHIP_IS_E1(bp)) {
6790
6791
6792 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6793 }
6794
6795 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
6796 bnx2x_init_pxp(bp);
6797
6798#ifdef __BIG_ENDIAN
6799 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6800 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6801 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6802 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6803 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6804
6805 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6806
6807
6808 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6809 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6810 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6811 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6812#endif
6813
6814 bnx2x_ilt_init_page_size(bp, INITOP_SET);
6815
6816 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6817 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
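	/* let the HW do its magic ... */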
6820 msleep(100);
6821
6822 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6823 if (val != 1) {
6824 BNX2X_ERR("PXP2 CFG failed\n");
6825 return -EBUSY;
6826 }
6827 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6828 if (val != 1) {
6829 BNX2X_ERR("PXP2 RD_INIT failed\n");
6830 return -EBUSY;
6831 }
6838 if (!CHIP_IS_E1x(bp)) {
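		/* Timers-block (E2) bug workaround: before the Timers block
		 * is used, the whole ILT must be initialized with zeroed
		 * entries that have the valid bit set, so the block never
		 * fetches an ILT line with the valid bit cleared. This is
		 * done once per path, in the COMMON phase.
		 */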
6901 struct ilt_client_info ilt_cli;
6902 struct bnx2x_ilt ilt;
6903 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6904 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
6905
6906
6907 ilt_cli.start = 0;
6908 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6909 ilt_cli.client_num = ILT_CLIENT_TM;
6910
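		/* Clear the ILT lines of the whole TM client range while
		 * pretending to be another function on this path, then
		 * switch back to our own function.
		 */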
6922 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
6923 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
6924 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6925
6926 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
6927 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
6928 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
6929 }
6930
6931 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6932 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6933
6934 if (!CHIP_IS_E1x(bp)) {
6935 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
6936 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
6937 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
6938
6939 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
6940
6941
6942 do {
6943 msleep(200);
6944 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
6945 } while (factor-- && (val != 1));
6946
6947 if (val != 1) {
6948 BNX2X_ERR("ATC_INIT failed\n");
6949 return -EBUSY;
6950 }
6951 }
6952
6953 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
6954
6955 bnx2x_iov_init_dmae(bp);
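	/* clean the DMAE memory */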
6958 bp->dmae_ready = 1;
6959 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
6960
6961 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
6962
6963 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
6964
6965 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
6966
6967 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
6968
6969 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6970 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6971 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6972 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6973
6974 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
6975
6976
6977 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6978
6979
6980 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6981 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6982
6983 if (CNIC_SUPPORT(bp))
6984 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6985
6986 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6987
	if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6991
6992 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6993
6994 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6995 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6996
6997 if (!CHIP_IS_E1(bp))
6998 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6999
7000 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7001 if (IS_MF_AFEX(bp)) {
7002
7003
7004
7005 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7006 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7007 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7008 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7009 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7010 } else {
7011
7012
7013
7014 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7015 bp->path_has_ovlan ? 7 : 6);
7016 }
7017 }
7018
7019 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7020 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7021 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7022 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7023
7024 if (!CHIP_IS_E1x(bp)) {
7025
7026 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7027 VFC_MEMORIES_RST_REG_CAM_RST |
7028 VFC_MEMORIES_RST_REG_RAM_RST);
7029 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7030 VFC_MEMORIES_RST_REG_CAM_RST |
7031 VFC_MEMORIES_RST_REG_RAM_RST);
7032
7033 msleep(20);
7034 }
7035
7036 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7037 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7038 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7039 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7040
7041
7042 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7043 0x80000000);
7044 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7045 0x80000000);
7046
7047 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7048 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7049 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7050
7051 if (!CHIP_IS_E1x(bp)) {
7052 if (IS_MF_AFEX(bp)) {
7053
7054
7055
7056 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7057 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7058 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7059 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7060 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7061 } else {
7062 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7063 bp->path_has_ovlan ? 7 : 6);
7064 }
7065 }
7066
7067 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7068
7069 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7070
7071 if (CNIC_SUPPORT(bp)) {
7072 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7073 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7074 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7075 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7076 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7077 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7078 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7079 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7080 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7081 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7082 }
7083 REG_WR(bp, SRC_REG_SOFT_RST, 0);
7084
	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev,
			  "please adjust the size of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));
7090
7091 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7092 val = (4 << 24) + (0 << 12) + 1024;
7093 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7094
7095 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7096 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7097
7098 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7099
7100
7101 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7102
7103 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7104
7105 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7106 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7107
7108 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7109 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
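	/* Reset PCIE errors for debug */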
7112 REG_WR(bp, 0x2814, 0xffffffff);
7113 REG_WR(bp, 0x3820, 0xffffffff);
7114
7115 if (!CHIP_IS_E1x(bp)) {
7116 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7117 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7118 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7119 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7120 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7121 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7122 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7123 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7124 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7125 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7126 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7127 }
7128
7129 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7130 if (!CHIP_IS_E1(bp)) {
7131
7132 if (!CHIP_IS_E3(bp))
7133 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7134 }
	if (CHIP_IS_E1H(bp))
		/* not applicable for E2 (and above ...) */
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7138
7139 if (CHIP_REV_IS_SLOW(bp))
7140 msleep(200);
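	/* finish CFC init */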
7143 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7144 if (val != 1) {
7145 BNX2X_ERR("CFC LL_INIT failed\n");
7146 return -EBUSY;
7147 }
7148 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7149 if (val != 1) {
7150 BNX2X_ERR("CFC AC_INIT failed\n");
7151 return -EBUSY;
7152 }
7153 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7154 if (val != 1) {
7155 BNX2X_ERR("CFC CAM_INIT failed\n");
7156 return -EBUSY;
7157 }
7158 REG_WR(bp, CFC_REG_DEBUG0, 0);
7159
7160 if (CHIP_IS_E1(bp)) {
7161
7162
7163 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7164 val = *bnx2x_sp(bp, wb_data[0]);
7165
7166
7167 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7168 BNX2X_ERR("internal mem self test failed\n");
7169 return -EBUSY;
7170 }
7171 }
7172
7173 bnx2x_setup_fan_failure_detection(bp);
7174
7175
7176 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7177
7178 bnx2x_enable_blocks_attention(bp);
7179 bnx2x_enable_blocks_parity(bp);
7180
7181 if (!BP_NOMCP(bp)) {
7182 if (CHIP_IS_E1x(bp))
7183 bnx2x__common_init_phy(bp);
7184 } else
7185 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7186
7187 return 0;
7188}
7189
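/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */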
7195static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7196{
7197 int rc = bnx2x_init_hw_common(bp);
7198
7199 if (rc)
7200 return rc;
7201
7202
7203 if (!BP_NOMCP(bp))
7204 bnx2x__common_init_phy(bp);
7205
7206 return 0;
7207}
7208
7209static int bnx2x_init_hw_port(struct bnx2x *bp)
7210{
7211 int port = BP_PORT(bp);
7212 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7213 u32 low, high;
7214 u32 val, reg;
7215
7216 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7217
7218 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7219
7220 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7221 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7222 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7223
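	/* Timers bug workaround: the pf_master bit in pglue is disabled
	 * during the common phase, so it must be re-enabled here before any
	 * DMAE access is attempted (it also happens in the function phase).
	 */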
7229 if (!CHIP_IS_E1x(bp))
7230 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7231
7232 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7233 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7234 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7235 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7236
7237 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7238 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7239 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7240 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7241
7242
7243 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7244
7245 if (CNIC_SUPPORT(bp)) {
7246 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7247 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7248 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7249 }
7250
7251 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7252
7253 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7254
7255 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7256
7257 if (IS_MF(bp))
7258 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7259 else if (bp->dev->mtu > 4096) {
7260 if (bp->flags & ONE_PORT_FLAG)
7261 low = 160;
7262 else {
7263 val = bp->dev->mtu;
7264
7265 low = 96 + (val/64) +
7266 ((val % 64) ? 1 : 0);
7267 }
7268 } else
7269 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7270 high = low + 56;
7271 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7272 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7273 }
7274
7275 if (CHIP_MODE_IS_4_PORT(bp))
7276 REG_WR(bp, (BP_PORT(bp) ?
7277 BRB1_REG_MAC_GUARANTIED_1 :
7278 BRB1_REG_MAC_GUARANTIED_0), 40);
7279
7280 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7281 if (CHIP_IS_E3B0(bp)) {
7282 if (IS_MF_AFEX(bp)) {
7283
7284 REG_WR(bp, BP_PORT(bp) ?
7285 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7286 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7287 REG_WR(bp, BP_PORT(bp) ?
7288 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7289 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7290 REG_WR(bp, BP_PORT(bp) ?
7291 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7292 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7293 } else {
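			/* Ovlan exists only if we are in multi-function +
			 * switch-dependent mode; in switch-independent mode
			 * there are no ovlan headers
			 */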
7298 REG_WR(bp, BP_PORT(bp) ?
7299 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7300 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7301 (bp->path_has_ovlan ? 7 : 6));
7302 }
7303 }
7304
7305 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7306 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7307 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7308 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7309
7310 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7311 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7312 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7313 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7314
7315 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7316 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7317
7318 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7319
7320 if (CHIP_IS_E1x(bp)) {
7321
7322 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7323
7324
7325 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7326
7327 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7328
7329
7330 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7331 udelay(50);
7332 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7333 }
7334
7335 if (CNIC_SUPPORT(bp))
7336 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7337
7338 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7339 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7340
7341 if (CHIP_IS_E1(bp)) {
7342 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7343 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7344 }
7345 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7346
7347 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7348
7349 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7350
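	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked, only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked, bits 0-2 are in use as in SF,
	 *             bits 4-7 are used for "per vn group attention"
	 */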
7354 val = IS_MF(bp) ? 0xF7 : 0x7;
7355
7356 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7357 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7358
7359
7360 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7361 REG_WR(bp, reg,
7362 REG_RD(bp, reg) &
7363 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7364
7365 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7366 REG_WR(bp, reg,
7367 REG_RD(bp, reg) &
7368 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7369
7370 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7371
7372 if (!CHIP_IS_E1x(bp)) {
7373
7374
7375
7376 if (IS_MF_AFEX(bp))
7377 REG_WR(bp, BP_PORT(bp) ?
7378 NIG_REG_P1_HDRS_AFTER_BASIC :
7379 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7380 else
7381 REG_WR(bp, BP_PORT(bp) ?
7382 NIG_REG_P1_HDRS_AFTER_BASIC :
7383 NIG_REG_P0_HDRS_AFTER_BASIC,
7384 IS_MF_SD(bp) ? 7 : 6);
7385
7386 if (CHIP_IS_E3(bp))
7387 REG_WR(bp, BP_PORT(bp) ?
7388 NIG_REG_LLH1_MF_MODE :
7389 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7390 }
7391 if (!CHIP_IS_E3(bp))
7392 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7393
7394 if (!CHIP_IS_E1(bp)) {
7395
7396 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7397 (IS_MF_SD(bp) ? 0x1 : 0x2));
7398
7399 if (!CHIP_IS_E1x(bp)) {
7400 val = 0;
7401 switch (bp->mf_mode) {
7402 case MULTI_FUNCTION_SD:
7403 val = 1;
7404 break;
7405 case MULTI_FUNCTION_SI:
7406 case MULTI_FUNCTION_AFEX:
7407 val = 2;
7408 break;
7409 }
7410
7411 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7412 NIG_REG_LLH0_CLS_TYPE), val);
7413 }
7414 {
7415 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7416 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7417 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7418 }
7419 }
7420
7421
7422 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7423 if (val & MISC_SPIO_SPIO5) {
7424 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7425 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7426 val = REG_RD(bp, reg_addr);
7427 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7428 REG_WR(bp, reg_addr, val);
7429 }
7430
7431 return 0;
7432}
7433
7434static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7435{
7436 int reg;
7437 u32 wb_write[2];
7438
7439 if (CHIP_IS_E1(bp))
7440 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7441 else
7442 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7443
7444 wb_write[0] = ONCHIP_ADDR1(addr);
7445 wb_write[1] = ONCHIP_ADDR2(addr);
7446 REG_WR_DMAE(bp, reg, wb_write, 2);
7447}
7448
7449void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7450{
7451 u32 data, ctl, cnt = 100;
7452 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7453 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7454 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7455 u32 sb_bit = 1 << (idu_sb_id%32);
7456 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7457 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
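	/* Not supported in BC mode */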
7460 if (CHIP_INT_MODE_IS_BC(bp))
7461 return;
7462
7463 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7464 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7465 IGU_REGULAR_CLEANUP_SET |
7466 IGU_REGULAR_BCLEANUP;
7467
7468 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7469 func_encode << IGU_CTRL_REG_FID_SHIFT |
7470 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7471
7472 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7473 data, igu_addr_data);
7474 REG_WR(bp, igu_addr_data, data);
7475 mmiowb();
7476 barrier();
7477 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7478 ctl, igu_addr_ctl);
7479 REG_WR(bp, igu_addr_ctl, ctl);
7480 mmiowb();
7481 barrier();
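	/* wait for clean up to finish */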
7484 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7485 msleep(20);
7486
7487 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7488 DP(NETIF_MSG_HW,
7489 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7490 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7491 }
7492}
7493
7494static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7495{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7497}
7498
7499static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7500{
7501 u32 i, base = FUNC_ILT_BASE(func);
7502 for (i = base; i < base + ILT_PER_FUNC; i++)
7503 bnx2x_ilt_wr(bp, i, 0);
7504}
7505
7506static void bnx2x_init_searcher(struct bnx2x *bp)
7507{
7508 int port = BP_PORT(bp);
7509 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7510
7511 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7512}
7513
7514static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7515{
7516 int rc;
7517 struct bnx2x_func_state_params func_params = {NULL};
7518 struct bnx2x_func_switch_update_params *switch_update_params =
7519 &func_params.params.switch_update;
7520
7521
7522 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7523 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7524
7525 func_params.f_obj = &bp->func_obj;
7526 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7527
7528
7529 switch_update_params->suspend = suspend;
7530
7531 rc = bnx2x_func_state_change(bp, &func_params);
7532
7533 return rc;
7534}
7535
7536static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7537{
7538 int rc, i, port = BP_PORT(bp);
7539 int vlan_en = 0, mac_en[NUM_MACS];
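	/* Close input from network */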
7542 if (bp->mf_mode == SINGLE_FUNCTION) {
7543 bnx2x_set_rx_filter(&bp->link_params, 0);
7544 } else {
7545 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7546 NIG_REG_LLH0_FUNC_EN);
7547 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7548 NIG_REG_LLH0_FUNC_EN, 0);
7549 for (i = 0; i < NUM_MACS; i++) {
7550 mac_en[i] = REG_RD(bp, port ?
7551 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7552 4 * i) :
7553 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7554 4 * i));
7555 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7556 4 * i) :
7557 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7558 }
7559 }

	/* Close BMC to host */
	REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
	       NIG_REG_P0_TX_MNG_HOST_ENABLE, 0);
7564
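	/* Suspend Tx switching to the PF. Completion of this ramrod
	 * further guarantees that all the packets of that PF / child
	 * VFs in BRB were processed by the Parser, so it is safe to
	 * change the NIC_MODE register.
	 */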
7570 rc = bnx2x_func_switch_update(bp, 1);
7571 if (rc) {
7572 BNX2X_ERR("Can't suspend tx-switching!\n");
7573 return rc;
7574 }
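	/* Change NIC mode */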
7577 REG_WR(bp, PRS_REG_NIC_MODE, 0);
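	/* Open input from network */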
7580 if (bp->mf_mode == SINGLE_FUNCTION) {
7581 bnx2x_set_rx_filter(&bp->link_params, 1);
7582 } else {
7583 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7584 NIG_REG_LLH0_FUNC_EN, vlan_en);
7585 for (i = 0; i < NUM_MACS; i++) {
7586 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7587 4 * i) :
7588 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7589 mac_en[i]);
7590 }
7591 }

	/* Re-open BMC to host */
	REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
	       NIG_REG_P0_TX_MNG_HOST_ENABLE, 1);
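	/* Resume Tx switching to the PF */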
7598 rc = bnx2x_func_switch_update(bp, 0);
7599 if (rc) {
7600 BNX2X_ERR("Can't resume tx-switching!\n");
7601 return rc;
7602 }
7603
7604 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7605 return 0;
7606}
7607
7608int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7609{
7610 int rc;
7611
7612 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7613
7614 if (CONFIGURE_NIC_MODE(bp)) {
7615
7616 bnx2x_init_searcher(bp);
7617
7618
7619 rc = bnx2x_reset_nic_mode(bp);
7620 if (rc)
7621 BNX2X_ERR("Can't change NIC mode!\n");
7622 return rc;
7623 }
7624
7625 return 0;
7626}
7627
7628static int bnx2x_init_hw_func(struct bnx2x *bp)
7629{
7630 int port = BP_PORT(bp);
7631 int func = BP_FUNC(bp);
7632 int init_phase = PHASE_PF0 + func;
7633 struct bnx2x_ilt *ilt = BP_ILT(bp);
7634 u16 cdu_ilt_start;
7635 u32 addr, val;
7636 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7637 int i, main_mem_width, rc;
7638
7639 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7640
7641
7642 if (!CHIP_IS_E1x(bp)) {
7643 rc = bnx2x_pf_flr_clnup(bp);
7644 if (rc) {
7645 bnx2x_fw_dump(bp);
7646 return rc;
7647 }
7648 }
7649
7650
7651 if (bp->common.int_block == INT_BLOCK_HC) {
7652 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7653 val = REG_RD(bp, addr);
7654 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7655 REG_WR(bp, addr, val);
7656 }
7657
7658 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7659 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7660
7661 ilt = BP_ILT(bp);
7662 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7663
7664 if (IS_SRIOV(bp))
7665 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7666 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
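	/* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
	 * those of the VFs, so the start line should be reset
	 */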
7671 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7672 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7673 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7674 ilt->lines[cdu_ilt_start + i].page_mapping =
7675 bp->context[i].cxt_mapping;
7676 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7677 }
7678
7679 bnx2x_ilt_init_op(bp, INITOP_SET);
7680
7681 if (!CONFIGURE_NIC_MODE(bp)) {
7682 bnx2x_init_searcher(bp);
7683 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7684 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7685 } else {
7686
7687 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7688 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7689 }
7690
7691 if (!CHIP_IS_E1x(bp)) {
7692 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7693
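		/* Turn on a single ISR mode in IGU if the driver is going
		 * to use INT#x or MSI
		 */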
7697 if (!(bp->flags & USING_MSIX_FLAG))
7698 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
7699
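		/* Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * to make sure there are no requests in one of the PXP
		 * internal queues with "old" ILT addresses
		 */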
7705 msleep(20);
7706
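		/* Master enable - needed due to WB DMAE writes performed
		 * before this register is re-initialized as part of the
		 * regular function init
		 */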
7711 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7712
7713 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
7714 }
7715
7716 bp->dmae_ready = 1;
7717
7718 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7719
7720 if (!CHIP_IS_E1x(bp))
7721 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7722
7723 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7724 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7725 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7726 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7727 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7728 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7729 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7730 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7731 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7732 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7733 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7734 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7735 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7736
7737 if (!CHIP_IS_E1x(bp))
7738 REG_WR(bp, QM_REG_PF_EN, 1);
7739
7740 if (!CHIP_IS_E1x(bp)) {
7741 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7742 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7743 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7744 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7745 }
7746 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7747
7748 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7749 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7750 REG_WR(bp, DORQ_REG_MODE_ACT, 1);
7751
7752 bnx2x_iov_init_dq(bp);
7753
7754 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7755 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7756 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7757 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7758 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7759 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7760 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7761 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7762 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7763 if (!CHIP_IS_E1x(bp))
7764 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
7765
7766 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7767
7768 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7769
7770 if (!CHIP_IS_E1x(bp))
7771 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
7772
7773 if (IS_MF(bp)) {
7774 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7775 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
7776 }
7777
7778 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7779
7780
7781 if (bp->common.int_block == INT_BLOCK_HC) {
7782 if (CHIP_IS_E1H(bp)) {
7783 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7784
7785 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7786 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7787 }
7788 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7789
7790 } else {
7791 int num_segs, sb_idx, prod_offset;
7792
7793 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7794
7795 if (!CHIP_IS_E1x(bp)) {
7796 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7797 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7798 }
7799
7800 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7801
7802 if (!CHIP_IS_E1x(bp)) {
7803 int dsb_idx = 0;
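			/* Clean all the non-default status blocks of this
			 * PF: zero every producer segment of each SB and
			 * issue an IGU cleanup command for it.
			 */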
7825 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7826 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
7827 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
7828 prod_offset = (bp->igu_base_sb + sb_idx) *
7829 num_segs;
7830
7831 for (i = 0; i < num_segs; i++) {
7832 addr = IGU_REG_PROD_CONS_MEMORY +
7833 (prod_offset + i) * 4;
7834 REG_WR(bp, addr, 0);
7835 }
7836
7837 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
7838 USTORM_ID, 0, IGU_INT_NOP, 1);
7839 bnx2x_igu_clear_sb(bp,
7840 bp->igu_base_sb + sb_idx);
7841 }
7842
7843
7844 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7845 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
7846
7847 if (CHIP_MODE_IS_4_PORT(bp))
7848 dsb_idx = BP_FUNC(bp);
7849 else
7850 dsb_idx = BP_VN(bp);
7851
7852 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
7853 IGU_BC_BASE_DSB_PROD + dsb_idx :
7854 IGU_NORM_BASE_DSB_PROD + dsb_idx);
7855
7856
7857
7858
7859
7860 for (i = 0; i < (num_segs * E1HVN_MAX);
7861 i += E1HVN_MAX) {
7862 addr = IGU_REG_PROD_CONS_MEMORY +
7863 (prod_offset + i)*4;
7864 REG_WR(bp, addr, 0);
7865 }
7866
7867 if (CHIP_INT_MODE_IS_BC(bp)) {
7868 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7869 USTORM_ID, 0, IGU_INT_NOP, 1);
7870 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7871 CSTORM_ID, 0, IGU_INT_NOP, 1);
7872 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7873 XSTORM_ID, 0, IGU_INT_NOP, 1);
7874 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7875 TSTORM_ID, 0, IGU_INT_NOP, 1);
7876 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7877 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7878 } else {
7879 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7880 USTORM_ID, 0, IGU_INT_NOP, 1);
7881 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7882 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7883 }
7884 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
7885
7886
7887
7888 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
7889 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
7890 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
7891 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
7892 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
7893 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
7894 }
7895 }
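	/* Reset PCIE errors for debug */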
7898 REG_WR(bp, 0x2114, 0xffffffff);
7899 REG_WR(bp, 0x2120, 0xffffffff);
7900
7901 if (CHIP_IS_E1x(bp)) {
7902 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
7903 main_mem_base = HC_REG_MAIN_MEMORY +
7904 BP_PORT(bp) * (main_mem_size * 4);
7905 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
7906 main_mem_width = 8;
7907
7908 val = REG_RD(bp, main_mem_prty_clr);
7909 if (val)
7910 DP(NETIF_MSG_HW,
7911 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
7912 val);
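		/* Clear "false" parity errors in the MSI-X table */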
7915 for (i = main_mem_base;
7916 i < main_mem_base + main_mem_size * 4;
7917 i += main_mem_width) {
7918 bnx2x_read_dmae(bp, i, main_mem_width / 4);
7919 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
7920 i, main_mem_width / 4);
7921 }
7922
7923 REG_RD(bp, main_mem_prty_clr);
7924 }
7925
7926#ifdef BNX2X_STOP_ON_ERROR
7927
7928 REG_WR8(bp, BAR_USTRORM_INTMEM +
7929 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7930 REG_WR8(bp, BAR_TSTRORM_INTMEM +
7931 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7932 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7933 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7934 REG_WR8(bp, BAR_XSTRORM_INTMEM +
7935 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7936#endif
7937
7938 bnx2x_phy_probe(&bp->link_params);
7939
7940 return 0;
7941}
7942
7943void bnx2x_free_mem_cnic(struct bnx2x *bp)
7944{
7945 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
7946
7947 if (!CHIP_IS_E1x(bp))
7948 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7949 sizeof(struct host_hc_status_block_e2));
7950 else
7951 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7952 sizeof(struct host_hc_status_block_e1x));
7953
7954 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7955}
7956
7957void bnx2x_free_mem(struct bnx2x *bp)
7958{
7959 int i;
7960
7961 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7962 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7963
7964 if (IS_VF(bp))
7965 return;
7966
7967 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7968 sizeof(struct host_sp_status_block));
7969
7970 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7971 sizeof(struct bnx2x_slowpath));
7972
7973 for (i = 0; i < L2_ILT_LINES(bp); i++)
7974 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7975 bp->context[i].size);
7976 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7977
7978 BNX2X_FREE(bp->ilt->lines);
7979
7980 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7981
7982 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
7983 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7984
7985 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7986
7987 bnx2x_iov_free_mem(bp);
7988}
7989
7990int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7991{
	if (!CHIP_IS_E1x(bp))
		/* size = the status block + ramrod buffers */
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));
8001
	if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
		/* allocate searcher T2 table, as it wasn't allocated before */
		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
8005
8006
8007 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8008 &bp->slowpath->drv_info_to_mcp;
8009
8010 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8011 goto alloc_mem_err;
8012
8013 return 0;
8014
8015alloc_mem_err:
8016 bnx2x_free_mem_cnic(bp);
8017 BNX2X_ERR("Can't allocate memory\n");
8018 return -ENOMEM;
8019}
8020
8021int bnx2x_alloc_mem(struct bnx2x *bp)
8022{
8023 int i, allocated, context_size;
8024
	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
		/* allocate searcher T2 table */
		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
8028
8029 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
8030 sizeof(struct host_sp_status_block));
8031
8032 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
8033 sizeof(struct bnx2x_slowpath));
8034
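	/* Allocate memory for CDU context.
	 * This memory is allocated separately and not via the generic ILT
	 * helpers, because several entities - the regular L2 driver, CNIC
	 * and SRIOV - each allocate context memory and control their own
	 * ILT lines.
	 */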
8048 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8049
8050 for (i = 0, allocated = 0; allocated < context_size; i++) {
8051 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8052 (context_size - allocated));
8053 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
8054 &bp->context[i].cxt_mapping,
8055 bp->context[i].size);
8056 allocated += bp->context[i].size;
8057 }
8058 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
8059
8060 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8061 goto alloc_mem_err;
8062
8063 if (bnx2x_iov_alloc_mem(bp))
8064 goto alloc_mem_err;
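	/* Slow path ring */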
8067 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
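	/* EQ */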
8070 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
8071 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8072
8073 return 0;
8074
8075alloc_mem_err:
8076 bnx2x_free_mem(bp);
8077 BNX2X_ERR("Can't allocate memory\n");
8078 return -ENOMEM;
8079}
8080
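/*
 * Init service functions
 */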
8085int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8086 struct bnx2x_vlan_mac_obj *obj, bool set,
8087 int mac_type, unsigned long *ramrod_flags)
8088{
8089 int rc;
8090 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8091
8092 memset(&ramrod_param, 0, sizeof(ramrod_param));
8093
8094
8095 ramrod_param.vlan_mac_obj = obj;
8096 ramrod_param.ramrod_flags = *ramrod_flags;
8097
8098
8099 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8100 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8101
8102 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8103
8104
8105 if (set)
8106 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8107 else
8108 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8109 }
8110
8111 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8112
8113 if (rc == -EEXIST) {
8114 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8115
8116 rc = 0;
8117 } else if (rc < 0)
8118 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8119
8120 return rc;
8121}
8122
8123int bnx2x_del_all_macs(struct bnx2x *bp,
8124 struct bnx2x_vlan_mac_obj *mac_obj,
8125 int mac_type, bool wait_for_comp)
8126{
8127 int rc;
8128 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8129
8130
8131 if (wait_for_comp)
8132 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8133
8134
8135 __set_bit(mac_type, &vlan_mac_flags);
8136
8137 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8138 if (rc < 0)
8139 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8140
8141 return rc;
8142}
8143
8144int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8145{
8146 if (is_zero_ether_addr(bp->dev->dev_addr) &&
8147 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
8148 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
8149 "Ignoring Zero MAC for STORAGE SD mode\n");
8150 return 0;
8151 }
8152
8153 if (IS_PF(bp)) {
8154 unsigned long ramrod_flags = 0;
8155
8156 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8157 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8158 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8159 &bp->sp_objs->mac_obj, set,
8160 BNX2X_ETH_MAC, &ramrod_flags);
8161 } else {
8162 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8163 bp->fp->index, true);
8164 }
8165}
8166
8167int bnx2x_setup_leading(struct bnx2x *bp)
8168{
8169 if (IS_PF(bp))
8170 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8171 else
8172 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8173}
8174
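/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 */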
8182int bnx2x_set_int_mode(struct bnx2x *bp)
8183{
8184 int rc = 0;
8185
8186 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8187 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
8188 return -EINVAL;
8189 }
8190
8191 switch (int_mode) {
8192 case BNX2X_INT_MODE_MSIX:
8193
8194 rc = bnx2x_enable_msix(bp);
8195
8196
8197 if (!rc)
8198 return 0;
8199
8200
8201 if (rc && IS_VF(bp))
8202 return rc;
8203
8204
8205 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8206 bp->num_queues,
8207 1 + bp->num_cnic_queues);
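		/* falls through */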
8210 case BNX2X_INT_MODE_MSI:
8211 bnx2x_enable_msi(bp);
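		/* falls through */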
8214 case BNX2X_INT_MODE_INTX:
8215 bp->num_ethernet_queues = 1;
8216 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8217 BNX2X_DEV_INFO("set number of queues to 1\n");
8218 break;
8219 default:
8220 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8221 return -EINVAL;
8222 }
8223 return 0;
8224}
8225
8226
8227static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8228{
8229 if (IS_SRIOV(bp))
8230 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8231 return L2_ILT_LINES(bp);
8232}
8233
8234void bnx2x_ilt_set_info(struct bnx2x *bp)
8235{
8236 struct ilt_client_info *ilt_client;
8237 struct bnx2x_ilt *ilt = BP_ILT(bp);
8238 u16 line = 0;
8239
8240 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8241 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
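	/* CDU */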
8244 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8245 ilt_client->client_num = ILT_CLIENT_CDU;
8246 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8247 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8248 ilt_client->start = line;
8249 line += bnx2x_cid_ilt_lines(bp);
8250
8251 if (CNIC_SUPPORT(bp))
8252 line += CNIC_ILT_LINES;
8253 ilt_client->end = line - 1;
8254
8255 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8256 ilt_client->start,
8257 ilt_client->end,
8258 ilt_client->page_size,
8259 ilt_client->flags,
8260 ilog2(ilt_client->page_size >> 12));
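	/* QM */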
8263 if (QM_INIT(bp->qm_cid_count)) {
8264 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8265 ilt_client->client_num = ILT_CLIENT_QM;
8266 ilt_client->page_size = QM_ILT_PAGE_SZ;
8267 ilt_client->flags = 0;
8268 ilt_client->start = line;
8269
8270
8271 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8272 QM_ILT_PAGE_SZ);
8273
8274 ilt_client->end = line - 1;
8275
8276 DP(NETIF_MSG_IFUP,
8277 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8278 ilt_client->start,
8279 ilt_client->end,
8280 ilt_client->page_size,
8281 ilt_client->flags,
8282 ilog2(ilt_client->page_size >> 12));
8283 }
8284
8285 if (CNIC_SUPPORT(bp)) {
8286
8287 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8288 ilt_client->client_num = ILT_CLIENT_SRC;
8289 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8290 ilt_client->flags = 0;
8291 ilt_client->start = line;
8292 line += SRC_ILT_LINES;
8293 ilt_client->end = line - 1;
8294
8295 DP(NETIF_MSG_IFUP,
8296 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8297 ilt_client->start,
8298 ilt_client->end,
8299 ilt_client->page_size,
8300 ilt_client->flags,
8301 ilog2(ilt_client->page_size >> 12));
8302
8303
8304 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8305 ilt_client->client_num = ILT_CLIENT_TM;
8306 ilt_client->page_size = TM_ILT_PAGE_SZ;
8307 ilt_client->flags = 0;
8308 ilt_client->start = line;
8309 line += TM_ILT_LINES;
8310 ilt_client->end = line - 1;
8311
8312 DP(NETIF_MSG_IFUP,
8313 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8314 ilt_client->start,
8315 ilt_client->end,
8316 ilt_client->page_size,
8317 ilt_client->flags,
8318 ilog2(ilt_client->page_size >> 12));
8319 }
8320
8321 BUG_ON(line > ILT_MAX_LINES);
8322}
8323
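/**
 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @init_params: pointer to parameters structure
 *
 * parameters configured:
 *      - HC configuration
 *      - Queue's CDU context
 */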
8335static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8336 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8337{
8338 u8 cos;
8339 int cxt_index, cxt_offset;
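	/* FCoE Queue uses Default SB, thus has no HC capabilities */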
8342 if (!IS_FCOE_FP(fp)) {
8343 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8344 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8345
8346
8347
8348
8349 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8350 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8351
8352
8353 init_params->rx.hc_rate = bp->rx_ticks ?
8354 (1000000 / bp->rx_ticks) : 0;
8355 init_params->tx.hc_rate = bp->tx_ticks ?
8356 (1000000 / bp->tx_ticks) : 0;
8357
8358
8359 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8360 fp->fw_sb_id;
8361
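		/* CQ index among the SB indices: FCoE clients use the
		 * default SB, therefore it's different
		 */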
8366 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8367 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8368 }
8369
8370
8371 init_params->max_cos = fp->max_cos;
8372
8373 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8374 fp->index, init_params->max_cos);
8375
8376
8377 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8378 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8379 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8380 ILT_PAGE_CIDS);
8381 init_params->cxts[cos] =
8382 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8383 }
8384}
8385
8386static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8387 struct bnx2x_queue_state_params *q_params,
8388 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8389 int tx_index, bool leading)
8390{
8391 memset(tx_only_params, 0, sizeof(*tx_only_params));
8392
8393
8394 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8395
8396
8397 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8398
8399
8400 tx_only_params->cid_index = tx_index;
8401
8402
8403 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8404
8405
8406 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8407
8408 DP(NETIF_MSG_IFUP,
8409 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8410 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8411 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8412 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8413
8414
8415 return bnx2x_queue_state_change(bp, q_params);
8416}
8417
8418
8419
8420
8421
8422
8423
8424
8425
8426
8427
8428
8429int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8430 bool leading)
8431{
8432 struct bnx2x_queue_state_params q_params = {NULL};
8433 struct bnx2x_queue_setup_params *setup_params =
8434 &q_params.params.setup;
8435 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8436 &q_params.params.tx_only;
8437 int rc;
8438 u8 tx_index;
8439
8440 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8441
8442
8443 if (!IS_FCOE_FP(fp))
8444 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8445 IGU_INT_ENABLE, 0);
8446
8447 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8448
8449 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8450
8451
8452 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8453
8454
8455 q_params.cmd = BNX2X_Q_CMD_INIT;
8456
8457
8458 rc = bnx2x_queue_state_change(bp, &q_params);
8459 if (rc) {
8460 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8461 return rc;
8462 }
8463
8464 DP(NETIF_MSG_IFUP, "init complete\n");

	/* Now move the Queue to the SETUP state */
	memset(setup_params, 0, sizeof(*setup_params));

	/* Set QUEUE flags */
	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);

	/* Set general SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
				FIRST_TX_COS_INDEX);

	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
			   &setup_params->rxq_params);

	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
			   FIRST_TX_COS_INDEX);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_SETUP;

	if (IS_FCOE_FP(fp))
		bp->fcoe_init = true;

	/* Change the state to SETUP */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
		return rc;
	}

	/* loop through the relevant tx-only indices */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	     tx_index < fp->max_cos;
	     tx_index++) {

		/* prepare and send tx-only ramrod */
8501 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8502 tx_only_params, tx_index, leading);
8503 if (rc) {
8504 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8505 fp->index, tx_index);
8506 return rc;
8507 }
8508 }
8509
8510 return rc;
8511}
8512
8513static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8514{
8515 struct bnx2x_fastpath *fp = &bp->fp[index];
8516 struct bnx2x_fp_txdata *txdata;
8517 struct bnx2x_queue_state_params q_params = {NULL};
8518 int rc, tx_index;
8519
8520 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8521
8522 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8523
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* close tx-only connections */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	     tx_index < fp->max_cos;
	     tx_index++) {

		txdata = fp->txdata_ptr[tx_index];

		DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
		   txdata->txq_index);

		/* send halt terminate on the tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8539 memset(&q_params.params.terminate, 0,
8540 sizeof(q_params.params.terminate));
8541 q_params.params.terminate.cid_index = tx_index;
8542
8543 rc = bnx2x_queue_state_change(bp, &q_params);
8544 if (rc)
8545 return rc;
8546
		/* send cfc del ramrod on the tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8549 memset(&q_params.params.cfc_del, 0,
8550 sizeof(q_params.params.cfc_del));
8551 q_params.params.cfc_del.cid_index = tx_index;
8552 rc = bnx2x_queue_state_change(bp, &q_params);
8553 if (rc)
8554 return rc;
8555 }
8556
	/* Stop the primary connection: halt the connection */
	q_params.cmd = BNX2X_Q_CMD_HALT;
8559 rc = bnx2x_queue_state_change(bp, &q_params);
8560 if (rc)
8561 return rc;
8562

	/* ...terminate the connection */
	q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8565 memset(&q_params.params.terminate, 0,
8566 sizeof(q_params.params.terminate));
8567 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8568 rc = bnx2x_queue_state_change(bp, &q_params);
8569 if (rc)
8570 return rc;

	/* ...delete the CFC entry */
	q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8573 memset(&q_params.params.cfc_del, 0,
8574 sizeof(q_params.params.cfc_del));
8575 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8576 return bnx2x_queue_state_change(bp, &q_params);
8577}
8578
8579static void bnx2x_reset_func(struct bnx2x *bp)
8580{
8581 int port = BP_PORT(bp);
8582 int func = BP_FUNC(bp);
8583 int i;
8584
	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8587 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8588 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8589 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8590
	/* FP SBs */
	for_each_eth_queue(bp, i) {
8593 struct bnx2x_fastpath *fp = &bp->fp[i];
8594 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8595 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8596 SB_DISABLED);
8597 }
8598
8599 if (CNIC_LOADED(bp))
		/* CNIC SB */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
8602 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8603 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8604
	/* SP SB */
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
8607 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8608 SB_DISABLED);
8609
8610 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8611 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8612 0);
8613
	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
8616 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8617 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8618 } else {
8619 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8620 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8621 }
8622
8623 if (CNIC_LOADED(bp)) {
		/* Disable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);

		/* Wait for at least 10ms and up to 2 seconds for the timers
		 * scan to complete
		 */
		for (i = 0; i < 200; i++) {
8631 usleep_range(10000, 20000);
8632 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8633 break;
8634 }
8635 }
8636
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);
8638
	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8643 struct ilt_client_info ilt_cli;
8644
8645 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8646 ilt_cli.start = 0;
8647 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8648 ilt_cli.client_num = ILT_CLIENT_TM;
8649
8650 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8651 }
8652
	/* this assumes that reset_port() called before reset_func() */
	if (!CHIP_IS_E1x(bp))
8655 bnx2x_pf_disable(bp);
8656
8657 bp->dmae_ready = 0;
8658}
8659
8660static void bnx2x_reset_port(struct bnx2x *bp)
8661{
8662 int port = BP_PORT(bp);
8663 u32 val;
8664
	/* Reset physical Link */
	bnx2x__link_reset(bp);
8667
8668 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8669
	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
		    NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8675
	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8678
8679 msleep(100);

	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty, %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
8688
8689static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
8690{
8691 struct bnx2x_func_state_params func_params = {NULL};
8692
	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8695
8696 func_params.f_obj = &bp->func_obj;
8697 func_params.cmd = BNX2X_F_CMD_HW_RESET;
8698
8699 func_params.params.hw_init.load_phase = load_code;
8700
8701 return bnx2x_func_state_change(bp, &func_params);
8702}
8703
8704static int bnx2x_func_stop(struct bnx2x *bp)
8705{
8706 struct bnx2x_func_state_params func_params = {NULL};
8707 int rc;
8708

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8711 func_params.f_obj = &bp->func_obj;
8712 func_params.cmd = BNX2X_F_CMD_STOP;
8713
	/* Try to stop the function the 'good way'. If it fails (in case
	 * of a parity error during bnx2x_chip_cleanup()) and we are
	 * not in a debug mode, perform a state transaction in order to
	 * enable further HW_RESET transaction.
	 */
8720 rc = bnx2x_func_state_change(bp, &func_params);
8721 if (rc) {
8722#ifdef BNX2X_STOP_ON_ERROR
8723 return rc;
8724#else
8725 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
8726 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
8727 return bnx2x_func_state_change(bp, &func_params);
8728#endif
8729 }
8730
8731 return 0;
8732}
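
/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNCTION.
 */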
8742u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8743{
8744 u32 reset_code = 0;
8745 int port = BP_PORT(bp);
8746
	/* Select the UNLOAD request mode */
	if (unload_mode == UNLOAD_NORMAL)
8749 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8750
8751 else if (bp->flags & NO_WOL_FLAG)
8752 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8753
8754 else if (bp->wol) {
8755 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8756 u8 *mac_addr = bp->dev->dev_addr;
8757 struct pci_dev *pdev = bp->pdev;
8758 u32 val;
8759 u16 pmc;
8760
		/* The mac address is written to entries 1-4 to
		 * preserve entry 0 which is used by the PMF
		 */
		u8 entry = (BP_VN(bp) + 1)*8;
8765
8766 val = (mac_addr[0] << 8) | mac_addr[1];
8767 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8768
8769 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8770 (mac_addr[4] << 8) | mac_addr[5];
8771 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8772
		/* Enable the PME and clear the status */
		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
8775 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8776 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
8777
8778 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8779
8780 } else
8781 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8782
	/* Send the request to the MCP */
	if (!BP_NOMCP(bp))
8785 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8786 else {
8787 int path = BP_PATH(bp);
8788
8789 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
8790 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
8791 bnx2x_load_count[path][2]);
8792 bnx2x_load_count[path][0]--;
8793 bnx2x_load_count[path][1 + port]--;
8794 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
8795 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
8796 bnx2x_load_count[path][2]);
8797 if (bnx2x_load_count[path][0] == 0)
8798 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8799 else if (bnx2x_load_count[path][1 + port] == 0)
8800 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8801 else
8802 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8803 }
8804
8805 return reset_code;
8806}
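
/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */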
8814void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
8815{
8816 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8817
	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
8820 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
8821}
8822
8823static int bnx2x_func_wait_started(struct bnx2x *bp)
8824{
8825 int tout = 50;
8826 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8827
8828 if (!bp->port.pmf)
8829 return 0;
8830
	/* (assumption: No Attention from MCP at this stage)
	 * PMF probably in the middle of TX disable/enable transaction
	 * 1. Sync IRS for default SB
	 * 2. Sync SP queue - this guarantees us that attention handling started
	 * 3. Wait, that TX disable/enable transaction completes
	 *
	 * 1+2 guarantee that if DCBx attention was scheduled it already changed
	 * pending bit of transaction from STARTED-->TX_STOPPED, if we already
	 * received completion for the transaction the state is TX_STOPPED.
	 * State will return to STARTED after completion of TX_STOPPED-->STARTED
	 * transaction.
	 */

	/* make sure default SB ISR is done */
	if (msix)
8847 synchronize_irq(bp->msix_table[0].vector);
8848 else
8849 synchronize_irq(bp->pdev->irq);
8850
8851 flush_workqueue(bnx2x_wq);
8852
8853 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8854 BNX2X_F_STATE_STARTED && tout--)
8855 msleep(20);
8856
8857 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
8858 BNX2X_F_STATE_STARTED) {
8859#ifdef BNX2X_STOP_ON_ERROR
8860 BNX2X_ERR("Wrong function state\n");
8861 return -EBUSY;
8862#else
		/* Failed to complete the transaction in a "good way"
		 * Force both transactions with CLR bit
		 */
		struct bnx2x_func_state_params func_params = {NULL};

		DP(NETIF_MSG_IFDOWN,
		   "Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
8871
8872 func_params.f_obj = &bp->func_obj;
8873 __set_bit(RAMROD_DRV_CLR_ONLY,
8874 &func_params.ramrod_flags);
8875
		/* STARTED-->TX_STOPPED */
		func_params.cmd = BNX2X_F_CMD_TX_STOP;
8878 bnx2x_func_state_change(bp, &func_params);
8879
		/* TX_STOPPED-->STARTED */
		func_params.cmd = BNX2X_F_CMD_TX_START;
8882 return bnx2x_func_state_change(bp, &func_params);
8883#endif
8884 }
8885
8886 return 0;
8887}
8888
8889void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8890{
8891 int port = BP_PORT(bp);
8892 int i, rc = 0;
8893 u8 cos;
8894 struct bnx2x_mcast_ramrod_params rparam = {NULL};
8895 u32 reset_code;
8896
	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
8899 struct bnx2x_fastpath *fp = &bp->fp[i];
8900
8901 for_each_cos_in_tx_queue(fp, cos)
8902 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
8903#ifdef BNX2X_STOP_ON_ERROR
8904 if (rc)
8905 return;
8906#endif
8907 }
8908
	/* Give HW time to discard old tx messages */
	usleep_range(1000, 2000);

	/* Clean all ETH MACs */
	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8914 false);
8915 if (rc < 0)
8916 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8917
	/* Clean up UC list */
	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
8920 true);
8921 if (rc < 0)
8922 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
8923 rc);
8924
	/* Disable LLH */
	if (!CHIP_IS_E1(bp))
8927 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8928
	/* Set "drop all" (stop Rx).
	 * We need to take a netif_addr_lock() here in order to prevent
	 * a race between the completion code and this code.
	 */
	netif_addr_lock_bh(bp->dev);
	/* Schedule the rx_mode command */
8935 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
8936 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8937 else
8938 bnx2x_set_storm_rx_mode(bp);
8939
	/* Cleanup multicast configuration */
	rparam.mcast_obj = &bp->mcast_obj;
8942 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
8943 if (rc < 0)
8944 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
8945
8946 netif_addr_unlock_bh(bp->dev);
8947
8948 bnx2x_iov_chip_cleanup(bp);
8949
	/* Send the UNLOAD_REQUEST to the MCP. This will return the
	 * kind of HW reset (FUNC, PORT or COMMON) this function
	 * should perform.
	 */
	reset_code = bnx2x_send_unload_req(bp, unload_mode);
8956
	/* (assumption: No Attention from MCP at this stage)
	 * PMF probably in the middle of TX disable/enable transaction
	 */
	rc = bnx2x_func_wait_started(bp);
8962 if (rc) {
8963 BNX2X_ERR("bnx2x_func_wait_started failed\n");
8964#ifdef BNX2X_STOP_ON_ERROR
8965 return;
8966#endif
8967 }
8968
	/* Close multi and leading connections
	 * Completions for ramrods are collected in a synchronous way
	 */
	for_each_eth_queue(bp, i)
8973 if (bnx2x_stop_queue(bp, i))
8974#ifdef BNX2X_STOP_ON_ERROR
8975 return;
8976#else
8977 goto unload_error;
8978#endif
8979
8980 if (CNIC_LOADED(bp)) {
8981 for_each_cnic_queue(bp, i)
8982 if (bnx2x_stop_queue(bp, i))
8983#ifdef BNX2X_STOP_ON_ERROR
8984 return;
8985#else
8986 goto unload_error;
8987#endif
8988 }
8989
	/* If SP settings didn't get completed so far - something
	 * very wrong has happened.
	 */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
8994 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
8995
8996#ifndef BNX2X_STOP_ON_ERROR
8997unload_error:
8998#endif
8999 rc = bnx2x_func_stop(bp);
9000 if (rc) {
9001 BNX2X_ERR("Function stop failed!\n");
9002#ifdef BNX2X_STOP_ON_ERROR
9003 return;
9004#endif
9005 }
9006
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 1);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);
9011 if (CNIC_LOADED(bp))
9012 bnx2x_del_all_napi_cnic(bp);
9013
	/* Release IRQs */
	bnx2x_free_irq(bp);
9016
	/* Reset the chip */
	rc = bnx2x_reset_hw(bp, reset_code);
9019 if (rc)
9020 BNX2X_ERR("HW_RESET failed\n");
9021
	/* Report UNLOAD_DONE to MCP */
	bnx2x_send_unload_done(bp, keep_link);
9024}
9025
9026void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9027{
9028 u32 val;
9029
9030 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9031
9032 if (CHIP_IS_E1(bp)) {
9033 int port = BP_PORT(bp);
9034 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9035 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9036
9037 val = REG_RD(bp, addr);
9038 val &= ~(0x300);
9039 REG_WR(bp, addr, val);
9040 } else {
9041 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9042 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9043 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9044 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9045 }
9046}
9047
/* Close gates #2, #3 and #4 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9050{
9051 u32 val;
9052

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
		/* #2 */
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
	}

	/* #3 */
	if (CHIP_IS_E1x(bp)) {
		/* Prevent interrupts from HC on both ports */
		val = REG_RD(bp, HC_REG_CONFIG_1);
9065 REG_WR(bp, HC_REG_CONFIG_1,
9066 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9067 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9068
9069 val = REG_RD(bp, HC_REG_CONFIG_0);
9070 REG_WR(bp, HC_REG_CONFIG_0,
9071 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9072 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
	} else {
		/* Prevent incoming interrupts in IGU */
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);

9077 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9078 (!close) ?
9079 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9080 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9081 }
9082
9083 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9084 close ? "closing" : "opening");
9085 mmiowb();
9086}
9087
9088#define SHARED_MF_CLP_MAGIC 0x80000000
9089
9090static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9091{
	/* Save the current value of the `magic' bit and set it */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9094 *magic_val = val & SHARED_MF_CLP_MAGIC;
9095 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9096}
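
/**
 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the `magic' bit.
 */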
9104static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9105{
	/* Restore the `magic' bit value */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9108 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9109 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9110}
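
/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the `magic' bit.
 *
 * Takes care of CLP configurations.
 */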
9120static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9121{
9122 u32 shmem;
9123 u32 validity_offset;
9124
9125 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9126
	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
9129 bnx2x_clp_reset_prep(bp, magic_val);
9130
	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9133 validity_offset =
9134 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9135
	/* Clear validity map flags */
	if (shmem > 0)
9138 REG_WR(bp, shmem + validity_offset, 0);
9139}
9140
9141#define MCP_TIMEOUT 5000
9142#define MCP_ONE_TIMEOUT 100
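
/**
 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
 *
 * @bp:	driver handle
 */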
9149static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9150{
	/* special handling for emulation and FPGA - wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
9154 msleep(MCP_ONE_TIMEOUT*10);
9155 else
9156 msleep(MCP_ONE_TIMEOUT);
9157}
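
/* initialize shmem_base and wait for the validity signature to appear */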
9162static int bnx2x_init_shmem(struct bnx2x *bp)
9163{
9164 int cnt = 0;
9165 u32 val = 0;
9166
9167 do {
9168 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9169 if (bp->common.shmem_base) {
9170 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9171 if (val & SHR_MEM_VALIDITY_MB)
9172 return 0;
9173 }
9174
9175 bnx2x_mcp_wait_one(bp);
9176
9177 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9178
9179 BNX2X_ERR("BAD MCP validity signature\n");
9180
9181 return -ENODEV;
9182}
9183
9184static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9185{
9186 int rc = bnx2x_init_shmem(bp);
9187
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
9190 bnx2x_clp_reset_done(bp, magic_val);
9191
9192 return rc;
9193}
9194
9195static void bnx2x_pxp_prep(struct bnx2x *bp)
9196{
9197 if (!CHIP_IS_E1(bp)) {
9198 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9199 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9200 mmiowb();
9201 }
9202}
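
/* Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */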
9214static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9215{
9216 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9217 u32 global_bits2, stay_reset2;
9218
	/* Bits that have to be set in reset_mask2 if we want to reset 'global'
	 * (per chip) blocks.
	 */
	global_bits2 =
9224 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9225 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9226
	/* Don't reset the following blocks.
	 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
	 *            reset, as in 4 port device they might still be owned
	 *            by the MCP (there is only one leader per path).
	 */
	not_reset_mask1 =
9233 MISC_REGISTERS_RESET_REG_1_RST_HC |
9234 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9235 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9236
9237 not_reset_mask2 =
9238 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9239 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9240 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9241 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9242 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9243 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9244 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9245 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9246 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9247 MISC_REGISTERS_RESET_REG_2_PGLC |
9248 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9249 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9250 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9251 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9252 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9253 MISC_REGISTERS_RESET_REG_2_UMAC1;
9254
	/* Keep the following blocks in reset:
	 *  - all xxMACs are handled by the bnx2x_link code.
	 */
	stay_reset2 =
9260 MISC_REGISTERS_RESET_REG_2_XMAC |
9261 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9262
	/* Full reset masks according to the chip */
	reset_mask1 = 0xffffffff;
9265
9266 if (CHIP_IS_E1(bp))
9267 reset_mask2 = 0xffff;
9268 else if (CHIP_IS_E1H(bp))
9269 reset_mask2 = 0x1ffff;
9270 else if (CHIP_IS_E2(bp))
9271 reset_mask2 = 0xfffff;
9272 else
9273 reset_mask2 = 0x3ffffff;
9274
	/* Don't reset global blocks unless we need to */
	if (!global)
9277 reset_mask2 &= ~global_bits2;
9278

	/* In case of attention in the QM, we need to reset PXP
	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
	 * because otherwise QM reset would release 'close the gates' shortly
	 * before resetting the PXP, then the PSWRQ would send a write
	 * request to PGLUE. Then when PXP is reset, PGLUE would try to
	 * read the payload data from PSWWR, but PSWWR would not
	 * respond. The write queue in PGLUE would get stuck and dmae commands
	 * would not return. Therefore it's important to reset the second
	 * reset register (containing the
	 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
	 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
	 * bit).
	 */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9294 reset_mask2 & (~not_reset_mask2));
9295
9296 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9297 reset_mask1 & (~not_reset_mask1));
9298
9299 barrier();
9300 mmiowb();
9301
9302 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9303 reset_mask2 & (~stay_reset2));
9304
9305 barrier();
9306 mmiowb();
9307
9308 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9309 mmiowb();
9310}
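
/**
 * bnx2x_er_poll_igu_vq - poll for pending writes bit.
 *
 * @bp:	driver handle
 *
 * It should get cleared in no more than 1s.
 */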
9321static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9322{
	/* signed counter: the post-decrement in the loop condition below
	 * leaves -1 on timeout, which the cnt <= 0 check relies on
	 */
	int cnt = 1000;
9324 u32 pend_bits = 0;
9325
9326 do {
9327 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9328
9329 if (pend_bits == 0)
9330 break;
9331
9332 usleep_range(1000, 2000);
9333 } while (cnt-- > 0);
9334
9335 if (cnt <= 0) {
9336 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9337 pend_bits);
9338 return -EBUSY;
9339 }
9340
9341 return 0;
9342}
9343
9344static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9345{
9346 int cnt = 1000;
9347 u32 val = 0;
9348 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9349 u32 tags_63_32 = 0;
9350
	/* Empty the Tetris buffer, wait for 1s */
	do {
9353 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9354 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9355 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9356 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9357 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9358 if (CHIP_IS_E3(bp))
9359 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9360
9361 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9362 ((port_is_idle_0 & 0x1) == 0x1) &&
9363 ((port_is_idle_1 & 0x1) == 0x1) &&
9364 (pgl_exp_rom2 == 0xffffffff) &&
9365 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9366 break;
9367 usleep_range(1000, 2000);
9368 } while (cnt-- > 0);
9369
9370 if (cnt <= 0) {
9371 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9372 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9373 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9374 pgl_exp_rom2);
9375 return -EAGAIN;
9376 }
9377
9378 barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* Poll for IGU VQs for 57712 and newer chips */
	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9385 return -EAGAIN;
9386
	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();
9395
	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	usleep_range(1000, 2000);
9400
	/* Prepare to chip reset: */
	/* MCP */
	if (global)
		bnx2x_reset_mcp_prep(bp, &val);
9405
	/* PXP */
	bnx2x_pxp_prep(bp);
9408 barrier();
9409
	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp, global);
9412 barrier();
9413
	/* clear errors in PGB */
	if (!CHIP_IS_E1x(bp))
9416 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9417

	/* Recover after reset: MCP */
	if (global && bnx2x_reset_mcp_comp(bp, val))
9421 return -EAGAIN;
9422
	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);
9427
9428
9429
9430
9431 return 0;
9432}
9433
9434static int bnx2x_leader_reset(struct bnx2x *bp)
9435{
9436 int rc = 0;
9437 bool global = bnx2x_reset_is_global(bp);
9438 u32 load_code;
9439
	/* If not going to reset MCP - load "fake" driver to reset HW while
	 * driver is owner of the HW
	 */
	if (!global && !BP_NOMCP(bp)) {
9444 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9445 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9446 if (!load_code) {
9447 BNX2X_ERR("MCP response failure, aborting\n");
9448 rc = -EAGAIN;
9449 goto exit_leader_reset;
9450 }
9451 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9452 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9453 BNX2X_ERR("MCP unexpected resp, aborting\n");
9454 rc = -EAGAIN;
9455 goto exit_leader_reset2;
9456 }
9457 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9458 if (!load_code) {
9459 BNX2X_ERR("MCP response failure, aborting\n");
9460 rc = -EAGAIN;
9461 goto exit_leader_reset2;
9462 }
9463 }
9464
	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad happened on engine %d!\n",
9468 BP_PATH(bp));
9469 rc = -EAGAIN;
9470 goto exit_leader_reset2;
9471 }
9472
	/* Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
	 * state.
	 */
	bnx2x_set_reset_done(bp);
9478 if (global)
9479 bnx2x_clear_reset_global(bp);
9480
9481exit_leader_reset2:
	/* unload "fake driver" if it was loaded */
	if (!global && !BP_NOMCP(bp)) {
9484 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9485 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9486 }
9487exit_leader_reset:
9488 bp->is_leader = 0;
9489 bnx2x_release_leader_lock(bp);
9490 smp_mb();
9491 return rc;
9492}
9493
9494static void bnx2x_recovery_failed(struct bnx2x *bp)
9495{
9496 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9497
	/* Disconnect this device */
	netif_device_detach(bp->dev);
9500
	/* Block ifup for all functions on this engine until "process kill"
	 * or power cycle.
	 */
	bnx2x_set_reset_in_progress(bp);
9506
	/* Shut down the power */
	bnx2x_set_power_state(bp, PCI_D3hot);
9509
9510 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9511
9512 smp_mb();
9513}
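
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_sp_rtnl() ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */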
9520static void bnx2x_parity_recover(struct bnx2x *bp)
9521{
9522 bool global = false;
9523 u32 error_recovered, error_unrecovered;
9524 bool is_parity;
9525
9526 DP(NETIF_MSG_HW, "Handling parity\n");
9527 while (1) {
9528 switch (bp->recovery_state) {
9529 case BNX2X_RECOVERY_INIT:
9530 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9531 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9532 WARN_ON(!is_parity);
9533
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_leader_lock(bp)) {
9536 bnx2x_set_reset_in_progress(bp);
9537
				/* Check if there is a global attention and if
				 * there was a global attention, set the global
				 * reset bit.
				 */
				if (global)
9544 bnx2x_set_reset_global(bp);
9545
9546 bp->is_leader = 1;
9547 }
9548

			/* Stop the driver */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9552 return;
9553
9554 bp->recovery_state = BNX2X_RECOVERY_WAIT;
9555
			/* Ensure "is_leader", MCP command sequence and
			 * "recovery_state" update values are seen by other
			 * functions.
			 */
			smp_mb();
9561 break;
9562
9563 case BNX2X_RECOVERY_WAIT:
9564 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9565 if (bp->is_leader) {
9566 int other_engine = BP_PATH(bp) ? 0 : 1;
9567 bool other_load_status =
9568 bnx2x_get_load_status(bp, other_engine);
9569 bool load_status =
9570 bnx2x_get_load_status(bp, BP_PATH(bp));
9571 global = bnx2x_reset_is_global(bp);
9572
				/* In case of a parity in a global block, let
				 * the first leader that performs a
				 * leader_reset() reset the global blocks in
				 * order to clear global attentions. Otherwise
				 * the gates will remain closed for that
				 * engine.
				 */
				if (load_status ||
				    (global && other_load_status)) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->sp_rtnl_task,
							      HZ/10);
9588 return;
9589 } else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp)) {
9596 bnx2x_recovery_failed(bp);
9597 return;
9598 }
9599
					/* If we are here, it means that the
					 * leader has succeeded and doesn't
					 * want to be a leader any more. Try
					 * to continue as a non-leader.
					 */
9605 break;
9606 }
9607 } else {
9608 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership for another
					 * reason.
					 */
					if (bnx2x_trylock_leader_lock(bp)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
9620 break;
9621 }
9622
9623 schedule_delayed_work(&bp->sp_rtnl_task,
9624 HZ/10);
9625 return;
9626
9627 } else {
					/* If there was a global attention, wait
					 * for it to be cleared.
					 */
					if (bnx2x_reset_is_global(bp)) {
9633 schedule_delayed_work(
9634 &bp->sp_rtnl_task,
9635 HZ/10);
9636 return;
9637 }
9638
9639 error_recovered =
9640 bp->eth_stats.recoverable_error;
9641 error_unrecovered =
9642 bp->eth_stats.unrecoverable_error;
9643 bp->recovery_state =
9644 BNX2X_RECOVERY_NIC_LOADING;
9645 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
9646 error_unrecovered++;
9647 netdev_err(bp->dev,
9648 "Recovery failed. Power cycle needed\n");
					/* Disconnect this device */
					netif_device_detach(bp->dev);
					/* Shut down the power */
9652 bnx2x_set_power_state(
9653 bp, PCI_D3hot);
9654 smp_mb();
9655 } else {
9656 bp->recovery_state =
9657 BNX2X_RECOVERY_DONE;
9658 error_recovered++;
9659 smp_mb();
9660 }
9661 bp->eth_stats.recoverable_error =
9662 error_recovered;
9663 bp->eth_stats.unrecoverable_error =
9664 error_unrecovered;
9665
9666 return;
9667 }
9668 }
9669 default:
9670 return;
9671 }
9672 }
9673}
9674
9675static int bnx2x_close(struct net_device *dev);
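
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */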
9680static void bnx2x_sp_rtnl_task(struct work_struct *work)
9681{
9682 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
9683
9684 rtnl_lock();
9685
9686 if (!netif_running(bp->dev)) {
9687 rtnl_unlock();
9688 return;
9689 }
9690
9691 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
9692#ifdef BNX2X_STOP_ON_ERROR
9693 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9694 "you will need to reboot when done\n");
9695 goto sp_rtnl_not_reset;
9696#endif

		/* Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
9702 smp_mb();
9703
9704 bnx2x_parity_recover(bp);
9705
9706 rtnl_unlock();
9707 return;
9708 }
9709
9710 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
9711#ifdef BNX2X_STOP_ON_ERROR
9712 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9713 "you will need to reboot when done\n");
9714 goto sp_rtnl_not_reset;
9715#endif

		/* Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
9722 smp_mb();
9723
9724 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
9725 bnx2x_nic_load(bp, LOAD_NORMAL);
9726
9727 rtnl_unlock();
9728 return;
9729 }
9730#ifdef BNX2X_STOP_ON_ERROR
9731sp_rtnl_not_reset:
9732#endif
9733 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
9734 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
9735 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9736 bnx2x_after_function_update(bp);
9737
	/* In case of fan failure we need to unload the driver in order to
	 * prevent permanent overheating damage.
	 */
	if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
9743 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
9744 netif_device_detach(bp->dev);
9745 bnx2x_close(bp->dev);
9746 rtnl_unlock();
9747 return;
9748 }
9749
9750 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
9751 DP(BNX2X_MSG_SP,
9752 "sending set mcast vf pf channel message from rtnl sp-task\n");
9753 bnx2x_vfpf_set_mcast(bp->dev);
9754 }
9755 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			       &bp->sp_rtnl_state)) {
		if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
			bnx2x_tx_disable(bp);
			BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
9760 }
9761 }
9762
9763 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
9764 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
9765 bnx2x_set_rx_mode_inner(bp);
9766 }
9767
9768 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
9769 &bp->sp_rtnl_state))
9770 bnx2x_pf_set_vfs_vlan(bp);
9771
9772 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
9773 bnx2x_dcbx_stop_hw_tx(bp);
9774 bnx2x_dcbx_resume_hw_tx(bp);
9775 }
9776

	/* work which needs rtnl lock not-taken (as it takes the lock itself and
	 * can be called from other contexts as well)
	 */
	rtnl_unlock();

	/* enable SR-IOV if applicable */
	if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
9784 &bp->sp_rtnl_state)) {
9785 bnx2x_disable_sriov(bp);
9786 bnx2x_enable_sriov(bp);
9787 }
9788}
9789
9790static void bnx2x_period_task(struct work_struct *work)
9791{
9792 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
9793
9794 if (!netif_running(bp->dev))
9795 goto period_task_exit;
9796
9797 if (CHIP_REV_IS_SLOW(bp)) {
9798 BNX2X_ERR("period task called on emulation, ignoring\n");
9799 goto period_task_exit;
9800 }
9801
9802 bnx2x_acquire_phy_lock(bp);
9803
	/* The barrier is needed to ensure the ordering between the writing to
	 * the bp->port.pmf in bnx2x_nic_load() or bnx2x_pmf_update() and
	 * the reading here.
	 */
	smp_mb();
9809 if (bp->port.pmf) {
9810 bnx2x_period_func(&bp->link_params, &bp->link_vars);
9811
		/* Re-queue task in 1 sec */
		queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
9814 }
9815
9816 bnx2x_release_phy_lock(bp);
9817period_task_exit:
9818 return;
9819}
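
/*
 * Init service functions
 */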
9825static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
9826{
9827 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
9828 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
9829 return base + (BP_ABS_FUNC(bp)) * stride;
9830}
9831
9832static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
9833 struct bnx2x_mac_vals *vals)
9834{
9835 u32 val, base_addr, offset, mask, reset_reg;
9836 bool mac_stopped = false;
9837 u8 port = BP_PORT(bp);
9838
	/* reset addresses as they also mark which values were changed */
	vals->bmac_addr = 0;
9841 vals->umac_addr = 0;
9842 vals->xmac_addr = 0;
9843 vals->emac_addr = 0;
9844
9845 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
9846
9847 if (!CHIP_IS_E3(bp)) {
9848 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
9849 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
9850 if ((mask & reset_reg) && val) {
9851 u32 wb_data[2];
9852 BNX2X_DEV_INFO("Disable bmac Rx\n");
9853 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
9854 : NIG_REG_INGRESS_BMAC0_MEM;
9855 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
9856 : BIGMAC_REGISTER_BMAC_CONTROL;
9857
			/* use rd/wr since we cannot use dmae. This is safe
			 * since MCP won't access the bus due to the request
			 * to unload, and no function on the path can be
			 * loaded at this time.
			 */
			wb_data[0] = REG_RD(bp, base_addr + offset);
9865 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
9866 vals->bmac_addr = base_addr + offset;
9867 vals->bmac_val[0] = wb_data[0];
9868 vals->bmac_val[1] = wb_data[1];
9869 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
9870 REG_WR(bp, vals->bmac_addr, wb_data[0]);
9871 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
9872 }
9873 BNX2X_DEV_INFO("Disable emac Rx\n");
9874 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
9875 vals->emac_val = REG_RD(bp, vals->emac_addr);
9876 REG_WR(bp, vals->emac_addr, 0);
9877 mac_stopped = true;
9878 } else {
9879 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
9880 BNX2X_DEV_INFO("Disable xmac Rx\n");
9881 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
9882 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
9883 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9884 val & ~(1 << 1));
9885 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9886 val | (1 << 1));
9887 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
9888 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
9889 REG_WR(bp, vals->xmac_addr, 0);
9890 mac_stopped = true;
9891 }
9892 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
9893 if (mask & reset_reg) {
9894 BNX2X_DEV_INFO("Disable umac Rx\n");
9895 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
9896 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
9897 vals->umac_val = REG_RD(bp, vals->umac_addr);
9898 REG_WR(bp, vals->umac_addr, 0);
9899 mac_stopped = true;
9900 }
9901 }
9902
9903 if (mac_stopped)
9904 msleep(20);
9905}
9906
9907#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9908#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
9909#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
9910#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
9911
9912#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
9913#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
9914#define BCM_5710_UNDI_FW_MF_VERS (0x05)
9915#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4))
9916#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4))
9917static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
9918{
9919 u8 major, minor, version;
9920 u32 fw;
9921
	/* UNDI MF FW can only be running if XSEM is out of reset */
	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
9924 MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
9925 BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
9926 return false;
9927 }
9928
	/* Obtain the currently loaded FW version */
	fw = REG_RD(bp, XSEM_REG_PRAM);
9931 major = fw & 0xff;
9932 minor = (fw >> 0x8) & 0xff;
9933 version = (fw >> 0x10) & 0xff;
9934 BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
9935 fw, major, minor, version);
9936
9937 if (major > BCM_5710_UNDI_FW_MF_MAJOR)
9938 return true;
9939
9940 if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
9941 (minor > BCM_5710_UNDI_FW_MF_MINOR))
9942 return true;
9943
9944 if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
9945 (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
9946 (version >= BCM_5710_UNDI_FW_MF_VERS))
9947 return true;
9948
9949 return false;
9950}
9951
9952static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
9953{
9954 int i;
9955
	/* Due to lack of MF awareness in the UNDI FW, direct it to discard
	 * Rx traffic on both ports and all functions while we take over.
	 */
	for (i = 0; i < 2; i++)
9962 REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
9963
9964 for (i = 2; i < 8; i++)
9965 REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
9966
9967 BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
9968}
9969
9970static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
9971{
9972 u16 rcq, bd;
9973 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
9974
9975 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
9976 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
9977
9978 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
9979 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
9980
9981 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
9982 port, bd, rcq);
9983}
9984
9985static int bnx2x_prev_mcp_done(struct bnx2x *bp)
9986{
9987 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9988 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
9989 if (!rc) {
9990 BNX2X_ERR("MCP response failure, aborting\n");
9991 return -EBUSY;
9992 }
9993
9994 return 0;
9995}
9996
9997static struct bnx2x_prev_path_list *
9998 bnx2x_prev_path_get_entry(struct bnx2x *bp)
9999{
10000 struct bnx2x_prev_path_list *tmp_list;
10001
10002 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10003 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10004 bp->pdev->bus->number == tmp_list->bus &&
10005 BP_PATH(bp) == tmp_list->path)
10006 return tmp_list;
10007
10008 return NULL;
10009}
10010
10011static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10012{
10013 struct bnx2x_prev_path_list *tmp_list;
10014 int rc;
10015
10016 rc = down_interruptible(&bnx2x_prev_sem);
10017 if (rc) {
10018 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10019 return rc;
10020 }
10021
10022 tmp_list = bnx2x_prev_path_get_entry(bp);
10023 if (tmp_list) {
10024 tmp_list->aer = 1;
10025 rc = 0;
10026 } else {
		BNX2X_ERR("path %d: Entry does not exist for eeh; does this flow occur before initial insmod is over?\n",
			  BP_PATH(bp));
10029 }
10030
10031 up(&bnx2x_prev_sem);
10032
10033 return rc;
10034}
10035
10036static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10037{
10038 struct bnx2x_prev_path_list *tmp_list;
10039 bool rc = false;
10040
10041 if (down_trylock(&bnx2x_prev_sem))
10042 return false;
10043
10044 tmp_list = bnx2x_prev_path_get_entry(bp);
10045 if (tmp_list) {
10046 if (tmp_list->aer) {
10047 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10048 BP_PATH(bp));
10049 } else {
10050 rc = true;
10051 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10052 BP_PATH(bp));
10053 }
10054 }
10055
10056 up(&bnx2x_prev_sem);
10057
10058 return rc;
10059}
10060
10061bool bnx2x_port_after_undi(struct bnx2x *bp)
10062{
10063 struct bnx2x_prev_path_list *entry;
10064 bool val;
10065
10066 down(&bnx2x_prev_sem);
10067
10068 entry = bnx2x_prev_path_get_entry(bp);
10069 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10070
10071 up(&bnx2x_prev_sem);
10072
10073 return val;
10074}
10075
10076static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10077{
10078 struct bnx2x_prev_path_list *tmp_list;
10079 int rc;
10080
10081 rc = down_interruptible(&bnx2x_prev_sem);
10082 if (rc) {
10083 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10084 return rc;
10085 }
10086
	/* Check whether the entry for this path already exists */
	tmp_list = bnx2x_prev_path_get_entry(bp);
10089 if (tmp_list) {
10090 if (!tmp_list->aer) {
10091 BNX2X_ERR("Re-Marking the path.\n");
10092 } else {
10093 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10094 BP_PATH(bp));
10095 tmp_list->aer = 0;
10096 }
10097 up(&bnx2x_prev_sem);
10098 return 0;
10099 }
10100 up(&bnx2x_prev_sem);
10101
	/* Create an entry for this path and add it */
	tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10104 if (!tmp_list) {
10105 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10106 return -ENOMEM;
10107 }
10108
10109 tmp_list->bus = bp->pdev->bus->number;
10110 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10111 tmp_list->path = BP_PATH(bp);
10112 tmp_list->aer = 0;
10113 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10114
10115 rc = down_interruptible(&bnx2x_prev_sem);
10116 if (rc) {
10117 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10118 kfree(tmp_list);
10119 } else {
10120 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10121 BP_PATH(bp));
10122 list_add(&tmp_list->list, &bnx2x_prev_list);
10123 up(&bnx2x_prev_sem);
10124 }
10125
10126 return rc;
10127}
10128
10129static int bnx2x_do_flr(struct bnx2x *bp)
10130{
10131 struct pci_dev *dev = bp->pdev;
10132
10133 if (CHIP_IS_E1x(bp)) {
10134 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10135 return -EINVAL;
10136 }
10137
	/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support FLR */
	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10140 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10141 bp->common.bc_ver);
10142 return -EINVAL;
10143 }
10144
10145 if (!pci_wait_for_pending_transaction(dev))
10146 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10147
10148 BNX2X_DEV_INFO("Initiating FLR\n");
10149 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10150
10151 return 0;
10152}
10153
10154static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10155{
10156 int rc;
10157
10158 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10159
	/* Test if previous unload process was already finished for this path */
	if (bnx2x_prev_is_path_marked(bp))
10162 return bnx2x_prev_mcp_done(bp);
10163
10164 BNX2X_DEV_INFO("Path is unmarked\n");
10165
	/* If function has FLR capabilities, and existing FW version matches
	 * the one required, then FLR will be sufficient to clean any residue
	 * left by previous driver
	 */
	rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10171
10172 if (!rc) {
10173
10174 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10175 rc = bnx2x_do_flr(bp);
10176 }
10177
10178 if (!rc) {
10179
10180 BNX2X_DEV_INFO("FLR successful\n");
10181 return 0;
10182 }
10183
10184 BNX2X_DEV_INFO("Could not FLR\n");
10185
	/* Close the MCP request, return failure */
	rc = bnx2x_prev_mcp_done(bp);
10188 if (!rc)
10189 rc = BNX2X_PREV_WAIT_NEEDED;
10190
10191 return rc;
10192}
10193
10194static int bnx2x_prev_unload_common(struct bnx2x *bp)
10195{
10196 u32 reset_reg, tmp_reg = 0, rc;
10197 bool prev_undi = false;
10198 struct bnx2x_mac_vals mac_vals;
10199
	/* It is possible a previous function received 'common' answer,
	 * but hasn't loaded yet, therefore creating a scenario of
	 * multiple functions receiving 'common' on the same path.
	 */
	BNX2X_DEV_INFO("Common unload Flow\n");
10205
10206 memset(&mac_vals, 0, sizeof(mac_vals));
10207
10208 if (bnx2x_prev_is_path_marked(bp))
10209 return bnx2x_prev_mcp_done(bp);
10210
10211 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10212
	/* Reset should be performed after BRB is emptied */
	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10215 u32 timer_count = 1000;
10216
		/* Close the MAC Rx to prevent BRB from filling up */
		bnx2x_prev_unload_close_mac(bp, &mac_vals);
10219
		/* close LLH filters towards the BRB */
		bnx2x_set_rx_filter(&bp->link_params, 0);
10222
		/* Check if the UNDI driver was previously loaded.
		 * The UNDI driver initializes the CID offset for the normal
		 * bell to 0x7.
		 */
		if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
10227 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
10228 if (tmp_reg == 0x7) {
10229 BNX2X_DEV_INFO("UNDI previously loaded\n");
10230 prev_undi = true;
				/* clear the UNDI indication */
				REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
				/* clear possible idle check errors */
				REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10235 }
10236 }
10237 if (!CHIP_IS_E1x(bp))
			/* block FW from writing to host */
			REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10240
		/* wait until BRB is empty */
		tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10243 while (timer_count) {
10244 u32 prev_brb = tmp_reg;
10245
10246 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10247 if (!tmp_reg)
10248 break;
10249
10250 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10251
			/* reset timer as long as BRB actually gets emptied */
			if (prev_brb > tmp_reg)
10254 timer_count = 1000;
10255 else
10256 timer_count--;
10257
			/* New UNDI FW supports MF and contains better
			 * cleaning methods - might be redundant but harmless.
			 */
			if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
				bnx2x_prev_unload_undi_mf(bp);
			} else if (prev_undi) {
				/* If UNDI resides in memory,
				 * manually increment it
				 */
				bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
10268 }
10269 udelay(10);
10270 }
10271
10272 if (!timer_count)
10273 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10274 }
10275
	/* No packets are in the pipeline, path is ready for reset */
	bnx2x_reset_common(bp);
10278
10279 if (mac_vals.xmac_addr)
10280 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10281 if (mac_vals.umac_addr)
10282 REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
10283 if (mac_vals.emac_addr)
10284 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10285 if (mac_vals.bmac_addr) {
10286 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10287 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10288 }
10289
10290 rc = bnx2x_prev_mark_path(bp, prev_undi);
10291 if (rc) {
10292 bnx2x_prev_mcp_done(bp);
10293 return rc;
10294 }
10295
10296 return bnx2x_prev_mcp_done(bp);
10297}
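
/* A previous driver DMAE transaction may have occurred when pre-boot stage
 * ended and boot began, or when a kdump kernel was loaded. Either case would
 * invalidate the addresses of the transaction, resulting in a was-error bit
 * set in the PCI core, causing all hw-to-host PCIe transactions to time out.
 * If this happened we want to clear the interrupt which detected this from
 * the pglueb and the was-done bit.
 */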
10306static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
10307{
10308 if (!CHIP_IS_E1x(bp)) {
10309 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
10310 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
10311 DP(BNX2X_MSG_SP,
10312 "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
10313 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
10314 1 << BP_FUNC(bp));
10315 }
10316 }
10317}
10318
10319static int bnx2x_prev_unload(struct bnx2x *bp)
10320{
10321 int time_counter = 10;
10322 u32 rc, fw, hw_lock_reg, hw_lock_val;
10323 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");

	/* clear hw from errors which may have resulted from an interrupted
	 * dmae transaction.
	 */
	bnx2x_prev_interrupted_dmae(bp);

	/* Release previously held locks */
	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10332 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10333 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10334
10335 hw_lock_val = REG_RD(bp, hw_lock_reg);
10336 if (hw_lock_val) {
10337 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10338 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10339 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10340 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10341 }
10342
10343 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10344 REG_WR(bp, hw_lock_reg, 0xffffffff);
10345 } else
10346 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10347
10348 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10349 BNX2X_DEV_INFO("Release previously held alr\n");
10350 bnx2x_release_alr(bp);
10351 }
10352
10353 do {
10354 int aer = 0;
10355
10356 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10357 if (!fw) {
10358 BNX2X_ERR("MCP response failure, aborting\n");
10359 rc = -EBUSY;
10360 break;
10361 }
10362
10363 rc = down_interruptible(&bnx2x_prev_sem);
10364 if (rc) {
10365 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10366 rc);
10367 } else {
			/* If Path is marked by EEH, ignore unload status */
			aer = !!(bnx2x_prev_path_get_entry(bp) &&
10370 bnx2x_prev_path_get_entry(bp)->aer);
10371 up(&bnx2x_prev_sem);
10372 }
10373
10374 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10375 rc = bnx2x_prev_unload_common(bp);
10376 break;
10377 }
10378
		/* non-common reply from MCP might require looping */
		rc = bnx2x_prev_unload_uncommon(bp);
10381 if (rc != BNX2X_PREV_WAIT_NEEDED)
10382 break;
10383
10384 msleep(20);
10385 } while (--time_counter);
10386
10387 if (!time_counter || rc) {
		BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
10389 rc = -EPROBE_DEFER;
10390 }
10391
	/* Mark function if its port was used to boot from SAN */
	if (bnx2x_port_after_undi(bp))
10394 bp->link_params.feature_config_flags |=
10395 FEATURE_CONFIG_BOOT_FROM_SAN;
10396
10397 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10398
10399 return rc;
10400}
10401
10402static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10403{
10404 u32 val, val2, val3, val4, id, boot_mode;
10405 u16 pmc;
10406
	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
10410 id = ((val & 0xffff) << 16);
10411 val = REG_RD(bp, MISC_REG_CHIP_REV);
10412 id |= ((val & 0xf) << 12);
10413
	/* Metal is read from PCI regs, but we can't access >=0x400 from
	 * the configuration space (so we need to reg_rd)
	 */
	val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10418 id |= (((val >> 24) & 0xf) << 4);
10419 val = REG_RD(bp, MISC_REG_BOND_ID);
10420 id |= (val & 0xf);
10421 bp->common.chip_id = id;
10422
	/* force 57811 according to MISC register */
	if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10425 if (CHIP_IS_57810(bp))
10426 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10427 (bp->common.chip_id & 0x0000FFFF);
10428 else if (CHIP_IS_57810_MF(bp))
10429 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10430 (bp->common.chip_id & 0x0000FFFF);
10431 bp->common.chip_id |= 0x1;
10432 }
10433
	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);
10436
10437 if (!CHIP_IS_E1x(bp)) {
10438 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10439 if ((val & 1) == 0)
10440 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10441 else
10442 val = (val >> 1) & 1;
10443 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10444 "2_PORT_MODE");
10445 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10446 CHIP_2_PORT_MODE;
10447
10448 if (CHIP_MODE_IS_4_PORT(bp))
10449 bp->pfid = (bp->pf_num >> 1);
10450 else
10451 bp->pfid = (bp->pf_num & 0x6);
10452 } else {
10453 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
10454 bp->pfid = bp->pf_num;
10455 }
10456
	BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
10458
10459 bp->link_params.chip_id = bp->common.chip_id;
10460 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10461
10462 val = (REG_RD(bp, 0x2874) & 0x55);
10463 if ((bp->common.chip_id & 0x1) ||
10464 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10465 bp->flags |= ONE_PORT_FLAG;
10466 BNX2X_DEV_INFO("single port device\n");
10467 }
10468
10469 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
10470 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
10471 (val & MCPR_NVM_CFG4_FLASH_SIZE));
10472 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10473 bp->common.flash_size, bp->common.flash_size);
10474
10475 bnx2x_init_shmem(bp);
10476
10477 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10478 MISC_REG_GENERIC_CR_1 :
10479 MISC_REG_GENERIC_CR_0));
10480
10481 bp->link_params.shmem_base = bp->common.shmem_base;
10482 bp->link_params.shmem2_base = bp->common.shmem2_base;
10483 if (SHMEM2_RD(bp, size) >
10484 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
10485 bp->link_params.lfa_base =
10486 REG_RD(bp, bp->common.shmem2_base +
10487 (u32)offsetof(struct shmem2_region,
10488 lfa_host_addr[BP_PORT(bp)]));
10489 else
10490 bp->link_params.lfa_base = 0;
10491 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
10492 bp->common.shmem_base, bp->common.shmem2_base);
10493
10494 if (!bp->common.shmem_base) {
10495 BNX2X_DEV_INFO("MCP not active\n");
10496 bp->flags |= NO_MCP_FLAG;
10497 return;
10498 }
10499
10500 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
10501 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
10502
10503 bp->link_params.hw_led_mode = ((bp->common.hw_config &
10504 SHARED_HW_CFG_LED_MODE_MASK) >>
10505 SHARED_HW_CFG_LED_MODE_SHIFT);
10506
10507 bp->link_params.feature_config_flags = 0;
10508 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
10509 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
10510 bp->link_params.feature_config_flags |=
10511 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10512 else
10513 bp->link_params.feature_config_flags &=
10514 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10515
10516 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
10517 bp->common.bc_ver = val;
10518 BNX2X_DEV_INFO("bc_ver %X\n", val);
10519 if (val < BNX2X_BC_VER) {
		/* for now only warn;
		 * later we might add rules when this flag can be set
		 */
10522 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
10523 BNX2X_BC_VER, val);
10524 }
10525 bp->link_params.feature_config_flags |=
10526 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
10527 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
10528
10529 bp->link_params.feature_config_flags |=
10530 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
10531 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
10532 bp->link_params.feature_config_flags |=
10533 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
10534 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
10535 bp->link_params.feature_config_flags |=
10536 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
10537 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
10538
10539 bp->link_params.feature_config_flags |=
10540 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
10541 FEATURE_CONFIG_MT_SUPPORT : 0;
10542
10543 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
10544 BC_SUPPORTS_PFC_STATS : 0;
10545
10546 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
10547 BC_SUPPORTS_FCOE_FEATURES : 0;
10548
10549 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10550 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
10551
10552 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
10553 BC_SUPPORTS_RMMOD_CMD : 0;
10554
10555 boot_mode = SHMEM_RD(bp,
10556 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10557 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
10558 switch (boot_mode) {
10559 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
10560 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
10561 break;
10562 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
10563 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
10564 break;
10565 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
10566 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
10567 break;
10568 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
10569 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
10570 break;
10571 }
10572
10573 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
10574 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10575
10576 BNX2X_DEV_INFO("%sWoL capable\n",
10577 (bp->flags & NO_WOL_FLAG) ? "not " : "");
10578
10579 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
10580 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
10581 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
10582 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
10583
10584 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
10585 val, val2, val3, val4);
10586}
10587
10588#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
10589#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
10590
10591static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
10592{
10593 int pfid = BP_FUNC(bp);
10594 int igu_sb_id;
10595 u32 val;
10596 u8 fid, igu_sb_cnt = 0;
10597
10598 bp->igu_base_sb = 0xff;
10599 if (CHIP_INT_MODE_IS_BC(bp)) {
10600 int vn = BP_VN(bp);
10601 igu_sb_cnt = bp->igu_sb_cnt;
10602 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
10603 FP_SB_MAX_E1x;
10604
10605 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
10606 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
10607
10608 return 0;
10609 }
10610
	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
10613 igu_sb_id++) {
10614 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
10615 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
10616 continue;
10617 fid = IGU_FID(val);
10618 if ((fid & IGU_FID_ENCODE_IS_PF)) {
10619 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10620 continue;
10621 if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
10624 else {
10625 if (bp->igu_base_sb == 0xff)
10626 bp->igu_base_sb = igu_sb_id;
10627 igu_sb_cnt++;
10628 }
10629 }
10630 }
10631
10632#ifdef CONFIG_PCI_MSI
	/* Due to new PF resource allocation by MFW T7.4 and above, it's
	 * optional that number of CAM entries will not be equal to the value
	 * advertised in PCI.
	 * Driver should use the minimal value of both as the actual status
	 * block count
	 */
	bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
10640#endif
10641
10642 if (igu_sb_cnt == 0) {
10643 BNX2X_ERR("CAM configuration error\n");
10644 return -EINVAL;
10645 }
10646
10647 return 0;
10648}
10649
10650static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
10651{
10652 int cfg_size = 0, idx, port = BP_PORT(bp);
10653
	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
10656 bp->port.supported[1] = 0;
10657 switch (bp->link_params.num_phys) {
10658 case 1:
10659 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10660 cfg_size = 1;
10661 break;
10662 case 2:
10663 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10664 cfg_size = 1;
10665 break;
10666 case 3:
10667 if (bp->link_params.multi_phy_config &
10668 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10669 bp->port.supported[1] =
10670 bp->link_params.phy[EXT_PHY1].supported;
10671 bp->port.supported[0] =
10672 bp->link_params.phy[EXT_PHY2].supported;
10673 } else {
10674 bp->port.supported[0] =
10675 bp->link_params.phy[EXT_PHY1].supported;
10676 bp->port.supported[1] =
10677 bp->link_params.phy[EXT_PHY2].supported;
10678 }
10679 cfg_size = 2;
10680 break;
10681 }
10682
10683 if (!(bp->port.supported[0] || bp->port.supported[1])) {
10684 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
10685 SHMEM_RD(bp,
10686 dev_info.port_hw_config[port].external_phy_config),
10687 SHMEM_RD(bp,
10688 dev_info.port_hw_config[port].external_phy_config2));
10689 return;
10690 }
10691
10692 if (CHIP_IS_E3(bp))
10693 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10694 else {
10695 switch (switch_cfg) {
10696 case SWITCH_CFG_1G:
10697 bp->port.phy_addr = REG_RD(
10698 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10699 break;
10700 case SWITCH_CFG_10G:
10701 bp->port.phy_addr = REG_RD(
10702 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10703 break;
10704 default:
10705 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
10706 bp->port.link_config[0]);
10707 return;
10708 }
10709 }
10710 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
10713 if (!(bp->link_params.speed_cap_mask[idx] &
10714 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
10715 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
10716
10717 if (!(bp->link_params.speed_cap_mask[idx] &
10718 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
10719 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
10720
10721 if (!(bp->link_params.speed_cap_mask[idx] &
10722 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
10723 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
10724
10725 if (!(bp->link_params.speed_cap_mask[idx] &
10726 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
10727 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
10728
10729 if (!(bp->link_params.speed_cap_mask[idx] &
10730 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
10731 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
10732 SUPPORTED_1000baseT_Full);
10733
10734 if (!(bp->link_params.speed_cap_mask[idx] &
10735 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
10736 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
10737
10738 if (!(bp->link_params.speed_cap_mask[idx] &
10739 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
10740 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
10741
10742 if (!(bp->link_params.speed_cap_mask[idx] &
10743 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
10744 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
10745 }
10746
10747 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
10748 bp->port.supported[1]);
10749}
10750
10751static void bnx2x_link_settings_requested(struct bnx2x *bp)
10752{
10753 u32 link_config, idx, cfg_size = 0;
10754 bp->port.advertising[0] = 0;
10755 bp->port.advertising[1] = 0;
10756 switch (bp->link_params.num_phys) {
10757 case 1:
10758 case 2:
10759 cfg_size = 1;
10760 break;
10761 case 3:
10762 cfg_size = 2;
10763 break;
10764 }
10765 for (idx = 0; idx < cfg_size; idx++) {
10766 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
10767 link_config = bp->port.link_config[idx];
10768 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
10769 case PORT_FEATURE_LINK_SPEED_AUTO:
10770 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
10771 bp->link_params.req_line_speed[idx] =
10772 SPEED_AUTO_NEG;
10773 bp->port.advertising[idx] |=
10774 bp->port.supported[idx];
10775 if (bp->link_params.phy[EXT_PHY1].type ==
10776 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10777 bp->port.advertising[idx] |=
10778 (SUPPORTED_100baseT_Half |
10779 SUPPORTED_100baseT_Full);
10780 } else {
 /* force 10G, no AN */
10782 bp->link_params.req_line_speed[idx] =
10783 SPEED_10000;
10784 bp->port.advertising[idx] |=
10785 (ADVERTISED_10000baseT_Full |
10786 ADVERTISED_FIBRE);
10787 continue;
10788 }
10789 break;
10790
10791 case PORT_FEATURE_LINK_SPEED_10M_FULL:
10792 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
10793 bp->link_params.req_line_speed[idx] =
10794 SPEED_10;
10795 bp->port.advertising[idx] |=
10796 (ADVERTISED_10baseT_Full |
10797 ADVERTISED_TP);
10798 } else {
10799 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10800 link_config,
10801 bp->link_params.speed_cap_mask[idx]);
10802 return;
10803 }
10804 break;
10805
10806 case PORT_FEATURE_LINK_SPEED_10M_HALF:
10807 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
10808 bp->link_params.req_line_speed[idx] =
10809 SPEED_10;
10810 bp->link_params.req_duplex[idx] =
10811 DUPLEX_HALF;
10812 bp->port.advertising[idx] |=
10813 (ADVERTISED_10baseT_Half |
10814 ADVERTISED_TP);
10815 } else {
10816 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10817 link_config,
10818 bp->link_params.speed_cap_mask[idx]);
10819 return;
10820 }
10821 break;
10822
10823 case PORT_FEATURE_LINK_SPEED_100M_FULL:
10824 if (bp->port.supported[idx] &
10825 SUPPORTED_100baseT_Full) {
10826 bp->link_params.req_line_speed[idx] =
10827 SPEED_100;
10828 bp->port.advertising[idx] |=
10829 (ADVERTISED_100baseT_Full |
10830 ADVERTISED_TP);
10831 } else {
10832 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10833 link_config,
10834 bp->link_params.speed_cap_mask[idx]);
10835 return;
10836 }
10837 break;
10838
10839 case PORT_FEATURE_LINK_SPEED_100M_HALF:
10840 if (bp->port.supported[idx] &
10841 SUPPORTED_100baseT_Half) {
10842 bp->link_params.req_line_speed[idx] =
10843 SPEED_100;
10844 bp->link_params.req_duplex[idx] =
10845 DUPLEX_HALF;
10846 bp->port.advertising[idx] |=
10847 (ADVERTISED_100baseT_Half |
10848 ADVERTISED_TP);
10849 } else {
10850 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10851 link_config,
10852 bp->link_params.speed_cap_mask[idx]);
10853 return;
10854 }
10855 break;
10856
10857 case PORT_FEATURE_LINK_SPEED_1G:
10858 if (bp->port.supported[idx] &
10859 SUPPORTED_1000baseT_Full) {
10860 bp->link_params.req_line_speed[idx] =
10861 SPEED_1000;
10862 bp->port.advertising[idx] |=
10863 (ADVERTISED_1000baseT_Full |
10864 ADVERTISED_TP);
10865 } else {
10866 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10867 link_config,
10868 bp->link_params.speed_cap_mask[idx]);
10869 return;
10870 }
10871 break;
10872
10873 case PORT_FEATURE_LINK_SPEED_2_5G:
10874 if (bp->port.supported[idx] &
10875 SUPPORTED_2500baseX_Full) {
10876 bp->link_params.req_line_speed[idx] =
10877 SPEED_2500;
10878 bp->port.advertising[idx] |=
10879 (ADVERTISED_2500baseX_Full |
10880 ADVERTISED_TP);
10881 } else {
10882 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10883 link_config,
10884 bp->link_params.speed_cap_mask[idx]);
10885 return;
10886 }
10887 break;
10888
10889 case PORT_FEATURE_LINK_SPEED_10G_CX4:
10890 if (bp->port.supported[idx] &
10891 SUPPORTED_10000baseT_Full) {
10892 bp->link_params.req_line_speed[idx] =
10893 SPEED_10000;
10894 bp->port.advertising[idx] |=
10895 (ADVERTISED_10000baseT_Full |
10896 ADVERTISED_FIBRE);
10897 } else {
10898 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10899 link_config,
10900 bp->link_params.speed_cap_mask[idx]);
10901 return;
10902 }
10903 break;
10904 case PORT_FEATURE_LINK_SPEED_20G:
10905 bp->link_params.req_line_speed[idx] = SPEED_20000;
10906
10907 break;
10908 default:
10909 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
10910 link_config);
10911 bp->link_params.req_line_speed[idx] =
10912 SPEED_AUTO_NEG;
10913 bp->port.advertising[idx] =
10914 bp->port.supported[idx];
10915 break;
10916 }
10917
10918 bp->link_params.req_flow_ctrl[idx] = (link_config &
10919 PORT_FEATURE_FLOW_CONTROL_MASK);
10920 if (bp->link_params.req_flow_ctrl[idx] ==
10921 BNX2X_FLOW_CTRL_AUTO) {
10922 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
10923 bp->link_params.req_flow_ctrl[idx] =
10924 BNX2X_FLOW_CTRL_NONE;
10925 else
10926 bnx2x_set_requested_fc(bp);
10927 }
10928
10929 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
10930 bp->link_params.req_line_speed[idx],
10931 bp->link_params.req_duplex[idx],
10932 bp->link_params.req_flow_ctrl[idx],
10933 bp->port.advertising[idx]);
10934 }
10935}
10936
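/* Assemble the hi/lo MAC words read from NVRAM or the MF config into a
 * byte buffer in network (big-endian) order.
 */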
10937static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
10938{
10939 __be16 mac_hi_be = cpu_to_be16(mac_hi);
10940 __be32 mac_lo_be = cpu_to_be32(mac_lo);
10941 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
10942 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
10943}
10944
10945static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
10946{
10947 int port = BP_PORT(bp);
10948 u32 config;
10949 u32 ext_phy_type, ext_phy_config, eee_mode;
10950
10951 bp->link_params.bp = bp;
10952 bp->link_params.port = port;
10953
10954 bp->link_params.lane_config =
10955 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
10956
10957 bp->link_params.speed_cap_mask[0] =
10958 SHMEM_RD(bp,
10959 dev_info.port_hw_config[port].speed_capability_mask) &
10960 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
10961 bp->link_params.speed_cap_mask[1] =
10962 SHMEM_RD(bp,
10963 dev_info.port_hw_config[port].speed_capability_mask2) &
10964 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
10965 bp->port.link_config[0] =
10966 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
10967
10968 bp->port.link_config[1] =
10969 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
10970
10971 bp->link_params.multi_phy_config =
10972 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);

 /* If the device is capable of WoL, set the default state according
  * to the HW
  */
10976 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
10977 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
10978 (config & PORT_FEATURE_WOL_ENABLED));
10979
10980 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10981 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
10982 bp->flags |= NO_ISCSI_FLAG;
10983 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10984 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
10985 bp->flags |= NO_FCOE_FLAG;
10986
10987 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
10988 bp->link_params.lane_config,
10989 bp->link_params.speed_cap_mask[0],
10990 bp->port.link_config[0]);
10991
10992 bp->link_params.switch_cfg = (bp->port.link_config[0] &
10993 PORT_FEATURE_CONNECTED_SWITCH_MASK);
10994 bnx2x_phy_probe(&bp->link_params);
10995 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
10996
10997 bnx2x_link_settings_requested(bp);

 /* If connected directly, work with the internal PHY, otherwise, work
  * with the external PHY
  */
11003 ext_phy_config =
11004 SHMEM_RD(bp,
11005 dev_info.port_hw_config[port].external_phy_config);
11006 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11007 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11008 bp->mdio.prtad = bp->port.phy_addr;
11009
11010 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11011 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11012 bp->mdio.prtad =
11013 XGXS_EXT_PHY_ADDR(ext_phy_config);

 /* Configure link feature according to nvram value */
11016 eee_mode = (((SHMEM_RD(bp, dev_info.
11017 port_feature_config[port].eee_power_mode)) &
11018 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11019 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11020 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11021 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11022 EEE_MODE_ENABLE_LPI |
11023 EEE_MODE_OUTPUT_TIME;
11024 } else {
11025 bp->link_params.eee_mode = 0;
11026 }
11027}
11028
11029void bnx2x_get_iscsi_info(struct bnx2x *bp)
11030{
11031 u32 no_flags = NO_ISCSI_FLAG;
11032 int port = BP_PORT(bp);
11033 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11034 drv_lic_key[port].max_iscsi_conn);
11035
11036 if (!CNIC_SUPPORT(bp)) {
11037 bp->flags |= no_flags;
11038 return;
11039 }

 /* Get the number of maximum allowed iSCSI connections */
11042 bp->cnic_eth_dev.max_iscsi_conn =
11043 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11044 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11045
11046 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11047 bp->cnic_eth_dev.max_iscsi_conn);

 /*
  * If maximum allowed number of connections is zero -
  * disable the feature.
  */
11053 if (!bp->cnic_eth_dev.max_iscsi_conn)
11054 bp->flags |= no_flags;
11055}
11056
11057static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11058{
 /* Port info */
11060 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11061 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11062 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11063 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);

 /* Node info */
11066 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11067 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11068 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11069 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11070}
11071
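/* Count the functions that may run FCoE and hence share the FCoE HW
 * resources of this engine.
 */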
11072static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11073{
11074 u8 count = 0;
11075
11076 if (IS_MF(bp)) {
11077 u8 fid;

 /* iterate over absolute function ids for this path: */
11080 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11081 if (IS_MF_SD(bp)) {
11082 u32 cfg = MF_CFG_RD(bp,
11083 func_mf_config[fid].config);
11084
11085 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11086 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11087 FUNC_MF_CFG_PROTOCOL_FCOE))
11088 count++;
11089 } else {
11090 u32 cfg = MF_CFG_RD(bp,
11091 func_ext_config[fid].
11092 func_cfg);
11093
11094 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11095 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11096 count++;
11097 }
11098 }
11099 } else {
11100 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11101
11102 for (port = 0; port < port_cnt; port++) {
11103 u32 lic = SHMEM_RD(bp,
11104 drv_lic_key[port].max_fcoe_conn) ^
11105 FW_ENCODE_32BIT_PATTERN;
11106 if (lic)
11107 count++;
11108 }
11109 }
11110
11111 return count;
11112}
11113
11114static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11115{
11116 int port = BP_PORT(bp);
11117 int func = BP_ABS_FUNC(bp);
11118 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11119 drv_lic_key[port].max_fcoe_conn);
11120 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11121
11122 if (!CNIC_SUPPORT(bp)) {
11123 bp->flags |= NO_FCOE_FLAG;
11124 return;
11125 }

 /* Get the number of maximum allowed FCoE connections */
11128 bp->cnic_eth_dev.max_fcoe_conn =
11129 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11130 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;

 /* Calculate the number of maximum allowed FCoE tasks */
11133 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;

 /* check if FCoE resources must be shared between different functions */
11136 if (num_fcoe_func)
11137 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;

 /* Read the WWN info */
11140 if (!IS_MF(bp)) {
 /* Port info */
11142 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11143 SHMEM_RD(bp,
11144 dev_info.port_hw_config[port].
11145 fcoe_wwn_port_name_upper);
11146 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11147 SHMEM_RD(bp,
11148 dev_info.port_hw_config[port].
11149 fcoe_wwn_port_name_lower);

 /* Node info */
11152 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11153 SHMEM_RD(bp,
11154 dev_info.port_hw_config[port].
11155 fcoe_wwn_node_name_upper);
11156 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11157 SHMEM_RD(bp,
11158 dev_info.port_hw_config[port].
11159 fcoe_wwn_node_name_lower);
11160 } else if (!IS_MF_SD(bp)) {
 /*
  * Read the WWN info only if the FCoE feature is enabled for
  * this function.
  */
11165 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11166 bnx2x_get_ext_wwn_info(bp, func);
11167
11168 } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
11169 bnx2x_get_ext_wwn_info(bp, func);
11170 }
11171
11172 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);

 /*
  * If maximum allowed number of connections is zero -
  * disable the feature.
  */
11178 if (!bp->cnic_eth_dev.max_fcoe_conn)
11179 bp->flags |= NO_FCOE_FLAG;
11180}
11181
11182static void bnx2x_get_cnic_info(struct bnx2x *bp)
11183{
 /*
  * iSCSI may be dynamically disabled but reading
  * info here we will decrease memory usage by driver
  * if the feature is disabled for good
  */
11189 bnx2x_get_iscsi_info(bp);
11190 bnx2x_get_fcoe_info(bp);
11191}
11192
11193static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11194{
11195 u32 val, val2;
11196 int func = BP_ABS_FUNC(bp);
11197 int port = BP_PORT(bp);
11198 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11199 u8 *fip_mac = bp->fip_mac;
11200
11201 if (IS_MF(bp)) {
 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
  * FCoE MAC then the appropriate feature should be disabled.
  * In non SD mode features configuration comes from struct
  * func_ext_config.
  */
11207 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
11208 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11209 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11210 val2 = MF_CFG_RD(bp, func_ext_config[func].
11211 iscsi_mac_addr_upper);
11212 val = MF_CFG_RD(bp, func_ext_config[func].
11213 iscsi_mac_addr_lower);
11214 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11215 BNX2X_DEV_INFO
11216 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11217 } else {
11218 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11219 }
11220
11221 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11222 val2 = MF_CFG_RD(bp, func_ext_config[func].
11223 fcoe_mac_addr_upper);
11224 val = MF_CFG_RD(bp, func_ext_config[func].
11225 fcoe_mac_addr_lower);
11226 bnx2x_set_mac_buf(fip_mac, val, val2);
11227 BNX2X_DEV_INFO
11228 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11229 } else {
11230 bp->flags |= NO_FCOE_FLAG;
11231 }
11232
11233 bp->mf_ext_config = cfg;
11234
11235 } else {
11236 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
 /* use primary mac as iscsi mac */
11238 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11239
11240 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11241 BNX2X_DEV_INFO
11242 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11243 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
 /* use primary mac as fip mac */
11245 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11246 BNX2X_DEV_INFO("SD FCoE MODE\n");
11247 BNX2X_DEV_INFO
11248 ("Read FIP MAC: %pM\n", fip_mac);
11249 }
11250 }

 /* If this is a storage-only interface, use SAN mac as
  * primary MAC. Notice that for SD this is already the case,
  * as the SAN mac was copied from the primary MAC.
  */
11256 if (IS_MF_FCOE_AFEX(bp))
11257 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11258 } else {
11259 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11260 iscsi_mac_upper);
11261 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11262 iscsi_mac_lower);
11263 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11264
11265 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11266 fcoe_fip_mac_upper);
11267 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11268 fcoe_fip_mac_lower);
11269 bnx2x_set_mac_buf(fip_mac, val, val2);
11270 }

 /* Disable iSCSI OOO if MAC configuration is invalid. */
11273 if (!is_valid_ether_addr(iscsi_mac)) {
11274 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11275 memset(iscsi_mac, 0, ETH_ALEN);
11276 }

 /* Disable FCoE if MAC configuration is invalid. */
11279 if (!is_valid_ether_addr(fip_mac)) {
11280 bp->flags |= NO_FCOE_FLAG;
11281 memset(bp->fip_mac, 0, ETH_ALEN);
11282 }
11283}
11284
11285static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11286{
11287 u32 val, val2;
11288 int func = BP_ABS_FUNC(bp);
11289 int port = BP_PORT(bp);

 /* Zero primary MAC configuration */
11292 memset(bp->dev->dev_addr, 0, ETH_ALEN);
11293
11294 if (BP_NOMCP(bp)) {
11295 BNX2X_ERROR("warning: random MAC workaround active\n");
11296 eth_hw_addr_random(bp->dev);
11297 } else if (IS_MF(bp)) {
11298 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11299 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11300 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11301 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11302 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11303
11304 if (CNIC_SUPPORT(bp))
11305 bnx2x_get_cnic_mac_hwinfo(bp);
11306 } else {
 /* in SF read MACs from port configuration */
11308 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11309 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11310 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11311
11312 if (CNIC_SUPPORT(bp))
11313 bnx2x_get_cnic_mac_hwinfo(bp);
11314 }
11315
11316 if (!BP_NOMCP(bp)) {
 /* Read physical port identifier from shmem */
11318 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11319 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11320 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11321 bp->flags |= HAS_PHYS_PORT_ID;
11322 }
11323
11324 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11325
11326 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
11327 dev_err(&bp->pdev->dev,
11328 "bad Ethernet MAC address configuration: %pM\n"
11329 "change it manually before bringing up the appropriate network interface\n",
11330 bp->dev->dev_addr);
11331}
11332
11333static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11334{
11335 int tmp;
11336 u32 cfg;
11337
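 /* dropless flow control is not supported for a VF */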
11338 if (IS_VF(bp))
 return false;
11340
11341 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
 /* Take function: tmp = func */
11343 tmp = BP_ABS_FUNC(bp);
11344 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11345 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11346 } else {
 /* Take port: tmp = port */
11348 tmp = BP_PORT(bp);
11349 cfg = SHMEM_RD(bp,
11350 dev_info.port_hw_config[tmp].generic_features);
11351 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11352 }
11353 return cfg;
11354}
11355
11356static int bnx2x_get_hwinfo(struct bnx2x *bp)
11357{
11358 int func = BP_ABS_FUNC(bp);
11359 int vn;
11360 u32 val = 0;
11361 int rc = 0;
11362
11363 bnx2x_get_common_hwinfo(bp);

 /* initialize IGU parameters */
11368 if (CHIP_IS_E1x(bp)) {
11369 bp->common.int_block = INT_BLOCK_HC;
11370
11371 bp->igu_dsb_id = DEF_SB_IGU_ID;
11372 bp->igu_base_sb = 0;
11373 } else {
11374 bp->common.int_block = INT_BLOCK_IGU;

 /* do not allow device reset during IGU info processing */
11377 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11378
11379 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11380
11381 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11382 int tout = 5000;
11383
11384 BNX2X_DEV_INFO("FORCING Normal Mode\n");
11385
11386 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11387 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11388 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11389
11390 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11391 tout--;
11392 usleep_range(1000, 2000);
11393 }
11394
11395 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11396 dev_err(&bp->pdev->dev,
11397 "FORCING Normal Mode failed!!!\n");
11398 bnx2x_release_hw_lock(bp,
11399 HW_LOCK_RESOURCE_RESET);
11400 return -EPERM;
11401 }
11402 }
11403
11404 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11405 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11406 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11407 } else
11408 BNX2X_DEV_INFO("IGU Normal Mode\n");
11409
11410 rc = bnx2x_get_igu_cam_info(bp);
11411 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11412 if (rc)
11413 return rc;
11414 }

 /* Set base FW non-default (fast path) status block id. This value is
  * used to initialize the fw_sb_id saved on the fp/queue structure to
  * determine the id used by the FW.
  */
11421 if (CHIP_IS_E1x(bp))
11422 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11423 else
 /*
  * On E2+ the driver uses one FW SB per IGU SB (Rx and Tx of
  * the same queue are indicated on the same IGU SB), so keep
  * the FW and IGU SB ids equal.
  */
11428 bp->base_fw_ndsb = bp->igu_base_sb;
11429
11430 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
11431 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11432 bp->igu_sb_cnt, bp->base_fw_ndsb);

 /*
  * Initialize MF configuration
  */
11438 bp->mf_ov = 0;
11439 bp->mf_mode = 0;
11440 vn = BP_VN(bp);
11441
11442 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
11443 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
11444 bp->common.shmem2_base, SHMEM2_RD(bp, size),
11445 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
11446
11447 if (SHMEM2_HAS(bp, mf_cfg_addr))
11448 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
11449 else
11450 bp->common.mf_cfg_base = bp->common.shmem_base +
11451 offsetof(struct shmem_region, func_mb) +
11452 E1H_FUNC_MAX * sizeof(struct drv_func_mb);

 /*
  * get mf configuration:
  * 1. Existence of MF configuration
  * 2. MAC address must be legal (check only upper bytes)
  *    for Switch-Independent mode;
  *    OVLAN must be legal for Switch-Dependent mode
  * 3. SF_MODE configures specific MF mode
  */
11461 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
 /* get mf configuration */
11463 val = SHMEM_RD(bp,
11464 dev_info.shared_feature_config.config);
11465 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
11466
11467 switch (val) {
11468 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
11469 val = MF_CFG_RD(bp, func_mf_config[func].
11470 mac_upper);
 /* check for legal mac (upper bytes) */
11472 if (val != 0xffff) {
11473 bp->mf_mode = MULTI_FUNCTION_SI;
11474 bp->mf_config[vn] = MF_CFG_RD(bp,
11475 func_mf_config[func].config);
11476 } else
11477 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11478 break;
11479 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
11480 if ((!CHIP_IS_E1x(bp)) &&
11481 (MF_CFG_RD(bp, func_mf_config[func].
11482 mac_upper) != 0xffff) &&
11483 (SHMEM2_HAS(bp,
11484 afex_driver_support))) {
11485 bp->mf_mode = MULTI_FUNCTION_AFEX;
11486 bp->mf_config[vn] = MF_CFG_RD(bp,
11487 func_mf_config[func].config);
11488 } else {
11489 BNX2X_DEV_INFO("can not configure afex mode\n");
11490 }
11491 break;
11492 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
 /* get OV configuration */
11494 val = MF_CFG_RD(bp,
11495 func_mf_config[FUNC_0].e1hov_tag);
11496 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
11497
11498 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11499 bp->mf_mode = MULTI_FUNCTION_SD;
11500 bp->mf_config[vn] = MF_CFG_RD(bp,
11501 func_mf_config[func].config);
11502 } else
11503 BNX2X_DEV_INFO("illegal OV for SD\n");
11504 break;
11505 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
11506 bp->mf_config[vn] = 0;
11507 break;
11508 default:
 /* Unknown configuration: reset mf_config */
11510 bp->mf_config[vn] = 0;
11511 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
11512 }
11513 }
11514
11515 BNX2X_DEV_INFO("%s function mode\n",
11516 IS_MF(bp) ? "multi" : "single");
11517
11518 switch (bp->mf_mode) {
11519 case MULTI_FUNCTION_SD:
11520 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
11521 FUNC_MF_CFG_E1HOV_TAG_MASK;
11522 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11523 bp->mf_ov = val;
11524 bp->path_has_ovlan = true;
11525
11526 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
11527 func, bp->mf_ov, bp->mf_ov);
11528 } else {
11529 dev_err(&bp->pdev->dev,
11530 "No valid MF OV for func %d, aborting\n",
11531 func);
11532 return -EPERM;
11533 }
11534 break;
11535 case MULTI_FUNCTION_AFEX:
11536 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
11537 break;
11538 case MULTI_FUNCTION_SI:
11539 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
11540 func);
11541 break;
11542 default:
11543 if (vn) {
11544 dev_err(&bp->pdev->dev,
11545 "VN %d is in a single function mode, aborting\n",
11546 vn);
11547 return -EPERM;
11548 }
11549 break;
11550 }

 /* check if other port on the path needs ovlan:
  * Since MF configuration is shared between ports
  * possible mixed modes are only
  * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
  */
11557 if (CHIP_MODE_IS_4_PORT(bp) &&
11558 !bp->path_has_ovlan &&
11559 !IS_MF(bp) &&
11560 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11561 u8 other_port = !BP_PORT(bp);
11562 u8 other_func = BP_PATH(bp) + 2*other_port;
11563 val = MF_CFG_RD(bp,
11564 func_mf_config[other_func].e1hov_tag);
11565 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
11566 bp->path_has_ovlan = true;
11567 }
11568 }

 /* adjust igu_sb_cnt to MF for E1H */
11571 if (CHIP_IS_E1H(bp) && IS_MF(bp))
11572 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);

 /* port info */
11575 bnx2x_get_port_hwinfo(bp);

 /* Get MAC addresses */
11578 bnx2x_get_mac_hwinfo(bp);
11579
11580 bnx2x_get_cnic_info(bp);
11581
11582 return rc;
11583}
11584
11585static void bnx2x_read_fwinfo(struct bnx2x *bp)
11586{
11587 int cnt, i, block_end, rodi;
11588 char vpd_start[BNX2X_VPD_LEN+1];
11589 char str_id_reg[VENDOR_ID_LEN+1];
11590 char str_id_cap[VENDOR_ID_LEN+1];
11591 char *vpd_data;
11592 char *vpd_extended_data = NULL;
11593 u8 len;
11594
11595 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
11596 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
11597
11598 if (cnt < BNX2X_VPD_LEN)
11599 goto out_not_found;

 /* VPD RO tag should be first tag after identifier string, hence
  * we should be able to find it in first BNX2X_VPD_LEN chars
  */
11604 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
11605 PCI_VPD_LRDT_RO_DATA);
11606 if (i < 0)
11607 goto out_not_found;
11608
11609 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
11610 pci_vpd_lrdt_size(&vpd_start[i]);
11611
11612 i += PCI_VPD_LRDT_TAG_SIZE;
11613
11614 if (block_end > BNX2X_VPD_LEN) {
11615 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
11616 if (vpd_extended_data == NULL)
11617 goto out_not_found;

 /* read rest of vpd image into vpd_extended_data */
11620 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
11621 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
11622 block_end - BNX2X_VPD_LEN,
11623 vpd_extended_data + BNX2X_VPD_LEN);
11624 if (cnt < (block_end - BNX2X_VPD_LEN))
11625 goto out_not_found;
11626 vpd_data = vpd_extended_data;
11627 } else
11628 vpd_data = vpd_start;

 /* now vpd_data holds full vpd content in both cases */

11632 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11633 PCI_VPD_RO_KEYWORD_MFR_ID);
11634 if (rodi < 0)
11635 goto out_not_found;
11636
11637 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11638
11639 if (len != VENDOR_ID_LEN)
11640 goto out_not_found;
11641
11642 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

 /* vendor specific info */
11645 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
11646 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
11647 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
11648 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
11649
11650 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11651 PCI_VPD_RO_KEYWORD_VENDOR0);
11652 if (rodi >= 0) {
11653 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11654
11655 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11656
11657 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
11658 memcpy(bp->fw_ver, &vpd_data[rodi], len);
11659 bp->fw_ver[len] = ' ';
11660 }
11661 }
11662 kfree(vpd_extended_data);
11663 return;
11664 }
11665out_not_found:
11666 kfree(vpd_extended_data);
11667 return;
11668}
11669
11670static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
11671{
11672 u32 flags = 0;
11673
11674 if (CHIP_REV_IS_FPGA(bp))
11675 SET_FLAGS(flags, MODE_FPGA);
11676 else if (CHIP_REV_IS_EMUL(bp))
11677 SET_FLAGS(flags, MODE_EMUL);
11678 else
11679 SET_FLAGS(flags, MODE_ASIC);
11680
11681 if (CHIP_MODE_IS_4_PORT(bp))
11682 SET_FLAGS(flags, MODE_PORT4);
11683 else
11684 SET_FLAGS(flags, MODE_PORT2);
11685
11686 if (CHIP_IS_E2(bp))
11687 SET_FLAGS(flags, MODE_E2);
11688 else if (CHIP_IS_E3(bp)) {
11689 SET_FLAGS(flags, MODE_E3);
11690 if (CHIP_REV(bp) == CHIP_REV_Ax)
11691 SET_FLAGS(flags, MODE_E3_A0);
11692 else
11693 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
11694 }
11695
11696 if (IS_MF(bp)) {
11697 SET_FLAGS(flags, MODE_MF);
11698 switch (bp->mf_mode) {
11699 case MULTI_FUNCTION_SD:
11700 SET_FLAGS(flags, MODE_MF_SD);
11701 break;
11702 case MULTI_FUNCTION_SI:
11703 SET_FLAGS(flags, MODE_MF_SI);
11704 break;
11705 case MULTI_FUNCTION_AFEX:
11706 SET_FLAGS(flags, MODE_MF_AFEX);
11707 break;
11708 }
11709 } else
11710 SET_FLAGS(flags, MODE_SF);
11711
11712#if defined(__LITTLE_ENDIAN)
11713 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
11714#else
11715 SET_FLAGS(flags, MODE_BIG_ENDIAN);
11716#endif
11717 INIT_MODE_FLAGS(bp) = flags;
11718}
11719
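/* Initialize the driver-private part of the device; called once at probe */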
11720static int bnx2x_init_bp(struct bnx2x *bp)
11721{
11722 int func;
11723 int rc;
11724
11725 mutex_init(&bp->port.phy_mutex);
11726 mutex_init(&bp->fw_mb_mutex);
11727 spin_lock_init(&bp->stats_lock);
11728 sema_init(&bp->stats_sema, 1);
11729
11730 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
11731 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
11732 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
11733 if (IS_PF(bp)) {
11734 rc = bnx2x_get_hwinfo(bp);
11735 if (rc)
11736 return rc;
11737 } else {
11738 eth_zero_addr(bp->dev->dev_addr);
11739 }
11740
11741 bnx2x_set_modes_bitmap(bp);
11742
11743 rc = bnx2x_alloc_mem_bp(bp);
11744 if (rc)
11745 return rc;
11746
11747 bnx2x_read_fwinfo(bp);
11748
11749 func = BP_FUNC(bp);

 /* need to reset chip if undi was active */
11752 if (IS_PF(bp) && !BP_NOMCP(bp)) {
 /* init fw_seq */
11754 bp->fw_seq =
11755 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
11756 DRV_MSG_SEQ_NUMBER_MASK;
11757 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11758
11759 rc = bnx2x_prev_unload(bp);
11760 if (rc) {
11761 bnx2x_free_mem_bp(bp);
11762 return rc;
11763 }
11764 }
11765
11766 if (CHIP_REV_IS_FPGA(bp))
11767 dev_err(&bp->pdev->dev, "FPGA detected\n");
11768
11769 if (BP_NOMCP(bp) && (func == 0))
11770 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
11771
11772 bp->disable_tpa = disable_tpa;
11773 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);

 /* Set TPA flags */
11776 if (bp->disable_tpa) {
11777 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11778 bp->dev->features &= ~NETIF_F_LRO;
11779 } else {
11780 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11781 bp->dev->features |= NETIF_F_LRO;
11782 }
11783
11784 if (CHIP_IS_E1(bp))
11785 bp->dropless_fc = 0;
11786 else
11787 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
11788
11789 bp->mrrs = mrrs;
11790
11791 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
11792 if (IS_VF(bp))
11793 bp->rx_ring_size = MAX_RX_AVAIL;

 /* make sure that the numbers are in the right granularity */
11796 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
11797 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
11798
11799 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
11800
11801 init_timer(&bp->timer);
11802 bp->timer.expires = jiffies + bp->current_interval;
11803 bp->timer.data = (unsigned long) bp;
11804 bp->timer.function = bnx2x_timer;
11805
11806 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
11807 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
11808 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
11809 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
11810 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
11811 bnx2x_dcbx_init_params(bp);
11812 } else {
11813 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
11814 }
11815
11816 if (CHIP_IS_E1x(bp))
11817 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
11818 else
11819 bp->cnic_base_cl_id = FP_SB_MAX_E2;

 /* select the maximal number of traffic classes (CoS) per chip */
11822 if (IS_VF(bp))
11823 bp->max_cos = 1;
11824 else if (CHIP_IS_E1x(bp))
11825 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
11826 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
11827 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
11828 else if (CHIP_IS_E3B0(bp))
11829 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
11830 else
11831 BNX2X_ERR("unknown chip %x revision %x\n",
11832 CHIP_NUM(bp), CHIP_REV(bp));
11833 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);

 /* We need at least one default status block for slow-path events,
  * a second status block for the L2 queue, and a third status block
  * for CNIC if supported.
  */
11839 if (IS_VF(bp))
11840 bp->min_msix_vec_cnt = 1;
11841 else if (CNIC_SUPPORT(bp))
11842 bp->min_msix_vec_cnt = 3;
11843 else
11844 bp->min_msix_vec_cnt = 2;
11845 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
11846
11847 bp->dump_preset_idx = 1;
11848
11849 return rc;
11850}

/****************************************************************************
* General service functions
****************************************************************************/

/*
 * net_device service functions
 */

/* called with rtnl_lock */
11861static int bnx2x_open(struct net_device *dev)
11862{
11863 struct bnx2x *bp = netdev_priv(dev);
11864 int rc;
11865
11866 bp->stats_init = true;
11867
11868 netif_carrier_off(dev);
11869
11870 bnx2x_set_power_state(bp, PCI_D0);

 /* If parity has happened during the unload, then attentions
  * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
  * want the first function loaded on the current engine to
  * complete the recovery.
  * Parity recovery is only relevant for PF driver.
  */
11878 if (IS_PF(bp)) {
11879 int other_engine = BP_PATH(bp) ? 0 : 1;
11880 bool other_load_status, load_status;
11881 bool global = false;
11882
11883 other_load_status = bnx2x_get_load_status(bp, other_engine);
11884 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11885 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
11886 bnx2x_chk_parity_attn(bp, &global, true)) {
11887 do {
 /* If there are attentions and they are in
  * global blocks, set the GLOBAL_RESET bit
  * regardless whether it will be this function
  * that will complete the recovery or not.
  */
11893 if (global)
11894 bnx2x_set_reset_global(bp);

 /* Only the first function on the current engine should try
  * to recover in open. In case of attentions in global blocks
  * only the first in the chip should try to recover.
  */
11901 if ((!load_status &&
11902 (!global || !other_load_status)) &&
11903 bnx2x_trylock_leader_lock(bp) &&
11904 !bnx2x_leader_reset(bp)) {
11905 netdev_info(bp->dev,
11906 "Recovered in open\n");
11907 break;
11908 }

 /* recovery has failed... */
11911 bnx2x_set_power_state(bp, PCI_D3hot);
11912 bp->recovery_state = BNX2X_RECOVERY_FAILED;
11913
11914 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
11915 "If you still see this message after a few retries then power cycle is required.\n");
11916
11917 return -EAGAIN;
11918 } while (0);
11919 }
11920 }
11921
11922 bp->recovery_state = BNX2X_RECOVERY_DONE;
11923 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11924 if (rc)
11925 return rc;
11926 return 0;
11927}

/* called with rtnl_lock */
11930static int bnx2x_close(struct net_device *dev)
11931{
11932 struct bnx2x *bp = netdev_priv(dev);

 /* Unload the driver, release IRQs */
11935 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
11936
11937 return 0;
11938}
11939
11940static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
11941 struct bnx2x_mcast_ramrod_params *p)
11942{
11943 int mc_count = netdev_mc_count(bp->dev);
 struct bnx2x_mcast_list_elem *mc_mac =
 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
11946 struct netdev_hw_addr *ha;
11947
11948 if (!mc_mac)
11949 return -ENOMEM;
11950
11951 INIT_LIST_HEAD(&p->mcast_list);
11952
11953 netdev_for_each_mc_addr(ha, bp->dev) {
11954 mc_mac->mac = bnx2x_mc_addr(ha);
11955 list_add_tail(&mc_mac->link, &p->mcast_list);
11956 mc_mac++;
11957 }
11958
11959 p->mcast_list_len = mc_count;
11960
11961 return 0;
11962}
11963
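/* The list elements were carved from a single allocation in
 * bnx2x_init_mcast_macs_list(), so freeing the first element releases
 * the whole list.
 */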
11964static void bnx2x_free_mcast_macs_list(
11965 struct bnx2x_mcast_ramrod_params *p)
11966{
11967 struct bnx2x_mcast_list_elem *mc_mac =
11968 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
11969 link);
11970
11971 WARN_ON(!mc_mac);
11972 kfree(mc_mac);
11973}

/**
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 *
 * We will use zero (0) as a MAC type for these MACs.
 */
11982static int bnx2x_set_uc_list(struct bnx2x *bp)
11983{
11984 int rc;
11985 struct net_device *dev = bp->dev;
11986 struct netdev_hw_addr *ha;
11987 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
11988 unsigned long ramrod_flags = 0;

 /* first schedule a cleanup up of old configuration */
11991 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
11992 if (rc < 0) {
11993 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
11994 return rc;
11995 }
11996
11997 netdev_for_each_uc_addr(ha, dev) {
11998 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
11999 BNX2X_UC_LIST_MAC, &ramrod_flags);
12000 if (rc == -EEXIST) {
12001 DP(BNX2X_MSG_SP,
12002 "Failed to schedule ADD operations: %d\n", rc);
 /* do not treat adding same MAC as error */
12004 rc = 0;
12005
12006 } else if (rc < 0) {
12007
12008 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12009 rc);
12010 return rc;
12011 }
12012 }

 /* Execute the pending commands */
12015 __set_bit(RAMROD_CONT, &ramrod_flags);
 return bnx2x_set_mac_one(bp, NULL, mac_obj, false,
 BNX2X_UC_LIST_MAC, &ramrod_flags);
12018}
12019
12020static int bnx2x_set_mc_list(struct bnx2x *bp)
12021{
12022 struct net_device *dev = bp->dev;
12023 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12024 int rc = 0;
12025
12026 rparam.mcast_obj = &bp->mcast_obj;

 /* first, clear all configured multicast MACs */
12029 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12030 if (rc < 0) {
12031 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12032 return rc;
12033 }

 /* then, configure a new MACs list */
12036 if (netdev_mc_count(dev)) {
12037 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
12038 if (rc) {
12039 BNX2X_ERR("Failed to create multicast MACs list: %d\n",
12040 rc);
12041 return rc;
12042 }

 /* Now add the new MACs */
12045 rc = bnx2x_config_mcast(bp, &rparam,
12046 BNX2X_MCAST_CMD_ADD);
12047 if (rc < 0)
12048 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12049 rc);
12050
12051 bnx2x_free_mcast_macs_list(&rparam);
12052 }
12053
12054 return rc;
12055}

/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12058static void bnx2x_set_rx_mode(struct net_device *dev)
12059{
12060 struct bnx2x *bp = netdev_priv(dev);
12061
12062 if (bp->state != BNX2X_STATE_OPEN) {
12063 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12064 return;
12065 } else {
 /* Schedule an SP task to handle rest of change */
12067 DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
12068 smp_mb__before_clear_bit();
12069 set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
12070 smp_mb__after_clear_bit();
12071 schedule_delayed_work(&bp->sp_rtnl_task, 0);
12072 }
12073}
12074
12075void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12076{
12077 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12078
12079 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12080
12081 netif_addr_lock_bh(bp->dev);
12082
12083 if (bp->dev->flags & IFF_PROMISC) {
12084 rx_mode = BNX2X_RX_MODE_PROMISC;
12085 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12086 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12087 CHIP_IS_E1(bp))) {
12088 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12089 } else {
12090 if (IS_PF(bp)) {
 /* some multicasts */
12092 if (bnx2x_set_mc_list(bp) < 0)
12093 rx_mode = BNX2X_RX_MODE_ALLMULTI;

 /* release bh lock, as bnx2x_set_uc_list might sleep */
12096 netif_addr_unlock_bh(bp->dev);
12097 if (bnx2x_set_uc_list(bp) < 0)
12098 rx_mode = BNX2X_RX_MODE_PROMISC;
12099 netif_addr_lock_bh(bp->dev);
12100 } else {
 /* configuring mcast to a vf involves sleeping (when we
  * wait for the pf's response).
  */
12104 smp_mb__before_clear_bit();
12105 set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
12106 &bp->sp_rtnl_state);
12107 smp_mb__after_clear_bit();
12108 schedule_delayed_work(&bp->sp_rtnl_task, 0);
12109 }
12110 }
12111
12112 bp->rx_mode = rx_mode;
12113
12114 if (IS_MF_ISCSI_SD(bp))
12115 bp->rx_mode = BNX2X_RX_MODE_NONE;

 /* Schedule the rx_mode command */
12118 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12119 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12120 netif_addr_unlock_bh(bp->dev);
12121 return;
12122 }
12123
12124 if (IS_PF(bp)) {
12125 bnx2x_set_storm_rx_mode(bp);
12126 netif_addr_unlock_bh(bp->dev);
12127 } else {
 /* VF will need to request the PF to make this change, and so
  * the VF needs to release the bottom-half lock prior to the
  * request (as it will likely require sleep on the VF side)
  */
12132 netif_addr_unlock_bh(bp->dev);
12133 bnx2x_vfpf_storm_rx_mode(bp);
12134 }
12135}

/* called with rtnl_lock */
12138static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12139 int devad, u16 addr)
12140{
12141 struct bnx2x *bp = netdev_priv(netdev);
12142 u16 value;
12143 int rc;
12144
12145 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12146 prtad, devad, addr);

 /* The HW expects different devad if CL22 is used */
12149 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12150
12151 bnx2x_acquire_phy_lock(bp);
12152 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12153 bnx2x_release_phy_lock(bp);
12154 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12155
12156 if (!rc)
12157 rc = value;
12158 return rc;
12159}

/* called with rtnl_lock */
12162static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12163 u16 addr, u16 value)
12164{
12165 struct bnx2x *bp = netdev_priv(netdev);
12166 int rc;
12167
12168 DP(NETIF_MSG_LINK,
12169 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12170 prtad, devad, addr, value);

 /* The HW expects different devad if CL22 is used */
12173 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12174
12175 bnx2x_acquire_phy_lock(bp);
12176 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12177 bnx2x_release_phy_lock(bp);
12178 return rc;
12179}

/* called with rtnl_lock */
12182static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12183{
12184 struct bnx2x *bp = netdev_priv(dev);
12185 struct mii_ioctl_data *mdio = if_mii(ifr);
12186
12187 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12188 mdio->phy_id, mdio->reg_num, mdio->val_in);
12189
12190 if (!netif_running(dev))
12191 return -EAGAIN;
12192
12193 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12194}
12195
12196#ifdef CONFIG_NET_POLL_CONTROLLER
12197static void poll_bnx2x(struct net_device *dev)
12198{
12199 struct bnx2x *bp = netdev_priv(dev);
12200 int i;
12201
12202 for_each_eth_queue(bp, i) {
12203 struct bnx2x_fastpath *fp = &bp->fp[i];
12204 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12205 }
12206}
12207#endif
12208
12209static int bnx2x_validate_addr(struct net_device *dev)
12210{
12211 struct bnx2x *bp = netdev_priv(dev);

 /* query the bulletin board for the MAC address configured by the PF */
12214 if (IS_VF(bp))
12215 bnx2x_sample_bulletin(bp);
12216
12217 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
12218 BNX2X_ERR("Non-valid Ethernet address\n");
12219 return -EADDRNOTAVAIL;
12220 }
12221 return 0;
12222}
12223
12224static int bnx2x_get_phys_port_id(struct net_device *netdev,
12225 struct netdev_phys_port_id *ppid)
12226{
12227 struct bnx2x *bp = netdev_priv(netdev);
12228
12229 if (!(bp->flags & HAS_PHYS_PORT_ID))
12230 return -EOPNOTSUPP;
12231
12232 ppid->id_len = sizeof(bp->phys_port_id);
12233 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12234
12235 return 0;
12236}
12237
12238static const struct net_device_ops bnx2x_netdev_ops = {
12239 .ndo_open = bnx2x_open,
12240 .ndo_stop = bnx2x_close,
12241 .ndo_start_xmit = bnx2x_start_xmit,
12242 .ndo_select_queue = bnx2x_select_queue,
12243 .ndo_set_rx_mode = bnx2x_set_rx_mode,
12244 .ndo_set_mac_address = bnx2x_change_mac_addr,
12245 .ndo_validate_addr = bnx2x_validate_addr,
12246 .ndo_do_ioctl = bnx2x_ioctl,
12247 .ndo_change_mtu = bnx2x_change_mtu,
12248 .ndo_fix_features = bnx2x_fix_features,
12249 .ndo_set_features = bnx2x_set_features,
12250 .ndo_tx_timeout = bnx2x_tx_timeout,
12251#ifdef CONFIG_NET_POLL_CONTROLLER
12252 .ndo_poll_controller = poll_bnx2x,
12253#endif
12254 .ndo_setup_tc = bnx2x_setup_tc,
12255#ifdef CONFIG_BNX2X_SRIOV
12256 .ndo_set_vf_mac = bnx2x_set_vf_mac,
12257 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
12258 .ndo_get_vf_config = bnx2x_get_vf_config,
12259#endif
12260#ifdef NETDEV_FCOE_WWNN
12261 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
12262#endif
12263
12264#ifdef CONFIG_NET_RX_BUSY_POLL
12265 .ndo_busy_poll = bnx2x_low_latency_recv,
12266#endif
12267 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
12268};
12269
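/* Request a 64-bit DMA mask, falling back to 32-bit before giving up */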
12270static int bnx2x_set_coherency_mask(struct bnx2x *bp)
12271{
12272 struct device *dev = &bp->pdev->dev;
12273
12274 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
12275 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
12276 dev_err(dev, "System does not support DMA, aborting\n");
12277 return -EIO;
12278 }
12279
12280 return 0;
12281}
12282
12283static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
12284{
12285 if (bp->flags & AER_ENABLED) {
12286 pci_disable_pcie_error_reporting(bp->pdev);
12287 bp->flags &= ~AER_ENABLED;
12288 }
12289}
12290
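/* Set up the PCI device and generic netdev fields, common to PF and VF probe */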
12291static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12292 struct net_device *dev, unsigned long board_type)
12293{
12294 int rc;
12295 u32 pci_cfg_dword;
12296 bool chip_is_e1x = (board_type == BCM57710 ||
12297 board_type == BCM57711 ||
12298 board_type == BCM57711E);
12299
12300 SET_NETDEV_DEV(dev, &pdev->dev);
12301
12302 bp->dev = dev;
12303 bp->pdev = pdev;
12304
12305 rc = pci_enable_device(pdev);
12306 if (rc) {
12307 dev_err(&bp->pdev->dev,
12308 "Cannot enable PCI device, aborting\n");
12309 goto err_out;
12310 }
12311
12312 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12313 dev_err(&bp->pdev->dev,
12314 "Cannot find PCI device base address, aborting\n");
12315 rc = -ENODEV;
12316 goto err_out_disable;
12317 }
12318
12319 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12320 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
12321 rc = -ENODEV;
12322 goto err_out_disable;
12323 }
12324
12325 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
12326 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
12327 PCICFG_REVESION_ID_ERROR_VAL) {
12328 pr_err("PCI device error, probably due to fan failure, aborting\n");
12329 rc = -ENODEV;
12330 goto err_out_disable;
12331 }
12332
12333 if (atomic_read(&pdev->enable_cnt) == 1) {
12334 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12335 if (rc) {
12336 dev_err(&bp->pdev->dev,
12337 "Cannot obtain PCI resources, aborting\n");
12338 goto err_out_disable;
12339 }
12340
12341 pci_set_master(pdev);
12342 pci_save_state(pdev);
12343 }
12344
12345 if (IS_PF(bp)) {
12346 if (!pdev->pm_cap) {
12347 dev_err(&bp->pdev->dev,
12348 "Cannot find power management capability, aborting\n");
12349 rc = -EIO;
12350 goto err_out_release;
12351 }
12352 }
12353
12354 if (!pci_is_pcie(pdev)) {
12355 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
12356 rc = -EIO;
12357 goto err_out_release;
12358 }
12359
12360 rc = bnx2x_set_coherency_mask(bp);
12361 if (rc)
12362 goto err_out_release;
12363
12364 dev->mem_start = pci_resource_start(pdev, 0);
12365 dev->base_addr = dev->mem_start;
12366 dev->mem_end = pci_resource_end(pdev, 0);
12367
12368 dev->irq = pdev->irq;
12369
12370 bp->regview = pci_ioremap_bar(pdev, 0);
12371 if (!bp->regview) {
12372 dev_err(&bp->pdev->dev,
12373 "Cannot map register space, aborting\n");
12374 rc = -ENOMEM;
12375 goto err_out_release;
12376 }

 /* In E1/E1H use pci device function given by kernel.
  * In E2/E3 read physical function from ME register since these chips
  * support Physical Device Assignment where kernel BDF may be arbitrary
  * (depending on hypervisor).
  */
12383 if (chip_is_e1x) {
12384 bp->pf_num = PCI_FUNC(pdev->devfn);
12385 } else {
 /* chip is E2/E3 */
12387 pci_read_config_dword(bp->pdev,
12388 PCICFG_ME_REGISTER, &pci_cfg_dword);
12389 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
12390 ME_REG_ABS_PF_NUM_SHIFT);
12391 }
12392 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);

 /* clean indirect addresses */
12395 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12396 PCICFG_VENDOR_ID_OFFSET);

 /* AER (Advanced Error Reporting) configuration */
12399 rc = pci_enable_pcie_error_reporting(pdev);
12400 if (!rc)
12401 bp->flags |= AER_ENABLED;
12402 else
 BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);

 /*
  * Clean the following indirect addresses for all functions since it
  * is not used by the driver.
  */
12409 if (IS_PF(bp)) {
12410 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
12411 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
12412 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
12413 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
12414
12415 if (chip_is_e1x) {
12416 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
12417 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
12418 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
12419 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
12420 }

 /* Enable internal target-read (in case we are probed after PF
  * FLR). Must be done prior to any BAR read access. Only for
  * 57712 and up.
  */
12426 if (!chip_is_e1x)
12427 REG_WR(bp,
12428 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
12429 }
12430
12431 dev->watchdog_timeo = TX_TIMEOUT;
12432
12433 dev->netdev_ops = &bnx2x_netdev_ops;
12434 bnx2x_set_ethtool_ops(bp, dev);
12435
12436 dev->priv_flags |= IFF_UNICAST_FLT;
12437
12438 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12439 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12440 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
12441 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12442 if (!CHIP_IS_E1x(bp)) {
12443 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
12444 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
12445 dev->hw_enc_features =
12446 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12447 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12448 NETIF_F_GSO_IPIP |
12449 NETIF_F_GSO_SIT |
12450 NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
12451 }
12452
12453 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12454 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12455
12456 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
12457 dev->features |= NETIF_F_HIGHDMA;
12458
12459
12460 dev->hw_features |= NETIF_F_LOOPBACK;
12461
12462#ifdef BCM_DCBNL
12463 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
12464#endif

 /* get_port_hwinfo() will set prtad and mmds properly */
12467 bp->mdio.prtad = MDIO_PRTAD_NONE;
12468 bp->mdio.mmds = 0;
12469 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12470 bp->mdio.dev = dev;
12471 bp->mdio.mdio_read = bnx2x_mdio_read;
12472 bp->mdio.mdio_write = bnx2x_mdio_write;
12473
12474 return 0;
12475
12476err_out_release:
12477 if (atomic_read(&pdev->enable_cnt) == 1)
12478 pci_release_regions(pdev);
12479
12480err_out_disable:
12481 pci_disable_device(pdev);
12482
12483err_out:
12484 return rc;
12485}
12486
12487static int bnx2x_check_firmware(struct bnx2x *bp)
12488{
12489 const struct firmware *firmware = bp->firmware;
12490 struct bnx2x_fw_file_hdr *fw_hdr;
12491 struct bnx2x_fw_file_section *sections;
12492 u32 offset, len, num_ops;
12493 __be16 *ops_offsets;
12494 int i;
12495 const u8 *fw_ver;
12496
12497 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
12498 BNX2X_ERR("Wrong FW size\n");
12499 return -EINVAL;
12500 }
12501
12502 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12503 sections = (struct bnx2x_fw_file_section *)fw_hdr;

 /* Make sure none of the offsets and sizes make us read beyond
  * the end of the firmware data */
12507 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12508 offset = be32_to_cpu(sections[i].offset);
12509 len = be32_to_cpu(sections[i].len);
12510 if (offset + len > firmware->size) {
12511 BNX2X_ERR("Section %d length is out of bounds\n", i);
12512 return -EINVAL;
12513 }
12514 }

 /* Likewise for the init_ops offsets */
12517 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12518 ops_offsets = (__force __be16 *)(firmware->data + offset);
12519 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12520
12521 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12522 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12523 BNX2X_ERR("Section offset %d is out of bounds\n", i);
12524 return -EINVAL;
12525 }
12526 }

 /* Check FW version */
12529 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12530 fw_ver = firmware->data + offset;
12531 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12532 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12533 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12534 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12535 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
12536 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
12537 BCM_5710_FW_MAJOR_VERSION,
12538 BCM_5710_FW_MINOR_VERSION,
12539 BCM_5710_FW_REVISION_VERSION,
12540 BCM_5710_FW_ENGINEERING_VERSION);
12541 return -EINVAL;
12542 }
12543
12544 return 0;
12545}
12546
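/* Convert an array of big-endian 32-bit words into host byte order */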
12547static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12548{
12549 const __be32 *source = (const __be32 *)_source;
12550 u32 *target = (u32 *)_target;
12551 u32 i;
12552
12553 for (i = 0; i < n/4; i++)
12554 target[i] = be32_to_cpu(source[i]);
12555}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
12561static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12562{
12563 const __be32 *source = (const __be32 *)_source;
12564 struct raw_op *target = (struct raw_op *)_target;
12565 u32 i, j, tmp;
12566
12567 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12568 tmp = be32_to_cpu(source[j]);
12569 target[i].op = (tmp >> 24) & 0xff;
12570 target[i].offset = tmp & 0xffffff;
12571 target[i].raw_data = be32_to_cpu(source[j + 1]);
12572 }
12573}

/* IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 */
12578static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
12579{
12580 const __be32 *source = (const __be32 *)_source;
12581 struct iro *target = (struct iro *)_target;
12582 u32 i, j, tmp;
12583
12584 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
12585 target[i].base = be32_to_cpu(source[j]);
12586 j++;
12587 tmp = be32_to_cpu(source[j]);
12588 target[i].m1 = (tmp >> 16) & 0xffff;
12589 target[i].m2 = tmp & 0xffff;
12590 j++;
12591 tmp = be32_to_cpu(source[j]);
12592 target[i].m3 = (tmp >> 16) & 0xffff;
12593 target[i].size = tmp & 0xffff;
12594 j++;
12595 }
12596}
12597
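/* Convert an array of big-endian 16-bit words into host byte order */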
12598static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12599{
12600 const __be16 *source = (const __be16 *)_source;
12601 u16 *target = (u16 *)_target;
12602 u32 i;
12603
12604 for (i = 0; i < n/2; i++)
12605 target[i] = be16_to_cpu(source[i]);
12606}
12607
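/* Allocate bp->arr and fill it with the converted contents of the
 * corresponding firmware-file section; jumps to 'lbl' on allocation
 * failure.
 */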
12608#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12609do { \
12610 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12611 bp->arr = kmalloc(len, GFP_KERNEL); \
12612 if (!bp->arr) \
12613 goto lbl; \
12614 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12615 (u8 *)bp->arr, len); \
12616} while (0)
12617
12618static int bnx2x_init_firmware(struct bnx2x *bp)
12619{
12620 const char *fw_file_name;
12621 struct bnx2x_fw_file_hdr *fw_hdr;
12622 int rc;
12623
12624 if (bp->firmware)
12625 return 0;
12626
12627 if (CHIP_IS_E1(bp))
12628 fw_file_name = FW_FILE_NAME_E1;
12629 else if (CHIP_IS_E1H(bp))
12630 fw_file_name = FW_FILE_NAME_E1H;
12631 else if (!CHIP_IS_E1x(bp))
12632 fw_file_name = FW_FILE_NAME_E2;
12633 else {
12634 BNX2X_ERR("Unsupported chip revision\n");
12635 return -EINVAL;
12636 }
12637 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
12638
12639 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
12640 if (rc) {
12641 BNX2X_ERR("Can't load firmware file %s\n",
12642 fw_file_name);
12643 goto request_firmware_exit;
12644 }
12645
12646 rc = bnx2x_check_firmware(bp);
12647 if (rc) {
12648 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
12649 goto request_firmware_exit;
12650 }
12651
12652 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

 /* Initialize the pointers to the init arrays */
 /* Blob */
12656 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

 /* Opcodes */
12659 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

 /* Offsets */
12662 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12663 be16_to_cpu_n);

 /* STORMs firmware */
12666 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12667 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12668 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12669 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12670 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12671 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12672 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12673 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12674 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12675 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12676 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12677 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12678 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12679 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12680 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12681 be32_to_cpu(fw_hdr->csem_pram_data.offset);
12682
12683 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
12684
12685 return 0;
12686
12687iro_alloc_err:
12688 kfree(bp->init_ops_offsets);
12689init_offsets_alloc_err:
12690 kfree(bp->init_ops);
12691init_ops_alloc_err:
12692 kfree(bp->init_data);
12693request_firmware_exit:
12694 release_firmware(bp->firmware);
12695 bp->firmware = NULL;
12696
12697 return rc;
12698}
12699
12700static void bnx2x_release_firmware(struct bnx2x *bp)
12701{
12702 kfree(bp->init_ops_offsets);
12703 kfree(bp->init_ops);
12704 kfree(bp->init_data);
12705 release_firmware(bp->firmware);
12706 bp->firmware = NULL;
12707}
12708
12709static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
12710 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
12711 .init_hw_cmn = bnx2x_init_hw_common,
12712 .init_hw_port = bnx2x_init_hw_port,
12713 .init_hw_func = bnx2x_init_hw_func,
12714
12715 .reset_hw_cmn = bnx2x_reset_common,
12716 .reset_hw_port = bnx2x_reset_port,
12717 .reset_hw_func = bnx2x_reset_func,
12718
12719 .gunzip_init = bnx2x_gunzip_init,
12720 .gunzip_end = bnx2x_gunzip_end,
12721
12722 .init_fw = bnx2x_init_firmware,
12723 .release_fw = bnx2x_release_firmware,
12724};
12725
12726void bnx2x__init_func_obj(struct bnx2x *bp)
12727{
 /* Prepare DMAE related driver resources */
12729 bnx2x_setup_dmae(bp);
12730
12731 bnx2x_init_func_obj(bp, &bp->func_obj,
12732 bnx2x_sp(bp, func_rdata),
12733 bnx2x_sp_mapping(bp, func_rdata),
12734 bnx2x_sp(bp, func_afex_rdata),
12735 bnx2x_sp_mapping(bp, func_afex_rdata),
12736 &bnx2x_func_sp_drv);
12737}

/* must be called after sriov-enable */
12740static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
12741{
12742 int cid_count = BNX2X_L2_MAX_CID(bp);
12743
12744 if (IS_SRIOV(bp))
12745 cid_count += BNX2X_VF_CIDS;
12746
12747 if (CNIC_SUPPORT(bp))
12748 cid_count += CNIC_CID_MAX;
12749
12750 return roundup(cid_count, QM_CID_ROUND);
12751}

/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 *
 * @pdev: pci device
 * @cnic_cnt: number of CNIC SBs
 */
12759static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
12760{
12761 int index;
12762 u16 control = 0;

 /*
  * If MSI-X is not supported - return the number of SBs needed to
  * support one fast path queue: one FP queue + SB for CNIC
  */
12768 if (!pdev->msix_cap) {
12769 dev_info(&pdev->dev, "no msix capability found\n");
12770 return 1 + cnic_cnt;
12771 }
12772 dev_info(&pdev->dev, "msix capability found\n");

 /*
  * The value in the PCI configuration space is the index of the last
  * entry, namely one less than the actual size of the table, which is
  * exactly what we want to return from this function: number of all SBs
  * without the default SB.
  * For VFs there is no default SB, then we return (index+1).
  */
12781 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
12782
12783 index = control & PCI_MSIX_FLAGS_QSIZE;
12784
12785 return index;
12786}
12787
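/* Estimate the maximal number of CoSes supported, based on the chip type */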
12788static int set_max_cos_est(int chip_id)
12789{
12790 switch (chip_id) {
12791 case BCM57710:
12792 case BCM57711:
12793 case BCM57711E:
12794 return BNX2X_MULTI_TX_COS_E1X;
12795 case BCM57712:
12796 case BCM57712_MF:
12797 return BNX2X_MULTI_TX_COS_E2_E3A0;
12798 case BCM57800:
12799 case BCM57800_MF:
12800 case BCM57810:
12801 case BCM57810_MF:
12802 case BCM57840_4_10:
12803 case BCM57840_2_20:
12804 case BCM57840_O:
12805 case BCM57840_MFO:
12806 case BCM57840_MF:
12807 case BCM57811:
12808 case BCM57811_MF:
12809 return BNX2X_MULTI_TX_COS_E3B0;
12810 case BCM57712_VF:
12811 case BCM57800_VF:
12812 case BCM57810_VF:
12813 case BCM57840_VF:
12814 case BCM57811_VF:
12815 return 1;
12816 default:
12817 pr_err("Unknown board_type (%d), aborting\n", chip_id);
12818 return -ENODEV;
12819 }
12820}
12821
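/* Return true when the board id denotes a virtual function */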
12822static int set_is_vf(int chip_id)
12823{
12824 switch (chip_id) {
12825 case BCM57712_VF:
12826 case BCM57800_VF:
12827 case BCM57810_VF:
12828 case BCM57840_VF:
12829 case BCM57811_VF:
12830 return true;
12831 default:
12832 return false;
12833 }
12834}
12835
static int bnx2x_init_one(struct pci_dev *pdev,
			  const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	enum pcie_link_width pcie_width;
	enum pci_bus_speed pcie_speed;
	int rc, max_non_def_sbs;
	int rx_count, tx_count, rss_count, doorbell_size;
	int max_cos_est;
	bool is_vf;
	int cnic_cnt;

	/* An estimated maximum supported CoS number according to the chip
	 * version. We will try to roughly estimate the maximum number of
	 * CoSes this chip may support in order to minimize the waste of
	 * unusable memory.
	 */
	max_cos_est = set_max_cos_est(ent->driver_data);
	if (max_cos_est < 0)
		return max_cos_est;
	is_vf = set_is_vf(ent->driver_data);
	cnic_cnt = is_vf ? 0 : 1;

	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);

	/* add another SB for VF as it has no default SB */
	max_non_def_sbs += is_vf ? 1 : 0;

	/* Maximum number of RSS queues: one SB is reserved for CNIC */
	rss_count = max_non_def_sbs - cnic_cnt;

	if (rss_count < 1)
		return -EINVAL;

	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
	rx_count = rss_count + cnic_cnt;

	/* Maximum number of netdev Tx queues:
	 * maximum supported RSS queues * max CoS + FCoE L2
	 */
	tx_count = rss_count * max_cos_est + cnic_cnt;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	bp->flags = 0;
	if (is_vf)
		bp->flags |= IS_VF_FLAG;

	bp->igu_sb_cnt = max_non_def_sbs;
	bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
	bp->msg_enable = debug;
	bp->cnic_support = cnic_cnt;
	bp->cnic_probe = bnx2x_cnic_probe;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	BNX2X_DEV_INFO("This is a %s function\n",
		       IS_PF(bp) ? "physical" : "virtual");
	BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
	BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
	BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
		       tx_count, rx_count);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * l2 connections.
	 */
	if (IS_VF(bp)) {
		bp->doorbells = bnx2x_vf_doorbells(bp);
		rc = bnx2x_vf_pci_alloc(bp);
		if (rc)
			goto init_one_exit;
	} else {
		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
		if (doorbell_size > pci_resource_len(pdev, 2)) {
			dev_err(&bp->pdev->dev,
				"Cannot map doorbells, bar size too small, aborting\n");
			rc = -ENOMEM;
			goto init_one_exit;
		}
		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
						doorbell_size);
	}
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto init_one_exit;
	}

	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
		if (rc)
			goto init_one_exit;
	}

	/* Enable SRIOV if capability found in configuration space */
	rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
	BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);

	/* disable FCOE L2 queue for E1x */
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

	/* Set bp->num_queues for MSI-X mode */
	bnx2x_set_num_queues(bp);

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed.
	 */
	rc = bnx2x_set_int_mode(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot set interrupts\n");
		goto init_one_exit;
	}
	BNX2X_DEV_INFO("set interrupts successfully\n");

	/* register the net device */
	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}
	BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);

	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
	    pcie_speed == PCI_SPEED_UNKNOWN ||
	    pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
		BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
	else
		BNX2X_DEV_INFO(
		       "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		       board_info[ent->driver_data].name,
		       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		       pcie_width,
		       pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
		       pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
		       pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
		       "Unknown",
		       dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	bnx2x_disable_pcie_error_reporting(bp);

	if (bp->regview)
		iounmap(bp->regview);

	if (IS_PF(bp) && bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);

	return rc;
}

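/* Common teardown for the remove and shutdown paths; @remove_netdev selects
 * whether the netdev is unregistered and its resources fully released.
 */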
static void __bnx2x_remove(struct pci_dev *pdev,
			   struct net_device *dev,
			   struct bnx2x *bp,
			   bool remove_netdev)
{
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	if (IS_PF(bp) &&
	    !BP_NOMCP(bp) &&
	    (bp->flags & BC_SUPPORTS_RMMOD_CMD))
		bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);

	/* Close the interface - either directly or implicitly */
	if (remove_netdev) {
		unregister_netdev(dev);
	} else {
		rtnl_lock();
		dev_close(dev);
		rtnl_unlock();
	}

	bnx2x_iov_remove_one(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	if (IS_PF(bp))
		bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	if (IS_PF(bp))
		bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->sp_rtnl_task);

	/* send message via vfpf channel to release the resources of this vf */
	if (IS_VF(bp))
		bnx2x_vfpf_release(bp);

	/* When the system is powering off, arm wake-on-LAN and drop to D3hot */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	bnx2x_disable_pcie_error_reporting(bp);
	if (remove_netdev) {
		if (bp->regview)
			iounmap(bp->regview);

		/* For vfs, doorbells are part of the regview and were unmapped
		 * along with it. FW is only loaded by PF.
		 */
		if (IS_PF(bp)) {
			if (bp->doorbells)
				iounmap(bp->doorbells);

			bnx2x_release_firmware(bp);
		}
		bnx2x_free_mem_bp(bp);

		free_netdev(dev);

		if (atomic_read(&pdev->enable_cnt) == 1)
			pci_release_regions(pdev);

		pci_disable_device(pdev);
	}
}

static void bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	__bnx2x_remove(pdev, dev, bp, true);
}

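/* Quiesce the device after an EEH error: stop Tx, NAPI, timers and
 * statistics, and save the last statistics snapshot.
 */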
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);

	bnx2x_del_all_napi(bp);
	if (CNIC_LOADED(bp))
		bnx2x_del_all_napi_cnic(bp);
	netdev_reset_tc(bp->dev);

	del_timer_sync(&bp->timer);
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);

	spin_lock_bh(&bp->stats_lock);
	bp->stats_state = STATS_STATE_DISABLED;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_save_statistics(bp);

	netif_carrier_off(bp->dev);

	return 0;
}

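/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */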
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	BNX2X_ERR("IO error detected\n");

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	bnx2x_prev_path_mark_eeh(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

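/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */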
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	rtnl_lock();
	BNX2X_ERR("IO slot reset initializing...\n");
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	if (netif_running(dev)) {
		BNX2X_ERR("IO slot reset --> driver unload\n");

		/* Re-read the shmem base; MCP state may have changed across
		 * the reset.
		 */
		bnx2x_init_shmem(bp);

		if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
			u32 v;

			v = SHMEM2_RD(bp,
				      drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
			SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
				  v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
		}
		bnx2x_drain_tx_queues(bp);
		bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
		bnx2x_netif_stop(bp, 1);
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp, true);

		bp->sp_state = 0;
		bp->port.pmf = 0;

		bnx2x_prev_unload(bp);

		/* The engine was reset; release the remaining queue objects
		 * and driver memory.
		 */
		bnx2x_squeeze_objects(bp);
		bnx2x_free_skbs(bp);
		for_each_rx_queue(bp, i)
			bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
		bnx2x_free_fp_mem(bp);
		bnx2x_free_mem(bp);

		bp->state = BNX2X_STATE_CLOSED;
	}

	rtnl_unlock();

	/* If AER, perform cleanup of the PCIe registers */
	if (bp->flags & AER_ENABLED) {
		if (pci_cleanup_aer_uncorrect_error_status(pdev))
			BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
		else
			DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
	}

	return PCI_ERS_RESULT_RECOVERED;
}

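/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */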
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK;

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};

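/* System shutdown/power-off hook: detach the netdev and quiesce the device
 * without unregistering it.
 */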
static void bnx2x_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev)
		return;

	bp = netdev_priv(dev);
	if (!bp)
		return;

	rtnl_lock();
	netif_device_detach(dev);
	rtnl_unlock();

	/* Don't remove the netdevice, as there are scenarios which will cause
	 * the kernel to hang, e.g., when trying to remove bnx2i while the
	 * rebind of cnic is in progress
	 */
	__bnx2x_remove(pdev, dev, bp, false);
}

static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = bnx2x_remove_one,
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
#ifdef CONFIG_BNX2X_SRIOV
	.sriov_configure = bnx2x_sriov_configure,
#endif
	.shutdown = bnx2x_shutdown,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	struct list_head *pos, *q;

	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);

	/* Free globally allocated resources */
	list_for_each_safe(pos, q, &bnx2x_prev_list) {
		struct bnx2x_prev_path_list *tmp =
			list_entry(pos, struct bnx2x_prev_path_list, list);
		list_del(pos);
		kfree(tmp);
	}
}

void bnx2x_notify_link_changed(struct bnx2x *bp)
{
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

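/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */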
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
{
	unsigned long ramrod_flags = 0;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
				 &bp->iscsi_l2_mac_obj, true,
				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
}

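/* Called with a returned SPQ credit of @count; posts as many pending CNIC
 * kwqes to the slow-path queue as the per-type credits allow.
 */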
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;
	int cxt_index, cxt_offset;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;
		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
				cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
					ILT_PAGE_CIDS;
				cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
					(cxt_index * ILT_PAGE_CIDS);
				bnx2x_set_ctx_validation(bp,
					&bp->context[cxt_index].
						vcxt[cxt_offset].eth,
					BNX2X_ISCSI_ETH_CID(bp));
			}
		}

		/* There may be not more than 8 L2, not more than 8 L5 SPEs
		 * in the air. We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

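/* Queue kwqes submitted by CNIC onto the driver's kwq ring and kick a post
 * when slow-path credit is available; returns the number of kwqes accepted.
 */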
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post to SP queue while panic\n");
		return -EIO;
	}
#endif

	if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
	    (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_mutex));
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

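/*
 * for commands that have no data
 */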
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
{
	struct cnic_ctl_info ctl = {0};

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;
	ctl.data.comp.error = err;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

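/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for the iSCSI ETH client.
 * Doesn't block: if an rx_mode ramrod is already pending, the request is
 * recorded in sp_state and re-issued on completion.
 */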
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
{
	unsigned long accept_flags = 0, ramrod_flags = 0;
	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;

	if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

		/* Clear STOP_SCHEDULED state */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);

		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
	} else
		/* Clear START_SCHEDULED state */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);

	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
		set_bit(sched_state, &bp->sp_state);
	else {
		__set_bit(RAMROD_RX, &ramrod_flags);
		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
				    ramrod_flags);
	}
}

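/* Control hook exercised by CNIC to drive L2-related operations in bnx2x */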
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
		unsigned long sp_bits = 0;

		/* Configure the iSCSI classification object */
		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
				   cp->iscsi_l2_client_id,
				   cp->iscsi_l2_cid, BP_FUNC(bp),
				   bnx2x_sp(bp, mac_rdata),
				   bnx2x_sp_mapping(bp, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
				   &bp->macs_pool);

		/* Set iSCSI MAC address */
		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
		if (rc)
			break;

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring */
		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, true);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		unsigned long sp_bits = 0;

		/* Stop accepting on iSCSI L2 ring */
		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, false);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
					BNX2X_ISCSI_ETH_MAC, true);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}
	case DRV_CTL_ULP_REGISTER_CMD: {
		int ulp_type = ctl->data.register_data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			int path = BP_PATH(bp);
			int port = BP_PORT(bp);
			int i;
			u32 scratch_offset;
			u32 *host_addr;

			/* first write capability to shmem2 */
			if (ulp_type == CNIC_ULP_ISCSI)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);

			if ((ulp_type != CNIC_ULP_FCOE) ||
			    (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
			    (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
				break;

			/* if reached here - should write fcoe capabilities */
			scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
			if (!scratch_offset)
				break;
			scratch_offset += offsetof(struct glob_ncsi_oem_data,
						   fcoe_features[path][port]);
			host_addr = (u32 *) &(ctl->data.register_data.
					      fcoe_features);
			for (i = 0; i < sizeof(struct fcoe_capabilities);
			     i += 4)
				REG_WR(bp, scratch_offset + i,
				       *(host_addr + i/4));
		}
		break;
	}

	case DRV_CTL_ULP_UNREGISTER_CMD: {
		int ulp_type = ctl->data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap;

			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			if (ulp_type == CNIC_ULP_ISCSI)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
		}
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

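/* Fill CNIC's IRQ and status block info according to the current interrupt
 * mode (MSI-X or not) and chip family.
 */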
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (!CHIP_IS_E1x(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
	   BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
	   cp->iscsi_l2_cid);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}

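/* Called by CNIC to register its ops: loads CNIC-related resources if needed
 * and allocates the kwq ring used to pass slow-path requests from CNIC.
 */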
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	int rc;

	DP(NETIF_MSG_IFUP, "Register_cnic called\n");

	if (ops == NULL) {
		BNX2X_ERR("NULL ops received\n");
		return -EINVAL;
	}

	if (!CNIC_SUPPORT(bp)) {
		BNX2X_ERR("Can't register CNIC when not supported\n");
		return -EOPNOTSUPP;
	}

	if (!CNIC_LOADED(bp)) {
		rc = bnx2x_load_cnic(bp);
		if (rc) {
			BNX2X_ERR("CNIC-related load failed\n");
			return rc;
		}
	}

	bp->cnic_enabled = true;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state |= CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	bp->cnic_enabled = false;
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

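/* Probe hook exported to CNIC through bp->cnic_probe; returns the
 * cnic_eth_dev describing this device, or NULL if CNIC must not attach.
 */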
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate CNIC that it should not try to work
	 * with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_client_id =
		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	BNX2X_DEV_INFO(
		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
		cp->ctx_blk_size,
		cp->ctx_tbl_offset,
		cp->ctx_tbl_len,
		cp->starting_cid);
	return cp;
}

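/* Return the BAR offset of the ustorm Rx producers for this fastpath, which
 * differs between VFs and the E1x/E2+ PF memory layouts.
 */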
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	u32 offset = BAR_USTRORM_INTMEM;

	if (IS_VF(bp))
		return bnx2x_vf_ustorm_prods_offset(bp, fp);
	else if (!CHIP_IS_E1x(bp))
		offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

	return offset;
}

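/**
 * bnx2x_pretend_func - configure the pretend register
 *
 * @bp:		driver handle
 * @pretend_func_val: function value to pretend
 *
 * Makes subsequent GRC accesses be performed on behalf of the given
 * function value. The dummy read flushes the write before returning.
 */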
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}
