/* bnx2x_main.c: Broadcom Everest network driver. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_VERSION \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
        __stringify(BCM_5710_FW_MINOR_VERSION) "." \
        __stringify(BCM_5710_FW_REVISION_VERSION) "." \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1  "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2  "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
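
/* With the macros above, the firmware file name passed to
 * request_firmware() has the form
 * "bnx2x/bnx2x-<chip>-<maj>.<min>.<rev>.<eng>.fw" - for example, a
 * hypothetical FW version 7.13.1.0 on an E2 chip would be requested
 * as "bnx2x/bnx2x-e2-7.13.1.0.fw".
 */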

#define TX_TIMEOUT (5*HZ)

static char version[] =
        "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
                   "BCM57710/57711/57711E/"
                   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
                   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
                 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                           "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

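/* Saved addresses/values of MAC-block registers (XMAC/EMAC/UMAC/BMAC),
 * recorded while probing for a previously loaded driver instance (e.g.
 * after a kdump kernel boots) so that the original values can
 * presumably be restored once that "previous unload" handling is done.
 */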
struct bnx2x_mac_vals {
        u32 xmac_addr;
        u32 xmac_val;
        u32 emac_addr;
        u32 emac_val;
        u32 umac_addr[2];
        u32 umac_val[2];
        u32 bmac_addr;
        u32 bmac_val[2];
};

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711,
        BCM57711E,
        BCM57712,
        BCM57712_MF,
        BCM57712_VF,
        BCM57800,
        BCM57800_MF,
        BCM57800_VF,
        BCM57810,
        BCM57810_MF,
        BCM57810_VF,
        BCM57840_4_10,
        BCM57840_2_20,
        BCM57840_MF,
        BCM57840_VF,
        BCM57811,
        BCM57811_MF,
        BCM57840_O,
        BCM57840_MFO,
        BCM57811_VF
};

static struct {
        char *name;
} board_info[] = {
        [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
        [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
        [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
        [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
        [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
        [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
        [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
        [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
        [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
        [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
        [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
        [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
        [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
        [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
        [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
        [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
        [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
        [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
        [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57811_VF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710      CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711      CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E     CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712      CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF   CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF   CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800      CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF   CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF   CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810      CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF   CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O    CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF   CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO  CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF   CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF   CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811      CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF   CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF   CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

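/* The storm_memset_*() helpers below write slowpath configuration into
 * the STORM coprocessors' internal memory; the target addresses come
 * from the firmware HSI *_OFFSET macros.
 */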
static void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
{
        REG_WR(bp, addr, U64_LO(mapping));
        REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
                                  dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = XSEM_REG_FAST_MEMORY +
                   XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                  u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                 u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
                                 struct event_ring_data *eq_data,
                                 u16 pfid)
{
        size_t size = sizeof(struct event_ring_data);
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

        __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
                                 u16 pfid)
{
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);

        REG_WR16(bp, addr, eq_prod);
}

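/* Indirect register access through the PCI config-space GRC window;
 * usable before BAR-based access (REG_RD/REG_WR) and DMAE are set up.
 * The window is parked back at PCICFG_VENDOR_ID_OFFSET afterwards so a
 * later config-space read does not hit a stale GRC address.
 */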
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

#define DMAE_DP_SRC_GRC  "grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI  "pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC  "grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI  "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
                          struct dmae_command *dmae, int msglvl)
{
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
        int i;

        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        case DMAE_CMD_DST_GRC:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        default:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
                           "comp_addr [%x:%08x] comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
                           "comp_addr [%x:%08x] comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        }

        for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
                DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
                   i, *(((u32 *)dmae) + i));
}

void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
        return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                         DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
        return opcode & ~DMAE_CMD_SRC_RESET;
}

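/* Compose a DMAE command opcode. Roughly: source and destination types
 * (PCI memory vs. GRC register space), source/destination reset bits,
 * the engine port, the VN for both source and destination, the
 * error-handling policy, the endianness swap mode and - when with_comp
 * is set - the completion destination added via
 * bnx2x_dmae_opcode_add_comp().
 */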
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                      bool with_comp, u8 comp_type)
{
        u32 opcode = 0;

        opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
                   (dst_type << DMAE_COMMAND_DST_SHIFT));

        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
        opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
                   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
        if (with_comp)
                opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
        return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
                               struct dmae_command *dmae,
                               u8 src_type, u8 dst_type)
{
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
                                         true, DMAE_COMP_PCI);

        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;
}

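/* Post a DMAE command and busy-wait for its completion word: serialized
 * by bp->dmae_lock; *comp must be DMA-coherent memory that the DMAE
 * engine writes DMAE_COMP_VAL into (or DMAE_PCI_ERR_FLAG on PCI error).
 */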
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               u32 *comp)
{
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;

        bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

        spin_lock_bh(&bp->dmae_lock);

        *comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);
        while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
                if (!cnt ||
                    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
                     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
                }
                cnt--;
                udelay(50);
        }
        if (*comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }

unlock:
        spin_unlock_bh(&bp->dmae_lock);

        return rc;
}

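/* Copy len32 dwords from host memory at dma_addr into GRC space at
 * dst_addr. Before DMAE is ready this falls back to indirect/string
 * register writes, which appears to rely on the caller having staged
 * the data in the wb_data scratch area.
 */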
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                if (CHIP_IS_E1(bp))
                        bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                else
                        bnx2x_init_str_wr(bp, dst_addr, data, len32);
                return;
        }

        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;

        rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                if (CHIP_IS_E1(bp))
                        for (i = 0; i < len32; i++)
                                data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                else
                        for (i = 0; i < len32; i++)
                                data[i] = REG_RD(bp, src_addr + i*4);

                return;
        }

        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;

        rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                                      u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

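/* Each storm keeps an assert list in its internal memory; the helpers
 * below locate the per-storm list and walk its entries, stopping at the
 * first entry whose opcode word reads COMMON_ASM_INVALID_ASSERT_OPCODE.
 */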
enum storms {
        XSTORM,
        TSTORM,
        CSTORM,
        USTORM,
        MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
                                              enum storms storm,
                                              int entry)
{
        switch (storm) {
        case XSTORM:
                return XSTORM_ASSERT_LIST_OFFSET(entry);
        case TSTORM:
                return TSTORM_ASSERT_LIST_OFFSET(entry);
        case CSTORM:
                return CSTORM_ASSERT_LIST_OFFSET(entry);
        case USTORM:
                return USTORM_ASSERT_LIST_OFFSET(entry);
        case MAX_STORMS:
        default:
                BNX2X_ERR("unknown storm\n");
        }
        return -EINVAL;
}

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, j, rc = 0;
        enum storms storm;
        u32 regs[REGS_IN_ENTRY];
        u32 bar_storm_intmem[STORMS_NUM] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };
        u32 storm_assert_list_index[STORMS_NUM] = {
                XSTORM_ASSERT_LIST_INDEX_OFFSET,
                TSTORM_ASSERT_LIST_INDEX_OFFSET,
                CSTORM_ASSERT_LIST_INDEX_OFFSET,
                USTORM_ASSERT_LIST_INDEX_OFFSET
        };
        char *storms_string[STORMS_NUM] = {
                "XSTORM",
                "TSTORM",
                "CSTORM",
                "USTORM"
        };

        for (storm = XSTORM; storm < MAX_STORMS; storm++) {
                last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
                                   storm_assert_list_index[storm]);
                if (last_idx)
                        BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
                                  storms_string[storm], last_idx);

                for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
                        for (j = 0; j < REGS_IN_ENTRY; j++)
                                regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
                                          bnx2x_get_assert_list_entry(bp,
                                                                      storm,
                                                                      i) +
                                          sizeof(u32) * j);

                        if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storms_string[storm], i, regs[3],
                                          regs[2], regs[1], regs[0]);
                                rc++;
                        } else {
                                break;
                        }
                }
        }

        BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
                  CHIP_IS_E1(bp) ? "everest1" :
                  CHIP_IS_E1H(bp) ? "everest1h" :
                  CHIP_IS_E2(bp) ? "everest2" : "everest3",
                  BCM_5710_FW_MAJOR_VERSION,
                  BCM_5710_FW_MINOR_VERSION,
                  BCM_5710_FW_REVISION_VERSION);

        return rc;
}

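/* The management (MCP) firmware keeps a cyclic trace buffer of
 * MCPR_TRACE_BUFFER_SIZE bytes just below its shmem area in scratchpad
 * RAM. The dump below validates MFW_TRACE_SIGNATURE, then prints the
 * trace in two chunks: from the mark to the end of the buffer, and from
 * the start of the buffer back up to the mark.
 */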
#define MCPR_TRACE_BUFFER_SIZE (0x800)
#define SCRATCH_BUFFER_SIZE(bp) \
        (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
        u32 addr, val;
        u32 mark, offset;
        __be32 data[9];
        int word;
        u32 trace_shmem_base;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }
        netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
                      (bp->common.bc_ver & 0xff0000) >> 16,
                      (bp->common.bc_ver & 0xff00) >> 8,
                      (bp->common.bc_ver & 0xff));

        val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
        if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
                BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

        if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
            trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
                                SCRATCH_BUFFER_SIZE(bp)) {
                BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
                          trace_shmem_base);
                return;
        }

        addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

        mark = REG_RD(bp, addr);
        if (mark != MFW_TRACE_SIGNATURE) {
                BNX2X_ERR("Trace buffer signature is missing.\n");
                return;
        }

        addr += 4;
        mark = REG_RD(bp, addr);
        mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
        if (mark >= trace_shmem_base || mark < addr + 4) {
                BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
                return;
        }
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

        printk("%s", lvl);

        for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }

        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        if (CHIP_IS_E1(bp)) {
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_IFDOWN,
           "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);

        DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

        mmiowb();

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
                BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
        else
                bnx2x_igu_int_disable(bp);
}

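/* Dump driver and chip state for post-mortem analysis: the slowpath
 * status block, per-queue Rx/Tx indices and status-block data, and -
 * when BNX2X_STOP_ON_ERROR is defined - the ring entries surrounding
 * the current consumer positions as well.
 */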
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
        int i;
        u16 j;
        struct hc_sp_status_block_data sp_sb_data;
        int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
        u16 start = 0, end = 0;
        u8 cos;
#endif

        if (IS_PF(bp) && disable_int)
                bnx2x_int_disable(bp);

        bp->stats_state = STATS_STATE_DISABLED;
        bp->eth_stats.unrecoverable_error++;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        if (IS_PF(bp)) {
                struct host_sp_status_block *def_sb = bp->def_status_blk;
                int data_size, cstorm_offset;

                BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
                          bp->def_idx, bp->def_att_idx, bp->attn_state,
                          bp->spq_prod_idx, bp->stats_counter);
                BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
                          def_sb->atten_status_block.attn_bits,
                          def_sb->atten_status_block.attn_bits_ack,
                          def_sb->atten_status_block.status_block_id,
                          def_sb->atten_status_block.attn_bits_index);
                BNX2X_ERR(" def (");
                for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
                        pr_cont("0x%x%s",
                                def_sb->sp_sb.index_values[i],
                                (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

                data_size = sizeof(struct hc_sp_status_block_data) /
                            sizeof(u32);
                cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
                for (i = 0; i < data_size; i++)
                        *((u32 *)&sp_sb_data + i) =
                                REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
                                           i * sizeof(u32));

                pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
                        sp_sb_data.igu_sb_id,
                        sp_sb_data.igu_seg_id,
                        sp_sb_data.p_func.pf_id,
                        sp_sb_data.p_func.vnic_id,
                        sp_sb_data.p_func.vf_id,
                        sp_sb_data.p_func.vf_valid,
                        sp_sb_data.state);
        }

        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
                struct hc_status_block_data_e1x sb_data_e1x;
                struct hc_status_block_sm *hc_sm_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.common.state_machine :
                        sb_data_e2.common.state_machine;
                struct hc_index_data *hc_index_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.index_data :
                        sb_data_e2.index_data;
                u8 data_size, cos;
                u32 *sb_data_p;
                struct bnx2x_fp_txdata txdata;

                if (!bp->fp)
                        break;

                if (!fp->rx_cons_sb)
                        continue;

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));

                for_each_cos_in_tx_queue(fp, cos) {
                        if (!fp->txdata_ptr[cos])
                                break;

                        txdata = *fp->txdata_ptr[cos];

                        if (!txdata.tx_cons_sb)
                                continue;

                        BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
                                  i, txdata.tx_pkt_prod,
                                  txdata.tx_pkt_cons, txdata.tx_bd_prod,
                                  txdata.tx_bd_cons,
                                  le16_to_cpu(*txdata.tx_cons_sb));
                }

                loop = CHIP_IS_E1x(bp) ?
                        HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

                if (IS_FCOE_FP(fp))
                        continue;

                BNX2X_ERR(" run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
                                fp->sb_running_index[j],
                                (j == HC_SB_MAX_SM - 1) ? ")" : " ");

                BNX2X_ERR(" indexes (");
                for (j = 0; j < loop; j++)
                        pr_cont("0x%x%s",
                                fp->sb_index_values[j],
                                (j == loop - 1) ? ")" : " ");

                if (IS_VF(bp))
                        continue;

                data_size = CHIP_IS_E1x(bp) ?
                        sizeof(struct hc_status_block_data_e1x) :
                        sizeof(struct hc_status_block_data_e2);
                data_size /= sizeof(u32);
                sb_data_p = CHIP_IS_E1x(bp) ?
                        (u32 *)&sb_data_e1x :
                        (u32 *)&sb_data_e2;

                for (j = 0; j < data_size; j++)
                        *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                                CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
                                j * sizeof(u32));

                if (!CHIP_IS_E1x(bp)) {
                        pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
                                sb_data_e2.common.p_func.vnic_id,
                                sb_data_e2.common.same_igu_sb_1b,
                                sb_data_e2.common.state);
                } else {
                        pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
                                sb_data_e1x.common.p_func.vnic_id,
                                sb_data_e1x.common.same_igu_sb_1b,
                                sb_data_e1x.common.state);
                }

                for (j = 0; j < HC_SB_MAX_SM; j++) {
                        pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
                                j, hc_sm_p[j].__flags,
                                hc_sm_p[j].igu_sb_id,
                                hc_sm_p[j].igu_seg_id,
                                hc_sm_p[j].time_to_expire,
                                hc_sm_p[j].timer_value);
                }

                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
                                hc_index_p[j].flags,
                                hc_index_p[j].timeout);
                }
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (IS_PF(bp)) {
                BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
                for (i = 0; i < NUM_EQ_DESC; i++) {
                        u32 *data = (u32 *)&bp->eq_ring[i].message.data;

                        BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
                                  i, bp->eq_ring[i].message.opcode,
                                  bp->eq_ring[i].message.error);
                        BNX2X_ERR("data: %x %x %x\n",
                                  data[0], data[1], data[2]);
                }
        }

        for_each_valid_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                if (!bp->fp)
                        break;

                if (!fp->rx_cons_sb)
                        continue;

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        for_each_valid_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                if (!bp->fp)
                        break;

                for_each_cos_in_tx_queue(fp, cos) {
                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

                        if (!fp->txdata_ptr[cos])
                                break;

                        if (!txdata->tx_cons_sb)
                                continue;

                        start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
                        end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                struct sw_tx_bd *sw_bd =
                                        &txdata->tx_buf_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
                                          i, cos, j, sw_bd->skb,
                                          sw_bd->first_bd);
                        }

                        start = TX_BD(txdata->tx_bd_cons - 10);
                        end = TX_BD(txdata->tx_bd_cons + 254);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
                                          i, cos, j, tx_bd[0], tx_bd[1],
                                          tx_bd[2], tx_bd[3]);
                        }
                }
        }
#endif
        if (IS_PF(bp)) {
                bnx2x_fw_dump(bp);
                bnx2x_mc_assert(bp);
        }
        BNX2X_ERR("end crash dump -----------------\n");
}

#define FLR_WAIT_USEC     10000
#define FLR_WAIT_INTERVAL 50
#define FLR_POLL_CNT      (FLR_WAIT_USEC/FLR_WAIT_INTERVAL)

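/* PBF (packet buffer) drain tracking used during FLR cleanup: for each
 * port queue we poll either credit/credit-freed registers or
 * occupancy/lines-freed registers until the hardware indicates the Tx
 * path has fully drained (or the poll count expires).
 */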
struct pbf_pN_buf_regs {
        int pN;
        u32 init_crd;
        u32 crd;
        u32 crd_freed;
};

struct pbf_pN_cmd_regs {
        int pN;
        u32 lines_occup;
        u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
                                     struct pbf_pN_buf_regs *regs,
                                     u32 poll_count)
{
        u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
        u32 cur_cnt = poll_count;

        crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
        crd = crd_start = REG_RD(bp, regs->crd);
        init_crd = REG_RD(bp, regs->init_crd);

        DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
        DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

        while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
               (init_crd - crd_start))) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        crd = REG_RD(bp, regs->crd);
                        crd_freed = REG_RD(bp, regs->crd_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
                           regs->pN, crd);
                        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
                           regs->pN, crd_freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
                                     struct pbf_pN_cmd_regs *regs,
                                     u32 poll_count)
{
        u32 occup, to_free, freed, freed_start;
        u32 cur_cnt = poll_count;

        occup = to_free = REG_RD(bp, regs->lines_occup);
        freed = freed_start = REG_RD(bp, regs->lines_freed);

        DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

        while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        occup = REG_RD(bp, regs->lines_occup);
                        freed = REG_RD(bp, regs->lines_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
                           regs->pN, occup);
                        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
                           regs->pN, freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
                                    u32 expected, u32 poll_count)
{
        u32 cur_cnt = poll_count;
        u32 val;

        while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
                udelay(FLR_WAIT_INTERVAL);

        return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
                                    char *msg, u32 poll_cnt)
{
        u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

        if (val != 0) {
                BNX2X_ERR("%s usage count=%d\n", msg, val);
                return 1;
        }
        return 0;
}

u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
        if (CHIP_REV_IS_EMUL(bp))
                return FLR_POLL_CNT * 2000;

        if (CHIP_REV_IS_FPGA(bp))
                return FLR_POLL_CNT * 120;

        return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
        struct pbf_pN_cmd_regs cmd_regs[] = {
                {0, (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_TQ_OCCUPANCY_Q0 :
                    PBF_REG_P0_TQ_OCCUPANCY,
                    (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_TQ_LINES_FREED_CNT_Q0 :
                    PBF_REG_P0_TQ_LINES_FREED_CNT},
                {1, (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_TQ_OCCUPANCY_Q1 :
                    PBF_REG_P1_TQ_OCCUPANCY,
                    (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_TQ_LINES_FREED_CNT_Q1 :
                    PBF_REG_P1_TQ_LINES_FREED_CNT},
                {4, (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_TQ_OCCUPANCY_LB_Q :
                    PBF_REG_P4_TQ_OCCUPANCY,
                    (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
                    PBF_REG_P4_TQ_LINES_FREED_CNT}
        };

        struct pbf_pN_buf_regs buf_regs[] = {
                {0, (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_INIT_CRD_Q0 :
                    PBF_REG_P0_INIT_CRD,
                    (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_CREDIT_Q0 :
                    PBF_REG_P0_CREDIT,
                    (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
                    PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
                {1, (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_INIT_CRD_Q1 :
                    PBF_REG_P1_INIT_CRD,
                    (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_CREDIT_Q1 :
                    PBF_REG_P1_CREDIT,
                    (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
                    PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
                {4, (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_INIT_CRD_LB_Q :
                    PBF_REG_P4_INIT_CRD,
                    (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_CREDIT_LB_Q :
                    PBF_REG_P4_CREDIT,
                    (CHIP_IS_E3B0(bp)) ?
                    PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
                    PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
        };

        int i;

        for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
                bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

        for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
                bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
        (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
        (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
        (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

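/* Request a firmware "final cleanup" for the given function through the
 * XSDM operation generator, then poll the CSTORM completion flag until
 * it reads 1. Returns non-zero (and panics) on timeout.
 */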
int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
        u32 op_gen_command = 0;
        u32 comp_addr = BAR_CSTRORM_INTMEM +
                        CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
        int ret = 0;

        if (REG_RD(bp, comp_addr)) {
                BNX2X_ERR("Cleanup complete was not 0 before sending\n");
                return 1;
        }

        op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
        op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
        op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
        op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

        DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
        REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

        if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
                BNX2X_ERR("FW final cleanup did not succeed\n");
                DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
                   (REG_RD(bp, comp_addr)));
                bnx2x_panic();
                return 1;
        }

        REG_WR(bp, comp_addr, 0);

        return ret;
}

u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
        u16 status;

        pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
        return status & PCI_EXP_DEVSTA_TRPND;
}

static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
        if (bnx2x_flr_clnup_poll_hw_counter(bp,
                                            CFC_REG_NUM_LCIDS_INSIDE_PF,
                                            "CFC PF usage counter timed out",
                                            poll_cnt))
                return 1;

        if (bnx2x_flr_clnup_poll_hw_counter(bp,
                                            DORQ_REG_PF_USAGE_CNT,
                                            "DQ PF usage counter timed out",
                                            poll_cnt))
                return 1;

        if (bnx2x_flr_clnup_poll_hw_counter(bp,
                                            QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
                                            "QM PF usage counter timed out",
                                            poll_cnt))
                return 1;

        if (bnx2x_flr_clnup_poll_hw_counter(bp,
                                            TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
                                            "Timers VNIC usage counter timed out",
                                            poll_cnt))
                return 1;
        if (bnx2x_flr_clnup_poll_hw_counter(bp,
                                            TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
                                            "Timers NUM_SCANS usage counter timed out",
                                            poll_cnt))
                return 1;

        if (bnx2x_flr_clnup_poll_hw_counter(bp,
                                            dmae_reg_go_c[INIT_DMAE_C(bp)],
                                            "DMAE command register timed out",
                                            poll_cnt))
                return 1;

        return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
        u32 val;

        val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
        DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

        val = REG_RD(bp, PBF_REG_DISABLE_PF);
        DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

        val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
        DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

        val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
        DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

        val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
        DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

        val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
        DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

        val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
        DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

        val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
        DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
           val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
        u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

        DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

        REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

        DP(BNX2X_MSG_SP, "Polling usage counters\n");
        if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
                return -EBUSY;

        if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
                return -EBUSY;

        bnx2x_tx_hw_flushed(bp, poll_cnt);

        msleep(100);

        if (bnx2x_is_pcie_pending(bp->pdev))
                BNX2X_ERR("PCIE Transactions still pending\n");

        bnx2x_hw_enable_status(bp);

        REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

        return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
        bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
        bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
                if (single_msix)
                        val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                if (!CHIP_IS_E1(bp)) {
                        DP(NETIF_MSG_IFUP,
                           "write %x to HC %d (addr 0x%x)\n", val, port, addr);

                        REG_WR(bp, addr, val);

                        val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
                }
        }

        if (CHIP_IS_E1(bp))
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

        DP(NETIF_MSG_IFUP,
           "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
           (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);

        mmiowb();
        barrier();

        if (!CHIP_IS_E1(bp)) {
                if (IS_MF(bp)) {
                        val = (0xee0f | (1 << (BP_VN(bp) + 4)));
                        if (bp->port.pmf)
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
        u32 val;
        bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
        bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
        bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

        val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        if (msix) {
                val &= ~(IGU_PF_CONF_INT_LINE_EN |
                         IGU_PF_CONF_SINGLE_ISR_EN);
                val |= (IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN);

                if (single_msix)
                        val |= IGU_PF_CONF_SINGLE_ISR_EN;
        } else if (msi) {
                val &= ~IGU_PF_CONF_INT_LINE_EN;
                val |= (IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        } else {
                val &= ~IGU_PF_CONF_MSI_MSIX_EN;
                val |= (IGU_PF_CONF_INT_LINE_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        }

        if ((!msix) || single_msix) {
                REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
                bnx2x_ack_int(bp);
        }

        val |= IGU_PF_CONF_FUNC_EN;

        DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
           val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

        if (val & IGU_PF_CONF_INT_LINE_EN)
                pci_intx(bp->pdev, true);

        barrier();

        if (IS_MF(bp)) {
                val = (0xee0f | (1 << (BP_VN(bp) + 4)));
                if (bp->port.pmf)
                        val |= 0x1100;
        } else
                val = 0xffff;

        REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
        REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

        mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_enable(bp);
        else
                bnx2x_igu_int_enable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        if (disable_hw)
                bnx2x_int_disable(bp);

        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                if (CNIC_SUPPORT(bp))
                        offset++;
                for_each_eth_queue(bp, i)
                        synchronize_irq(bp->msix_table[offset++].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        cancel_delayed_work(&bp->sp_task);
        cancel_delayed_work(&bp->period_task);
        flush_workqueue(bnx2x_wq);
}

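/* Try to take a HW lock (MISC driver-control resource) without waiting:
 * write the resource bit to the "set" register and read back the lock
 * status. Returns true if the bit was granted, false otherwise.
 */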
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
           "Trying to take a lock on resource %d\n", resource);

        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                        (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
           "Failed to get a lock on resource %d\n", resource);
        return false;
}

static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
        if (BP_PATH(bp))
                return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
        else
                return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
        return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);

static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
        atomic_set(&bp->interrupt_occurred, 1);

        smp_wmb();

        return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

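/* Handle a ramrod completion CQE received on a fastpath ring: translate
 * the FW command into the matching queue state-machine event, let the
 * queue object advance, and release the slowpath queue credit.
 */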
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
        struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

        DP(BNX2X_MSG_SP,
           "fp %d cid %d got ramrod #%d state is %x type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        if (cid >= BNX2X_FIRST_VF_CID &&
            cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
                bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

        switch (command) {
        case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
                DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
                drv_cmd = BNX2X_Q_CMD_UPDATE;
                break;

        case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
                DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
                drv_cmd = BNX2X_Q_CMD_SETUP;
                break;

        case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
                DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
                drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
                break;

        case (RAMROD_CMD_ID_ETH_HALT):
                DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
                drv_cmd = BNX2X_Q_CMD_HALT;
                break;

        case (RAMROD_CMD_ID_ETH_TERMINATE):
                DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
                drv_cmd = BNX2X_Q_CMD_TERMINATE;
                break;

        case (RAMROD_CMD_ID_ETH_EMPTY):
                DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
                drv_cmd = BNX2X_Q_CMD_EMPTY;
                break;

        case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
                DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
                drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
                          command, fp->index);
                return;
        }

        if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
            q_obj->complete_cmd(bp, q_obj, drv_cmd))
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#else
                return;
#endif

        smp_mb__before_atomic();
        atomic_inc(&bp->cq_spq_left);
        smp_mb__after_atomic();

        DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

        if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
            (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
                smp_mb__before_atomic();
                set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
                wmb();
                clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
                smp_mb__after_atomic();

                bnx2x_schedule_sp_task(bp);
        }

        return;
}

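/* Legacy INTx/MSI interrupt handler. The acked status word carries one
 * bit per fastpath (shifted up by one slot when CNIC owns the first
 * one) plus bit 0 for slowpath events, which are deferred to sp_task.
 */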
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct bnx2x *bp = netdev_priv(dev_instance);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;
        int i;
        u8 cos;

        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
                if (status & mask) {
                        for_each_cos_in_tx_queue(fp, cos)
                                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
                        prefetch(&fp->sb_running_index[SM_RX_ID]);
                        napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
                        status &= ~mask;
                }
        }

        if (CNIC_SUPPORT(bp)) {
                mask = 0x2;
                if (status & (mask | 0x1)) {
                        struct cnic_ops *c_ops = NULL;

                        rcu_read_lock();
                        c_ops = rcu_dereference(bp->cnic_ops);
                        if (c_ops && (bp->cnic_eth_dev.drv_state &
                                      CNIC_DRV_STATE_HANDLES_IRQ))
                                c_ops->cnic_handler(bp->cnic_data, NULL);
                        rcu_read_unlock();

                        status &= ~mask;
                }
        }

        if (unlikely(status & 0x1)) {
                bnx2x_schedule_sp_task(bp);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status))
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
                   status);

        return IRQ_HANDLED;
}

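/* Acquire a HW lock, retrying up to 1000 times with 5-10 ms sleeps
 * (i.e. for roughly 5-10 seconds overall). Returns 0 on success,
 * -EINVAL for a bad resource, -EEXIST if this function already holds
 * the lock, or -EAGAIN on timeout.
 */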
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
        int cnt;

        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                          resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                        (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit) {
                BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
                          lock_status, resource_bit);
                return -EEXIST;
        }

        for (cnt = 0; cnt < 1000; cnt++) {
                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
                lock_status = REG_RD(bp, hw_lock_control_reg);
                if (lock_status & resource_bit)
                        return 0;

                usleep_range(5000, 10000);
        }
        BNX2X_ERR("Timeout\n");
        return -EAGAIN;
}

int bnx2x_release_leader_lock(struct bnx2x *bp)
{
        return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                          resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                        (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (!(lock_status & resource_bit)) {
                BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
                          lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, hw_lock_control_reg, resource_bit);
        return 0;
}

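/* GPIO helpers. The pin-to-port mapping honours the NIG port-swap
 * strap: when both NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE read
 * non-zero, the effective port is inverted (XOR) before computing the
 * per-port GPIO shift.
 */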
2063int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2064{
	/* The GPIO should be swapped if swap register is set and active */
2066 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2067 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2068 int gpio_shift = gpio_num +
2069 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2070 u32 gpio_mask = (1 << gpio_shift);
2071 u32 gpio_reg;
2072 int value;
2073
2074 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2075 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2076 return -EINVAL;
2077 }
2078
	/* read GPIO value */
2080 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2081
	/* get the requested pin value */
2083 if ((gpio_reg & gpio_mask) == gpio_mask)
2084 value = 1;
2085 else
2086 value = 0;
2087
2088 return value;
2089}
2090
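/* Drive the requested GPIO pin low/high, or float it as an input */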
2091int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2092{
	/* The GPIO should be swapped if swap register is set and active */
2094 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2095 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2096 int gpio_shift = gpio_num +
2097 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2098 u32 gpio_mask = (1 << gpio_shift);
2099 u32 gpio_reg;
2100
2101 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2102 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2103 return -EINVAL;
2104 }
2105
2106 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
2108 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2109
2110 switch (mode) {
2111 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2112 DP(NETIF_MSG_LINK,
2113 "Set GPIO %d (shift %d) -> output low\n",
2114 gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
2116 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2117 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2118 break;
2119
2120 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2121 DP(NETIF_MSG_LINK,
2122 "Set GPIO %d (shift %d) -> output high\n",
2123 gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
2125 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2126 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2127 break;
2128
2129 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2130 DP(NETIF_MSG_LINK,
2131 "Set GPIO %d (shift %d) -> input\n",
2132 gpio_num, gpio_shift);
		/* set FLOAT */
2134 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2135 break;
2136
2137 default:
2138 break;
2139 }
2140
2141 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2142 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2143
2144 return 0;
2145}
2146
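/* Apply one mode to a whole mask of GPIO pins in a single register update */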
2147int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2148{
2149 u32 gpio_reg = 0;
2150 int rc = 0;
2151
2152
	/* Any port swapping should be handled by the caller */
2154 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and clear the requested pins' FLOAT/CLR/SET bits */
2156 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2157 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2158 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2159 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2160
2161 switch (mode) {
2162 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2163 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2164
2165 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2166 break;
2167
2168 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2169 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2170
2171 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2172 break;
2173
2174 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2175 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2176
2177 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2178 break;
2179
2180 default:
2181 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2182 rc = -EINVAL;
2183 break;
2184 }
2185
2186 if (rc == 0)
2187 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2188
2189 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2190
2191 return rc;
2192}
2193
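/* Set or clear the interrupt state of the requested GPIO pin */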
2194int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2195{
	/* The GPIO should be swapped if swap register is set and active */
2197 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2198 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2199 int gpio_shift = gpio_num +
2200 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2201 u32 gpio_mask = (1 << gpio_shift);
2202 u32 gpio_reg;
2203
2204 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2205 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2206 return -EINVAL;
2207 }
2208
2209 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
2211 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2212
2213 switch (mode) {
2214 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2215 DP(NETIF_MSG_LINK,
2216 "Clear GPIO INT %d (shift %d) -> output low\n",
2217 gpio_num, gpio_shift);
		/* clear SET and set CLR */
2219 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2220 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2221 break;
2222
2223 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2224 DP(NETIF_MSG_LINK,
2225 "Set GPIO INT %d (shift %d) -> output high\n",
2226 gpio_num, gpio_shift);
		/* clear CLR and set SET */
2228 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2229 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2230 break;
2231
2232 default:
2233 break;
2234 }
2235
2236 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2237 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2238
2239 return 0;
2240}
2241
2242static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2243{
2244 u32 spio_reg;
2245
	/* Only 2 SPIOs are configurable */
2247 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2248 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2249 return -EINVAL;
2250 }
2251
2252 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
2254 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2255
2256 switch (mode) {
2257 case MISC_SPIO_OUTPUT_LOW:
2258 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
		/* clear FLOAT and set CLR */
2260 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2261 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2262 break;
2263
2264 case MISC_SPIO_OUTPUT_HIGH:
2265 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
		/* clear FLOAT and set SET */
2267 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2268 spio_reg |= (spio << MISC_SPIO_SET_POS);
2269 break;
2270
2271 case MISC_SPIO_INPUT_HI_Z:
2272 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
		/* set FLOAT */
2274 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2275 break;
2276
2277 default:
2278 break;
2279 }
2280
2281 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2282 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2283
2284 return 0;
2285}
2286
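/* Fold the negotiated IEEE pause resolution into the ethtool advertising
 * bits for the active link configuration.
 */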
2287void bnx2x_calc_fc_adv(struct bnx2x *bp)
2288{
2289 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2290
2291 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2292 ADVERTISED_Pause);
2293 switch (bp->link_vars.ieee_fc &
2294 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2295 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2296 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2297 ADVERTISED_Pause);
2298 break;
2299
2300 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2301 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2302 break;
2303
2304 default:
2305 break;
2306 }
2307}
2308
2309static void bnx2x_set_requested_fc(struct bnx2x *bp)
2310{
	/* Initialize link parameters structure variables.
	 * It is recommended to turn off RX flow control for jumbo frames
	 * for better performance.
	 */
2315 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2316 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2317 else
2318 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2319}
2320
2321static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2322{
2323 u32 pause_enabled = 0;
2324
2325 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2326 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2327 pause_enabled = 1;
2328
2329 REG_WR(bp, BAR_USTRORM_INTMEM +
2330 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2331 pause_enabled);
2332 }
2333
2334 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2335 pause_enabled ? "enabled" : "disabled");
2336}
2337
2338int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2339{
2340 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2341 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2342
2343 if (!BP_NOMCP(bp)) {
2344 bnx2x_set_requested_fc(bp);
2345 bnx2x_acquire_phy_lock(bp);
2346
2347 if (load_mode == LOAD_DIAG) {
2348 struct link_params *lp = &bp->link_params;
2349 lp->loopback_mode = LOOPBACK_XGXS;
			/* Prefer doing PHY loopback at the highest speed */
2351 if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2352 if (lp->speed_cap_mask[cfx_idx] &
2353 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2354 lp->req_line_speed[cfx_idx] =
2355 SPEED_20000;
2356 else if (lp->speed_cap_mask[cfx_idx] &
2357 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2358 lp->req_line_speed[cfx_idx] =
2359 SPEED_10000;
2360 else
2361 lp->req_line_speed[cfx_idx] =
2362 SPEED_1000;
2363 }
2364 }
2365
2366 if (load_mode == LOAD_LOOPBACK_EXT) {
2367 struct link_params *lp = &bp->link_params;
2368 lp->loopback_mode = LOOPBACK_EXT;
2369 }
2370
2371 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2372
2373 bnx2x_release_phy_lock(bp);
2374
2375 bnx2x_init_dropless_fc(bp);
2376
2377 bnx2x_calc_fc_adv(bp);
2378
2379 if (bp->link_vars.link_up) {
2380 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2381 bnx2x_link_report(bp);
2382 }
2383 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2384 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2385 return rc;
2386 }
2387 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2388 return -EINVAL;
2389}
2390
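/* (Re)initialize the PHY according to the current link parameters */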
2391void bnx2x_link_set(struct bnx2x *bp)
2392{
2393 if (!BP_NOMCP(bp)) {
2394 bnx2x_acquire_phy_lock(bp);
2395 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2396 bnx2x_release_phy_lock(bp);
2397
2398 bnx2x_init_dropless_fc(bp);
2399
2400 bnx2x_calc_fc_adv(bp);
2401 } else
2402 BNX2X_ERR("Bootcode is missing - can not set link\n");
2403}
2404
2405static void bnx2x__link_reset(struct bnx2x *bp)
2406{
2407 if (!BP_NOMCP(bp)) {
2408 bnx2x_acquire_phy_lock(bp);
2409 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2410 bnx2x_release_phy_lock(bp);
2411 } else
2412 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2413}
2414
2415void bnx2x_force_link_reset(struct bnx2x *bp)
2416{
2417 bnx2x_acquire_phy_lock(bp);
2418 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2419 bnx2x_release_phy_lock(bp);
2420}
2421
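/* Test the link state (serdes or XGXS) under the PHY lock; returns the
 * bnx2x_test_link() result, or 0 when the bootcode is missing.
 */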
2422u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2423{
2424 u8 rc = 0;
2425
2426 if (!BP_NOMCP(bp)) {
2427 bnx2x_acquire_phy_lock(bp);
2428 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2429 is_serdes);
2430 bnx2x_release_phy_lock(bp);
2431 } else
2432 BNX2X_ERR("Bootcode is missing - can not test link\n");
2433
2434 return rc;
2435}
2436
/* Calculate the per-vnic guaranteed (MIN) bandwidth configuration.
 * Hidden vnics get 0; a zero MIN is bumped to DEF_MIN_RATE so the
 * fairness algorithm has a non-zero weight; fairness is disabled when
 * ETS is enabled or when all configured MIN values are zero.
 */
2446static void bnx2x_calc_vn_min(struct bnx2x *bp,
2447 struct cmng_init_input *input)
2448{
2449 int all_zero = 1;
2450 int vn;
2451
2452 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2453 u32 vn_cfg = bp->mf_config[vn];
2454 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2455 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2456
		/* Skip hidden vns */
2458 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2459 vn_min_rate = 0;
		/* If min rate is zero - set it to 1 */
2461 else if (!vn_min_rate)
2462 vn_min_rate = DEF_MIN_RATE;
2463 else
2464 all_zero = 0;
2465
2466 input->vnic_min_rate[vn] = vn_min_rate;
2467 }
2468
	/* disable fairness when ETS is enabled or all MIN values are zero */
2470 if (BNX2X_IS_ETS_ENABLED(bp)) {
2471 input->flags.cmng_enables &=
2472 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2473 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2474 } else if (all_zero) {
2475 input->flags.cmng_enables &=
2476 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2477 DP(NETIF_MSG_IFUP,
2478 "All MIN values are zeroes fairness will be disabled\n");
2479 } else
2480 input->flags.cmng_enables |=
2481 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2482}
2483
2484static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2485 struct cmng_init_input *input)
2486{
2487 u16 vn_max_rate;
2488 u32 vn_cfg = bp->mf_config[vn];
2489
2490 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2491 vn_max_rate = 0;
2492 else {
2493 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2494
2495 if (IS_MF_SI(bp)) {
			/* maxCfg in percents of linkspeed */
2497 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2498 } else
			/* SD mode: maxCfg is absolute in 100Mb units */
2500 vn_max_rate = maxCfg * 100;
2501 }
2502
2503 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2504
2505 input->vnic_max_rate[vn] = vn_max_rate;
2506}
2507
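/* min-max congestion management is used only in multi-function mode and
 * never on slow chip revisions.
 */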
2508static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2509{
2510 if (CHIP_REV_IS_SLOW(bp))
2511 return CMNG_FNS_NONE;
2512 if (IS_MF(bp))
2513 return CMNG_FNS_MINMAX;
2514
2515 return CMNG_FNS_NONE;
2516}
2517
2518void bnx2x_read_mf_cfg(struct bnx2x *bp)
2519{
2520 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2521
2522 if (BP_NOMCP(bp))
2523 return;
2524
	/* The absolute function number is derived from the vn:
	 * there are (n * 2) functions per port on a path (n == 2 in
	 * 4-port mode, 1 otherwise), so func = n * (2 * vn + port) + path.
	 */
2536 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2537 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2538
2539 if (func >= E1H_FUNC_MAX)
2540 break;
2541
2542 bp->mf_config[vn] =
2543 MF_CFG_RD(bp, func_mf_config[func].config);
2544 }
2545 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2546 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2547 bp->flags |= MF_FUNC_DIS;
2548 } else {
2549 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2550 bp->flags &= ~MF_FUNC_DIS;
2551 }
2552}
2553
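/* Recalculate the congestion management input (optionally re-reading the
 * mf config from shmem) and program bp->cmng for the min-max mode.
 */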
2554static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2555{
2556 struct cmng_init_input input;
2557 memset(&input, 0, sizeof(struct cmng_init_input));
2558
2559 input.port_rate = bp->link_vars.line_speed;
2560
2561 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2562 int vn;
2563
		/* read mf conf from shmem */
2565 if (read_cfg)
2566 bnx2x_read_mf_cfg(bp);
2567
		/* calculate vn min rates and decide whether fairness is enabled */
2569 bnx2x_calc_vn_min(bp, &input);
2570
		/* calculate and set min-max rate for each vn */
2572 if (bp->port.pmf)
2573 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2574 bnx2x_calc_vn_max(bp, vn, &input);
2575
		/* always enable rate shaping and fairness */
2577 input.flags.cmng_enables |=
2578 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2579
2580 bnx2x_init_cmng(&input, &bp->cmng);
2581 return;
2582 }
2583
	/* rate shaping and fairness are disabled */
2585 DP(NETIF_MSG_IFUP,
2586 "rate shaping and fairness are disabled\n");
2587}
2588
2589static void storm_memset_cmng(struct bnx2x *bp,
2590 struct cmng_init *cmng,
2591 u8 port)
2592{
2593 int vn;
2594 size_t size = sizeof(struct cmng_struct_per_port);
2595
2596 u32 addr = BAR_XSTRORM_INTMEM +
2597 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2598
2599 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2600
2601 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2602 int func = func_by_vn(bp, vn);
2603
2604 addr = BAR_XSTRORM_INTMEM +
2605 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2606 size = sizeof(struct rate_shaping_vars_per_vn);
2607 __storm_memset_struct(bp, addr, size,
2608 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2609
2610 addr = BAR_XSTRORM_INTMEM +
2611 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2612 size = sizeof(struct fairness_vars_per_vn);
2613 __storm_memset_struct(bp, addr, size,
2614 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2615 }
2616}
2617
/* init cmng mode in HW according to local configuration */
2619void bnx2x_set_local_cmng(struct bnx2x *bp)
2620{
2621 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2622
2623 if (cmng_fns != CMNG_FNS_NONE) {
2624 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2625 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2626 } else {
		/* rate shaping and fairness are disabled */
2628 DP(NETIF_MSG_IFUP,
2629 "single function mode without fairness\n");
2630 }
2631}
2632
/* This function is called upon link interrupt */
2634static void bnx2x_link_attn(struct bnx2x *bp)
2635{
	/* Make sure that we are synced with the current statistics */
2637 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2638
2639 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2640
2641 bnx2x_init_dropless_fc(bp);
2642
2643 if (bp->link_vars.link_up) {
2644
2645 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2646 struct host_port_stats *pstats;
2647
2648 pstats = bnx2x_sp(bp, port_stats);
			/* reset old mac stats */
2650 memset(&(pstats->mac_stx[0]), 0,
2651 sizeof(struct mac_stx));
2652 }
2653 if (bp->state == BNX2X_STATE_OPEN)
2654 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2655 }
2656
2657 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2658 bnx2x_set_local_cmng(bp);
2659
2660 __bnx2x_link_report(bp);
2661
2662 if (IS_MF(bp))
2663 bnx2x_link_sync_notify(bp);
2664}
2665
2666void bnx2x__link_status_update(struct bnx2x *bp)
2667{
2668 if (bp->state != BNX2X_STATE_OPEN)
2669 return;
2670
	/* read updated dcb configuration */
2672 if (IS_PF(bp)) {
2673 bnx2x_dcbx_pmf_update(bp);
2674 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2675 if (bp->link_vars.link_up)
2676 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2677 else
2678 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2679
2680 bnx2x_link_report(bp);
2681
2682 } else {
2683 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2684 SUPPORTED_10baseT_Full |
2685 SUPPORTED_100baseT_Half |
2686 SUPPORTED_100baseT_Full |
2687 SUPPORTED_1000baseT_Full |
2688 SUPPORTED_2500baseX_Full |
2689 SUPPORTED_10000baseT_Full |
2690 SUPPORTED_TP |
2691 SUPPORTED_FIBRE |
2692 SUPPORTED_Autoneg |
2693 SUPPORTED_Pause |
2694 SUPPORTED_Asym_Pause);
2695 bp->port.advertising[0] = bp->port.supported[0];
2696
2697 bp->link_params.bp = bp;
2698 bp->link_params.port = BP_PORT(bp);
2699 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2700 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2701 bp->link_params.req_line_speed[0] = SPEED_10000;
2702 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2703 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2704 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2705 bp->link_vars.line_speed = SPEED_10000;
2706 bp->link_vars.link_status =
2707 (LINK_STATUS_LINK_UP |
2708 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2709 bp->link_vars.link_up = 1;
2710 bp->link_vars.duplex = DUPLEX_FULL;
2711 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2712 __bnx2x_link_report(bp);
2713
2714 bnx2x_sample_bulletin(bp);
2715
		/* if the bulletin board did not have an update for link status
		 * __bnx2x_link_report will report the current status,
		 * but it will NOT duplicate the report in case it was already
		 * reported during bulletin board sampling.
		 */
2721 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2722 }
2723}
2724
2725static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2726 u16 vlan_val, u8 allowed_prio)
2727{
2728 struct bnx2x_func_state_params func_params = {NULL};
2729 struct bnx2x_func_afex_update_params *f_update_params =
2730 &func_params.params.afex_update;
2731
2732 func_params.f_obj = &bp->func_obj;
2733 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2734
	/* no need to wait for RAMROD completion, so don't
	 * set RAMROD_COMP_WAIT flag
	 */
2739 f_update_params->vif_id = vifid;
2740 f_update_params->afex_default_vlan = vlan_val;
2741 f_update_params->allowed_priorities = allowed_prio;
2742
	/* if the ramrod can not be sent, respond to MCP immediately */
2744 if (bnx2x_func_state_change(bp, &func_params) < 0)
2745 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2746
2747 return 0;
2748}
2749
2750static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2751 u16 vif_index, u8 func_bit_map)
2752{
2753 struct bnx2x_func_state_params func_params = {NULL};
2754 struct bnx2x_func_afex_viflists_params *update_params =
2755 &func_params.params.afex_viflists;
2756 int rc;
2757 u32 drv_msg_code;
2758
	/* validate only LIST_SET and LIST_GET are received from switch */
2760 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2761 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2762 cmd_type);
2763
2764 func_params.f_obj = &bp->func_obj;
2765 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2766
	/* set parameters according to cmd_type */
2768 update_params->afex_vif_list_command = cmd_type;
2769 update_params->vif_list_index = vif_index;
2770 update_params->func_bit_map =
2771 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2772 update_params->func_to_clear = 0;
2773 drv_msg_code =
2774 (cmd_type == VIF_LIST_RULE_GET) ?
2775 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2776 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2777
	/* if the ramrod can not be sent, respond to MCP immediately for
	 * SET and GET requests (others are not triggered from MCP)
	 */
2781 rc = bnx2x_func_state_change(bp, &func_params);
2782 if (rc < 0)
2783 bnx2x_fw_command(bp, drv_msg_code, 0);
2784
2785 return 0;
2786}
2787
2788static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2789{
2790 struct afex_stats afex_stats;
2791 u32 func = BP_ABS_FUNC(bp);
2792 u32 mf_config;
2793 u16 vlan_val;
2794 u32 vlan_prio;
2795 u16 vif_id;
2796 u8 allowed_prio;
2797 u8 vlan_mode;
2798 u32 addr_to_write, vifid, addrs, stats_type, i;
2799
2800 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2801 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2802 DP(BNX2X_MSG_MCP,
2803 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2804 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2805 }
2806
2807 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2808 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2809 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2810 DP(BNX2X_MSG_MCP,
2811 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2812 vifid, addrs);
2813 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2814 addrs);
2815 }
2816
2817 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2818 addr_to_write = SHMEM2_RD(bp,
2819 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2820 stats_type = SHMEM2_RD(bp,
2821 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2822
2823 DP(BNX2X_MSG_MCP,
2824 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2825 addr_to_write);
2826
2827 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2828
		/* write response to scratchpad, for MCP */
2830 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2831 REG_WR(bp, addr_to_write + i*sizeof(u32),
2832 *(((u32 *)(&afex_stats))+i));
2833
		/* send ack message to MCP */
2835 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2836 }
2837
2838 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2839 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2840 bp->mf_config[BP_VN(bp)] = mf_config;
2841 DP(BNX2X_MSG_MCP,
2842 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2843 mf_config);
2844
		/* if VIF_SET is "enabled" */
2846 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
			/* set rate limit directly to internal RAM */
2848 struct cmng_init_input cmng_input;
2849 struct rate_shaping_vars_per_vn m_rs_vn;
2850 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2851 u32 addr = BAR_XSTRORM_INTMEM +
2852 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2853
2854 bp->mf_config[BP_VN(bp)] = mf_config;
2855
2856 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2857 m_rs_vn.vn_counter.rate =
2858 cmng_input.vnic_max_rate[BP_VN(bp)];
2859 m_rs_vn.vn_counter.quota =
2860 (m_rs_vn.vn_counter.rate *
2861 RS_PERIODIC_TIMEOUT_USEC) / 8;
2862
2863 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2864
			/* read relevant values from mf_cfg struct in shmem */
2866 vif_id =
2867 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2868 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2869 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2870 vlan_val =
2871 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2872 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2873 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2874 vlan_prio = (mf_config &
2875 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2876 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2877 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2878 vlan_mode =
2879 (MF_CFG_RD(bp,
2880 func_mf_config[func].afex_config) &
2881 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2882 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2883 allowed_prio =
2884 (MF_CFG_RD(bp,
2885 func_mf_config[func].afex_config) &
2886 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2887 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2888
			/* send ramrod to FW, return in case of failure */
2890 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2891 allowed_prio))
2892 return;
2893
2894 bp->afex_def_vlan_tag = vlan_val;
2895 bp->afex_vlan_mode = vlan_mode;
2896 } else {
			/* notify link down because the function is disabled */
2898 bnx2x_link_report(bp);
2899
			/* send INVALID VIF ramrod to FW */
2901 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2902
			/* Reset the default afex VLAN */
2904 bp->afex_def_vlan_tag = -1;
2905 }
2906 }
2907}
2908
2909static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2910{
2911 struct bnx2x_func_switch_update_params *switch_update_params;
2912 struct bnx2x_func_state_params func_params;
2913
2914 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2915 switch_update_params = &func_params.params.switch_update;
2916 func_params.f_obj = &bp->func_obj;
2917 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2918
2919 if (IS_MF_UFP(bp)) {
2920 int func = BP_ABS_FUNC(bp);
2921 u32 val;
2922
		/* Re-learn the S-tag from shmem */
2924 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2925 FUNC_MF_CFG_E1HOV_TAG_MASK;
2926 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2927 bp->mf_ov = val;
2928 } else {
2929 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2930 goto fail;
2931 }
2932
		/* Configure new S-tag in LLH */
2934 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2935 bp->mf_ov);
2936
		/* Send Ramrod to update FW of change */
2938 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2939 &switch_update_params->changes);
2940 switch_update_params->vlan = bp->mf_ov;
2941
2942 if (bnx2x_func_state_change(bp, &func_params) < 0) {
			BNX2X_ERR("Failed to notify FW of S-tag change to %02x\n",
2944 bp->mf_ov);
2945 goto fail;
2946 }
2947
2948 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
2949
2950 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2951
2952 return;
2953 }
2954
	/* not supported by SW yet */
2956fail:
2957 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2958}
2959
2960static void bnx2x_pmf_update(struct bnx2x *bp)
2961{
2962 int port = BP_PORT(bp);
2963 u32 val;
2964
2965 bp->port.pmf = 1;
2966 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2967
	/* We need the mb() to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from bnx2x_periodic_task().
	 */
2972 smp_mb();
2973
	/* queue a periodic task */
2975 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2976
2977 bnx2x_dcbx_pmf_update(bp);
2978
	/* enable nig attention */
2980 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2981 if (bp->common.int_block == INT_BLOCK_HC) {
2982 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2983 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2984 } else if (!CHIP_IS_E1x(bp)) {
2985 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2986 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2987 }
2988
2989 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2990}
2991
/* end of Link */

/* slow path */

/* General service functions */

/* send the MCP a request, block until there is a reply */
3001u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3002{
3003 int mb_idx = BP_FW_MB_IDX(bp);
3004 u32 seq;
3005 u32 rc = 0;
3006 u32 cnt = 1;
3007 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3008
3009 mutex_lock(&bp->fw_mb_mutex);
3010 seq = ++bp->fw_seq;
3011 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3012 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3013
3014 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3015 (command | seq), param);
3016
3017 do {
		/* let the FW do its magic ... */
3019 msleep(delay);
3020
3021 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3022
		/* Give the FW up to 5 seconds (500*10ms) */
3024 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3025
3026 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3027 cnt*delay, rc, seq);
3028
	/* is this a reply to our command? */
3030 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3031 rc &= FW_MSG_CODE_MASK;
3032 else {
		/* FW BUG! */
3034 BNX2X_ERR("FW failed to respond!\n");
3035 bnx2x_fw_dump(bp);
3036 rc = 0;
3037 }
3038 mutex_unlock(&bp->fw_mb_mutex);
3039
3040 return rc;
3041}
3042
3043static void storm_memset_func_cfg(struct bnx2x *bp,
3044 struct tstorm_eth_function_common_config *tcfg,
3045 u16 abs_fid)
3046{
3047 size_t size = sizeof(struct tstorm_eth_function_common_config);
3048
3049 u32 addr = BAR_TSTRORM_INTMEM +
3050 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3051
3052 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3053}
3054
3055void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3056{
3057 if (CHIP_IS_E1x(bp)) {
3058 struct tstorm_eth_function_common_config tcfg = {0};
3059
3060 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3061 }
3062
	/* Enable the function in the FW */
3064 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3065 storm_memset_func_en(bp, p->func_id, 1);
3066
	/* spq */
3068 if (p->func_flgs & FUNC_FLG_SPQ) {
3069 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3070 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3071 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3072 }
3073}
3074
/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */
3084static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3085 struct bnx2x_fastpath *fp,
3086 bool zero_stats)
3087{
3088 unsigned long flags = 0;
3089
	/* PF driver will always initialize the Queue to an ACTIVE state */
3091 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3092
	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
	 */
3098 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3099 if (zero_stats)
3100 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3101
3102 if (bp->flags & TX_SWITCHING)
3103 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3104
3105 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3106 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3107
3108#ifdef BNX2X_STOP_ON_ERROR
3109 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3110#endif
3111
3112 return flags;
3113}
3114
3115static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3116 struct bnx2x_fastpath *fp,
3117 bool leading)
3118{
3119 unsigned long flags = 0;
3120
	/* calculate other queue flags */
3122 if (IS_MF_SD(bp))
3123 __set_bit(BNX2X_Q_FLG_OV, &flags);
3124
3125 if (IS_FCOE_FP(fp)) {
3126 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
		/* For FCoE - force usage of default priority (for afex) */
3128 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3129 }
3130
3131 if (fp->mode != TPA_MODE_DISABLED) {
3132 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3133 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3134 if (fp->mode == TPA_MODE_GRO)
3135 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3136 }
3137
3138 if (leading) {
3139 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3140 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3141 }
3142
	/* Always set HW VLAN stripping */
3144 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3145
	/* configure silent vlan removal */
3147 if (IS_MF_AFEX(bp))
3148 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3149
3150 return flags | bnx2x_get_common_flags(bp, fp, true);
3151}
3152
3153static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3154 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3155 u8 cos)
3156{
3157 gen_init->stat_id = bnx2x_stats_id(fp);
3158 gen_init->spcl_id = fp->cl_id;
3159
	/* Always use mini-jumbo MTU for the FCoE L2 ring */
3161 if (IS_FCOE_FP(fp))
3162 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3163 else
3164 gen_init->mtu = bp->dev->mtu;
3165
3166 gen_init->cos = cos;
3167
3168 gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3169}
3170
3171static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3172 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3173 struct bnx2x_rxq_setup_params *rxq_init)
3174{
3175 u8 max_sge = 0;
3176 u16 sge_sz = 0;
3177 u16 tpa_agg_size = 0;
3178
3179 if (fp->mode != TPA_MODE_DISABLED) {
3180 pause->sge_th_lo = SGE_TH_LO(bp);
3181 pause->sge_th_hi = SGE_TH_HI(bp);
3182
		/* validate SGE ring has enough to cross high threshold */
3184 WARN_ON(bp->dropless_fc &&
3185 pause->sge_th_hi + FW_PREFETCH_CNT >
3186 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3187
3188 tpa_agg_size = TPA_AGG_SIZE;
3189 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3190 SGE_PAGE_SHIFT;
3191 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3192 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3193 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3194 }
3195
	/* pause - not for e1 */
3197 if (!CHIP_IS_E1(bp)) {
3198 pause->bd_th_lo = BD_TH_LO(bp);
3199 pause->bd_th_hi = BD_TH_HI(bp);
3200
3201 pause->rcq_th_lo = RCQ_TH_LO(bp);
3202 pause->rcq_th_hi = RCQ_TH_HI(bp);
3203
		/* validate that the rings have enough entries to cross
		 * the high pause thresholds
		 */
3207 WARN_ON(bp->dropless_fc &&
3208 pause->bd_th_hi + FW_PREFETCH_CNT >
3209 bp->rx_ring_size);
3210 WARN_ON(bp->dropless_fc &&
3211 pause->rcq_th_hi + FW_PREFETCH_CNT >
3212 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3213
3214 pause->pri_map = 1;
3215 }
3216
	/* rxq setup */
3218 rxq_init->dscr_map = fp->rx_desc_mapping;
3219 rxq_init->sge_map = fp->rx_sge_mapping;
3220 rxq_init->rcq_map = fp->rx_comp_mapping;
3221 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3222
	/* This should be a maximum number of data bytes that may be
	 * placed on the BD (not including paddings).
	 */
3226 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3227 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3228
3229 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3230 rxq_init->tpa_agg_sz = tpa_agg_size;
3231 rxq_init->sge_buf_sz = sge_sz;
3232 rxq_init->max_sges_pkt = max_sge;
3233 rxq_init->rss_engine_id = BP_FUNC(bp);
3234 rxq_init->mcast_engine_id = BP_FUNC(bp);
3235
3236
	/* Maximum number of simultaneous TPA aggregations for this Queue.
	 * For PF Clients it should be the maximum available number;
	 * VF driver(s) may want to define it to a smaller value.
	 */
3241 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3242
3243 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3244 rxq_init->fw_sb_id = fp->fw_sb_id;
3245
3246 if (IS_FCOE_FP(fp))
3247 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3248 else
3249 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3250
3251
	/* configure silent vlan removal: in afex mode, mask the default vlan */
3253 if (IS_MF_AFEX(bp)) {
3254 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3255 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3256 }
3257}
3258
3259static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3260 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3261 u8 cos)
3262{
3263 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3264 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3265 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3266 txq_init->fw_sb_id = fp->fw_sb_id;
3267
	/* set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
3272 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3273
3274 if (IS_FCOE_FP(fp)) {
3275 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3276 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3277 }
3278}
3279
3280static void bnx2x_pf_init(struct bnx2x *bp)
3281{
3282 struct bnx2x_func_init_params func_init = {0};
3283 struct event_ring_data eq_data = { {0} };
3284 u16 flags;
3285
3286 if (!CHIP_IS_E1x(bp)) {
3287
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
3289 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3290 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3291 (CHIP_MODE_IS_4_PORT(bp) ?
3292 BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
3294 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3295 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3296 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3297 (CHIP_MODE_IS_4_PORT(bp) ?
3298 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3299 }
3300
	/* function setup flags */
3302 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
3303
	/* This flag is relevant for E1x only.
	 * E2 doesn't have a TPA configuration in a function level.
	 */
3307 flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
3308
3309 func_init.func_flgs = flags;
3310 func_init.pf_id = BP_FUNC(bp);
3311 func_init.func_id = BP_FUNC(bp);
3312 func_init.spq_map = bp->spq_mapping;
3313 func_init.spq_prod = bp->spq_prod_idx;
3314
3315 bnx2x_func_init(bp, &func_init);
3316
3317 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3318
	/* Congestion management values depend on the link rate.
	 * There is no active link, so the initial link rate is set to 10Gbps.
	 * When the link comes up the congestion management values are
	 * re-calculated according to the actual link rate.
	 */
3325 bp->link_vars.line_speed = SPEED_10000;
3326 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3327
	/* Only the PMF sets the HW */
3329 if (bp->port.pmf)
3330 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3331
	/* init Event Queue - PCI bus guarantees correct endianity */
3333 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3334 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3335 eq_data.producer = bp->eq_prod;
3336 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3337 eq_data.sb_id = DEF_SB_ID;
3338 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3339}
3340
3341static void bnx2x_e1h_disable(struct bnx2x *bp)
3342{
3343 int port = BP_PORT(bp);
3344
3345 bnx2x_tx_disable(bp);
3346
3347 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3348}
3349
3350static void bnx2x_e1h_enable(struct bnx2x *bp)
3351{
3352 int port = BP_PORT(bp);
3353
3354 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3355 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3356
	/* Tx queue should be only re-enabled */
3358 netif_tx_wake_all_queues(bp->dev);
3359
	/* Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
3364}
3365
3366#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3367
3368static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3369{
3370 struct eth_stats_info *ether_stat =
3371 &bp->slowpath->drv_info_to_mcp.ether_stat;
3372 struct bnx2x_vlan_mac_obj *mac_obj =
3373 &bp->sp_objs->mac_obj;
3374 int i;
3375
3376 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3377 ETH_STAT_INFO_VERSION_LEN);
3378
	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
	 * mac_local field of the ether_stat struct. The base address is offset
	 * by 2 bytes to account for the field being 8 bytes while a mac address
	 * is only 6 bytes. Likewise, the stride for the get_n_elements function
	 * is 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
	 * allocated by the ether_stat struct, so the macs will land in their
	 * proper positions.
	 */
3387 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3388 memset(ether_stat->mac_local + i, 0,
3389 sizeof(ether_stat->mac_local[0]));
3390 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3391 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3392 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3393 ETH_ALEN);
3394 ether_stat->mtu_size = bp->dev->mtu;
3395 if (bp->dev->features & NETIF_F_RXCSUM)
3396 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3397 if (bp->dev->features & NETIF_F_TSO)
3398 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3399 ether_stat->feature_flags |= bp->common.boot_mode;
3400
3401 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3402
3403 ether_stat->txq_size = bp->tx_ring_size;
3404 ether_stat->rxq_size = bp->rx_ring_size;
3405
3406#ifdef CONFIG_BNX2X_SRIOV
3407 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3408#endif
3409}
3410
3411static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3412{
3413 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3414 struct fcoe_stats_info *fcoe_stat =
3415 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3416
3417 if (!CNIC_LOADED(bp))
3418 return;
3419
3420 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3421
3422 fcoe_stat->qos_priority =
3423 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3424
	/* insert FCoE stats from ramrod response */
3426 if (!NO_FCOE(bp)) {
3427 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3428 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3429 tstorm_queue_statistics;
3430
3431 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3432 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3433 xstorm_queue_statistics;
3434
3435 struct fcoe_statistics_params *fw_fcoe_stat =
3436 &bp->fw_stats_data->fcoe;
3437
3438 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3439 fcoe_stat->rx_bytes_lo,
3440 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3441
3442 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3443 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3444 fcoe_stat->rx_bytes_lo,
3445 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3446
3447 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3448 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3449 fcoe_stat->rx_bytes_lo,
3450 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3451
3452 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3453 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3454 fcoe_stat->rx_bytes_lo,
3455 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3456
3457 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3458 fcoe_stat->rx_frames_lo,
3459 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3460
3461 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3462 fcoe_stat->rx_frames_lo,
3463 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3464
3465 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3466 fcoe_stat->rx_frames_lo,
3467 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3468
3469 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3470 fcoe_stat->rx_frames_lo,
3471 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3472
3473 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3474 fcoe_stat->tx_bytes_lo,
3475 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3476
3477 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3478 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3479 fcoe_stat->tx_bytes_lo,
3480 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3481
3482 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3483 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3484 fcoe_stat->tx_bytes_lo,
3485 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3486
3487 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3488 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3489 fcoe_stat->tx_bytes_lo,
3490 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3491
3492 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3493 fcoe_stat->tx_frames_lo,
3494 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3495
3496 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3497 fcoe_stat->tx_frames_lo,
3498 fcoe_q_xstorm_stats->ucast_pkts_sent);
3499
3500 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3501 fcoe_stat->tx_frames_lo,
3502 fcoe_q_xstorm_stats->bcast_pkts_sent);
3503
3504 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3505 fcoe_stat->tx_frames_lo,
3506 fcoe_q_xstorm_stats->mcast_pkts_sent);
3507 }
3508
	/* ask L5 driver to add data to the struct */
3510 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3511}
3512
3513static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3514{
3515 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3516 struct iscsi_stats_info *iscsi_stat =
3517 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3518
3519 if (!CNIC_LOADED(bp))
3520 return;
3521
3522 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3523 ETH_ALEN);
3524
3525 iscsi_stat->qos_priority =
3526 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3527
	/* ask L5 driver to add data to the struct */
3529 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3530}
3531
/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify other functions about the change
 */
3537static void bnx2x_config_mf_bw(struct bnx2x *bp)
3538{
3539 if (bp->link_vars.link_up) {
3540 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3541 bnx2x_link_sync_notify(bp);
3542 }
3543 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3544}
3545
3546static void bnx2x_set_mf_bw(struct bnx2x *bp)
3547{
3548 bnx2x_config_mf_bw(bp);
3549 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3550}
3551
3552static void bnx2x_handle_eee_event(struct bnx2x *bp)
3553{
3554 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3555 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3556}
3557
3558#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3559#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3560
3561static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3562{
3563 enum drv_info_opcode op_code;
3564 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3565 bool release = false;
3566 int wait;
3567
	/* if drv_info version supported by MFW doesn't match - send NACK */
3569 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3570 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3571 return;
3572 }
3573
3574 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3575 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3576
	/* Must prevent other flows from accessing drv_info_to_mcp */
3578 mutex_lock(&bp->drv_info_mutex);
3579
3580 memset(&bp->slowpath->drv_info_to_mcp, 0,
3581 sizeof(union drv_info_to_mcp));
3582
3583 switch (op_code) {
3584 case ETH_STATS_OPCODE:
3585 bnx2x_drv_info_ether_stat(bp);
3586 break;
3587 case FCOE_STATS_OPCODE:
3588 bnx2x_drv_info_fcoe_stat(bp);
3589 break;
3590 case ISCSI_STATS_OPCODE:
3591 bnx2x_drv_info_iscsi_stat(bp);
3592 break;
3593 default:
		/* if op code isn't supported - send NACK */
3595 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3596 goto out;
3597 }
3598
	/* if we got a drv_info attn from MFW then these fields are defined in
	 * shmem2 for sure
	 */
3602 SHMEM2_WR(bp, drv_info_host_addr_lo,
3603 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3604 SHMEM2_WR(bp, drv_info_host_addr_hi,
3605 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3606
3607 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3608
	/* Since management may want both this buffer and get_driver_version,
	 * we need to wait until management notifies us that it has finished
	 * utilizing the buffer.
	 */
3613 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3614 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3615 } else if (!bp->drv_info_mng_owner) {
3616 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3617
3618 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3619 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3620
			/* Management is done; need to clear indication */
3622 if (indication & bit) {
3623 SHMEM2_WR(bp, mfw_drv_indication,
3624 indication & ~bit);
3625 release = true;
3626 break;
3627 }
3628
3629 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3630 }
3631 }
3632 if (!release) {
3633 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3634 bp->drv_info_mng_owner = true;
3635 }
3636
3637out:
3638 mutex_unlock(&bp->drv_info_mutex);
3639}
3640
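/* Pack a dotted version string into one u32, one byte per field. In the
 * bnx2x format the leading "1." is consumed and the first digit of the
 * next field becomes the major byte, e.g. "1.712.30.0" -> 0x070c1e00.
 */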
3641static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3642{
3643 u8 vals[4];
3644 int i = 0;
3645
3646 if (bnx2x_format) {
3647 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3648 &vals[0], &vals[1], &vals[2], &vals[3]);
3649 if (i > 0)
3650 vals[0] -= '0';
3651 } else {
3652 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3653 &vals[0], &vals[1], &vals[2], &vals[3]);
3654 }
3655
3656 while (i < 4)
3657 vals[i++] = 0;
3658
3659 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3660}
3661
3662void bnx2x_update_mng_version(struct bnx2x *bp)
3663{
3664 u32 iscsiver = DRV_VER_NOT_LOADED;
3665 u32 fcoever = DRV_VER_NOT_LOADED;
3666 u32 ethver = DRV_VER_NOT_LOADED;
3667 int idx = BP_FW_MB_IDX(bp);
3668 u8 *version;
3669
3670 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3671 return;
3672
3673 mutex_lock(&bp->drv_info_mutex);
3674
3675 if (bp->drv_info_mng_owner)
3676 goto out;
3677
3678 if (bp->state != BNX2X_STATE_OPEN)
3679 goto out;
3680
	/* Parse ethernet driver version */
3682 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3683 if (!CNIC_LOADED(bp))
3684 goto out;
3685
	/* Try getting storage driver version via cnic */
3687 memset(&bp->slowpath->drv_info_to_mcp, 0,
3688 sizeof(union drv_info_to_mcp));
3689 bnx2x_drv_info_iscsi_stat(bp);
3690 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3691 iscsiver = bnx2x_update_mng_version_utility(version, false);
3692
3693 memset(&bp->slowpath->drv_info_to_mcp, 0,
3694 sizeof(union drv_info_to_mcp));
3695 bnx2x_drv_info_fcoe_stat(bp);
3696 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3697 fcoever = bnx2x_update_mng_version_utility(version, false);
3698
3699out:
3700 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3701 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3702 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3703
3704 mutex_unlock(&bp->drv_info_mutex);
3705
3706 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3707 ethver, iscsiver, fcoever);
3708}
3709
3710static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3711{
3712 u32 cmd_ok, cmd_fail;
3713
	/* sanity - only one of the DCC/OEM event masks may be set */
3715 if (event & DRV_STATUS_DCC_EVENT_MASK &&
3716 event & DRV_STATUS_OEM_EVENT_MASK) {
3717 BNX2X_ERR("Received simultaneous events %08x\n", event);
3718 return;
3719 }
3720
3721 if (event & DRV_STATUS_DCC_EVENT_MASK) {
3722 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3723 cmd_ok = DRV_MSG_CODE_DCC_OK;
3724 } else {
3725 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3726 cmd_ok = DRV_MSG_CODE_OEM_OK;
3727 }
3728
3729 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3730
3731 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3732 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
		/* This is the only place besides the function initialization
		 * where bp->flags can change, so it is done without any
		 * locks
		 */
3737 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3738 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3739 bp->flags |= MF_FUNC_DIS;
3740
3741 bnx2x_e1h_disable(bp);
3742 } else {
3743 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3744 bp->flags &= ~MF_FUNC_DIS;
3745
3746 bnx2x_e1h_enable(bp);
3747 }
3748 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3749 DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3750 }
3751
3752 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3753 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3754 bnx2x_config_mf_bw(bp);
3755 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3756 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3757 }
3758
	/* Report results to MCP */
3760 if (event)
3761 bnx2x_fw_command(bp, cmd_fail, 0);
3762 else
3763 bnx2x_fw_command(bp, cmd_ok, 0);
3764}
3765
/* must be called under the spq lock */
3767static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3768{
3769 struct eth_spe *next_spe = bp->spq_prod_bd;
3770
3771 if (bp->spq_prod_bd == bp->spq_last_bd) {
3772 bp->spq_prod_bd = bp->spq;
3773 bp->spq_prod_idx = 0;
3774 DP(BNX2X_MSG_SP, "end of spq\n");
3775 } else {
3776 bp->spq_prod_bd++;
3777 bp->spq_prod_idx++;
3778 }
3779 return next_spe;
3780}
3781
/* must be called under the spq lock */
3783static void bnx2x_sp_prod_update(struct bnx2x *bp)
3784{
3785 int func = BP_FUNC(bp);
3786
	/* Make sure that the BD data is updated before writing the producer:
	 * BD data is written to the memory, the producer is read from the
	 * memory, thus we need a full memory barrier to ensure the ordering.
	 */
3792 mb();
3793
3794 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3795 bp->spq_prod_idx);
3796 mmiowb();
3797}
3798
/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command type
 */
3805static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3806{
3807 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3808 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3809 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3810 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3811 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3812 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3813 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3814 return true;
3815 else
3816 return false;
3817}
3818
/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus all fields
 * within the SP command are placed in little endian order regardless of
 * host endianness.
 */
3833int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3834 u32 data_hi, u32 data_lo, int cmd_type)
3835{
3836 struct eth_spe *spe;
3837 u16 type;
3838 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3839
3840#ifdef BNX2X_STOP_ON_ERROR
3841 if (unlikely(bp->panic)) {
3842 BNX2X_ERR("Can't post SP when there is panic\n");
3843 return -EIO;
3844 }
3845#endif
3846
3847 spin_lock_bh(&bp->spq_lock);
3848
3849 if (common) {
3850 if (!atomic_read(&bp->eq_spq_left)) {
3851 BNX2X_ERR("BUG! EQ ring full!\n");
3852 spin_unlock_bh(&bp->spq_lock);
3853 bnx2x_panic();
3854 return -EBUSY;
3855 }
3856 } else if (!atomic_read(&bp->cq_spq_left)) {
3857 BNX2X_ERR("BUG! SPQ ring full!\n");
3858 spin_unlock_bh(&bp->spq_lock);
3859 bnx2x_panic();
3860 return -EBUSY;
3861 }
3862
3863 spe = bnx2x_sp_get_next(bp);
3864
	/* CID needs the port number to be encoded in it */
3866 spe->hdr.conn_and_cmd_data =
3867 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3868 HW_CID(bp, cid));
3869
	/* In some cases, the type may already contain the func-id
	 * (mainly in SRIOV related use cases), so we add it here only
	 * if it's not already set.
	 */
3874 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3875 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3876 SPE_HDR_CONN_TYPE;
3877 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3878 SPE_HDR_FUNCTION_ID);
3879 } else {
3880 type = cmd_type;
3881 }
3882
3883 spe->hdr.type = cpu_to_le16(type);
3884
3885 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3886 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3887
	/* It's ok if the actual decrement is issued towards the memory
	 * somewhere between the spin_lock and spin_unlock. Thus no
	 * more explicit memory barrier is needed.
	 */
3893 if (common)
3894 atomic_dec(&bp->eq_spq_left);
3895 else
3896 atomic_dec(&bp->cq_spq_left);
3897
3898 DP(BNX2X_MSG_SP,
3899 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3900 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3901 (u32)(U64_LO(bp->spq_mapping) +
3902 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3903 HW_CID(bp, cid), data_hi, data_lo, type,
3904 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3905
3906 bnx2x_sp_prod_update(bp);
3907 spin_unlock_bh(&bp->spq_lock);
3908 return 0;
3909}
3910
/* acquire split MCP access lock register */
3912static int bnx2x_acquire_alr(struct bnx2x *bp)
3913{
3914 u32 j, val;
3915 int rc = 0;
3916
3917 might_sleep();
3918 for (j = 0; j < 1000; j++) {
3919 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3920 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3921 if (val & MCPR_ACCESS_LOCK_LOCK)
3922 break;
3923
3924 usleep_range(5000, 10000);
3925 }
3926 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3927 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3928 rc = -EBUSY;
3929 }
3930
3931 return rc;
3932}
3933
/* release split MCP access lock register */
3935static void bnx2x_release_alr(struct bnx2x *bp)
3936{
3937 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3938}
3939
3940#define BNX2X_DEF_SB_ATT_IDX 0x0001
3941#define BNX2X_DEF_SB_IDX 0x0002
3942
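/* Returns a bitmask of BNX2X_DEF_SB_ATT_IDX / BNX2X_DEF_SB_IDX indicating
 * which default status block indices advanced since the last invocation.
 */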
3943static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3944{
3945 struct host_sp_status_block *def_sb = bp->def_status_blk;
3946 u16 rc = 0;
3947
3948 barrier();
3949 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3950 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3951 rc |= BNX2X_DEF_SB_ATT_IDX;
3952 }
3953
3954 if (bp->def_idx != def_sb->sp_sb.running_index) {
3955 bp->def_idx = def_sb->sp_sb.running_index;
3956 rc |= BNX2X_DEF_SB_IDX;
3957 }
3958
	/* Do not reorder: indices reading should complete before handling */
3960 barrier();
3961 return rc;
3962}
3963
/*
 * slow path service functions
 */
3968static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3969{
3970 int port = BP_PORT(bp);
3971 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3972 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3973 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3974 NIG_REG_MASK_INTERRUPT_PORT0;
3975 u32 aeu_mask;
3976 u32 nig_mask = 0;
3977 u32 reg_addr;
3978
3979 if (bp->attn_state & asserted)
3980 BNX2X_ERR("IGU ERROR\n");
3981
3982 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3983 aeu_mask = REG_RD(bp, aeu_addr);
3984
3985 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3986 aeu_mask, asserted);
3987 aeu_mask &= ~(asserted & 0x3ff);
3988 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3989
3990 REG_WR(bp, aeu_addr, aeu_mask);
3991 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3992
3993 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3994 bp->attn_state |= asserted;
3995 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3996
3997 if (asserted & ATTN_HARD_WIRED_MASK) {
3998 if (asserted & ATTN_NIG_FOR_FUNC) {
3999
4000 bnx2x_acquire_phy_lock(bp);
4001
			/* save nig interrupt mask */
4003 nig_mask = REG_RD(bp, nig_int_mask_addr);
4004
			/* If nig_mask is not set, no need to call the update
			 * function.
			 */
4008 if (nig_mask) {
4009 REG_WR(bp, nig_int_mask_addr, 0);
4010
4011 bnx2x_link_attn(bp);
4012 }
4013
			/* handle unicore attn? */
4015 }
4016 if (asserted & ATTN_SW_TIMER_4_FUNC)
4017 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4018
4019 if (asserted & GPIO_2_FUNC)
4020 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4021
4022 if (asserted & GPIO_3_FUNC)
4023 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4024
4025 if (asserted & GPIO_4_FUNC)
4026 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4027
4028 if (port == 0) {
4029 if (asserted & ATTN_GENERAL_ATTN_1) {
4030 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4031 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4032 }
4033 if (asserted & ATTN_GENERAL_ATTN_2) {
4034 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4035 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4036 }
4037 if (asserted & ATTN_GENERAL_ATTN_3) {
4038 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4039 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4040 }
4041 } else {
4042 if (asserted & ATTN_GENERAL_ATTN_4) {
4043 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4044 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4045 }
4046 if (asserted & ATTN_GENERAL_ATTN_5) {
4047 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4048 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4049 }
4050 if (asserted & ATTN_GENERAL_ATTN_6) {
4051 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4052 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4053 }
4054 }
4055
	} /* if hardwired */
4057
4058 if (bp->common.int_block == INT_BLOCK_HC)
4059 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4060 COMMAND_REG_ATTN_BITS_SET);
4061 else
4062 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4063
4064 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4065 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4066 REG_WR(bp, reg_addr, asserted);
4067
	/* now set back the mask */
4069 if (asserted & ATTN_NIG_FOR_FUNC) {
		/* Verify that the IGU ack through BAR was written before
		 * restoring the NIG mask. This loop should exit after
		 * 2-3 iterations max.
		 */
4073 if (bp->common.int_block != INT_BLOCK_HC) {
4074 u32 cnt = 0, igu_acked;
4075 do {
4076 igu_acked = REG_RD(bp,
4077 IGU_REG_ATTENTION_ACK_BITS);
4078 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4079 (++cnt < MAX_IGU_ATTN_ACK_TO));
4080 if (!igu_acked)
4081 DP(NETIF_MSG_HW,
4082 "Failed to verify IGU ack on time\n");
4083 barrier();
4084 }
4085 REG_WR(bp, nig_int_mask_addr, nig_mask);
4086 bnx2x_release_phy_lock(bp);
4087 }
4088}
4089
4090static void bnx2x_fan_failure(struct bnx2x *bp)
4091{
4092 int port = BP_PORT(bp);
4093 u32 ext_phy_config;
4094
4095 ext_phy_config =
4096 SHMEM_RD(bp,
4097 dev_info.port_hw_config[port].external_phy_config);
4098
4099 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4100 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4101 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4102 ext_phy_config);
4103
	/* log the failure */
4105 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4106 "Please contact OEM Support for assistance\n");
4107
	/* Schedule device reset (unload):
	 * some boards consume sufficient power when the driver is up to
	 * overheat if the fan fails.
	 */
4112 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4113}
4114
4115static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4116{
4117 int port = BP_PORT(bp);
4118 int reg_offset;
4119 u32 val;
4120
4121 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4122 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4123
4124 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4125
4126 val = REG_RD(bp, reg_offset);
4127 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4128 REG_WR(bp, reg_offset, val);
4129
4130 BNX2X_ERR("SPIO5 hw attention\n");
4131
		/* Fan failure attention */
4133 bnx2x_hw_reset_phy(&bp->link_params);
4134 bnx2x_fan_failure(bp);
4135 }
4136
4137 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4138 bnx2x_acquire_phy_lock(bp);
4139 bnx2x_handle_module_detect_int(&bp->link_params);
4140 bnx2x_release_phy_lock(bp);
4141 }
4142
4143 if (attn & HW_INTERRUT_ASSERT_SET_0) {
4144
4145 val = REG_RD(bp, reg_offset);
4146 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
4147 REG_WR(bp, reg_offset, val);
4148
4149 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4150 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
4151 bnx2x_panic();
4152 }
4153}
4154
4155static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4156{
4157 u32 val;
4158
4159 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4160
4161 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4162 BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
4164 if (val & 0x2)
4165 BNX2X_ERR("FATAL error from DORQ\n");
4166 }
4167
4168 if (attn & HW_INTERRUT_ASSERT_SET_1) {
4169
4170 int port = BP_PORT(bp);
4171 int reg_offset;
4172
4173 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4174 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4175
4176 val = REG_RD(bp, reg_offset);
4177 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
4178 REG_WR(bp, reg_offset, val);
4179
4180 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4181 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
4182 bnx2x_panic();
4183 }
4184}
4185
4186static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4187{
4188 u32 val;
4189
4190 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4191
4192 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4193 BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
4195 if (val & 0x2)
4196 BNX2X_ERR("FATAL error from CFC\n");
4197 }
4198
4199 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4200 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4201 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
4203 if (val & 0x18000)
4204 BNX2X_ERR("FATAL error from PXP\n");
4205
4206 if (!CHIP_IS_E1x(bp)) {
4207 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4208 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4209 }
4210 }
4211
4212 if (attn & HW_INTERRUT_ASSERT_SET_2) {
4213
4214 int port = BP_PORT(bp);
4215 int reg_offset;
4216
4217 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4218 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4219
4220 val = REG_RD(bp, reg_offset);
4221 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
4222 REG_WR(bp, reg_offset, val);
4223
4224 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4225 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
4226 bnx2x_panic();
4227 }
4228}
4229
4230static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4231{
4232 u32 val;
4233
4234 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4235
4236 if (attn & BNX2X_PMF_LINK_ASSERT) {
4237 int func = BP_FUNC(bp);
4238
4239 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4240 bnx2x_read_mf_cfg(bp);
4241 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4242 func_mf_config[BP_ABS_FUNC(bp)].config);
4243 val = SHMEM_RD(bp,
4244 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4245
4246 if (val & (DRV_STATUS_DCC_EVENT_MASK |
4247 DRV_STATUS_OEM_EVENT_MASK))
4248 bnx2x_oem_event(bp,
4249 (val & (DRV_STATUS_DCC_EVENT_MASK |
4250 DRV_STATUS_OEM_EVENT_MASK)));
4251
4252 if (val & DRV_STATUS_SET_MF_BW)
4253 bnx2x_set_mf_bw(bp);
4254
4255 if (val & DRV_STATUS_DRV_INFO_REQ)
4256 bnx2x_handle_drv_info_req(bp);
4257
4258 if (val & DRV_STATUS_VF_DISABLED)
4259 bnx2x_schedule_iov_task(bp,
4260 BNX2X_IOV_HANDLE_FLR);
4261
4262 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4263 bnx2x_pmf_update(bp);
4264
4265 if (bp->port.pmf &&
4266 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4267 bp->dcbx_enabled > 0)
4268
4269 bnx2x_dcbx_set_params(bp,
4270 BNX2X_DCBX_STATE_NEG_RECEIVED);
4271 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4272 bnx2x_handle_afex_cmd(bp,
4273 val & DRV_STATUS_AFEX_EVENT_MASK);
4274 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4275 bnx2x_handle_eee_event(bp);
4276
4277 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4278 bnx2x_handle_update_svid_cmd(bp);
4279
4280 if (bp->link_vars.periodic_flags &
4281 PERIODIC_FLAGS_LINK_EVENT) {
4282
4283 bnx2x_acquire_phy_lock(bp);
4284 bp->link_vars.periodic_flags &=
4285 ~PERIODIC_FLAGS_LINK_EVENT;
4286 bnx2x_release_phy_lock(bp);
4287 if (IS_MF(bp))
4288 bnx2x_link_sync_notify(bp);
4289 bnx2x_link_report(bp);
4290 }
4291
4292
4293
4294 bnx2x__link_status_update(bp);
4295 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4296
4297 BNX2X_ERR("MC assert!\n");
4298 bnx2x_mc_assert(bp);
4299 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4300 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4301 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4302 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4303 bnx2x_panic();
4304
4305 } else if (attn & BNX2X_MCP_ASSERT) {
4306
4307 BNX2X_ERR("MCP assert!\n");
4308 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4309 bnx2x_fw_dump(bp);
4310
4311 } else
4312 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4313 }
4314
4315 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4316 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4317 if (attn & BNX2X_GRC_TIMEOUT) {
4318 val = CHIP_IS_E1(bp) ? 0 :
4319 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4320 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4321 }
4322 if (attn & BNX2X_GRC_RSV) {
4323 val = CHIP_IS_E1(bp) ? 0 :
4324 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4325 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4326 }
4327 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4328 }
4329}
4330
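/*
 * Layout of the recovery register (MISC_REG_GENERIC_POR_1) used by the
 * load/reset bookkeeping below:
 *	bits 0-7	- path 0 per-PF load mask
 *	bits 8-15	- path 1 per-PF load mask
 *	bit 16		- path 0 reset-in-progress
 *	bit 17		- path 1 reset-in-progress
 *	bit 18		- global reset-in-progress
 */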
4345#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4346
4347#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4348#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4349#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4350#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4351#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4352#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4353#define BNX2X_GLOBAL_RESET_BIT 0x00040000
4354
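/*
 * bnx2x_set_reset_global - set the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */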
4360void bnx2x_set_reset_global(struct bnx2x *bp)
4361{
4362 u32 val;
4363 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4364 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4365 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4366 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4367}
4368
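/*
 * bnx2x_clear_reset_global - clear the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */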
4374static void bnx2x_clear_reset_global(struct bnx2x *bp)
4375{
4376 u32 val;
4377 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4378 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4379 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4380 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4381}
4382
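/*
 * bnx2x_reset_is_global - check the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */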
4388static bool bnx2x_reset_is_global(struct bnx2x *bp)
4389{
4390 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4391
4392 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4393 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4394}
4395
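/*
 * bnx2x_set_reset_done - clear RESET_IN_PROGRESS for the current engine.
 *
 * Should be run under rtnl lock.
 */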
4401static void bnx2x_set_reset_done(struct bnx2x *bp)
4402{
4403 u32 val;
4404 u32 bit = BP_PATH(bp) ?
4405 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4406 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4407 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4408
4409
4410 val &= ~bit;
4411 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4412
4413 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4414}
4415
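/*
 * bnx2x_set_reset_in_progress - set RESET_IN_PROGRESS for the current
 * engine.
 *
 * Should be run under rtnl lock.
 */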
4421void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4422{
4423 u32 val;
4424 u32 bit = BP_PATH(bp) ?
4425 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4426 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4427 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4428
4429
4430 val |= bit;
4431 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4432 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4433}
4434
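/*
 * bnx2x_reset_is_done - true if no reset is in progress on the given
 * engine.
 */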
4439bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4440{
4441 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4442 u32 bit = engine ?
4443 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4444
4445
4446 return (val & bit) ? false : true;
4447}
4448
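/*
 * bnx2x_set_pf_load - mark this PF as loaded by setting its bit in the
 * per-path load mask of the recovery register.
 *
 * Should be run under rtnl lock.
 */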
4454void bnx2x_set_pf_load(struct bnx2x *bp)
4455{
4456 u32 val1, val;
4457 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4458 BNX2X_PATH0_LOAD_CNT_MASK;
4459 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4460 BNX2X_PATH0_LOAD_CNT_SHIFT;
4461
4462 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4463 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4464
4465 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4466
4467
4468 val1 = (val & mask) >> shift;
4469
4470
4471 val1 |= (1 << bp->pf_num);
4472
4473
4474 val &= ~mask;
4475
4476
4477 val |= ((val1 << shift) & mask);
4478
4479 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4480 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4481}
4482
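/*
 * bnx2x_clear_pf_load - clear this PF's bit in the per-path load mask.
 *
 * Should be run under rtnl lock. Returns true if other functions on
 * this path are still marked as loaded.
 */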
4492bool bnx2x_clear_pf_load(struct bnx2x *bp)
4493{
4494 u32 val1, val;
4495 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4496 BNX2X_PATH0_LOAD_CNT_MASK;
4497 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4498 BNX2X_PATH0_LOAD_CNT_SHIFT;
4499
4500 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4501 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4502 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4503
4504
4505 val1 = (val & mask) >> shift;
4506
4507
4508 val1 &= ~(1 << bp->pf_num);
4509
4510
4511 val &= ~mask;
4512
4513
4514 val |= ((val1 << shift) & mask);
4515
4516 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4517 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4518 return val1 != 0;
4519}
4520
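/*
 * bnx2x_get_load_status - true if any function on the given engine is
 * still marked as loaded.
 */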
4526static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4527{
4528 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4529 BNX2X_PATH0_LOAD_CNT_MASK);
4530 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4531 BNX2X_PATH0_LOAD_CNT_SHIFT);
4532 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4533
4534 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4535
4536 val = (val & mask) >> shift;
4537
4538 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4539 engine, val);
4540
4541 return val != 0;
4542}
4543
4544static void _print_parity(struct bnx2x *bp, u32 reg)
4545{
4546 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4547}
4548
4549static void _print_next_block(int idx, const char *blk)
4550{
4551 pr_cont("%s%s", idx ? ", " : "", blk);
4552}
4553
4554static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4555 int *par_num, bool print)
4556{
4557 u32 cur_bit;
4558 bool res;
4559 int i;
4560
4561 res = false;
4562
4563 for (i = 0; sig; i++) {
4564 cur_bit = (0x1UL << i);
4565 if (sig & cur_bit) {
			res = true;
4567
4568 if (print) {
4569 switch (cur_bit) {
4570 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4571 _print_next_block((*par_num)++, "BRB");
4572 _print_parity(bp,
4573 BRB1_REG_BRB1_PRTY_STS);
4574 break;
4575 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4576 _print_next_block((*par_num)++,
4577 "PARSER");
4578 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4579 break;
4580 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4581 _print_next_block((*par_num)++, "TSDM");
4582 _print_parity(bp,
4583 TSDM_REG_TSDM_PRTY_STS);
4584 break;
4585 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4586 _print_next_block((*par_num)++,
4587 "SEARCHER");
4588 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4589 break;
4590 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4591 _print_next_block((*par_num)++, "TCM");
4592 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4593 break;
4594 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4595 _print_next_block((*par_num)++,
4596 "TSEMI");
4597 _print_parity(bp,
4598 TSEM_REG_TSEM_PRTY_STS_0);
4599 _print_parity(bp,
4600 TSEM_REG_TSEM_PRTY_STS_1);
4601 break;
4602 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4603 _print_next_block((*par_num)++, "XPB");
4604 _print_parity(bp, GRCBASE_XPB +
4605 PB_REG_PB_PRTY_STS);
4606 break;
4607 }
4608 }
4609
4610
4611 sig &= ~cur_bit;
4612 }
4613 }
4614
4615 return res;
4616}
4617
4618static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4619 int *par_num, bool *global,
4620 bool print)
4621{
4622 u32 cur_bit;
4623 bool res;
4624 int i;
4625
4626 res = false;
4627
4628 for (i = 0; sig; i++) {
4629 cur_bit = (0x1UL << i);
4630 if (sig & cur_bit) {
			res = true;
4632 switch (cur_bit) {
4633 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4634 if (print) {
4635 _print_next_block((*par_num)++, "PBF");
4636 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4637 }
4638 break;
4639 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4640 if (print) {
4641 _print_next_block((*par_num)++, "QM");
4642 _print_parity(bp, QM_REG_QM_PRTY_STS);
4643 }
4644 break;
4645 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4646 if (print) {
4647 _print_next_block((*par_num)++, "TM");
4648 _print_parity(bp, TM_REG_TM_PRTY_STS);
4649 }
4650 break;
4651 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4652 if (print) {
4653 _print_next_block((*par_num)++, "XSDM");
4654 _print_parity(bp,
4655 XSDM_REG_XSDM_PRTY_STS);
4656 }
4657 break;
4658 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4659 if (print) {
4660 _print_next_block((*par_num)++, "XCM");
4661 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4662 }
4663 break;
4664 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4665 if (print) {
4666 _print_next_block((*par_num)++,
4667 "XSEMI");
4668 _print_parity(bp,
4669 XSEM_REG_XSEM_PRTY_STS_0);
4670 _print_parity(bp,
4671 XSEM_REG_XSEM_PRTY_STS_1);
4672 }
4673 break;
4674 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4675 if (print) {
4676 _print_next_block((*par_num)++,
4677 "DOORBELLQ");
4678 _print_parity(bp,
4679 DORQ_REG_DORQ_PRTY_STS);
4680 }
4681 break;
4682 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4683 if (print) {
4684 _print_next_block((*par_num)++, "NIG");
4685 if (CHIP_IS_E1x(bp)) {
4686 _print_parity(bp,
4687 NIG_REG_NIG_PRTY_STS);
4688 } else {
4689 _print_parity(bp,
4690 NIG_REG_NIG_PRTY_STS_0);
4691 _print_parity(bp,
4692 NIG_REG_NIG_PRTY_STS_1);
4693 }
4694 }
4695 break;
4696 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4697 if (print)
4698 _print_next_block((*par_num)++,
4699 "VAUX PCI CORE");
4700 *global = true;
4701 break;
4702 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4703 if (print) {
4704 _print_next_block((*par_num)++,
4705 "DEBUG");
4706 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4707 }
4708 break;
4709 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4710 if (print) {
4711 _print_next_block((*par_num)++, "USDM");
4712 _print_parity(bp,
4713 USDM_REG_USDM_PRTY_STS);
4714 }
4715 break;
4716 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4717 if (print) {
4718 _print_next_block((*par_num)++, "UCM");
4719 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4720 }
4721 break;
4722 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4723 if (print) {
4724 _print_next_block((*par_num)++,
4725 "USEMI");
4726 _print_parity(bp,
4727 USEM_REG_USEM_PRTY_STS_0);
4728 _print_parity(bp,
4729 USEM_REG_USEM_PRTY_STS_1);
4730 }
4731 break;
4732 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4733 if (print) {
4734 _print_next_block((*par_num)++, "UPB");
4735 _print_parity(bp, GRCBASE_UPB +
4736 PB_REG_PB_PRTY_STS);
4737 }
4738 break;
4739 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4740 if (print) {
4741 _print_next_block((*par_num)++, "CSDM");
4742 _print_parity(bp,
4743 CSDM_REG_CSDM_PRTY_STS);
4744 }
4745 break;
4746 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4747 if (print) {
4748 _print_next_block((*par_num)++, "CCM");
4749 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4750 }
4751 break;
4752 }
4753
4754
4755 sig &= ~cur_bit;
4756 }
4757 }
4758
4759 return res;
4760}
4761
4762static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4763 int *par_num, bool print)
4764{
4765 u32 cur_bit;
4766 bool res;
4767 int i;
4768
4769 res = false;
4770
4771 for (i = 0; sig; i++) {
4772 cur_bit = (0x1UL << i);
4773 if (sig & cur_bit) {
4774 res = true;
4775 if (print) {
4776 switch (cur_bit) {
4777 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4778 _print_next_block((*par_num)++,
4779 "CSEMI");
4780 _print_parity(bp,
4781 CSEM_REG_CSEM_PRTY_STS_0);
4782 _print_parity(bp,
4783 CSEM_REG_CSEM_PRTY_STS_1);
4784 break;
4785 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4786 _print_next_block((*par_num)++, "PXP");
4787 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4788 _print_parity(bp,
4789 PXP2_REG_PXP2_PRTY_STS_0);
4790 _print_parity(bp,
4791 PXP2_REG_PXP2_PRTY_STS_1);
4792 break;
4793 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4794 _print_next_block((*par_num)++,
4795 "PXPPCICLOCKCLIENT");
4796 break;
4797 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4798 _print_next_block((*par_num)++, "CFC");
4799 _print_parity(bp,
4800 CFC_REG_CFC_PRTY_STS);
4801 break;
4802 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4803 _print_next_block((*par_num)++, "CDU");
4804 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4805 break;
4806 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4807 _print_next_block((*par_num)++, "DMAE");
4808 _print_parity(bp,
4809 DMAE_REG_DMAE_PRTY_STS);
4810 break;
4811 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4812 _print_next_block((*par_num)++, "IGU");
4813 if (CHIP_IS_E1x(bp))
4814 _print_parity(bp,
4815 HC_REG_HC_PRTY_STS);
4816 else
4817 _print_parity(bp,
4818 IGU_REG_IGU_PRTY_STS);
4819 break;
4820 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4821 _print_next_block((*par_num)++, "MISC");
4822 _print_parity(bp,
4823 MISC_REG_MISC_PRTY_STS);
4824 break;
4825 }
4826 }
4827
4828
4829 sig &= ~cur_bit;
4830 }
4831 }
4832
4833 return res;
4834}
4835
4836static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4837 int *par_num, bool *global,
4838 bool print)
4839{
4840 bool res = false;
4841 u32 cur_bit;
4842 int i;
4843
4844 for (i = 0; sig; i++) {
4845 cur_bit = (0x1UL << i);
4846 if (sig & cur_bit) {
4847 switch (cur_bit) {
4848 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4849 if (print)
4850 _print_next_block((*par_num)++,
4851 "MCP ROM");
4852 *global = true;
4853 res = true;
4854 break;
4855 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4856 if (print)
4857 _print_next_block((*par_num)++,
4858 "MCP UMP RX");
4859 *global = true;
4860 res = true;
4861 break;
4862 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4863 if (print)
4864 _print_next_block((*par_num)++,
4865 "MCP UMP TX");
4866 *global = true;
4867 res = true;
4868 break;
4869 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4870 (*par_num)++;
4871
4872 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4873 1UL << 10);
4874 break;
4875 }
4876
4877
4878 sig &= ~cur_bit;
4879 }
4880 }
4881
4882 return res;
4883}
4884
4885static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4886 int *par_num, bool print)
4887{
4888 u32 cur_bit;
4889 bool res;
4890 int i;
4891
4892 res = false;
4893
4894 for (i = 0; sig; i++) {
4895 cur_bit = (0x1UL << i);
4896 if (sig & cur_bit) {
4897 res = true;
4898 if (print) {
4899 switch (cur_bit) {
4900 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4901 _print_next_block((*par_num)++,
4902 "PGLUE_B");
4903 _print_parity(bp,
4904 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4905 break;
4906 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4907 _print_next_block((*par_num)++, "ATC");
4908 _print_parity(bp,
4909 ATC_REG_ATC_PRTY_STS);
4910 break;
4911 }
4912 }
4913
4914 sig &= ~cur_bit;
4915 }
4916 }
4917
4918 return res;
4919}
4920
4921static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4922 u32 *sig)
4923{
4924 bool res = false;
4925
4926 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4927 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4928 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4929 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4930 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4931 int par_num = 0;
4932
4933 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4934 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4935 sig[0] & HW_PRTY_ASSERT_SET_0,
4936 sig[1] & HW_PRTY_ASSERT_SET_1,
4937 sig[2] & HW_PRTY_ASSERT_SET_2,
4938 sig[3] & HW_PRTY_ASSERT_SET_3,
4939 sig[4] & HW_PRTY_ASSERT_SET_4);
4940 if (print) {
4941 if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4942 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4943 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4944 (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4945 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4946 netdev_err(bp->dev,
4947 "Parity errors detected in blocks: ");
4948 } else {
4949 print = false;
4950 }
4951 }
4952 res |= bnx2x_check_blocks_with_parity0(bp,
4953 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4954 res |= bnx2x_check_blocks_with_parity1(bp,
4955 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4956 res |= bnx2x_check_blocks_with_parity2(bp,
4957 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4958 res |= bnx2x_check_blocks_with_parity3(bp,
4959 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4960 res |= bnx2x_check_blocks_with_parity4(bp,
4961 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4962
4963 if (print)
4964 pr_cont("\n");
4965 }
4966
4967 return res;
4968}
4969
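/*
 * bnx2x_chk_parity_attn - look for parity attentions in all HW blocks
 * by reading the after-invert AEU registers.
 *
 * Sets *global when an attention that requires a global recovery was
 * found. Returns true if any parity attention is set.
 */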
4977bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4978{
4979 struct attn_route attn = { {0} };
4980 int port = BP_PORT(bp);
4981
4982 attn.sig[0] = REG_RD(bp,
4983 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4984 port*4);
4985 attn.sig[1] = REG_RD(bp,
4986 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4987 port*4);
4988 attn.sig[2] = REG_RD(bp,
4989 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4990 port*4);
4991 attn.sig[3] = REG_RD(bp,
4992 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4993 port*4);
4994
4995
4996
4997 attn.sig[3] &= ((REG_RD(bp,
4998 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4999 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5000 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5001 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5002
5003 if (!CHIP_IS_E1x(bp))
5004 attn.sig[4] = REG_RD(bp,
5005 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5006 port*4);
5007
5008 return bnx2x_parity_attn(bp, global, print, attn.sig);
5009}
5010
5011static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5012{
5013 u32 val;
5014 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5015
5016 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5017 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5018 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5019 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5020 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5021 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5022 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5023 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5024 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5025 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5026 if (val &
5027 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5028 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5029 if (val &
5030 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5031 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5032 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5033 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5034 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5035 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5036 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5037 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5038 }
5039 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5040 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5041 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5042 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5043 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5044 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5045 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5046 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5047 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5048 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5049 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5050 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5051 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5052 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5053 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5054 }
5055
5056 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5057 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5058 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5059 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5060 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5061 }
5062}
5063
5064static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5065{
5066 struct attn_route attn, *group_mask;
5067 int port = BP_PORT(bp);
5068 int index;
5069 u32 reg_addr;
5070 u32 val;
5071 u32 aeu_mask;
5072 bool global = false;
5073
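	/* need to take HW lock because MCP or other port might also
	 * try to handle this event
	 */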
5076 bnx2x_acquire_alr(bp);
5077
5078 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5079#ifndef BNX2X_STOP_ON_ERROR
5080 bp->recovery_state = BNX2X_RECOVERY_INIT;
5081 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5082
5083 bnx2x_int_disable(bp);
5084
5085
5086
5087#else
5088 bnx2x_panic();
5089#endif
5090 bnx2x_release_alr(bp);
5091 return;
5092 }
5093
5094 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5095 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5096 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5097 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5098 if (!CHIP_IS_E1x(bp))
5099 attn.sig[4] =
5100 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5101 else
5102 attn.sig[4] = 0;
5103
5104 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5105 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5106
5107 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5108 if (deasserted & (1 << index)) {
5109 group_mask = &bp->attn_group[index];
5110
5111 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5112 index,
5113 group_mask->sig[0], group_mask->sig[1],
5114 group_mask->sig[2], group_mask->sig[3],
5115 group_mask->sig[4]);
5116
5117 bnx2x_attn_int_deasserted4(bp,
5118 attn.sig[4] & group_mask->sig[4]);
5119 bnx2x_attn_int_deasserted3(bp,
5120 attn.sig[3] & group_mask->sig[3]);
5121 bnx2x_attn_int_deasserted1(bp,
5122 attn.sig[1] & group_mask->sig[1]);
5123 bnx2x_attn_int_deasserted2(bp,
5124 attn.sig[2] & group_mask->sig[2]);
5125 bnx2x_attn_int_deasserted0(bp,
5126 attn.sig[0] & group_mask->sig[0]);
5127 }
5128 }
5129
5130 bnx2x_release_alr(bp);
5131
5132 if (bp->common.int_block == INT_BLOCK_HC)
5133 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5134 COMMAND_REG_ATTN_BITS_CLR);
5135 else
5136 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5137
5138 val = ~deasserted;
5139 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5140 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5141 REG_WR(bp, reg_addr, val);
5142
5143 if (~bp->attn_state & deasserted)
5144 BNX2X_ERR("IGU ERROR\n");
5145
5146 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5147 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5148
5149 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5150 aeu_mask = REG_RD(bp, reg_addr);
5151
5152 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
5153 aeu_mask, deasserted);
5154 aeu_mask |= (deasserted & 0x3ff);
5155 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5156
5157 REG_WR(bp, reg_addr, aeu_mask);
5158 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5159
5160 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5161 bp->attn_state &= ~deasserted;
5162 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5163}
5164
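/*
 * bnx2x_attn_int - read the attention bits from the default status
 * block and handle newly asserted and deasserted attentions.
 */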
5165static void bnx2x_attn_int(struct bnx2x *bp)
5166{
5167
5168 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5169 attn_bits);
5170 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5171 attn_bits_ack);
5172 u32 attn_state = bp->attn_state;
5173
5174
5175 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
5176 u32 deasserted = ~attn_bits & attn_ack & attn_state;
5177
5178 DP(NETIF_MSG_HW,
5179 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
5180 attn_bits, attn_ack, asserted, deasserted);
5181
5182 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5183 BNX2X_ERR("BAD attention state\n");
5184
5185
5186 if (asserted)
5187 bnx2x_attn_int_asserted(bp, asserted);
5188
5189 if (deasserted)
5190 bnx2x_attn_int_deasserted(bp, deasserted);
5191}
5192
5193void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5194 u16 index, u8 op, u8 update)
5195{
5196 u32 igu_addr = bp->igu_base_addr;
5197 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5198 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5199 igu_addr);
5200}
5201
5202static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5203{
5204
5205 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5206 mmiowb();
5207}
5208
5209static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5210 union event_ring_elem *elem)
5211{
5212 u8 err = elem->message.error;
5213
5214 if (!bp->cnic_eth_dev.starting_cid ||
5215 (cid < bp->cnic_eth_dev.starting_cid &&
5216 cid != bp->cnic_eth_dev.iscsi_l2_cid))
5217 return 1;
5218
5219 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5220
5221 if (unlikely(err)) {
5222
5223 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5224 cid);
5225 bnx2x_panic_dump(bp, false);
5226 }
5227 bnx2x_cnic_cfc_comp(bp, cid, err);
5228 return 0;
5229}
5230
5231static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5232{
5233 struct bnx2x_mcast_ramrod_params rparam;
5234 int rc;
5235
5236 memset(&rparam, 0, sizeof(rparam));
5237
5238 rparam.mcast_obj = &bp->mcast_obj;
5239
5240 netif_addr_lock_bh(bp->dev);
5241
5242
5243 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5244
5245
5246 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5247 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5248 if (rc < 0)
5249 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5250 rc);
5251 }
5252
5253 netif_addr_unlock_bh(bp->dev);
5254}
5255
5256static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5257 union event_ring_elem *elem)
5258{
5259 unsigned long ramrod_flags = 0;
5260 int rc = 0;
5261 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
5262 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5263
5264
5265 __set_bit(RAMROD_CONT, &ramrod_flags);
5266
5267 switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
5268 >> BNX2X_SWCID_SHIFT) {
5269 case BNX2X_FILTER_MAC_PENDING:
5270 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5271 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5272 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5273 else
5274 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5275
5276 break;
5277 case BNX2X_FILTER_MCAST_PENDING:
5278 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5279
5280
5281
5282 bnx2x_handle_mcast_eqe(bp);
5283 return;
5284 default:
5285 BNX2X_ERR("Unsupported classification command: %d\n",
5286 elem->message.data.eth_event.echo);
5287 return;
5288 }
5289
5290 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5291
5292 if (rc < 0)
5293 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5294 else if (rc > 0)
5295 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5296}
5297
5298static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5299
5300static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5301{
5302 netif_addr_lock_bh(bp->dev);
5303
5304 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5305
5306
5307 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5308 bnx2x_set_storm_rx_mode(bp);
5309 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5310 &bp->sp_state))
5311 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5312 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5313 &bp->sp_state))
5314 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5315
5316 netif_addr_unlock_bh(bp->dev);
5317}
5318
5319static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5320 union event_ring_elem *elem)
5321{
5322 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5323 DP(BNX2X_MSG_SP,
5324 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5325 elem->message.data.vif_list_event.func_bit_map);
5326 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5327 elem->message.data.vif_list_event.func_bit_map);
5328 } else if (elem->message.data.vif_list_event.echo ==
5329 VIF_LIST_RULE_SET) {
5330 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5331 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5332 }
5333}
5334
5335
5336static void bnx2x_after_function_update(struct bnx2x *bp)
5337{
5338 int q, rc;
5339 struct bnx2x_fastpath *fp;
5340 struct bnx2x_queue_state_params queue_params = {NULL};
5341 struct bnx2x_queue_update_params *q_update_params =
5342 &queue_params.params.update;
5343
5344
5345 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5346
5347
5348 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5349 &q_update_params->update_flags);
5350 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5351 &q_update_params->update_flags);
5352 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5353
5354
5355 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5356 q_update_params->silent_removal_value = 0;
5357 q_update_params->silent_removal_mask = 0;
5358 } else {
5359 q_update_params->silent_removal_value =
5360 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5361 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5362 }
5363
5364 for_each_eth_queue(bp, q) {
5365
5366 fp = &bp->fp[q];
5367 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5368
5369
5370 rc = bnx2x_queue_state_change(bp, &queue_params);
5371 if (rc < 0)
5372 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5373 q);
5374 }
5375
5376 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5377 fp = &bp->fp[FCOE_IDX(bp)];
5378 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5379
5380
5381 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5382
5383
5384 smp_mb__before_atomic();
5385 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5386 smp_mb__after_atomic();
5387
5388
5389 rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  FCOE_IDX(bp));
5393 } else {
5394
5395 bnx2x_link_report(bp);
5396 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5397 }
5398}
5399
5400static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5401 struct bnx2x *bp, u32 cid)
5402{
5403 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5404
5405 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5406 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5407 else
5408 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5409}
5410
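/*
 * bnx2x_eq_int - walk the event queue from sw_cons to hw_cons and
 * dispatch the completion handler for each ramrod.
 */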
5411static void bnx2x_eq_int(struct bnx2x *bp)
5412{
5413 u16 hw_cons, sw_cons, sw_prod;
5414 union event_ring_elem *elem;
5415 u8 echo;
5416 u32 cid;
5417 u8 opcode;
5418 int rc, spqe_cnt = 0;
5419 struct bnx2x_queue_sp_obj *q_obj;
5420 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5421 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5422
5423 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5424
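	/* The last EQ element of each page is a next-page pointer, so
	 * when the HW producer points at it, skip over it here as well.
	 */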
5430 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5431 hw_cons++;
5432
5437 sw_cons = bp->eq_cons;
5438 sw_prod = bp->eq_prod;
5439
5440 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5441 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5442
5443 for (; sw_cons != hw_cons;
5444 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5445
5446 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5447
5448 rc = bnx2x_iov_eq_sp_event(bp, elem);
5449 if (!rc) {
5450 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5451 rc);
5452 goto next_spqe;
5453 }
5454
5455
5456 cid = SW_CID((__force __le32)
5457 elem->message.data.cfc_del_event.cid);
5458 opcode = elem->message.opcode;
5459
5460
5461 switch (opcode) {
5462 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5463 bnx2x_vf_mbx_schedule(bp,
5464 &elem->message.data.vf_pf_event);
5465 continue;
5466
5467 case EVENT_RING_OPCODE_STAT_QUERY:
5468 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5469 "got statistics comp event %d\n",
5470 bp->stats_comp++);
5471
5472 goto next_spqe;
5473
5474 case EVENT_RING_OPCODE_CFC_DEL:
5480 DP(BNX2X_MSG_SP,
5481 "got delete ramrod for MULTI[%d]\n", cid);
5482
5483 if (CNIC_LOADED(bp) &&
5484 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5485 goto next_spqe;
5486
5487 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5488
5489 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5490 break;
5491
5492 goto next_spqe;
5493
5494 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5495 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5496 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5497 if (f_obj->complete_cmd(bp, f_obj,
5498 BNX2X_F_CMD_TX_STOP))
5499 break;
5500 goto next_spqe;
5501
5502 case EVENT_RING_OPCODE_START_TRAFFIC:
5503 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5504 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5505 if (f_obj->complete_cmd(bp, f_obj,
5506 BNX2X_F_CMD_TX_START))
5507 break;
5508 goto next_spqe;
5509
5510 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5511 echo = elem->message.data.function_update_event.echo;
5512 if (echo == SWITCH_UPDATE) {
5513 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5514 "got FUNC_SWITCH_UPDATE ramrod\n");
5515 if (f_obj->complete_cmd(
5516 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5517 break;
5518
5519 } else {
5520 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5521
5522 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5523 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5524 f_obj->complete_cmd(bp, f_obj,
5525 BNX2X_F_CMD_AFEX_UPDATE);
5526
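				/* Queue SP operations must run under
				 * rtnl_lock, so hand the AFEX function
				 * update off to the sp_rtnl task.
				 */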
5531 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5532 }
5533
5534 goto next_spqe;
5535
5536 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5537 f_obj->complete_cmd(bp, f_obj,
5538 BNX2X_F_CMD_AFEX_VIFLISTS);
5539 bnx2x_after_afex_vif_lists(bp, elem);
5540 goto next_spqe;
5541 case EVENT_RING_OPCODE_FUNCTION_START:
5542 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5543 "got FUNC_START ramrod\n");
5544 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5545 break;
5546
5547 goto next_spqe;
5548
5549 case EVENT_RING_OPCODE_FUNCTION_STOP:
5550 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5551 "got FUNC_STOP ramrod\n");
5552 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5553 break;
5554
5555 goto next_spqe;
5556
5557 case EVENT_RING_OPCODE_SET_TIMESYNC:
5558 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5559 "got set_timesync ramrod completion\n");
5560 if (f_obj->complete_cmd(bp, f_obj,
5561 BNX2X_F_CMD_SET_TIMESYNC))
5562 break;
5563 goto next_spqe;
5564 }
5565
5566 switch (opcode | bp->state) {
5567 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5568 BNX2X_STATE_OPEN):
5569 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5570 BNX2X_STATE_OPENING_WAIT4_PORT):
5571 cid = elem->message.data.eth_event.echo &
5572 BNX2X_SWCID_MASK;
5573 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5574 cid);
5575 rss_raw->clear_pending(rss_raw);
5576 break;
5577
5578 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5579 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5580 case (EVENT_RING_OPCODE_SET_MAC |
5581 BNX2X_STATE_CLOSING_WAIT4_HALT):
5582 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5583 BNX2X_STATE_OPEN):
5584 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5585 BNX2X_STATE_DIAG):
5586 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5587 BNX2X_STATE_CLOSING_WAIT4_HALT):
5588 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
5589 bnx2x_handle_classification_eqe(bp, elem);
5590 break;
5591
5592 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5593 BNX2X_STATE_OPEN):
5594 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5595 BNX2X_STATE_DIAG):
5596 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5597 BNX2X_STATE_CLOSING_WAIT4_HALT):
5598 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5599 bnx2x_handle_mcast_eqe(bp);
5600 break;
5601
5602 case (EVENT_RING_OPCODE_FILTERS_RULES |
5603 BNX2X_STATE_OPEN):
5604 case (EVENT_RING_OPCODE_FILTERS_RULES |
5605 BNX2X_STATE_DIAG):
5606 case (EVENT_RING_OPCODE_FILTERS_RULES |
5607 BNX2X_STATE_CLOSING_WAIT4_HALT):
5608 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5609 bnx2x_handle_rx_mode_eqe(bp);
5610 break;
5611 default:
5612
5613 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5614 elem->message.opcode, bp->state);
5615 }
5616next_spqe:
5617 spqe_cnt++;
5618 }
5619
5620 smp_mb__before_atomic();
5621 atomic_add(spqe_cnt, &bp->eq_spq_left);
5622
5623 bp->eq_cons = sw_cons;
5624 bp->eq_prod = sw_prod;
5625
5626 smp_wmb();
5627
5628
5629 bnx2x_update_eq_prod(bp, bp->eq_prod);
5630}
5631
5632static void bnx2x_sp_task(struct work_struct *work)
5633{
5634 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5635
5636 DP(BNX2X_MSG_SP, "sp task invoked\n");
5637
5638
5639 smp_rmb();
5640 if (atomic_read(&bp->interrupt_occurred)) {
5641
5642
5643 u16 status = bnx2x_update_dsb_idx(bp);
5644
5645 DP(BNX2X_MSG_SP, "status %x\n", status);
5646 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5647 atomic_set(&bp->interrupt_occurred, 0);
5648
5649
5650 if (status & BNX2X_DEF_SB_ATT_IDX) {
5651 bnx2x_attn_int(bp);
5652 status &= ~BNX2X_DEF_SB_ATT_IDX;
5653 }
5654
5655
5656 if (status & BNX2X_DEF_SB_IDX) {
5657 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5658
5659 if (FCOE_INIT(bp) &&
5660 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5661
5662
5663
5664 local_bh_disable();
5665 napi_schedule(&bnx2x_fcoe(bp, napi));
5666 local_bh_enable();
5667 }
5668
5669
5670 bnx2x_eq_int(bp);
5671 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5672 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5673
5674 status &= ~BNX2X_DEF_SB_IDX;
5675 }
5676
5677
5678 if (unlikely(status))
5679 DP(BNX2X_MSG_SP,
5680 "got an unknown interrupt! (status 0x%x)\n", status);
5681
5682
5683 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5684 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5685 }
5686
5687
5688 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5689 &bp->sp_state)) {
5690 bnx2x_link_report(bp);
5691 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5692 }
5693}
5694
5695irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5696{
5697 struct net_device *dev = dev_instance;
5698 struct bnx2x *bp = netdev_priv(dev);
5699
5700 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5701 IGU_INT_DISABLE, 0);
5702
5703#ifdef BNX2X_STOP_ON_ERROR
5704 if (unlikely(bp->panic))
5705 return IRQ_HANDLED;
5706#endif
5707
5708 if (CNIC_LOADED(bp)) {
5709 struct cnic_ops *c_ops;
5710
5711 rcu_read_lock();
5712 c_ops = rcu_dereference(bp->cnic_ops);
5713 if (c_ops)
5714 c_ops->cnic_handler(bp->cnic_data, NULL);
5715 rcu_read_unlock();
5716 }
5717
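	/* Defer default status block work (attentions, EQ processing,
	 * interrupt re-enable) to the sp task.
	 */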
5721 bnx2x_schedule_sp_task(bp);
5722
5723 return IRQ_HANDLED;
5724}
5725
5726
5727
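/*
 * bnx2x_drv_pulse - write the driver heartbeat sequence to shared
 * memory so the management FW can detect a hung driver.
 */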
5728void bnx2x_drv_pulse(struct bnx2x *bp)
5729{
5730 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5731 bp->fw_drv_pulse_wr_seq);
5732}
5733
5734static void bnx2x_timer(unsigned long data)
5735{
5736 struct bnx2x *bp = (struct bnx2x *) data;
5737
5738 if (!netif_running(bp->dev))
5739 return;
5740
5741 if (IS_PF(bp) &&
5742 !BP_NOMCP(bp)) {
5743 int mb_idx = BP_FW_MB_IDX(bp);
5744 u16 drv_pulse;
5745 u16 mcp_pulse;
5746
5747 ++bp->fw_drv_pulse_wr_seq;
5748 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5749 drv_pulse = bp->fw_drv_pulse_wr_seq;
5750 bnx2x_drv_pulse(bp);
5751
5752 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5753 MCP_PULSE_SEQ_MASK);
5754
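		/* The delta between driver pulse and MCP response
		 * should stay small; a large lag means the MFW has
		 * stopped responding.
		 */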
5759 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5760 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5761 drv_pulse, mcp_pulse);
5762 }
5763
5764 if (bp->state == BNX2X_STATE_OPEN)
5765 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5766
5767
5768 if (IS_VF(bp))
5769 bnx2x_timer_sriov(bp);
5770
5771 mod_timer(&bp->timer, jiffies + bp->current_interval);
5772}
5773
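/*
 * bnx2x_fill - write 'fill' over 'len' bytes at 'addr', using dword
 * writes when both length and address are dword-aligned and byte-wide
 * writes otherwise.
 */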
5782static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5783{
5784 u32 i;
5785 if (!(len%4) && !(addr%4))
5786 for (i = 0; i < len; i += 4)
5787 REG_WR(bp, addr + i, fill);
5788 else
5789 for (i = 0; i < len; i++)
5790 REG_WR8(bp, addr + i, fill);
5791}
5792
5793
5794static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5795 int fw_sb_id,
5796 u32 *sb_data_p,
5797 u32 data_size)
5798{
5799 int index;
5800 for (index = 0; index < data_size; index++)
5801 REG_WR(bp, BAR_CSTRORM_INTMEM +
5802 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5803 sizeof(u32)*index,
5804 *(sb_data_p + index));
5805}
5806
5807static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5808{
5809 u32 *sb_data_p;
5810 u32 data_size = 0;
5811 struct hc_status_block_data_e2 sb_data_e2;
5812 struct hc_status_block_data_e1x sb_data_e1x;
5813
5814
5815 if (!CHIP_IS_E1x(bp)) {
5816 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5817 sb_data_e2.common.state = SB_DISABLED;
5818 sb_data_e2.common.p_func.vf_valid = false;
5819 sb_data_p = (u32 *)&sb_data_e2;
5820 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5821 } else {
5822 memset(&sb_data_e1x, 0,
5823 sizeof(struct hc_status_block_data_e1x));
5824 sb_data_e1x.common.state = SB_DISABLED;
5825 sb_data_e1x.common.p_func.vf_valid = false;
5826 sb_data_p = (u32 *)&sb_data_e1x;
5827 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5828 }
5829 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5830
5831 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5832 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5833 CSTORM_STATUS_BLOCK_SIZE);
5834 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5835 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5836 CSTORM_SYNC_BLOCK_SIZE);
5837}
5838
5839
5840static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5841 struct hc_sp_status_block_data *sp_sb_data)
5842{
5843 int func = BP_FUNC(bp);
5844 int i;
5845 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5846 REG_WR(bp, BAR_CSTRORM_INTMEM +
5847 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5848 i*sizeof(u32),
5849 *((u32 *)sp_sb_data + i));
5850}
5851
5852static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5853{
5854 int func = BP_FUNC(bp);
5855 struct hc_sp_status_block_data sp_sb_data;
5856 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5857
5858 sp_sb_data.state = SB_DISABLED;
5859 sp_sb_data.p_func.vf_valid = false;
5860
5861 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5862
5863 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5864 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5865 CSTORM_SP_STATUS_BLOCK_SIZE);
5866 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5867 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5868 CSTORM_SP_SYNC_BLOCK_SIZE);
5869}
5870
5871static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5872 int igu_sb_id, int igu_seg_id)
5873{
5874 hc_sm->igu_sb_id = igu_sb_id;
5875 hc_sm->igu_seg_id = igu_seg_id;
5876 hc_sm->timer_value = 0xFF;
5877 hc_sm->time_to_expire = 0xFFFFFFFF;
5878}
5879
5880
5881static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5882{
	/* zero out state machine indices */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

	/* map indices: the RX CQ index to the RX state machine, all TX
	 * indices to the TX state machine
	 */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5907}
5908
5909void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5910 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5911{
5912 int igu_seg_id;
5913
5914 struct hc_status_block_data_e2 sb_data_e2;
5915 struct hc_status_block_data_e1x sb_data_e1x;
5916 struct hc_status_block_sm *hc_sm_p;
5917 int data_size;
5918 u32 *sb_data_p;
5919
5920 if (CHIP_INT_MODE_IS_BC(bp))
5921 igu_seg_id = HC_SEG_ACCESS_NORM;
5922 else
5923 igu_seg_id = IGU_SEG_ACCESS_NORM;
5924
5925 bnx2x_zero_fp_sb(bp, fw_sb_id);
5926
5927 if (!CHIP_IS_E1x(bp)) {
5928 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5929 sb_data_e2.common.state = SB_ENABLED;
5930 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5931 sb_data_e2.common.p_func.vf_id = vfid;
5932 sb_data_e2.common.p_func.vf_valid = vf_valid;
5933 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5934 sb_data_e2.common.same_igu_sb_1b = true;
5935 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5936 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5937 hc_sm_p = sb_data_e2.common.state_machine;
5938 sb_data_p = (u32 *)&sb_data_e2;
5939 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5940 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5941 } else {
5942 memset(&sb_data_e1x, 0,
5943 sizeof(struct hc_status_block_data_e1x));
5944 sb_data_e1x.common.state = SB_ENABLED;
5945 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5946 sb_data_e1x.common.p_func.vf_id = 0xff;
5947 sb_data_e1x.common.p_func.vf_valid = false;
5948 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5949 sb_data_e1x.common.same_igu_sb_1b = true;
5950 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5951 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5952 hc_sm_p = sb_data_e1x.common.state_machine;
5953 sb_data_p = (u32 *)&sb_data_e1x;
5954 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5955 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5956 }
5957
5958 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5959 igu_sb_id, igu_seg_id);
5960 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5961 igu_sb_id, igu_seg_id);
5962
5963 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5964
5965
5966 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5967}
5968
5969static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5970 u16 tx_usec, u16 rx_usec)
5971{
5972 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5973 false, rx_usec);
5974 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5975 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5976 tx_usec);
5977 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5978 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5979 tx_usec);
5980 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5981 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5982 tx_usec);
5983}
5984
5985static void bnx2x_init_def_sb(struct bnx2x *bp)
5986{
5987 struct host_sp_status_block *def_sb = bp->def_status_blk;
5988 dma_addr_t mapping = bp->def_status_blk_mapping;
5989 int igu_sp_sb_index;
5990 int igu_seg_id;
5991 int port = BP_PORT(bp);
5992 int func = BP_FUNC(bp);
5993 int reg_offset, reg_offset_en5;
5994 u64 section;
5995 int index;
5996 struct hc_sp_status_block_data sp_sb_data;
5997 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5998
5999 if (CHIP_INT_MODE_IS_BC(bp)) {
6000 igu_sp_sb_index = DEF_SB_IGU_ID;
6001 igu_seg_id = HC_SEG_ACCESS_DEF;
6002 } else {
6003 igu_sp_sb_index = bp->igu_dsb_id;
6004 igu_seg_id = IGU_SEG_ACCESS_DEF;
6005 }
6006
6007
6008 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6009 atten_status_block);
6010 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6011
6012 bp->attn_state = 0;
6013
6014 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6015 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6016 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6017 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6018 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6019 int sindex;
6020
6021 for (sindex = 0; sindex < 4; sindex++)
6022 bp->attn_group[index].sig[sindex] =
6023 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6024
6025 if (!CHIP_IS_E1x(bp))
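			/* The enable5 AEU registers are laid out
			 * separately from enable1-4, so the stride
			 * between attention groups is 4 bytes rather
			 * than 0x10.
			 */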
6031 bp->attn_group[index].sig[4] = REG_RD(bp,
6032 reg_offset_en5 + 0x4*index);
6033 else
6034 bp->attn_group[index].sig[4] = 0;
6035 }
6036
6037 if (bp->common.int_block == INT_BLOCK_HC) {
6038 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6039 HC_REG_ATTN_MSG0_ADDR_L);
6040
6041 REG_WR(bp, reg_offset, U64_LO(section));
6042 REG_WR(bp, reg_offset + 4, U64_HI(section));
6043 } else if (!CHIP_IS_E1x(bp)) {
6044 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6045 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6046 }
6047
6048 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6049 sp_sb);
6050
6051 bnx2x_zero_sp_sb(bp);
6052
6053
6054 sp_sb_data.state = SB_ENABLED;
6055 sp_sb_data.host_sb_addr.lo = U64_LO(section);
6056 sp_sb_data.host_sb_addr.hi = U64_HI(section);
6057 sp_sb_data.igu_sb_id = igu_sp_sb_index;
6058 sp_sb_data.igu_seg_id = igu_seg_id;
6059 sp_sb_data.p_func.pf_id = func;
6060 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6061 sp_sb_data.p_func.vf_id = 0xff;
6062
6063 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6064
6065 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6066}
6067
6068void bnx2x_update_coalesce(struct bnx2x *bp)
6069{
6070 int i;
6071
6072 for_each_eth_queue(bp, i)
6073 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6074 bp->tx_ticks, bp->rx_ticks);
6075}
6076
6077static void bnx2x_init_sp_ring(struct bnx2x *bp)
6078{
6079 spin_lock_init(&bp->spq_lock);
6080 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6081
6082 bp->spq_prod_idx = 0;
6083 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6084 bp->spq_prod_bd = bp->spq;
6085 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6086}
6087
6088static void bnx2x_init_eq_ring(struct bnx2x *bp)
6089{
6090 int i;
6091 for (i = 1; i <= NUM_EQ_PAGES; i++) {
6092 union event_ring_elem *elem =
6093 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6094
6095 elem->next_page.addr.hi =
6096 cpu_to_le32(U64_HI(bp->eq_mapping +
6097 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6098 elem->next_page.addr.lo =
6099 cpu_to_le32(U64_LO(bp->eq_mapping +
6100 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6101 }
6102 bp->eq_cons = 0;
6103 bp->eq_prod = NUM_EQ_DESC;
6104 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6105
6106 atomic_set(&bp->eq_spq_left,
6107 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6108}
6109
6110
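/*
 * bnx2x_set_q_rx_mode - issue an rx_mode configuration ramrod for the
 * given client with the supplied accept flags.
 */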
6111static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6112 unsigned long rx_mode_flags,
6113 unsigned long rx_accept_flags,
6114 unsigned long tx_accept_flags,
6115 unsigned long ramrod_flags)
6116{
6117 struct bnx2x_rx_mode_ramrod_params ramrod_param;
6118 int rc;
6119
6120 memset(&ramrod_param, 0, sizeof(ramrod_param));
6121
6122
6123 ramrod_param.cid = 0;
6124 ramrod_param.cl_id = cl_id;
6125 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6126 ramrod_param.func_id = BP_FUNC(bp);
6127
6128 ramrod_param.pstate = &bp->sp_state;
6129 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6130
6131 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6132 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6133
6134 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6135
6136 ramrod_param.ramrod_flags = ramrod_flags;
6137 ramrod_param.rx_mode_flags = rx_mode_flags;
6138
6139 ramrod_param.rx_accept_flags = rx_accept_flags;
6140 ramrod_param.tx_accept_flags = tx_accept_flags;
6141
6142 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6143 if (rc < 0) {
6144 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6145 return rc;
6146 }
6147
6148 return 0;
6149}
6150
6151static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6152 unsigned long *rx_accept_flags,
6153 unsigned long *tx_accept_flags)
6154{
6155
6156 *rx_accept_flags = 0;
6157 *tx_accept_flags = 0;
6158
6159 switch (rx_mode) {
6160 case BNX2X_RX_MODE_NONE:
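		/* 'drop all': leave both accept masks empty */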
6165 break;
6166 case BNX2X_RX_MODE_NORMAL:
6167 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6168 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6169 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6170
6171
6172 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6173 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6174 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6175
6176 break;
6177 case BNX2X_RX_MODE_ALLMULTI:
6178 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6179 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6180 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6181
6182
6183 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6184 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6185 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6186
6187 break;
6188 case BNX2X_RX_MODE_PROMISC:
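		/* Accept all incoming frames on RX, including those
		 * whose MAC does not match this function (unmatched).
		 */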
6193 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6194 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6195 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6196 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6197
6198
6199 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6200 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6201
6202 if (IS_MF_SI(bp))
6203 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6204 else
6205 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6206
6207 break;
6208 default:
6209 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6210 return -EINVAL;
6211 }
6212
6213
6214 if (rx_mode != BNX2X_RX_MODE_NONE) {
6215 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6216 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6217 }
6218
6219 return 0;
6220}
6221
6222
6223static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6224{
6225 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6226 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6227 int rc;
6228
6229 if (!NO_FCOE(bp))
6230
6231 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6232
6233 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6234 &tx_accept_flags);
6235 if (rc)
6236 return rc;
6237
6238 __set_bit(RAMROD_RX, &ramrod_flags);
6239 __set_bit(RAMROD_TX, &ramrod_flags);
6240
6241 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6242 rx_accept_flags, tx_accept_flags,
6243 ramrod_flags);
6244}
6245
6246static void bnx2x_init_internal_common(struct bnx2x *bp)
6247{
6248 int i;
6249
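	/* Zero the USTORM aggregation data area */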
6252 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6253 REG_WR(bp, BAR_USTRORM_INTMEM +
6254 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6255 if (!CHIP_IS_E1x(bp)) {
6256 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6257 CHIP_INT_MODE_IS_BC(bp) ?
6258 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6259 }
6260}
6261
6262static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6263{
6264 switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* fall through */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */
		/* fall through */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is initialized inside
		 * bnx2x_pf_init
		 */
		break;
6278
6279 default:
6280 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6281 break;
6282 }
6283}
6284
static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
}

static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
}

static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return BP_L_ID(fp->bp) + fp->index;
	else
		return bnx2x_fp_igu_sb_id(fp);
}

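/* Initialize an Ethernet fastpath: ids, status block, per-CoS tx-data and
 * the queue state object.
 */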
static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
	u8 cos;
	unsigned long q_type = 0;
	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
	fp->rx_queue = fp_idx;
	fp->cid = fp_idx;
	fp->cl_id = bnx2x_fp_cl_id(fp);
	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);

	/* init shortcut */
	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);

	/* init tx data */
	for_each_cos_in_tx_queue(fp, cos) {
		bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
				  FP_COS_TO_TXQ(fp, cos, bp),
				  BNX2X_TX_SB_INDEX_BASE + cos, fp);
		cids[cos] = fp->txdata_ptr[cos]->cid;
	}

	/* nothing more for vf to do here */
	if (IS_VF(bp))
		return;

	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_update_fpsb_idx(fp);
	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
			     fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	/**
	 * Configure classification DBs: Always enable Tx switching
	 */
	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

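/* Link the tx BD pages of one tx-data instance into a ring and reset its
 * producer/consumer indices and doorbell.
 */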
static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	*txdata->tx_cons_sb = cpu_to_le16(0);

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i)
		bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
}

static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_eth_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}

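/* Initialize the FCoE L2 fastpath: it reuses the default status block and
 * gets a CNIC client id.
 */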
static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
			  fp);

	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings_cnic(bp);
	bnx2x_init_tx_rings_cnic(bp);

	/* flush all */
	mb();
	mmiowb();
}

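/* Part of NIC init that must run before IRQs are requested. */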
void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
	int i;

	/* Setup NIC internals and enable interrupts */
	for_each_eth_queue(bp, i)
		bnx2x_init_eth_fp(bp, i);

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);

	if (IS_PF(bp)) {
		/* Initialize MOD_ABS interrupts */
		bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
				       bp->common.shmem_base,
				       bp->common.shmem2_base, BP_PORT(bp));

		/* initialize the default status block and sp ring */
		bnx2x_init_def_sb(bp);
		bnx2x_update_dsb_idx(bp);
		bnx2x_init_sp_ring(bp);
	} else {
		bnx2x_memset_stats(bp);
	}
}

void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
{
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_stats_init(bp);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

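/* gzip service functions */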
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	if (bp->strm) {
		vfree(bp->strm->workspace);
		kfree(bp->strm);
		bp->strm = NULL;
	}

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev,
			   "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

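/* nic load/unload */

/* send a NIG loopback debug packet */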
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

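/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */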
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);

	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIC mode */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0;
}

static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	u32 val;

	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);

	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);

	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);

	val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
		PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
		PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
	if (!CHIP_IS_E1x(bp))
		val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
			PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
	REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);

	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);

	if (!CHIP_IS_E1x(bp))
		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);

	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);

	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	u32 val = 0x1400;

	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);

	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
}

static void bnx2x_setup_dmae(struct bnx2x *bp)
{
	bp->dmae_ready = 0;
	spin_lock_init(&bp->dmae_lock);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= MISC_SPIO_SPIO5;
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

static void bnx2x__common_init_phy(struct bnx2x *bp)
{
	u32 shmem_base[2], shmem2_base[2];

	/* Avoid common init in case MFW supports LFA */
	if (SHMEM2_RD(bp, size) >
	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
		return;
	shmem_base[0] = bp->common.shmem_base;
	shmem2_base[0] = bp->common.shmem2_base;
	if (!CHIP_IS_E1x(bp)) {
		shmem_base[1] =
			SHMEM2_RD(bp, other_shmem_base_addr);
		shmem2_base[1] =
			SHMEM2_RD(bp, other_shmem2_base_addr);
	}
	bnx2x_acquire_phy_lock(bp);
	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
			      bp->common.chip_id);
	bnx2x_release_phy_lock(bp);
}

static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
{
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);

	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
}

static void bnx2x_set_endianity(struct bnx2x *bp)
{
#ifdef __BIG_ENDIAN
	bnx2x_config_endianity(bp, 1);
#else
	bnx2x_config_endianity(bp, 0);
#endif
}

static void bnx2x_reset_endianity(struct bnx2x *bp)
{
	bnx2x_config_endianity(bp, 0);
}

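/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */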
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));

	/*
	 * take the RESET lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);

	val = 0xfffc;
	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		u8 abs_func_id;

		/* In 4-port or 2-port mode we need to turn off master-enable
		 * for everyone; after that, turn it back on for self. So we
		 * disregard multi-function or not and always disable for all
		 * functions on the given path: 0,2,4,6 for path 0 and
		 * 1,3,5,7 for path 1.
		 */
		for (abs_func_id = BP_PATH(bp);
		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
			if (abs_func_id == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, abs_func_id);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
	bnx2x_init_pxp(bp);
	bnx2x_set_endianity(bp);
	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround (E2 and above only). The first PF that loads
	 * on a path must make sure the entire ILT holds valid (zero) entries.
	 */
	if (!CHIP_IS_E1x(bp)) {
		/* In E2 a bug in the timers block can cause function 6/7
		 * (i.e. vnic3) to start even when marked as "scan-off", e.g.
		 * when a driver is load-unloaded while func6,7 are down. The
		 * timer then accesses the ILT of a function that is down,
		 * translates to a logical address and issues a read/write
		 * request; since that function's ILT is not valid this causes
		 * an unrecoverable translation error. As a workaround, while
		 * pretending to be the trailing function of the path, write a
		 * dummy ILT that covers the whole timers range with zeroed,
		 * valid entries so any stray timer scan hits a valid entry.
		 */
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and his brother are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (!CHIP_IS_E1x(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);

		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);

		/* let the HW do it's magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);

	bnx2x_iov_init_dmae(bp);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);

	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

	if (CNIC_SUPPORT(bp))
		bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);

	if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);

	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);

	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);

	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * received in afex mode
			 */
			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
			REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
			REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
			REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
		} else {
			/* Bit-map indicating which L2 hdrs may appear
			 * after the basic Ethernet header
			 */
			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}

	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		/* reset VFC memories */
		REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);
		REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);

		msleep(20);
	}

	bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * sent in afex mode
			 */
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
			REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
			REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
			REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
		} else {
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}

	REG_WR(bp, SRC_REG_SOFT_RST, 1);

	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);

	if (CNIC_SUPPORT(bp)) {
		REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
		REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
		REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
		REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
		REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
		REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
		REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
		REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
		REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
		REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
	}
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev,
			  "please adjust the size of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
	if (!CHIP_IS_E1(bp)) {
		/* in E3 this done in per-port section */
		if (!CHIP_IS_E3(bp))
			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E1H(bp))
		/* not applicable for E2 (and above ...) */
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		if (CHIP_IS_E1x(bp))
			bnx2x__common_init_phy(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

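/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */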
static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
{
	int rc = bnx2x_init_hw_common(bp);

	if (rc)
		return rc;

	/* In E2 2-PORT mode, same ext phy is used for the two paths */
	if (!BP_NOMCP(bp))
		bnx2x__common_init_phy(bp);

	return 0;
}

static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
	u32 low, high;
	u32 val, reg;

	DP(NETIF_MSG_HW, "starting port init  port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

	if (CNIC_SUPPORT(bp)) {
		bnx2x_init_block(bp, BLOCK_TM, init_phase);
		REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
		REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
	}

	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);

	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {

		if (IS_MF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) +
						((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		REG_WR(bp, (BP_PORT(bp) ?
			    BRB1_REG_MAC_GUARANTIED_1 :
			    BRB1_REG_MAC_GUARANTIED_0), 40);

	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	if (CHIP_IS_E3B0(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure headers for AFEX mode */
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
			       PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
			       PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_MUST_HAVE_HDRS_PORT_1 :
			       PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
		} else {
			/* Ovlan exists only if we are in multi-function +
			 * switch-dependent mode, in switch-independent there
			 * is no ovlan headers
			 */
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
			       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
			       (bp->path_has_ovlan ? 7 : 6));
		}
	}

	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);

	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);

	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
	bnx2x_init_block(bp, BLOCK_XPB, init_phase);

	bnx2x_init_block(bp, BLOCK_PBF, init_phase);

	if (CHIP_IS_E1x(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

	if (CNIC_SUPPORT(bp))
		bnx2x_init_block(bp, BLOCK_SRC, init_phase);

	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
	bnx2x_init_block(bp, BLOCK_CFC, init_phase);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, BLOCK_HC, init_phase);

	bnx2x_init_block(bp, BLOCK_IGU, init_phase);

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention"
	 */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	/* SCPAD_PARITY should NOT trigger close the gates */
	reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
	REG_WR(bp, reg,
	       REG_RD(bp, reg) &
	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);

	reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
	REG_WR(bp, reg,
	       REG_RD(bp, reg) &
	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);

	bnx2x_init_block(bp, BLOCK_NIG, init_phase);

	if (!CHIP_IS_E1x(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		 * basic Ethernet header
		 */
		if (IS_MF_AFEX(bp))
			REG_WR(bp, BP_PORT(bp) ?
			       NIG_REG_P1_HDRS_AFTER_BASIC :
			       NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
		else
			REG_WR(bp, BP_PORT(bp) ?
			       NIG_REG_P1_HDRS_AFTER_BASIC :
			       NIG_REG_P0_HDRS_AFTER_BASIC,
			       IS_MF_SD(bp) ? 7 : 6);

		if (CHIP_IS_E3(bp))
			REG_WR(bp, BP_PORT(bp) ?
			       NIG_REG_LLH1_MF_MODE :
			       NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (!CHIP_IS_E3(bp))
		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (!CHIP_IS_E1x(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
			case MULTI_FUNCTION_AFEX:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* If SPIO5 is set to generate interrupts, enable it for this port */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	if (val & MISC_SPIO_SPIO5) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}

	return 0;
}

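/* Write one 64-bit entry of the on-chip address table (ILT) via DMAE. */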
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;
	u32 wb_write[2];

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	wb_write[0] = ONCHIP_ADDR1(addr);
	wb_write[1] = ONCHIP_ADDR2(addr);
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

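/* Issue an IGU cleanup command for the given status block and wait for its
 * acknowledgment (E2 normal (non-BC) mode only).
 */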
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT)	|
		IGU_REGULAR_CLEANUP_SET				|
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			  ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW,
		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
}

static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_init_searcher(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
}

static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
{
	int rc;
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_switch_update_params *switch_update_params =
		&func_params.params.switch_update;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;

	/* Function parameters */
	__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
		  &switch_update_params->changes);
	if (suspend)
		__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
			  &switch_update_params->changes);

	rc = bnx2x_func_state_change(bp, &func_params);

	return rc;
}

static int bnx2x_reset_nic_mode(struct bnx2x *bp)
{
	int rc, i, port = BP_PORT(bp);
	int vlan_en = 0, mac_en[NUM_MACS];

	/* Close input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 0);
	} else {
		vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
				   NIG_REG_LLH0_FUNC_EN);
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			  NIG_REG_LLH0_FUNC_EN, 0);
		for (i = 0; i < NUM_MACS; i++) {
			mac_en[i] = REG_RD(bp, port ?
					     (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					      4 * i) :
					     (NIG_REG_LLH0_FUNC_MEM_ENABLE +
					      4 * i));
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					      4 * i) :
				  (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
		}
	}

	/* Close BMC to host */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);

	/* Suspend Tx switching to the PF. Completion of this ramrod
	 * further guarantees that all the packets of that PF / child
	 * VFs in BRB were processed by the Parser, so it is safe to
	 * change the NIC_MODE register.
	 */
	rc = bnx2x_func_switch_update(bp, 1);
	if (rc) {
		BNX2X_ERR("Can't suspend tx-switching!\n");
		return rc;
	}

	/* Change NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 0);

	/* Open input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 1);
	} else {
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			  NIG_REG_LLH0_FUNC_EN, vlan_en);
		for (i = 0; i < NUM_MACS; i++) {
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					      4 * i) :
				  (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
				  mac_en[i]);
		}
	}

	/* Enable BMC to host */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);

	/* Resume Tx switching to the PF */
	rc = bnx2x_func_switch_update(bp, 0);
	if (rc) {
		BNX2X_ERR("Can't resume tx-switching!\n");
		return rc;
	}

	DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	return 0;
}

int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
{
	int rc;

	bnx2x_ilt_init_op_cnic(bp, INITOP_SET);

	if (CONFIGURE_NIC_MODE(bp)) {
		/* Configure searcher as part of function hw init */
		bnx2x_init_searcher(bp);

		/* Reset NIC mode */
		rc = bnx2x_reset_nic_mode(bp);
		if (rc)
			BNX2X_ERR("Can't change NIC mode!\n");
		return rc;
	}

	return 0;
}

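/* previous driver DMAE transaction may have occurred when pre-boot stage ended
 * and boot began, or when kdump kernel was loaded. Either case would invalidate
 * the addresses of the transaction, resulting in was-error bit set in the pci
 * causing all hw-to-host pcie transactions to timeout. If this happened we want
 * to clear the interrupt which detected this from the pglueb and the was done
 * bit
 */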
static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
{
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
		       1 << BP_ABS_FUNC(bp));
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int init_phase = PHASE_PF0 + func;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width, rc;

	DP(NETIF_MSG_HW, "starting func init  func %d\n", func);

	/* FLR cleanup */
	if (!CHIP_IS_E1x(bp)) {
		rc = bnx2x_pf_flr_clnup(bp);
		if (rc) {
			bnx2x_fw_dump(bp);
			return rc;
		}
	}

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	if (IS_SRIOV(bp))
		cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
	cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);

	/* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
	 * those of the VFs, so start line should be reset
	 */
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context[i].cxt_mapping;
		ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
	}

	bnx2x_ilt_init_op(bp, INITOP_SET);

	if (!CONFIGURE_NIC_MODE(bp)) {
		bnx2x_init_searcher(bp);
		REG_WR(bp, PRS_REG_NIC_MODE, 0);
		DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	} else {
		/* Set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);
		DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
	}

	if (!CHIP_IS_E1x(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);

	bnx2x_clean_pglue_errors(bp);

	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
	}
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TM, init_phase);
	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
	REG_WR(bp, DORQ_REG_MODE_ACT, 1);	/* no dpm */

	bnx2x_iov_init_dq(bp);

	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, BLOCK_CDU, init_phase);

	bnx2x_init_block(bp, BLOCK_CFC, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
			REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
			REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
			       bp->mf_ov);
		}
	}

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, BLOCK_HC, init_phase);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (!CHIP_IS_E1x(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, BLOCK_IGU, init_phase);

		if (!CHIP_IS_E1x(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_VN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * does not matters what is the current chip mode
			 */
			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! These should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(NETIF_MSG_HW,
			   "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Enable STORMs SP logging */
	REG_WR8(bp, BAR_USTRORM_INTMEM +
	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
#endif

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

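/* Free CNIC-related DMA memory: status block and searcher T2 table. */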
void bnx2x_free_mem_cnic(struct bnx2x *bp)
{
	bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);

	if (!CHIP_IS_E1x(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
}

void bnx2x_free_mem(struct bnx2x *bp)
{
	int i;

	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	if (IS_VF(bp))
		return;

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	for (i = 0; i < L2_ILT_LINES(bp); i++)
		BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
			       bp->context[i].size);
	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);

	bnx2x_iov_free_mem(bp);
}

int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
	if (!CHIP_IS_E1x(bp)) {
		/* size = the status block + ramrod buffers */
		bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
						    sizeof(struct host_hc_status_block_e2));
		if (!bp->cnic_sb.e2_sb)
			goto alloc_mem_err;
	} else {
		bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
						     sizeof(struct host_hc_status_block_e1x));
		if (!bp->cnic_sb.e1x_sb)
			goto alloc_mem_err;
	}

	if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
		/* allocate searcher T2 table, as it wasn't allocated before */
		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
		if (!bp->t2)
			goto alloc_mem_err;
	}

	/* write address to which L5 should insert its values */
	bp->cnic_eth_dev.addr_drv_info_to_mcp =
		&bp->slowpath->drv_info_to_mcp;

	if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2x_free_mem_cnic(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
	int i, allocated, context_size;

	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
		/* allocate searcher T2 table */
		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
		if (!bp->t2)
			goto alloc_mem_err;
	}

	bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
					     sizeof(struct host_sp_status_block));
	if (!bp->def_status_blk)
		goto alloc_mem_err;

	bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
				       sizeof(struct bnx2x_slowpath));
	if (!bp->slowpath)
		goto alloc_mem_err;

	/* Allocate memory for CDU context:
	 * This memory is allocated separately and not in the generic ILT
	 * functions because CDU differs in few aspects:
	 * 1. There are multiple entities allocating memory for context -
	 * regular L2, CNIC, and SRIOV drivers. Each separately controls
	 * its own ILT lines.
	 * 2. Since CDU page-size is not a single 4KB page (which is the case
	 * for the other ILT clients), to be efficient we want to support
	 * allocation of sub-page-size in the last entry.
	 * 3. Context pointers are used by the driver to pass to FW / update
	 * the context (for the other ILT clients the pointers are used just
	 * to free the memory during unload).
	 */
	context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);

	for (i = 0, allocated = 0; allocated < context_size; i++) {
		bp->context[i].size = min(CDU_ILT_PAGE_SZ,
					  (context_size - allocated));
		bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
						      bp->context[i].size);
		if (!bp->context[i].vcxt)
			goto alloc_mem_err;
		allocated += bp->context[i].size;
	}
	bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
				 GFP_KERNEL);
	if (!bp->ilt->lines)
		goto alloc_mem_err;

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	if (bnx2x_iov_alloc_mem(bp))
		goto alloc_mem_err;

	/* Slow path ring */
	bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
	if (!bp->spq)
		goto alloc_mem_err;

	/* EQ */
	bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
				      BCM_PAGE_SIZE * NUM_EQ_PAGES);
	if (!bp->eq_ring)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

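/*
 * Init service functions
 */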
int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
		      struct bnx2x_vlan_mac_obj *obj, bool set,
		      int mac_type, unsigned long *ramrod_flags)
{
	int rc;
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Fill general parameters */
	ramrod_param.vlan_mac_obj = obj;
	ramrod_param.ramrod_flags = *ramrod_flags;

	/* Fill a user request section if needed */
	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

		/* Set the command: ADD or DEL */
		if (set)
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		else
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
	}

	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);

	if (rc == -EEXIST) {
		DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
		/* do not treat adding same MAC as error */
		rc = 0;
	} else if (rc < 0)
		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));

	return rc;
}

int bnx2x_del_all_macs(struct bnx2x *bp,
		       struct bnx2x_vlan_mac_obj *mac_obj,
		       int mac_type, bool wait_for_comp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;

	/* Wait for completion of requested */
	if (wait_for_comp)
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	/* Set the mac type of addresses we want to clear */
	__set_bit(mac_type, &vlan_mac_flags);

	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
	if (rc < 0)
		BNX2X_ERR("Failed to delete MACs: %d\n", rc);

	return rc;
}

int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
	if (IS_PF(bp)) {
		unsigned long ramrod_flags = 0;

		DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
					 &bp->sp_objs->mac_obj, set,
					 BNX2X_ETH_MAC, &ramrod_flags);
	} else { /* vf */
		return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
					     bp->fp->index, set);
	}
}

int bnx2x_setup_leading(struct bnx2x *bp)
{
	if (IS_PF(bp))
		return bnx2x_setup_queue(bp, &bp->fp[0], true);
	else
		return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
}

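/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 */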
int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
		BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
		return -EINVAL;
	}

	switch (int_mode) {
	case BNX2X_INT_MODE_MSIX:
		/* attempt to enable msix */
		rc = bnx2x_enable_msix(bp);

		/* msix attained */
		if (!rc)
			return 0;

		/* vfs use only msix */
		if (rc && IS_VF(bp))
			return rc;

		/* failed to enable multiple MSI-X */
		BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
			       bp->num_queues,
			       1 + bp->num_cnic_queues);

		/* fall through */
	case BNX2X_INT_MODE_MSI:
		bnx2x_enable_msi(bp);

		/* fall through */
	case BNX2X_INT_MODE_INTX:
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
		BNX2X_DEV_INFO("set number of queues to 1\n");
		break;
	default:
		BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
		return -EINVAL;
	}
	return 0;
}

/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	if (IS_SRIOV(bp))
		return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
	return L2_ILT_LINES(bp);
}

void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += bnx2x_cid_ilt_lines(bp);

	if (CNIC_SUPPORT(bp))
		line += CNIC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));
	}

	if (CNIC_SUPPORT(bp)) {
		/* SRC */
		ilt_client = &ilt->clients[ILT_CLIENT_SRC];
		ilt_client->client_num = ILT_CLIENT_SRC;
		ilt_client->page_size = SRC_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;
		line += SRC_ILT_LINES;
		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

		/* TM */
		ilt_client = &ilt->clients[ILT_CLIENT_TM];
		ilt_client->client_num = ILT_CLIENT_TM;
		ilt_client->page_size = TM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;
		line += TM_ILT_LINES;
		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));
	}

	BUG_ON(line > ILT_MAX_LINES);
}

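/**
 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @init_params: pointer to parameters structure
 *
 * parameters configured:
 *      - HC configuration
 *      - Queue's CDU context
 */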
static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
{
	u8 cos;
	int cxt_index, cxt_offset;

	/* FCoE uses the default SB and no coalescing, so skip HC setup */
	if (!IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);

		/* If HC is supported, enable host coalescing in the transition
		 * to INIT state.
		 */
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);

		/* HC rate */
		init_params->rx.hc_rate = bp->rx_ticks ?
			(1000000 / bp->rx_ticks) : 0;
		init_params->tx.hc_rate = bp->tx_ticks ?
			(1000000 / bp->tx_ticks) : 0;

		/* FW SB ID */
		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
			fp->fw_sb_id;

		/* CQ index among the SB indices: FCoE clients uses the default
		 * SB, therefore it's different.
		 */
		init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
		init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
	}

	/* set maximum number of COSs supported by this queue */
	init_params->max_cos = fp->max_cos;

	DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
	   fp->index, init_params->max_cos);

	/* set the context pointers queue object */
	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
		cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
		cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
				ILT_PAGE_CIDS);
		init_params->cxts[cos] =
			&bp->context[cxt_index].vcxt[cxt_offset].eth;
	}
}

static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			struct bnx2x_queue_state_params *q_params,
			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
			int tx_index, bool leading)
{
	memset(tx_only_params, 0, sizeof(*tx_only_params));

	/* Set the command */
	q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;

	/* Set tx-only QUEUE flags: don't zero statistics */
	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);

	/* choose the index of the cid to send the slow path on */
	tx_only_params->cid_index = tx_index;

	/* Set general TX_ONLY_SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);

	/* Set Tx TX_ONLY_SETUP parameters */
	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);

	DP(NETIF_MSG_IFUP,
	   "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
	   tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
	   q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
	   tx_only_params->gen_params.spcl_id, tx_only_params->flags);

	/* send the ramrod */
	return bnx2x_queue_state_change(bp, q_params);
}

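/**
 * bnx2x_setup_queue - setup queue
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	is leading
 *
 * This function performs 2 steps in a Queue state machine
 *      actually: 1) RESET->INIT 2) INIT->SETUP
 */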
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool leading)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_queue_setup_params *setup_params =
						&q_params.params.setup;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
						&q_params.params.tx_only;
	int rc;
	u8 tx_index;

	DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);

	/* reset IGU state skip FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* Prepare the INIT parameters */
	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_INIT;

	/* Change the state to INIT */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "init complete\n");

	/* Now move the Queue to the SETUP state */
	memset(setup_params, 0, sizeof(*setup_params));

	/* Set QUEUE flags */
	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);

	/* Set general SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
				FIRST_TX_COS_INDEX);

	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
			   &setup_params->rxq_params);

	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
			   FIRST_TX_COS_INDEX);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_SETUP;

	if (IS_FCOE_FP(fp))
		bp->fcoe_init = true;

	/* Change the state to SETUP */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
		return rc;
	}

	/* loop through the relevant tx-only indices */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	     tx_index < fp->max_cos;
	     tx_index++) {

		/* prepare and send tx-only ramrod*/
		rc = bnx2x_setup_tx_only(bp, fp, &q_params,
					 tx_only_params, tx_index, leading);
		if (rc) {
			BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
				  fp->index, tx_index);
			return rc;
		}
	}

	return rc;
}

static int bnx2x_stop_queue(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct bnx2x_fp_txdata *txdata;
	struct bnx2x_queue_state_params q_params = {NULL};
	int rc, tx_index;

	DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);

	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* close tx-only connections */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	     tx_index < fp->max_cos;
	     tx_index++) {

		/* ascertain this is a normal queue */
		txdata = fp->txdata_ptr[tx_index];

		DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
		   txdata->txq_index);

		/* send TERMINATE on the tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_TERMINATE;
		memset(&q_params.params.terminate, 0,
		       sizeof(q_params.params.terminate));
		q_params.params.terminate.cid_index = tx_index;

		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc)
			return rc;

		/* send CFC_DEL on the tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
		memset(&q_params.params.cfc_del, 0,
		       sizeof(q_params.params.cfc_del));
		q_params.params.cfc_del.cid_index = tx_index;
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc)
			return rc;
	}

	/* Stop the primary connection: */
	/* ...halt the connection */
	q_params.cmd = BNX2X_Q_CMD_HALT;
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc)
		return rc;

	/* ...terminate the connection */
	q_params.cmd = BNX2X_Q_CMD_TERMINATE;
	memset(&q_params.params.terminate, 0,
	       sizeof(q_params.params.terminate));
	q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc)
		return rc;

	/* ...delete cfc entry */
	q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
	memset(&q_params.params.cfc_del, 0,
	       sizeof(q_params.params.cfc_del));
	q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
	return bnx2x_queue_state_change(bp, &q_params);
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
			SB_DISABLED);
	}

	if (CNIC_LOADED(bp))
		/* CNIC SB */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
			(bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);

	/* SP SB */
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
		SB_DISABLED);

	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

	if (CNIC_LOADED(bp)) {
		/* Disable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
		/*
		 * Wait for at least 10ms and up to 2 second for the timers
		 * scan to complete
		 */
		for (i = 0; i < 200; i++) {
			usleep_range(10000, 20000);
			if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
				break;
		}
	}
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() called before reset_func()*/
	if (!CHIP_IS_E1x(bp))
		bnx2x_pf_disable(bp);

	bp->dmae_ready = 0;
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* Reset physical Link */
	bnx2x__link_reset(bp);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_RESET;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}

static int bnx2x_func_stop(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	int rc;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_STOP;

	/*
	 * Try to stop the function the 'good way'. If fails (in case
	 * of a parity error during bnx2x_chip_cleanup()) and we are
	 * not in a debug mode, perform a state transaction in order to
	 * enable further HW_RESET transaction.
	 */
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc) {
#ifdef BNX2X_STOP_ON_ERROR
		return rc;
#else
		BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
		__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
		return bnx2x_func_state_change(bp, &func_params);
#endif
	}

	return 0;
}

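/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */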
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
{
	u32 reset_code = 0;
	int port = BP_PORT(bp);

	/* Select the UNLOAD request mode */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		struct pci_dev *pdev = bp->pdev;
		u32 val;
		u16 pmc;

		/* The mac address is written to entries 1-4 to
		 * preserve entry 0 which is used by the PMF
		 */
		u8 entry = (BP_VN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		/* Enable the PME and clear the status */
		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
		pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Send the request to the MCP */
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code, 0);
	else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
		   bnx2x_load_count[path][2]);
		bnx2x_load_count[path][0]--;
		bnx2x_load_count[path][1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
		   bnx2x_load_count[path][2]);
		if (bnx2x_load_count[path][0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (bnx2x_load_count[path][1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	return reset_code;
}

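/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */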
void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
{
	u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
}

static int bnx2x_func_wait_started(struct bnx2x *bp)
{
	int tout = 50;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (!bp->port.pmf)
		return 0;

	/*
	 * (assumption: No Attention from MCP at this stage)
	 * PMF probably in the middle of TX disable/enable transaction
	 * 1. Sync IRS for default SB
	 * 2. Sync SP queue - this guarantees us that attention handling started
	 * 3. Wait, that TX disable/enable transaction completes
	 *
	 * 1+2 guarantee that if DCBx attention was scheduled it already changed
	 * pending bit of transaction from STARTED-->TX_STOPPED, if we already
	 * received completion for the transaction the state is TX_STOPPED.
	 * State will return to STARTED after completion of TX_STOPPED-->STARTED
	 * transition.
	 */
	if (msix)
		synchronize_irq(bp->msix_table[0].vector);
	else
		synchronize_irq(bp->pdev->irq);

	flush_workqueue(bnx2x_wq);
	flush_workqueue(bnx2x_iov_wq);

	while (bnx2x_func_get_state(bp, &bp->func_obj) !=
				BNX2X_F_STATE_STARTED && tout--)
		msleep(20);

	if (bnx2x_func_get_state(bp, &bp->func_obj) !=
						BNX2X_F_STATE_STARTED) {
#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("Wrong function state\n");
		return -EBUSY;
#else
		/*
		 * Failed to complete the transaction in a "good way"
		 * Force both transactions with CLR bit
		 */
		struct bnx2x_func_state_params func_params = {NULL};

		DP(NETIF_MSG_IFDOWN,
		   "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");

		func_params.f_obj = &bp->func_obj;
		__set_bit(RAMROD_DRV_CLR_ONLY,
			  &func_params.ramrod_flags);

		/* STARTED-->TX_STOPPED */
		func_params.cmd = BNX2X_F_CMD_TX_STOP;
		bnx2x_func_state_change(bp, &func_params);

		/* TX_STOPPED-->STARTED */
		func_params.cmd = BNX2X_F_CMD_TX_START;
		return bnx2x_func_state_change(bp, &func_params);
#endif
	}

	return 0;
}

static void bnx2x_disable_ptp(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* Disable sending PTP packets to host */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);

	/* Reset PTP event detection rules */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable the PTP feature */
	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
	       NIG_REG_P0_PTP_EN, 0x0);
}

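/* Called during unload, to stop PTP-related stuff */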
static void bnx2x_stop_ptp(struct bnx2x *bp)
{
	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */
	cancel_work_sync(&bp->ptp_task);

	if (bp->ptp_tx_skb) {
		dev_kfree_skb_any(bp->ptp_tx_skb);
		bp->ptp_tx_skb = NULL;
	}

	/* Disable PTP in HW */
	bnx2x_disable_ptp(bp);

	DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
}

9211void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9212{
9213 int port = BP_PORT(bp);
9214 int i, rc = 0;
9215 u8 cos;
9216 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9217 u32 reset_code;
9218
9219
9220 for_each_tx_queue(bp, i) {
9221 struct bnx2x_fastpath *fp = &bp->fp[i];
9222
9223 for_each_cos_in_tx_queue(fp, cos)
9224 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9225#ifdef BNX2X_STOP_ON_ERROR
9226 if (rc)
9227 return;
9228#endif
9229 }
9230
9231
9232 usleep_range(1000, 2000);
9233
9234
9235 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9236 false);
9237 if (rc < 0)
9238 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9239
9240
9241 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9242 true);
9243 if (rc < 0)
9244 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9245 rc);
9246
9247
9248 if (!CHIP_IS_E1(bp))
9249 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9250
9251
9252
9253
9254
9255 netif_addr_lock_bh(bp->dev);
9256
9257 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9258 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9259 else
9260 bnx2x_set_storm_rx_mode(bp);
9261
9262
9263 rparam.mcast_obj = &bp->mcast_obj;
9264 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9265 if (rc < 0)
9266 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9267
9268 netif_addr_unlock_bh(bp->dev);
9269
9270 bnx2x_iov_chip_cleanup(bp);
9271
9272
9273
9274
9275
9276
9277 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9278
9279
9280
9281
9282
9283 rc = bnx2x_func_wait_started(bp);
9284 if (rc) {
9285 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9286#ifdef BNX2X_STOP_ON_ERROR
9287 return;
9288#endif
9289 }
9290
9291
9292
9293
9294 for_each_eth_queue(bp, i)
9295 if (bnx2x_stop_queue(bp, i))
9296#ifdef BNX2X_STOP_ON_ERROR
9297 return;
9298#else
9299 goto unload_error;
9300#endif
9301
9302 if (CNIC_LOADED(bp)) {
9303 for_each_cnic_queue(bp, i)
9304 if (bnx2x_stop_queue(bp, i))
9305#ifdef BNX2X_STOP_ON_ERROR
9306 return;
9307#else
9308 goto unload_error;
9309#endif
9310 }
9311
9312
9313
9314
9315 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9316 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9317
9318#ifndef BNX2X_STOP_ON_ERROR
9319unload_error:
9320#endif
9321 rc = bnx2x_func_stop(bp);
9322 if (rc) {
9323 BNX2X_ERR("Function stop failed!\n");
9324#ifdef BNX2X_STOP_ON_ERROR
9325 return;
9326#endif
9327 }
9328
9329
9330
9331
9332
9333
9334 if (bp->flags & PTP_SUPPORTED)
9335 bnx2x_stop_ptp(bp);
9336
9337
9338 bnx2x_netif_stop(bp, 1);
9339
9340 bnx2x_del_all_napi(bp);
9341 if (CNIC_LOADED(bp))
9342 bnx2x_del_all_napi_cnic(bp);
9343
9344
9345 bnx2x_free_irq(bp);
9346
9347
9348 rc = bnx2x_reset_hw(bp, reset_code);
9349 if (rc)
9350 BNX2X_ERR("HW_RESET failed\n");
9351
9352
9353 bnx2x_send_unload_done(bp, keep_link);
9354}
9355
9356void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9357{
9358 u32 val;
9359
9360 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9361
9362 if (CHIP_IS_E1(bp)) {
9363 int port = BP_PORT(bp);
9364 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9365 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9366
9367 val = REG_RD(bp, addr);
9368 val &= ~(0x300);
9369 REG_WR(bp, addr, val);
9370 } else {
9371 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9372 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9373 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9374 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9375 }
9376}
9377
9378
9379static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9380{
9381 u32 val;
9382
9383
9384 if (!CHIP_IS_E1(bp)) {
9385
9386 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9387
9388 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9389 }
9390
9391
9392 if (CHIP_IS_E1x(bp)) {
9393
9394 val = REG_RD(bp, HC_REG_CONFIG_1);
9395 REG_WR(bp, HC_REG_CONFIG_1,
9396 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9397 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9398
9399 val = REG_RD(bp, HC_REG_CONFIG_0);
9400 REG_WR(bp, HC_REG_CONFIG_0,
9401 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9402 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9403 } else {
9404
9405 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9406
9407 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9408 (!close) ?
9409 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9410 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9411 }
9412
9413 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9414 close ? "closing" : "opening");
9415 mmiowb();
9416}
9417
#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

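/**
 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the `magic' bit.
 *
 * Restore the `magic' bit value in the shared memory.
 */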
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

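/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of 'magic' bit.
 *
 * Takes care of CLP configurations.
 */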
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset =
		offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

9471#define MCP_TIMEOUT 5000
9472#define MCP_ONE_TIMEOUT 100
9473
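/**
 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
 *
 * @bp:	driver handle
 */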
static void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}

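/* initialize shmem_base and waits for validity signature to appear */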
static int bnx2x_init_shmem(struct bnx2x *bp)
{
	int cnt = 0;
	u32 val = 0;

	do {
		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
		if (bp->common.shmem_base) {
			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
			if (val & SHR_MEM_VALIDITY_MB)
				return 0;
		}

		bnx2x_mcp_wait_one(bp);

	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

	BNX2X_ERR("BAD MCP validity signature\n");

	return -ENODEV;
}
9513
9514static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9515{
9516 int rc = bnx2x_init_shmem(bp);
9517
9518
9519 if (!CHIP_IS_E1(bp))
9520 bnx2x_clp_reset_done(bp, magic_val);
9521
9522 return rc;
9523}
9524
9525static void bnx2x_pxp_prep(struct bnx2x *bp)
9526{
9527 if (!CHIP_IS_E1(bp)) {
9528 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9529 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9530 mmiowb();
9531 }
9532}
9533
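/* Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */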
9544static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9545{
9546 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9547 u32 global_bits2, stay_reset2;
9548
9549
9550
9551
9552
9553 global_bits2 =
9554 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9555 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9556
9557
9558
9559
9560
9561
9562 not_reset_mask1 =
9563 MISC_REGISTERS_RESET_REG_1_RST_HC |
9564 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9565 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9566
9567 not_reset_mask2 =
9568 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9569 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9570 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9571 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9572 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9573 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9574 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9575 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9576 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9577 MISC_REGISTERS_RESET_REG_2_PGLC |
9578 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9579 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9580 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9581 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9582 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9583 MISC_REGISTERS_RESET_REG_2_UMAC1;
9584
9585
9586
9587
9588
9589 stay_reset2 =
9590 MISC_REGISTERS_RESET_REG_2_XMAC |
9591 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9592
9593
9594 reset_mask1 = 0xffffffff;
9595
9596 if (CHIP_IS_E1(bp))
9597 reset_mask2 = 0xffff;
9598 else if (CHIP_IS_E1H(bp))
9599 reset_mask2 = 0x1ffff;
9600 else if (CHIP_IS_E2(bp))
9601 reset_mask2 = 0xfffff;
9602 else
9603 reset_mask2 = 0x3ffffff;
9604
9605
9606 if (!global)
9607 reset_mask2 &= ~global_bits2;
9608
9609
9610
9611
9612
9613
9614
9615
9616
9617
9618
9619
9620
9621
9622
9623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9624 reset_mask2 & (~not_reset_mask2));
9625
9626 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9627 reset_mask1 & (~not_reset_mask1));
9628
9629 barrier();
9630 mmiowb();
9631
9632 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9633 reset_mask2 & (~stay_reset2));
9634
9635 barrier();
9636 mmiowb();
9637
9638 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9639 mmiowb();
9640}
9641
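/**
 * bnx2x_er_poll_igu_vq - poll for pending writes bit.
 *
 * @bp:	driver handle
 *
 * It should get cleared in no more than 1s. Returns 0 if
 * pending writes bit gets cleared.
 */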
9651static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9652{
9653 u32 cnt = 1000;
9654 u32 pend_bits = 0;
9655
9656 do {
9657 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9658
9659 if (pend_bits == 0)
9660 break;
9661
9662 usleep_range(1000, 2000);
9663 } while (cnt-- > 0);
9664
9665 if (cnt <= 0) {
9666 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9667 pend_bits);
9668 return -EBUSY;
9669 }
9670
9671 return 0;
9672}
9673
9674static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9675{
9676 int cnt = 1000;
9677 u32 val = 0;
9678 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9679 u32 tags_63_32 = 0;
9680
9681
9682 do {
9683 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9684 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9685 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9686 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9687 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9688 if (CHIP_IS_E3(bp))
9689 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9690
9691 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9692 ((port_is_idle_0 & 0x1) == 0x1) &&
9693 ((port_is_idle_1 & 0x1) == 0x1) &&
9694 (pgl_exp_rom2 == 0xffffffff) &&
9695 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9696 break;
9697 usleep_range(1000, 2000);
9698 } while (cnt-- > 0);
9699
9700 if (cnt <= 0) {
9701 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9702 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9703 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9704 pgl_exp_rom2);
9705 return -EAGAIN;
9706 }
9707
9708 barrier();
9709
9710
9711 bnx2x_set_234_gates(bp, true);
9712
9713
9714 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9715 return -EAGAIN;
9716
9717
9718
9719
9720 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9721 barrier();
9722
9723
9724 mmiowb();
9725
9726
9727
9728
9729 usleep_range(1000, 2000);
9730
9731
9732
9733 if (global)
9734 bnx2x_reset_mcp_prep(bp, &val);
9735
9736
9737 bnx2x_pxp_prep(bp);
9738 barrier();
9739
9740
9741 bnx2x_process_kill_chip_reset(bp, global);
9742 barrier();
9743
9744
9745 if (!CHIP_IS_E1x(bp))
9746 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9747
9748
9749
9750 if (global && bnx2x_reset_mcp_comp(bp, val))
9751 return -EAGAIN;
9752
9753
9754
9755
9756 bnx2x_set_234_gates(bp, false);
9757
9758
9759
9760
9761 return 0;
9762}
9763
9764static int bnx2x_leader_reset(struct bnx2x *bp)
9765{
9766 int rc = 0;
9767 bool global = bnx2x_reset_is_global(bp);
9768 u32 load_code;
9769
9770
9771
9772
9773 if (!global && !BP_NOMCP(bp)) {
9774 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9775 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9776 if (!load_code) {
9777 BNX2X_ERR("MCP response failure, aborting\n");
9778 rc = -EAGAIN;
9779 goto exit_leader_reset;
9780 }
9781 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9782 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9783 BNX2X_ERR("MCP unexpected resp, aborting\n");
9784 rc = -EAGAIN;
9785 goto exit_leader_reset2;
9786 }
9787 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9788 if (!load_code) {
9789 BNX2X_ERR("MCP response failure, aborting\n");
9790 rc = -EAGAIN;
9791 goto exit_leader_reset2;
9792 }
9793 }
9794
9795
	if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad happened on engine %d! Aii!\n",
			  BP_PATH(bp));
9799 rc = -EAGAIN;
9800 goto exit_leader_reset2;
9801 }
9802
9803
9804
9805
9806
9807 bnx2x_set_reset_done(bp);
9808 if (global)
9809 bnx2x_clear_reset_global(bp);
9810
9811exit_leader_reset2:
9812
9813 if (!global && !BP_NOMCP(bp)) {
9814 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9815 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9816 }
9817exit_leader_reset:
9818 bp->is_leader = 0;
9819 bnx2x_release_leader_lock(bp);
9820 smp_mb();
9821 return rc;
9822}
9823
9824static void bnx2x_recovery_failed(struct bnx2x *bp)
9825{
9826 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9827
9828
9829 netif_device_detach(bp->dev);
9830
9831
9832
9833
9834
9835 bnx2x_set_reset_in_progress(bp);
9836
9837
9838 bnx2x_set_power_state(bp, PCI_D3hot);
9839
9840 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9841
9842 smp_mb();
9843}
9844
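/*
 * Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_sp_rtnl() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 */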
9850static void bnx2x_parity_recover(struct bnx2x *bp)
9851{
9852 bool global = false;
9853 u32 error_recovered, error_unrecovered;
9854 bool is_parity;
9855
9856 DP(NETIF_MSG_HW, "Handling parity\n");
9857 while (1) {
9858 switch (bp->recovery_state) {
9859 case BNX2X_RECOVERY_INIT:
9860 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9861 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9862 WARN_ON(!is_parity);
9863
9864
9865 if (bnx2x_trylock_leader_lock(bp)) {
9866 bnx2x_set_reset_in_progress(bp);
9867
9868
9869
9870
9871
9872
9873 if (global)
9874 bnx2x_set_reset_global(bp);
9875
9876 bp->is_leader = 1;
9877 }
9878
9879
9880
9881 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9882 return;
9883
9884 bp->recovery_state = BNX2X_RECOVERY_WAIT;
9885
9886
9887
9888
9889
9890 smp_mb();
9891 break;
9892
9893 case BNX2X_RECOVERY_WAIT:
9894 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9895 if (bp->is_leader) {
9896 int other_engine = BP_PATH(bp) ? 0 : 1;
9897 bool other_load_status =
9898 bnx2x_get_load_status(bp, other_engine);
9899 bool load_status =
9900 bnx2x_get_load_status(bp, BP_PATH(bp));
9901 global = bnx2x_reset_is_global(bp);
9902
9903
9904
9905
9906
9907
9908
9909
9910
9911 if (load_status ||
9912 (global && other_load_status)) {
9913
9914
9915
9916 schedule_delayed_work(&bp->sp_rtnl_task,
9917 HZ/10);
9918 return;
9919 } else {
9920
9921
9922
9923
9924
9925 if (bnx2x_leader_reset(bp)) {
9926 bnx2x_recovery_failed(bp);
9927 return;
9928 }
9929
9930
9931
9932
9933
9934
9935 break;
9936 }
9937 } else {
9938 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
9939
9940
9941
9942
9943
9944
9945 if (bnx2x_trylock_leader_lock(bp)) {
9946
9947
9948
9949 bp->is_leader = 1;
9950 break;
9951 }
9952
9953 schedule_delayed_work(&bp->sp_rtnl_task,
9954 HZ/10);
9955 return;
9956
9957 } else {
9958
9959
9960
9961
9962 if (bnx2x_reset_is_global(bp)) {
9963 schedule_delayed_work(
9964 &bp->sp_rtnl_task,
9965 HZ/10);
9966 return;
9967 }
9968
9969 error_recovered =
9970 bp->eth_stats.recoverable_error;
9971 error_unrecovered =
9972 bp->eth_stats.unrecoverable_error;
9973 bp->recovery_state =
9974 BNX2X_RECOVERY_NIC_LOADING;
9975 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
9976 error_unrecovered++;
9977 netdev_err(bp->dev,
9978 "Recovery failed. Power cycle needed\n");
9979
9980 netif_device_detach(bp->dev);
9981
9982 bnx2x_set_power_state(
9983 bp, PCI_D3hot);
9984 smp_mb();
9985 } else {
9986 bp->recovery_state =
9987 BNX2X_RECOVERY_DONE;
9988 error_recovered++;
9989 smp_mb();
9990 }
9991 bp->eth_stats.recoverable_error =
9992 error_recovered;
9993 bp->eth_stats.unrecoverable_error =
9994 error_unrecovered;
9995
9996 return;
9997 }
9998 }
9999 default:
10000 return;
10001 }
10002 }
10003}
10004
10005static int bnx2x_close(struct net_device *dev);
10006
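/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */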
10010static void bnx2x_sp_rtnl_task(struct work_struct *work)
10011{
10012 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10013
10014 rtnl_lock();
10015
10016 if (!netif_running(bp->dev)) {
10017 rtnl_unlock();
10018 return;
10019 }
10020
10021 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10022#ifdef BNX2X_STOP_ON_ERROR
10023 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10024 "you will need to reboot when done\n");
10025 goto sp_rtnl_not_reset;
10026#endif
10027
10028
10029
10030
10031 bp->sp_rtnl_state = 0;
10032 smp_mb();
10033
10034 bnx2x_parity_recover(bp);
10035
10036 rtnl_unlock();
10037 return;
10038 }
10039
10040 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10041#ifdef BNX2X_STOP_ON_ERROR
10042 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10043 "you will need to reboot when done\n");
10044 goto sp_rtnl_not_reset;
10045#endif
10046
10047
10048
10049
10050
10051 bp->sp_rtnl_state = 0;
10052 smp_mb();
10053
10054 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10055 bnx2x_nic_load(bp, LOAD_NORMAL);
10056
10057 rtnl_unlock();
10058 return;
10059 }
10060#ifdef BNX2X_STOP_ON_ERROR
10061sp_rtnl_not_reset:
10062#endif
10063 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10064 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10065 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10066 bnx2x_after_function_update(bp);
10067
10068
10069
10070
10071
10072 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10073 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10074 netif_device_detach(bp->dev);
10075 bnx2x_close(bp->dev);
10076 rtnl_unlock();
10077 return;
10078 }
10079
10080 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10081 DP(BNX2X_MSG_SP,
10082 "sending set mcast vf pf channel message from rtnl sp-task\n");
10083 bnx2x_vfpf_set_mcast(bp->dev);
10084 }
	if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			       &bp->sp_rtnl_state)) {
		if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
			bnx2x_tx_disable(bp);
			BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
		}
	}
10092
10093 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10094 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10095 bnx2x_set_rx_mode_inner(bp);
10096 }
10097
10098 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10099 &bp->sp_rtnl_state))
10100 bnx2x_pf_set_vfs_vlan(bp);
10101
10102 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10103 bnx2x_dcbx_stop_hw_tx(bp);
10104 bnx2x_dcbx_resume_hw_tx(bp);
10105 }
10106
10107 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10108 &bp->sp_rtnl_state))
10109 bnx2x_update_mng_version(bp);
10110
10111
10112
10113
10114 rtnl_unlock();
10115
10116
10117 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10118 &bp->sp_rtnl_state)) {
10119 bnx2x_disable_sriov(bp);
10120 bnx2x_enable_sriov(bp);
10121 }
10122}
10123
10124static void bnx2x_period_task(struct work_struct *work)
10125{
10126 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10127
10128 if (!netif_running(bp->dev))
10129 goto period_task_exit;
10130
10131 if (CHIP_REV_IS_SLOW(bp)) {
10132 BNX2X_ERR("period task called on emulation, ignoring\n");
10133 goto period_task_exit;
10134 }
10135
10136 bnx2x_acquire_phy_lock(bp);
10137
10138
10139
10140
10141
10142 smp_mb();
10143 if (bp->port.pmf) {
10144 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10145
10146
10147 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10148 }
10149
10150 bnx2x_release_phy_lock(bp);
10151period_task_exit:
10152 return;
10153}
10154
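/*
 * Init service functions
 */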
static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
	return base + (BP_ABS_FUNC(bp)) * stride;
}
10165
10166static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10167 u8 port, u32 reset_reg,
10168 struct bnx2x_mac_vals *vals)
10169{
10170 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10171 u32 base_addr;
10172
10173 if (!(mask & reset_reg))
10174 return false;
10175
10176 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10177 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10178 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10179 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10180 REG_WR(bp, vals->umac_addr[port], 0);
10181
10182 return true;
10183}
10184
10185static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10186 struct bnx2x_mac_vals *vals)
10187{
10188 u32 val, base_addr, offset, mask, reset_reg;
10189 bool mac_stopped = false;
10190 u8 port = BP_PORT(bp);
10191
10192
10193 memset(vals, 0, sizeof(*vals));
10194
10195 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10196
10197 if (!CHIP_IS_E3(bp)) {
10198 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10199 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10200 if ((mask & reset_reg) && val) {
10201 u32 wb_data[2];
10202 BNX2X_DEV_INFO("Disable bmac Rx\n");
10203 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10204 : NIG_REG_INGRESS_BMAC0_MEM;
10205 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10206 : BIGMAC_REGISTER_BMAC_CONTROL;
10207
10208
10209
10210
10211
10212
10213
10214 wb_data[0] = REG_RD(bp, base_addr + offset);
10215 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10216 vals->bmac_addr = base_addr + offset;
10217 vals->bmac_val[0] = wb_data[0];
10218 vals->bmac_val[1] = wb_data[1];
10219 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10220 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10221 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10222 }
10223 BNX2X_DEV_INFO("Disable emac Rx\n");
10224 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10225 vals->emac_val = REG_RD(bp, vals->emac_addr);
10226 REG_WR(bp, vals->emac_addr, 0);
10227 mac_stopped = true;
10228 } else {
10229 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10230 BNX2X_DEV_INFO("Disable xmac Rx\n");
10231 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10232 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10233 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10234 val & ~(1 << 1));
10235 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10236 val | (1 << 1));
10237 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10238 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10239 REG_WR(bp, vals->xmac_addr, 0);
10240 mac_stopped = true;
10241 }
10242
10243 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10244 reset_reg, vals);
10245 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10246 reset_reg, vals);
10247 }
10248
10249 if (mac_stopped)
10250 msleep(20);
10251}
10252
10253#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10254#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10255 0x1848 + ((f) << 4))
10256#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10257#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10258#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
10259
10260#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10261#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10262#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10263
static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
{
	/* UNDI marks its presence in DORQ -
	 * it initializes CID offset for normal bell to 0x7
	 */
	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
	    MISC_REGISTERS_RESET_REG_1_RST_DORQ))
		return false;

	if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
		BNX2X_DEV_INFO("UNDI previously loaded\n");
		return true;
	}

	return false;
}
10280
10281static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10282{
10283 u16 rcq, bd;
10284 u32 addr, tmp_reg;
10285
10286 if (BP_FUNC(bp) < 2)
10287 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10288 else
10289 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10290
10291 tmp_reg = REG_RD(bp, addr);
10292 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10293 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10294
10295 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10296 REG_WR(bp, addr, tmp_reg);
10297
10298 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10299 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10300}
10301
10302static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10303{
10304 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10305 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10306 if (!rc) {
10307 BNX2X_ERR("MCP response failure, aborting\n");
10308 return -EBUSY;
10309 }
10310
10311 return 0;
10312}
10313
10314static struct bnx2x_prev_path_list *
10315 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10316{
10317 struct bnx2x_prev_path_list *tmp_list;
10318
10319 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10320 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10321 bp->pdev->bus->number == tmp_list->bus &&
10322 BP_PATH(bp) == tmp_list->path)
10323 return tmp_list;
10324
10325 return NULL;
10326}
10327
10328static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10329{
10330 struct bnx2x_prev_path_list *tmp_list;
10331 int rc;
10332
10333 rc = down_interruptible(&bnx2x_prev_sem);
10334 if (rc) {
10335 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10336 return rc;
10337 }
10338
10339 tmp_list = bnx2x_prev_path_get_entry(bp);
10340 if (tmp_list) {
10341 tmp_list->aer = 1;
10342 rc = 0;
10343 } else {
		BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over?\n",
			  BP_PATH(bp));
10346 }
10347
10348 up(&bnx2x_prev_sem);
10349
10350 return rc;
10351}
10352
10353static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10354{
10355 struct bnx2x_prev_path_list *tmp_list;
10356 bool rc = false;
10357
10358 if (down_trylock(&bnx2x_prev_sem))
10359 return false;
10360
10361 tmp_list = bnx2x_prev_path_get_entry(bp);
10362 if (tmp_list) {
10363 if (tmp_list->aer) {
10364 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10365 BP_PATH(bp));
10366 } else {
10367 rc = true;
10368 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10369 BP_PATH(bp));
10370 }
10371 }
10372
10373 up(&bnx2x_prev_sem);
10374
10375 return rc;
10376}
10377
10378bool bnx2x_port_after_undi(struct bnx2x *bp)
10379{
10380 struct bnx2x_prev_path_list *entry;
10381 bool val;
10382
10383 down(&bnx2x_prev_sem);
10384
10385 entry = bnx2x_prev_path_get_entry(bp);
10386 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10387
10388 up(&bnx2x_prev_sem);
10389
10390 return val;
10391}
10392
10393static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10394{
10395 struct bnx2x_prev_path_list *tmp_list;
10396 int rc;
10397
10398 rc = down_interruptible(&bnx2x_prev_sem);
10399 if (rc) {
10400 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10401 return rc;
10402 }
10403
10404
10405 tmp_list = bnx2x_prev_path_get_entry(bp);
10406 if (tmp_list) {
10407 if (!tmp_list->aer) {
10408 BNX2X_ERR("Re-Marking the path.\n");
10409 } else {
10410 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10411 BP_PATH(bp));
10412 tmp_list->aer = 0;
10413 }
10414 up(&bnx2x_prev_sem);
10415 return 0;
10416 }
10417 up(&bnx2x_prev_sem);
10418
10419
10420 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10421 if (!tmp_list) {
10422 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10423 return -ENOMEM;
10424 }
10425
10426 tmp_list->bus = bp->pdev->bus->number;
10427 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10428 tmp_list->path = BP_PATH(bp);
10429 tmp_list->aer = 0;
10430 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10431
10432 rc = down_interruptible(&bnx2x_prev_sem);
10433 if (rc) {
10434 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10435 kfree(tmp_list);
10436 } else {
10437 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10438 BP_PATH(bp));
10439 list_add(&tmp_list->list, &bnx2x_prev_list);
10440 up(&bnx2x_prev_sem);
10441 }
10442
10443 return rc;
10444}
10445
10446static int bnx2x_do_flr(struct bnx2x *bp)
10447{
10448 struct pci_dev *dev = bp->pdev;
10449
10450 if (CHIP_IS_E1x(bp)) {
10451 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10452 return -EINVAL;
10453 }
10454
10455
10456 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10457 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10458 bp->common.bc_ver);
10459 return -EINVAL;
10460 }
10461
10462 if (!pci_wait_for_pending_transaction(dev))
10463 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10464
10465 BNX2X_DEV_INFO("Initiating FLR\n");
10466 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10467
10468 return 0;
10469}
10470
10471static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10472{
10473 int rc;
10474
10475 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10476
10477
10478 if (bnx2x_prev_is_path_marked(bp))
10479 return bnx2x_prev_mcp_done(bp);
10480
10481 BNX2X_DEV_INFO("Path is unmarked\n");
10482
10483
10484 if (bnx2x_prev_is_after_undi(bp))
10485 goto out;
10486
10487
10488
10489
10490
10491 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10492
10493 if (!rc) {
10494
10495 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10496 rc = bnx2x_do_flr(bp);
10497 }
10498
10499 if (!rc) {
10500
10501 BNX2X_DEV_INFO("FLR successful\n");
10502 return 0;
10503 }
10504
10505 BNX2X_DEV_INFO("Could not FLR\n");
10506
10507out:
10508
10509 rc = bnx2x_prev_mcp_done(bp);
10510 if (!rc)
10511 rc = BNX2X_PREV_WAIT_NEEDED;
10512
10513 return rc;
10514}
10515
10516static int bnx2x_prev_unload_common(struct bnx2x *bp)
10517{
10518 u32 reset_reg, tmp_reg = 0, rc;
10519 bool prev_undi = false;
10520 struct bnx2x_mac_vals mac_vals;
10521
10522
10523
10524
10525
10526 BNX2X_DEV_INFO("Common unload Flow\n");
10527
10528 memset(&mac_vals, 0, sizeof(mac_vals));
10529
10530 if (bnx2x_prev_is_path_marked(bp))
10531 return bnx2x_prev_mcp_done(bp);
10532
10533 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10534
10535
10536 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10537 u32 timer_count = 1000;
10538
10539
10540 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10541
10542
10543 bnx2x_set_rx_filter(&bp->link_params, 0);
10544 bp->link_params.port ^= 1;
10545 bnx2x_set_rx_filter(&bp->link_params, 0);
10546 bp->link_params.port ^= 1;
10547
10548
10549 if (bnx2x_prev_is_after_undi(bp)) {
10550 prev_undi = true;
10551
10552 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10553
10554 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10555 }
10556 if (!CHIP_IS_E1x(bp))
10557
10558 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10559
10560
10561 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10562 while (timer_count) {
10563 u32 prev_brb = tmp_reg;
10564
10565 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10566 if (!tmp_reg)
10567 break;
10568
10569 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10570
10571
10572 if (prev_brb > tmp_reg)
10573 timer_count = 1000;
10574 else
10575 timer_count--;
10576
10577
10578 if (prev_undi)
10579 bnx2x_prev_unload_undi_inc(bp, 1);
10580
10581 udelay(10);
10582 }
10583
10584 if (!timer_count)
10585 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10586 }
10587
10588
10589 bnx2x_reset_common(bp);
10590
10591 if (mac_vals.xmac_addr)
10592 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10593 if (mac_vals.umac_addr[0])
10594 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10595 if (mac_vals.umac_addr[1])
10596 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10597 if (mac_vals.emac_addr)
10598 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10599 if (mac_vals.bmac_addr) {
10600 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10601 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10602 }
10603
10604 rc = bnx2x_prev_mark_path(bp, prev_undi);
10605 if (rc) {
10606 bnx2x_prev_mcp_done(bp);
10607 return rc;
10608 }
10609
10610 return bnx2x_prev_mcp_done(bp);
10611}
10612
10613static int bnx2x_prev_unload(struct bnx2x *bp)
10614{
10615 int time_counter = 10;
10616 u32 rc, fw, hw_lock_reg, hw_lock_val;
10617 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10618
10619
10620
10621
10622 bnx2x_clean_pglue_errors(bp);
10623
10624
10625 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10626 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10627 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10628
10629 hw_lock_val = REG_RD(bp, hw_lock_reg);
10630 if (hw_lock_val) {
10631 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10632 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10633 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10634 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10635 }
10636
10637 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10638 REG_WR(bp, hw_lock_reg, 0xffffffff);
10639 } else
10640 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10641
10642 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10643 BNX2X_DEV_INFO("Release previously held alr\n");
10644 bnx2x_release_alr(bp);
10645 }
10646
10647 do {
10648 int aer = 0;
10649
10650 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10651 if (!fw) {
10652 BNX2X_ERR("MCP response failure, aborting\n");
10653 rc = -EBUSY;
10654 break;
10655 }
10656
10657 rc = down_interruptible(&bnx2x_prev_sem);
10658 if (rc) {
10659 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10660 rc);
10661 } else {
10662
10663 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10664 bnx2x_prev_path_get_entry(bp)->aer);
10665 up(&bnx2x_prev_sem);
10666 }
10667
10668 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10669 rc = bnx2x_prev_unload_common(bp);
10670 break;
10671 }
10672
10673
10674 rc = bnx2x_prev_unload_uncommon(bp);
10675 if (rc != BNX2X_PREV_WAIT_NEEDED)
10676 break;
10677
10678 msleep(20);
10679 } while (--time_counter);
10680
	if (!time_counter || rc) {
		BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
		rc = -EPROBE_DEFER;
	}
10685
10686
10687 if (bnx2x_port_after_undi(bp))
10688 bp->link_params.feature_config_flags |=
10689 FEATURE_CONFIG_BOOT_FROM_SAN;
10690
10691 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10692
10693 return rc;
10694}
10695
10696static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10697{
10698 u32 val, val2, val3, val4, id, boot_mode;
10699 u16 pmc;
10700
10701
10702
10703 val = REG_RD(bp, MISC_REG_CHIP_NUM);
10704 id = ((val & 0xffff) << 16);
10705 val = REG_RD(bp, MISC_REG_CHIP_REV);
10706 id |= ((val & 0xf) << 12);
10707
10708
10709
10710
10711 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10712 id |= (((val >> 24) & 0xf) << 4);
10713 val = REG_RD(bp, MISC_REG_BOND_ID);
10714 id |= (val & 0xf);
10715 bp->common.chip_id = id;
10716
10717
10718 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10719 if (CHIP_IS_57810(bp))
10720 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10721 (bp->common.chip_id & 0x0000FFFF);
10722 else if (CHIP_IS_57810_MF(bp))
10723 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10724 (bp->common.chip_id & 0x0000FFFF);
10725 bp->common.chip_id |= 0x1;
10726 }
10727
10728
10729 bp->db_size = (1 << BNX2X_DB_SHIFT);
10730
10731 if (!CHIP_IS_E1x(bp)) {
10732 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10733 if ((val & 1) == 0)
10734 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10735 else
10736 val = (val >> 1) & 1;
10737 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10738 "2_PORT_MODE");
10739 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10740 CHIP_2_PORT_MODE;
10741
10742 if (CHIP_MODE_IS_4_PORT(bp))
10743 bp->pfid = (bp->pf_num >> 1);
10744 else
10745 bp->pfid = (bp->pf_num & 0x6);
10746 } else {
10747 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
10748 bp->pfid = bp->pf_num;
10749 }
10750
	BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
10752
10753 bp->link_params.chip_id = bp->common.chip_id;
10754 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10755
10756 val = (REG_RD(bp, 0x2874) & 0x55);
10757 if ((bp->common.chip_id & 0x1) ||
10758 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10759 bp->flags |= ONE_PORT_FLAG;
10760 BNX2X_DEV_INFO("single port device\n");
10761 }
10762
10763 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
10764 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
10765 (val & MCPR_NVM_CFG4_FLASH_SIZE));
10766 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10767 bp->common.flash_size, bp->common.flash_size);
10768
10769 bnx2x_init_shmem(bp);
10770
10771 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10772 MISC_REG_GENERIC_CR_1 :
10773 MISC_REG_GENERIC_CR_0));
10774
10775 bp->link_params.shmem_base = bp->common.shmem_base;
10776 bp->link_params.shmem2_base = bp->common.shmem2_base;
10777 if (SHMEM2_RD(bp, size) >
10778 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
10779 bp->link_params.lfa_base =
10780 REG_RD(bp, bp->common.shmem2_base +
10781 (u32)offsetof(struct shmem2_region,
10782 lfa_host_addr[BP_PORT(bp)]));
10783 else
10784 bp->link_params.lfa_base = 0;
10785 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
10786 bp->common.shmem_base, bp->common.shmem2_base);
10787
10788 if (!bp->common.shmem_base) {
10789 BNX2X_DEV_INFO("MCP not active\n");
10790 bp->flags |= NO_MCP_FLAG;
10791 return;
10792 }
10793
10794 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
10795 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
10796
10797 bp->link_params.hw_led_mode = ((bp->common.hw_config &
10798 SHARED_HW_CFG_LED_MODE_MASK) >>
10799 SHARED_HW_CFG_LED_MODE_SHIFT);
10800
10801 bp->link_params.feature_config_flags = 0;
10802 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
10803 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
10804 bp->link_params.feature_config_flags |=
10805 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10806 else
10807 bp->link_params.feature_config_flags &=
10808 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10809
10810 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
10811 bp->common.bc_ver = val;
10812 BNX2X_DEV_INFO("bc_ver %X\n", val);
10813 if (val < BNX2X_BC_VER) {
10814
10815
10816 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
10817 BNX2X_BC_VER, val);
10818 }
10819 bp->link_params.feature_config_flags |=
10820 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
10821 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
10822
10823 bp->link_params.feature_config_flags |=
10824 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
10825 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
10826 bp->link_params.feature_config_flags |=
10827 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
10828 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
10829 bp->link_params.feature_config_flags |=
10830 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
10831 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
10832
10833 bp->link_params.feature_config_flags |=
10834 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
10835 FEATURE_CONFIG_MT_SUPPORT : 0;
10836
10837 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
10838 BC_SUPPORTS_PFC_STATS : 0;
10839
10840 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
10841 BC_SUPPORTS_FCOE_FEATURES : 0;
10842
10843 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10844 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
10845
10846 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
10847 BC_SUPPORTS_RMMOD_CMD : 0;
10848
10849 boot_mode = SHMEM_RD(bp,
10850 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10851 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
10852 switch (boot_mode) {
10853 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
10854 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
10855 break;
10856 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
10857 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
10858 break;
10859 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
10860 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
10861 break;
10862 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
10863 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
10864 break;
10865 }
10866
10867 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
10868 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10869
10870 BNX2X_DEV_INFO("%sWoL capable\n",
10871 (bp->flags & NO_WOL_FLAG) ? "not " : "");
10872
10873 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
10874 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
10875 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
10876 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
10877
10878 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
10879 val, val2, val3, val4);
10880}
10881
10882#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
10883#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
10884
10885static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
10886{
10887 int pfid = BP_FUNC(bp);
10888 int igu_sb_id;
10889 u32 val;
10890 u8 fid, igu_sb_cnt = 0;
10891
10892 bp->igu_base_sb = 0xff;
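	/* In IGU backward-compatible mode the layout is fixed: each
	 * function/vn owns FP_SB_MAX_E1x consecutive status blocks, so no
	 * CAM scan is needed.
	 */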
10893 if (CHIP_INT_MODE_IS_BC(bp)) {
10894 int vn = BP_VN(bp);
10895 igu_sb_cnt = bp->igu_sb_cnt;
10896 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
10897 FP_SB_MAX_E1x;
10898
10899 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
10900 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
10901
10902 return 0;
10903 }
10904
	/* IGU in normal mode - read CAM */
10906 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
10907 igu_sb_id++) {
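		/* each valid CAM entry maps one IGU status block to a
		 * function id and vector number
		 */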
10908 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
10909 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
10910 continue;
10911 fid = IGU_FID(val);
10912 if ((fid & IGU_FID_ENCODE_IS_PF)) {
10913 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10914 continue;
10915 if (IGU_VEC(val) == 0)
				/* default status block */
10917 bp->igu_dsb_id = igu_sb_id;
10918 else {
10919 if (bp->igu_base_sb == 0xff)
10920 bp->igu_base_sb = igu_sb_id;
10921 igu_sb_cnt++;
10922 }
10923 }
10924 }
10925
10926#ifdef CONFIG_PCI_MSI
	/* The number of CAM entries may legitimately differ from the value
	 * advertised in PCI config space; use the minimum of both as the
	 * actual status block count.
	 */
10933 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
10934#endif
10935
10936 if (igu_sb_cnt == 0) {
10937 BNX2X_ERR("CAM configuration error\n");
10938 return -EINVAL;
10939 }
10940
10941 return 0;
10942}
10943
10944static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
10945{
10946 int cfg_size = 0, idx, port = BP_PORT(bp);
10947
	/* Aggregate supported attributes of all external phys */
10949 bp->port.supported[0] = 0;
10950 bp->port.supported[1] = 0;
10951 switch (bp->link_params.num_phys) {
10952 case 1:
10953 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10954 cfg_size = 1;
10955 break;
10956 case 2:
10957 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10958 cfg_size = 1;
10959 break;
10960 case 3:
10961 if (bp->link_params.multi_phy_config &
10962 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10963 bp->port.supported[1] =
10964 bp->link_params.phy[EXT_PHY1].supported;
10965 bp->port.supported[0] =
10966 bp->link_params.phy[EXT_PHY2].supported;
10967 } else {
10968 bp->port.supported[0] =
10969 bp->link_params.phy[EXT_PHY1].supported;
10970 bp->port.supported[1] =
10971 bp->link_params.phy[EXT_PHY2].supported;
10972 }
10973 cfg_size = 2;
10974 break;
10975 }
10976
10977 if (!(bp->port.supported[0] || bp->port.supported[1])) {
10978 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
10979 SHMEM_RD(bp,
10980 dev_info.port_hw_config[port].external_phy_config),
10981 SHMEM_RD(bp,
10982 dev_info.port_hw_config[port].external_phy_config2));
10983 return;
10984 }
10985
10986 if (CHIP_IS_E3(bp))
10987 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10988 else {
10989 switch (switch_cfg) {
10990 case SWITCH_CFG_1G:
10991 bp->port.phy_addr = REG_RD(
10992 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10993 break;
10994 case SWITCH_CFG_10G:
10995 bp->port.phy_addr = REG_RD(
10996 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10997 break;
10998 default:
10999 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11000 bp->port.link_config[0]);
11001 return;
11002 }
11003 }
11004 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11005
11006 for (idx = 0; idx < cfg_size; idx++) {
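		/* mask out link modes that the NVRAM speed capability mask
		 * does not allow for this configuration
		 */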
11007 if (!(bp->link_params.speed_cap_mask[idx] &
11008 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11009 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11010
11011 if (!(bp->link_params.speed_cap_mask[idx] &
11012 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11013 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11014
11015 if (!(bp->link_params.speed_cap_mask[idx] &
11016 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11017 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11018
11019 if (!(bp->link_params.speed_cap_mask[idx] &
11020 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11021 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11022
11023 if (!(bp->link_params.speed_cap_mask[idx] &
11024 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11025 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11026 SUPPORTED_1000baseT_Full);
11027
11028 if (!(bp->link_params.speed_cap_mask[idx] &
11029 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11030 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11031
11032 if (!(bp->link_params.speed_cap_mask[idx] &
11033 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11034 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11035
11036 if (!(bp->link_params.speed_cap_mask[idx] &
11037 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11038 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11039 }
11040
11041 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11042 bp->port.supported[1]);
11043}
11044
11045static void bnx2x_link_settings_requested(struct bnx2x *bp)
11046{
11047 u32 link_config, idx, cfg_size = 0;
11048 bp->port.advertising[0] = 0;
11049 bp->port.advertising[1] = 0;
11050 switch (bp->link_params.num_phys) {
11051 case 1:
11052 case 2:
11053 cfg_size = 1;
11054 break;
11055 case 3:
11056 cfg_size = 2;
11057 break;
11058 }
11059 for (idx = 0; idx < cfg_size; idx++) {
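		/* full duplex by default; half-duplex speeds override below */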
11060 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11061 link_config = bp->port.link_config[idx];
11062 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11063 case PORT_FEATURE_LINK_SPEED_AUTO:
11064 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11065 bp->link_params.req_line_speed[idx] =
11066 SPEED_AUTO_NEG;
11067 bp->port.advertising[idx] |=
11068 bp->port.supported[idx];
11069 if (bp->link_params.phy[EXT_PHY1].type ==
11070 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11071 bp->port.advertising[idx] |=
11072 (SUPPORTED_100baseT_Half |
11073 SUPPORTED_100baseT_Full);
11074 } else {
				/* force 10G, no AN */
11076 bp->link_params.req_line_speed[idx] =
11077 SPEED_10000;
11078 bp->port.advertising[idx] |=
11079 (ADVERTISED_10000baseT_Full |
11080 ADVERTISED_FIBRE);
11081 continue;
11082 }
11083 break;
11084
11085 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11086 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11087 bp->link_params.req_line_speed[idx] =
11088 SPEED_10;
11089 bp->port.advertising[idx] |=
11090 (ADVERTISED_10baseT_Full |
11091 ADVERTISED_TP);
11092 } else {
11093 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11094 link_config,
11095 bp->link_params.speed_cap_mask[idx]);
11096 return;
11097 }
11098 break;
11099
11100 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11101 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11102 bp->link_params.req_line_speed[idx] =
11103 SPEED_10;
11104 bp->link_params.req_duplex[idx] =
11105 DUPLEX_HALF;
11106 bp->port.advertising[idx] |=
11107 (ADVERTISED_10baseT_Half |
11108 ADVERTISED_TP);
11109 } else {
11110 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11111 link_config,
11112 bp->link_params.speed_cap_mask[idx]);
11113 return;
11114 }
11115 break;
11116
11117 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11118 if (bp->port.supported[idx] &
11119 SUPPORTED_100baseT_Full) {
11120 bp->link_params.req_line_speed[idx] =
11121 SPEED_100;
11122 bp->port.advertising[idx] |=
11123 (ADVERTISED_100baseT_Full |
11124 ADVERTISED_TP);
11125 } else {
11126 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11127 link_config,
11128 bp->link_params.speed_cap_mask[idx]);
11129 return;
11130 }
11131 break;
11132
11133 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11134 if (bp->port.supported[idx] &
11135 SUPPORTED_100baseT_Half) {
11136 bp->link_params.req_line_speed[idx] =
11137 SPEED_100;
11138 bp->link_params.req_duplex[idx] =
11139 DUPLEX_HALF;
11140 bp->port.advertising[idx] |=
11141 (ADVERTISED_100baseT_Half |
11142 ADVERTISED_TP);
11143 } else {
11144 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11145 link_config,
11146 bp->link_params.speed_cap_mask[idx]);
11147 return;
11148 }
11149 break;
11150
11151 case PORT_FEATURE_LINK_SPEED_1G:
11152 if (bp->port.supported[idx] &
11153 SUPPORTED_1000baseT_Full) {
11154 bp->link_params.req_line_speed[idx] =
11155 SPEED_1000;
11156 bp->port.advertising[idx] |=
11157 (ADVERTISED_1000baseT_Full |
11158 ADVERTISED_TP);
11159 } else if (bp->port.supported[idx] &
11160 SUPPORTED_1000baseKX_Full) {
11161 bp->link_params.req_line_speed[idx] =
11162 SPEED_1000;
11163 bp->port.advertising[idx] |=
11164 ADVERTISED_1000baseKX_Full;
11165 } else {
11166 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11167 link_config,
11168 bp->link_params.speed_cap_mask[idx]);
11169 return;
11170 }
11171 break;
11172
11173 case PORT_FEATURE_LINK_SPEED_2_5G:
11174 if (bp->port.supported[idx] &
11175 SUPPORTED_2500baseX_Full) {
11176 bp->link_params.req_line_speed[idx] =
11177 SPEED_2500;
11178 bp->port.advertising[idx] |=
11179 (ADVERTISED_2500baseX_Full |
11180 ADVERTISED_TP);
11181 } else {
11182 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11183 link_config,
11184 bp->link_params.speed_cap_mask[idx]);
11185 return;
11186 }
11187 break;
11188
11189 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11190 if (bp->port.supported[idx] &
11191 SUPPORTED_10000baseT_Full) {
11192 bp->link_params.req_line_speed[idx] =
11193 SPEED_10000;
11194 bp->port.advertising[idx] |=
11195 (ADVERTISED_10000baseT_Full |
11196 ADVERTISED_FIBRE);
11197 } else if (bp->port.supported[idx] &
11198 SUPPORTED_10000baseKR_Full) {
11199 bp->link_params.req_line_speed[idx] =
11200 SPEED_10000;
11201 bp->port.advertising[idx] |=
11202 (ADVERTISED_10000baseKR_Full |
11203 ADVERTISED_FIBRE);
11204 } else {
11205 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11206 link_config,
11207 bp->link_params.speed_cap_mask[idx]);
11208 return;
11209 }
11210 break;
11211 case PORT_FEATURE_LINK_SPEED_20G:
11212 bp->link_params.req_line_speed[idx] = SPEED_20000;
11213
11214 break;
11215 default:
11216 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11217 link_config);
11218 bp->link_params.req_line_speed[idx] =
11219 SPEED_AUTO_NEG;
11220 bp->port.advertising[idx] =
11221 bp->port.supported[idx];
11222 break;
11223 }
11224
11225 bp->link_params.req_flow_ctrl[idx] = (link_config &
11226 PORT_FEATURE_FLOW_CONTROL_MASK);
11227 if (bp->link_params.req_flow_ctrl[idx] ==
11228 BNX2X_FLOW_CTRL_AUTO) {
11229 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11230 bp->link_params.req_flow_ctrl[idx] =
11231 BNX2X_FLOW_CTRL_NONE;
11232 else
11233 bnx2x_set_requested_fc(bp);
11234 }
11235
11236 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11237 bp->link_params.req_line_speed[idx],
11238 bp->link_params.req_duplex[idx],
11239 bp->link_params.req_flow_ctrl[idx],
11240 bp->port.advertising[idx]);
11241 }
11242}
11243
11244static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11245{
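	/* NVRAM stores the MAC as a big-endian {hi16, lo32} pair */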
11246 __be16 mac_hi_be = cpu_to_be16(mac_hi);
11247 __be32 mac_lo_be = cpu_to_be32(mac_lo);
11248 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11249 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11250}
11251
11252static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11253{
11254 int port = BP_PORT(bp);
11255 u32 config;
11256 u32 ext_phy_type, ext_phy_config, eee_mode;
11257
11258 bp->link_params.bp = bp;
11259 bp->link_params.port = port;
11260
11261 bp->link_params.lane_config =
11262 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11263
11264 bp->link_params.speed_cap_mask[0] =
11265 SHMEM_RD(bp,
11266 dev_info.port_hw_config[port].speed_capability_mask) &
11267 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11268 bp->link_params.speed_cap_mask[1] =
11269 SHMEM_RD(bp,
11270 dev_info.port_hw_config[port].speed_capability_mask2) &
11271 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11272 bp->port.link_config[0] =
11273 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11274
11275 bp->port.link_config[1] =
11276 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11277
11278 bp->link_params.multi_phy_config =
11279 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11280
	/* If the device is capable of WoL, set the default state according
	 * to the HW config
	 */
11283 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11284 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11285 (config & PORT_FEATURE_WOL_ENABLED));
11286
11287 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11288 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11289 bp->flags |= NO_ISCSI_FLAG;
11290 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11291 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11292 bp->flags |= NO_FCOE_FLAG;
11293
11294 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
11295 bp->link_params.lane_config,
11296 bp->link_params.speed_cap_mask[0],
11297 bp->port.link_config[0]);
11298
11299 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11300 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11301 bnx2x_phy_probe(&bp->link_params);
11302 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11303
11304 bnx2x_link_settings_requested(bp);
11305
	/* If connected directly, work with the internal PHY, otherwise work
	 * with the external PHY
	 */
11310 ext_phy_config =
11311 SHMEM_RD(bp,
11312 dev_info.port_hw_config[port].external_phy_config);
11313 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11314 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11315 bp->mdio.prtad = bp->port.phy_addr;
11316
11317 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11318 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11319 bp->mdio.prtad =
11320 XGXS_EXT_PHY_ADDR(ext_phy_config);
11321
	/* Configure link feature according to nvram value */
11323 eee_mode = (((SHMEM_RD(bp, dev_info.
11324 port_feature_config[port].eee_power_mode)) &
11325 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11326 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11327 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11328 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11329 EEE_MODE_ENABLE_LPI |
11330 EEE_MODE_OUTPUT_TIME;
11331 } else {
11332 bp->link_params.eee_mode = 0;
11333 }
11334}
11335
11336void bnx2x_get_iscsi_info(struct bnx2x *bp)
11337{
11338 u32 no_flags = NO_ISCSI_FLAG;
11339 int port = BP_PORT(bp);
11340 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11341 drv_lic_key[port].max_iscsi_conn);
11342
11343 if (!CNIC_SUPPORT(bp)) {
11344 bp->flags |= no_flags;
11345 return;
11346 }
11347
	/* Get the number of maximum allowed iSCSI connections */
11349 bp->cnic_eth_dev.max_iscsi_conn =
11350 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11351 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11352
11353 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11354 bp->cnic_eth_dev.max_iscsi_conn);
11355
	/* If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
11360 if (!bp->cnic_eth_dev.max_iscsi_conn)
11361 bp->flags |= no_flags;
11362}
11363
11364static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11365{
	/* Port info */
11367 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11368 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11369 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11370 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11371
	/* Node info */
11373 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11374 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11375 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11376 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11377}
11378
11379static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11380{
11381 u8 count = 0;
11382
11383 if (IS_MF(bp)) {
11384 u8 fid;
11385
		/* iterate over absolute function ids for this path */
11387 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11388 if (IS_MF_SD(bp)) {
11389 u32 cfg = MF_CFG_RD(bp,
11390 func_mf_config[fid].config);
11391
11392 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11393 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11394 FUNC_MF_CFG_PROTOCOL_FCOE))
11395 count++;
11396 } else {
11397 u32 cfg = MF_CFG_RD(bp,
11398 func_ext_config[fid].
11399 func_cfg);
11400
11401 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11402 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11403 count++;
11404 }
11405 }
11406 } else {
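		/* SF: count ports that carry an FCoE connection license */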
11407 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11408
11409 for (port = 0; port < port_cnt; port++) {
11410 u32 lic = SHMEM_RD(bp,
11411 drv_lic_key[port].max_fcoe_conn) ^
11412 FW_ENCODE_32BIT_PATTERN;
11413 if (lic)
11414 count++;
11415 }
11416 }
11417
11418 return count;
11419}
11420
11421static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11422{
11423 int port = BP_PORT(bp);
11424 int func = BP_ABS_FUNC(bp);
11425 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11426 drv_lic_key[port].max_fcoe_conn);
11427 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11428
11429 if (!CNIC_SUPPORT(bp)) {
11430 bp->flags |= NO_FCOE_FLAG;
11431 return;
11432 }
11433
	/* Get the number of maximum allowed FCoE connections */
11435 bp->cnic_eth_dev.max_fcoe_conn =
11436 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11437 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11438
	/* Calculate the number of maximum allowed FCoE tasks */
11440 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11441
	/* check if FCoE resources must be shared between different functions */
11443 if (num_fcoe_func)
11444 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11445
	/* Read the WWNs according to the function mode */
11447 if (!IS_MF(bp)) {
		/* Port info */
11449 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11450 SHMEM_RD(bp,
11451 dev_info.port_hw_config[port].
11452 fcoe_wwn_port_name_upper);
11453 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11454 SHMEM_RD(bp,
11455 dev_info.port_hw_config[port].
11456 fcoe_wwn_port_name_lower);
11457
		/* Node info */
11459 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11460 SHMEM_RD(bp,
11461 dev_info.port_hw_config[port].
11462 fcoe_wwn_node_name_upper);
11463 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11464 SHMEM_RD(bp,
11465 dev_info.port_hw_config[port].
11466 fcoe_wwn_node_name_lower);
11467 } else if (!IS_MF_SD(bp)) {
		/* Read the WWN info only if the FCoE feature is enabled for
		 * this function.
		 */
11471 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11472 bnx2x_get_ext_wwn_info(bp, func);
11473 } else {
11474 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11475 bnx2x_get_ext_wwn_info(bp, func);
11476 }
11477
11478 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11479
	/* If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
11484 if (!bp->cnic_eth_dev.max_fcoe_conn)
11485 bp->flags |= NO_FCOE_FLAG;
11486}
11487
11488static void bnx2x_get_cnic_info(struct bnx2x *bp)
11489{
	/* iSCSI may be dynamically disabled, but reading its info here
	 * anyway lets the driver decrease memory usage if the feature is
	 * disabled for good.
	 */
11495 bnx2x_get_iscsi_info(bp);
11496 bnx2x_get_fcoe_info(bp);
11497}
11498
11499static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11500{
11501 u32 val, val2;
11502 int func = BP_ABS_FUNC(bp);
11503 int port = BP_PORT(bp);
11504 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11505 u8 *fip_mac = bp->fip_mac;
11506
11507 if (IS_MF(bp)) {
		/* iSCSI and FCoE NPAR MACs: if there is no iSCSI or FCoE MAC
		 * then the appropriate feature should be disabled.
		 * In non-SD mode the feature configuration comes from struct
		 * func_ext_config.
		 */
11513 if (!IS_MF_SD(bp)) {
11514 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11515 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11516 val2 = MF_CFG_RD(bp, func_ext_config[func].
11517 iscsi_mac_addr_upper);
11518 val = MF_CFG_RD(bp, func_ext_config[func].
11519 iscsi_mac_addr_lower);
11520 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11521 BNX2X_DEV_INFO
11522 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11523 } else {
11524 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11525 }
11526
11527 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11528 val2 = MF_CFG_RD(bp, func_ext_config[func].
11529 fcoe_mac_addr_upper);
11530 val = MF_CFG_RD(bp, func_ext_config[func].
11531 fcoe_mac_addr_lower);
11532 bnx2x_set_mac_buf(fip_mac, val, val2);
11533 BNX2X_DEV_INFO
11534 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11535 } else {
11536 bp->flags |= NO_FCOE_FLAG;
11537 }
11538
11539 bp->mf_ext_config = cfg;
11540
11541 } else {
11542 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
				/* use primary mac as iscsi mac */
11544 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11545
11546 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11547 BNX2X_DEV_INFO
11548 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11549 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
				/* use primary mac as fip mac */
11551 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11552 BNX2X_DEV_INFO("SD FCoE MODE\n");
11553 BNX2X_DEV_INFO
11554 ("Read FIP MAC: %pM\n", fip_mac);
11555 }
11556 }
11557
		/* If this is a storage-only interface, use SAN mac as
		 * primary MAC. Notice that for SD this is already the case,
		 * as the SAN mac was copied from the primary MAC.
		 */
11562 if (IS_MF_FCOE_AFEX(bp))
11563 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11564 } else {
11565 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11566 iscsi_mac_upper);
11567 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11568 iscsi_mac_lower);
11569 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11570
11571 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11572 fcoe_fip_mac_upper);
11573 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11574 fcoe_fip_mac_lower);
11575 bnx2x_set_mac_buf(fip_mac, val, val2);
11576 }
11577
	/* Disable iSCSI OOO if MAC configuration is invalid. */
11579 if (!is_valid_ether_addr(iscsi_mac)) {
11580 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11581 eth_zero_addr(iscsi_mac);
11582 }
11583
	/* Disable FCoE if MAC configuration is invalid. */
11585 if (!is_valid_ether_addr(fip_mac)) {
11586 bp->flags |= NO_FCOE_FLAG;
11587 eth_zero_addr(bp->fip_mac);
11588 }
11589}
11590
11591static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11592{
11593 u32 val, val2;
11594 int func = BP_ABS_FUNC(bp);
11595 int port = BP_PORT(bp);
11596
	/* Zero primary MAC configuration */
11598 eth_zero_addr(bp->dev->dev_addr);
11599
11600 if (BP_NOMCP(bp)) {
11601 BNX2X_ERROR("warning: random MAC workaround active\n");
11602 eth_hw_addr_random(bp->dev);
11603 } else if (IS_MF(bp)) {
11604 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11605 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11606 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11607 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11608 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11609
11610 if (CNIC_SUPPORT(bp))
11611 bnx2x_get_cnic_mac_hwinfo(bp);
11612 } else {
		/* in SF read MACs from port configuration */
11614 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11615 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11616 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11617
11618 if (CNIC_SUPPORT(bp))
11619 bnx2x_get_cnic_mac_hwinfo(bp);
11620 }
11621
11622 if (!BP_NOMCP(bp)) {
		/* Read physical port identifier from shmem */
11624 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11625 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11626 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11627 bp->flags |= HAS_PHYS_PORT_ID;
11628 }
11629
11630 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11631
11632 if (!is_valid_ether_addr(bp->dev->dev_addr))
11633 dev_err(&bp->pdev->dev,
11634 "bad Ethernet MAC address configuration: %pM\n"
11635 "change it manually before bringing up the appropriate network interface\n",
11636 bp->dev->dev_addr);
11637}
11638
11639static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11640{
11641 int tmp;
11642 u32 cfg;
11643
11644 if (IS_VF(bp))
11645 return false;
11646
11647 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
		/* take the pause setting from the function's extended config */
11649 tmp = BP_ABS_FUNC(bp);
11650 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11651 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11652 } else {
		/* take the pause setting from the port configuration */
11654 tmp = BP_PORT(bp);
11655 cfg = SHMEM_RD(bp,
11656 dev_info.port_hw_config[tmp].generic_features);
11657 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11658 }
11659 return cfg;
11660}
11661
11662static void validate_set_si_mode(struct bnx2x *bp)
11663{
11664 u8 func = BP_ABS_FUNC(bp);
11665 u32 val;
11666
11667 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11668
	/* check for legal mac (upper bytes) */
11670 if (val != 0xffff) {
11671 bp->mf_mode = MULTI_FUNCTION_SI;
11672 bp->mf_config[BP_VN(bp)] =
11673 MF_CFG_RD(bp, func_mf_config[func].config);
11674 } else
11675 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11676}
11677
11678static int bnx2x_get_hwinfo(struct bnx2x *bp)
11679{
11680 int func = BP_ABS_FUNC(bp);
11681 int vn;
11682 u32 val = 0, val2 = 0;
11683 int rc = 0;
11684
	/* Validate that chip access is feasible */
11686 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11687 dev_err(&bp->pdev->dev,
11688 "Chip read returns all Fs. Preventing probe from continuing\n");
11689 return -EINVAL;
11690 }
11691
11692 bnx2x_get_common_hwinfo(bp);
11693
	/*
	 * initialize IGU parameters
	 */
11697 if (CHIP_IS_E1x(bp)) {
11698 bp->common.int_block = INT_BLOCK_HC;
11699
11700 bp->igu_dsb_id = DEF_SB_IGU_ID;
11701 bp->igu_base_sb = 0;
11702 } else {
11703 bp->common.int_block = INT_BLOCK_IGU;
11704
		/* IGU configuration is protected by the RESET HW lock */
11706 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11707
11708 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11709
11710 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11711 int tout = 5000;
11712
11713 BNX2X_DEV_INFO("FORCING Normal Mode\n");
11714
11715 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11716 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11717 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11718
11719 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11720 tout--;
11721 usleep_range(1000, 2000);
11722 }
11723
11724 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11725 dev_err(&bp->pdev->dev,
11726 "FORCING Normal Mode failed!!!\n");
11727 bnx2x_release_hw_lock(bp,
11728 HW_LOCK_RESOURCE_RESET);
11729 return -EPERM;
11730 }
11731 }
11732
11733 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11734 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11735 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11736 } else
11737 BNX2X_DEV_INFO("IGU Normal Mode\n");
11738
11739 rc = bnx2x_get_igu_cam_info(bp);
11740 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11741 if (rc)
11742 return rc;
11743 }
11744
	/* set base FW non-default (fast path) status block id; this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
11750 if (CHIP_IS_E1x(bp))
11751 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11752 else
		/* E2 and above: one FW SB is used per IGU SB (Rx and Tx of
		 * the same queue share an IGU SB), so keep FW and IGU SB
		 * ids equal.
		 */
11757 bp->base_fw_ndsb = bp->igu_base_sb;
11758
11759 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
11760 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11761 bp->igu_sb_cnt, bp->base_fw_ndsb);
11762
	/*
	 * Initialize MF configuration
	 */
11767 bp->mf_ov = 0;
11768 bp->mf_mode = 0;
11769 bp->mf_sub_mode = 0;
11770 vn = BP_VN(bp);
11771
11772 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
11773 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
11774 bp->common.shmem2_base, SHMEM2_RD(bp, size),
11775 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
11776
11777 if (SHMEM2_HAS(bp, mf_cfg_addr))
11778 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
11779 else
11780 bp->common.mf_cfg_base = bp->common.shmem_base +
11781 offsetof(struct shmem_region, func_mb) +
11782 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
11783
		/* get mf configuration:
		 * 1. Existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode; OVLAN must be legal for
		 *    Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
11791 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
11793 val = SHMEM_RD(bp,
11794 dev_info.shared_feature_config.config);
11795 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
11796
11797 switch (val) {
11798 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
11799 validate_set_si_mode(bp);
11800 break;
11801 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
11802 if ((!CHIP_IS_E1x(bp)) &&
11803 (MF_CFG_RD(bp, func_mf_config[func].
11804 mac_upper) != 0xffff) &&
11805 (SHMEM2_HAS(bp,
11806 afex_driver_support))) {
11807 bp->mf_mode = MULTI_FUNCTION_AFEX;
11808 bp->mf_config[vn] = MF_CFG_RD(bp,
11809 func_mf_config[func].config);
11810 } else {
11811 BNX2X_DEV_INFO("can not configure afex mode\n");
11812 }
11813 break;
11814 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
11816 val = MF_CFG_RD(bp,
11817 func_mf_config[FUNC_0].e1hov_tag);
11818 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
11819
11820 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11821 bp->mf_mode = MULTI_FUNCTION_SD;
11822 bp->mf_config[vn] = MF_CFG_RD(bp,
11823 func_mf_config[func].config);
11824 } else
11825 BNX2X_DEV_INFO("illegal OV for SD\n");
11826 break;
11827 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
11828 bp->mf_mode = MULTI_FUNCTION_SD;
11829 bp->mf_sub_mode = SUB_MF_MODE_UFP;
11830 bp->mf_config[vn] =
11831 MF_CFG_RD(bp,
11832 func_mf_config[func].config);
11833 break;
11834 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
11835 bp->mf_config[vn] = 0;
11836 break;
11837 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
11838 val2 = SHMEM_RD(bp,
11839 dev_info.shared_hw_config.config_3);
11840 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
11841 switch (val2) {
11842 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
11843 validate_set_si_mode(bp);
11844 bp->mf_sub_mode =
11845 SUB_MF_MODE_NPAR1_DOT_5;
11846 break;
11847 default:
					/* Unknown configuration: reset mf_config */
11849 bp->mf_config[vn] = 0;
11850 BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
11851 val);
11852 }
11853 break;
11854 default:
			/* Unknown configuration: reset mf_config */
11856 bp->mf_config[vn] = 0;
11857 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
11858 }
11859 }
11860
11861 BNX2X_DEV_INFO("%s function mode\n",
11862 IS_MF(bp) ? "multi" : "single");
11863
11864 switch (bp->mf_mode) {
11865 case MULTI_FUNCTION_SD:
11866 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
11867 FUNC_MF_CFG_E1HOV_TAG_MASK;
11868 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11869 bp->mf_ov = val;
11870 bp->path_has_ovlan = true;
11871
11872 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
11873 func, bp->mf_ov, bp->mf_ov);
11874 } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
11875 dev_err(&bp->pdev->dev,
11876 "Unexpected - no valid MF OV for func %d in UFP mode\n",
11877 func);
11878 bp->path_has_ovlan = true;
11879 } else {
11880 dev_err(&bp->pdev->dev,
11881 "No valid MF OV for func %d, aborting\n",
11882 func);
11883 return -EPERM;
11884 }
11885 break;
11886 case MULTI_FUNCTION_AFEX:
11887 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
11888 break;
11889 case MULTI_FUNCTION_SI:
11890 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
11891 func);
11892 break;
11893 default:
11894 if (vn) {
11895 dev_err(&bp->pdev->dev,
11896 "VN %d is in a single function mode, aborting\n",
11897 vn);
11898 return -EPERM;
11899 }
11900 break;
11901 }
11902
		/* check if other port on the path needs ovlan:
		 * Since MF configuration is shared between ports
		 * Possible mixed modes are only
		 * {SF, SI} {SF, SD} {SI, SD}
		 */
11908 if (CHIP_MODE_IS_4_PORT(bp) &&
11909 !bp->path_has_ovlan &&
11910 !IS_MF(bp) &&
11911 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11912 u8 other_port = !BP_PORT(bp);
11913 u8 other_func = BP_PATH(bp) + 2*other_port;
11914 val = MF_CFG_RD(bp,
11915 func_mf_config[other_func].e1hov_tag);
11916 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
11917 bp->path_has_ovlan = true;
11918 }
11919 }
11920
	/* adjust igu_sb_cnt to MF for E1H */
11922 if (CHIP_IS_E1H(bp) && IS_MF(bp))
11923 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
11924
	/* port info */
11926 bnx2x_get_port_hwinfo(bp);
11927
	/* Get MAC addresses */
11929 bnx2x_get_mac_hwinfo(bp);
11930
11931 bnx2x_get_cnic_info(bp);
11932
11933 return rc;
11934}
11935
11936static void bnx2x_read_fwinfo(struct bnx2x *bp)
11937{
11938 int cnt, i, block_end, rodi;
11939 char vpd_start[BNX2X_VPD_LEN+1];
11940 char str_id_reg[VENDOR_ID_LEN+1];
11941 char str_id_cap[VENDOR_ID_LEN+1];
11942 char *vpd_data;
11943 char *vpd_extended_data = NULL;
11944 u8 len;
11945
11946 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
11947 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
11948
11949 if (cnt < BNX2X_VPD_LEN)
11950 goto out_not_found;
11951
	/* VPD RO tag should be first tag after identifier string, hence
	 * we should be able to find it in first BNX2X_VPD_LEN chars
	 */
11955 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
11956 PCI_VPD_LRDT_RO_DATA);
11957 if (i < 0)
11958 goto out_not_found;
11959
11960 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
11961 pci_vpd_lrdt_size(&vpd_start[i]);
11962
11963 i += PCI_VPD_LRDT_TAG_SIZE;
11964
11965 if (block_end > BNX2X_VPD_LEN) {
11966 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
11967 if (vpd_extended_data == NULL)
11968 goto out_not_found;
11969
		/* read rest of vpd image into vpd_extended_data */
11971 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
11972 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
11973 block_end - BNX2X_VPD_LEN,
11974 vpd_extended_data + BNX2X_VPD_LEN);
11975 if (cnt < (block_end - BNX2X_VPD_LEN))
11976 goto out_not_found;
11977 vpd_data = vpd_extended_data;
11978 } else
11979 vpd_data = vpd_start;
11980
	/* locate the manufacturer ID keyword in the read-only section */
11983 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11984 PCI_VPD_RO_KEYWORD_MFR_ID);
11985 if (rodi < 0)
11986 goto out_not_found;
11987
11988 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11989
11990 if (len != VENDOR_ID_LEN)
11991 goto out_not_found;
11992
11993 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11994
	/* vendor specific info */
11996 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
11997 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
11998 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
11999 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
		/* Dell board: read the OEM firmware version (V0 keyword) */
12001 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12002 PCI_VPD_RO_KEYWORD_VENDOR0);
12003 if (rodi >= 0) {
12004 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12005
12006 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12007
12008 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
12009 memcpy(bp->fw_ver, &vpd_data[rodi], len);
12010 bp->fw_ver[len] = ' ';
12011 }
12012 }
12013 kfree(vpd_extended_data);
12014 return;
12015 }
12016out_not_found:
12017 kfree(vpd_extended_data);
12018 return;
12019}
12020
12021static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12022{
12023 u32 flags = 0;
12024
12025 if (CHIP_REV_IS_FPGA(bp))
12026 SET_FLAGS(flags, MODE_FPGA);
12027 else if (CHIP_REV_IS_EMUL(bp))
12028 SET_FLAGS(flags, MODE_EMUL);
12029 else
12030 SET_FLAGS(flags, MODE_ASIC);
12031
12032 if (CHIP_MODE_IS_4_PORT(bp))
12033 SET_FLAGS(flags, MODE_PORT4);
12034 else
12035 SET_FLAGS(flags, MODE_PORT2);
12036
12037 if (CHIP_IS_E2(bp))
12038 SET_FLAGS(flags, MODE_E2);
12039 else if (CHIP_IS_E3(bp)) {
12040 SET_FLAGS(flags, MODE_E3);
12041 if (CHIP_REV(bp) == CHIP_REV_Ax)
12042 SET_FLAGS(flags, MODE_E3_A0);
12043 else
12044 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12045 }
12046
12047 if (IS_MF(bp)) {
12048 SET_FLAGS(flags, MODE_MF);
12049 switch (bp->mf_mode) {
12050 case MULTI_FUNCTION_SD:
12051 SET_FLAGS(flags, MODE_MF_SD);
12052 break;
12053 case MULTI_FUNCTION_SI:
12054 SET_FLAGS(flags, MODE_MF_SI);
12055 break;
12056 case MULTI_FUNCTION_AFEX:
12057 SET_FLAGS(flags, MODE_MF_AFEX);
12058 break;
12059 }
12060 } else
12061 SET_FLAGS(flags, MODE_SF);
12062
12063#if defined(__LITTLE_ENDIAN)
12064 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12065#else
12066 SET_FLAGS(flags, MODE_BIG_ENDIAN);
12067#endif
12068 INIT_MODE_FLAGS(bp) = flags;
12069}
12070
12071static int bnx2x_init_bp(struct bnx2x *bp)
12072{
12073 int func;
12074 int rc;
12075
12076 mutex_init(&bp->port.phy_mutex);
12077 mutex_init(&bp->fw_mb_mutex);
12078 mutex_init(&bp->drv_info_mutex);
12079 sema_init(&bp->stats_lock, 1);
12080 bp->drv_info_mng_owner = false;
12081
12082 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12083 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12084 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12085 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12086 if (IS_PF(bp)) {
12087 rc = bnx2x_get_hwinfo(bp);
12088 if (rc)
12089 return rc;
12090 } else {
12091 eth_zero_addr(bp->dev->dev_addr);
12092 }
12093
12094 bnx2x_set_modes_bitmap(bp);
12095
12096 rc = bnx2x_alloc_mem_bp(bp);
12097 if (rc)
12098 return rc;
12099
12100 bnx2x_read_fwinfo(bp);
12101
12102 func = BP_FUNC(bp);
12103
	/* need to reset chip if undi was active */
12105 if (IS_PF(bp) && !BP_NOMCP(bp)) {
		/* init fw_seq */
12107 bp->fw_seq =
12108 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12109 DRV_MSG_SEQ_NUMBER_MASK;
12110 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12111
12112 rc = bnx2x_prev_unload(bp);
12113 if (rc) {
12114 bnx2x_free_mem_bp(bp);
12115 return rc;
12116 }
12117 }
12118
12119 if (CHIP_REV_IS_FPGA(bp))
12120 dev_err(&bp->pdev->dev, "FPGA detected\n");
12121
12122 if (BP_NOMCP(bp) && (func == 0))
12123 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12124
12125 bp->disable_tpa = disable_tpa;
12126 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
	/* Reduce memory usage in kdump environment by disabling TPA */
12128 bp->disable_tpa |= is_kdump_kernel();
12129
	/* Set TPA flags */
12131 if (bp->disable_tpa) {
12132 bp->dev->hw_features &= ~NETIF_F_LRO;
12133 bp->dev->features &= ~NETIF_F_LRO;
12134 }
12135
12136 if (CHIP_IS_E1(bp))
12137 bp->dropless_fc = 0;
12138 else
12139 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12140
12141 bp->mrrs = mrrs;
12142
12143 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12144 if (IS_VF(bp))
12145 bp->rx_ring_size = MAX_RX_AVAIL;
12146
	/* make sure that the numbers are in the right granularity */
12148 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12149 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12150
12151 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12152
12153 init_timer(&bp->timer);
12154 bp->timer.expires = jiffies + bp->current_interval;
12155 bp->timer.data = (unsigned long) bp;
12156 bp->timer.function = bnx2x_timer;
12157
12158 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12159 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12160 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12161 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
12162 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12163 bnx2x_dcbx_init_params(bp);
12164 } else {
12165 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12166 }
12167
12168 if (CHIP_IS_E1x(bp))
12169 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12170 else
12171 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12172
	/* select the number of traffic classes (CoS) per chip/mode */
12174 if (IS_VF(bp))
12175 bp->max_cos = 1;
12176 else if (CHIP_IS_E1x(bp))
12177 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12178 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12179 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12180 else if (CHIP_IS_E3B0(bp))
12181 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12182 else
12183 BNX2X_ERR("unknown chip %x revision %x\n",
12184 CHIP_NUM(bp), CHIP_REV(bp));
12185 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12186
	/* We need at least one default status block for slow-path events,
	 * a second status block for the L2 queue, and a third status block
	 * for CNIC if supported.
	 */
12191 if (IS_VF(bp))
12192 bp->min_msix_vec_cnt = 1;
12193 else if (CNIC_SUPPORT(bp))
12194 bp->min_msix_vec_cnt = 3;
12195 else
12196 bp->min_msix_vec_cnt = 2;
	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
12198
12199 bp->dump_preset_idx = 1;
12200
12201 if (CHIP_IS_E3B0(bp))
12202 bp->flags |= PTP_SUPPORTED;
12203
12204 return rc;
12205}
12206
/****************************************************************************
* General service functions
****************************************************************************/

/* called with rtnl_lock */
12216static int bnx2x_open(struct net_device *dev)
12217{
12218 struct bnx2x *bp = netdev_priv(dev);
12219 int rc;
12220
12221 bp->stats_init = true;
12222
12223 netif_carrier_off(dev);
12224
12225 bnx2x_set_power_state(bp, PCI_D0);
12226
	/* If parity happened during the unload, then attentions and/or
	 * RECOVERY_IN_PROGRESS may still be set. In this case we want the
	 * first function loaded on the current engine to complete the
	 * recovery.
	 * Parity recovery is only relevant for the PF driver.
	 */
12233 if (IS_PF(bp)) {
12234 int other_engine = BP_PATH(bp) ? 0 : 1;
12235 bool other_load_status, load_status;
12236 bool global = false;
12237
12238 other_load_status = bnx2x_get_load_status(bp, other_engine);
12239 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12240 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12241 bnx2x_chk_parity_attn(bp, &global, true)) {
12242 do {
				/* If there are attentions and they are in
				 * global blocks, set the GLOBAL_RESET bit
				 * regardless whether it will be this function
				 * that will complete the recovery or not.
				 */
12248 if (global)
12249 bnx2x_set_reset_global(bp);
12250
				/* Only the first function on the current
				 * engine should try to recover in open. In
				 * case of attentions in global blocks only
				 * the first in the chip should try to
				 * recover.
				 */
12256 if ((!load_status &&
12257 (!global || !other_load_status)) &&
12258 bnx2x_trylock_leader_lock(bp) &&
12259 !bnx2x_leader_reset(bp)) {
12260 netdev_info(bp->dev,
12261 "Recovered in open\n");
12262 break;
12263 }
12264
				/* recovery has failed... */
12266 bnx2x_set_power_state(bp, PCI_D3hot);
12267 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12268
12269 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12270 "If you still see this message after a few retries then power cycle is required.\n");
12271
12272 return -EAGAIN;
12273 } while (0);
12274 }
12275 }
12276
12277 bp->recovery_state = BNX2X_RECOVERY_DONE;
12278 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12279 if (rc)
12280 return rc;
12281 return 0;
12282}
12283
/* called with rtnl_lock */
12285static int bnx2x_close(struct net_device *dev)
12286{
12287 struct bnx2x *bp = netdev_priv(dev);
12288
	/* Unload the driver, release IRQs */
12290 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12291
12292 return 0;
12293}
12294
12295static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12296 struct bnx2x_mcast_ramrod_params *p)
12297{
12298 int mc_count = netdev_mc_count(bp->dev);
12299 struct bnx2x_mcast_list_elem *mc_mac =
12300 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
12301 struct netdev_hw_addr *ha;
12302
12303 if (!mc_mac)
12304 return -ENOMEM;
12305
12306 INIT_LIST_HEAD(&p->mcast_list);
12307
12308 netdev_for_each_mc_addr(ha, bp->dev) {
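		/* consume the next element of the kcalloc'ed array */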
12309 mc_mac->mac = bnx2x_mc_addr(ha);
12310 list_add_tail(&mc_mac->link, &p->mcast_list);
12311 mc_mac++;
12312 }
12313
12314 p->mcast_list_len = mc_count;
12315
12316 return 0;
12317}
12318
12319static void bnx2x_free_mcast_macs_list(
12320 struct bnx2x_mcast_ramrod_params *p)
12321{
12322 struct bnx2x_mcast_list_elem *mc_mac =
12323 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
12324 link);
12325
12326 WARN_ON(!mc_mac);
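	/* the elements were allocated as one kcalloc() block in
	 * bnx2x_init_mcast_macs_list(), so freeing the first element
	 * releases the whole array
	 */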
12327 kfree(mc_mac);
12328}
12329
/**
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 *
 * We will use zero (0) as a MAC type for these MACs.
 */
12337static int bnx2x_set_uc_list(struct bnx2x *bp)
12338{
12339 int rc;
12340 struct net_device *dev = bp->dev;
12341 struct netdev_hw_addr *ha;
12342 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12343 unsigned long ramrod_flags = 0;
12344
	/* first schedule a cleanup up of old configuration */
12346 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12347 if (rc < 0) {
12348 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12349 return rc;
12350 }
12351
12352 netdev_for_each_uc_addr(ha, dev) {
12353 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12354 BNX2X_UC_LIST_MAC, &ramrod_flags);
12355 if (rc == -EEXIST) {
12356 DP(BNX2X_MSG_SP,
12357 "Failed to schedule ADD operations: %d\n", rc);
			/* do not treat adding same MAC as error */
			rc = 0;
12361 } else if (rc < 0) {
12362
12363 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12364 rc);
12365 return rc;
12366 }
12367 }
12368
	/* Execute the pending commands */
12370 __set_bit(RAMROD_CONT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12372 BNX2X_UC_LIST_MAC, &ramrod_flags);
12373}
12374
12375static int bnx2x_set_mc_list(struct bnx2x *bp)
12376{
12377 struct net_device *dev = bp->dev;
12378 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12379 int rc = 0;
12380
12381 rparam.mcast_obj = &bp->mcast_obj;
12382
	/* first, clear all configured multicast MACs */
12384 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12385 if (rc < 0) {
12386 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12387 return rc;
12388 }
12389
	/* then, configure a new MACs list */
12391 if (netdev_mc_count(dev)) {
12392 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
12393 if (rc) {
12394 BNX2X_ERR("Failed to create multicast MACs list: %d\n",
12395 rc);
12396 return rc;
12397 }
12398
		/* Now add the new MACs */
12400 rc = bnx2x_config_mcast(bp, &rparam,
12401 BNX2X_MCAST_CMD_ADD);
12402 if (rc < 0)
12403 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12404 rc);
12405
12406 bnx2x_free_mcast_macs_list(&rparam);
12407 }
12408
12409 return rc;
12410}
12411
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12413static void bnx2x_set_rx_mode(struct net_device *dev)
12414{
12415 struct bnx2x *bp = netdev_priv(dev);
12416
12417 if (bp->state != BNX2X_STATE_OPEN) {
12418 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12419 return;
12420 } else {
		/* Schedule an SP task to handle rest of change */
12422 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12423 NETIF_MSG_IFUP);
12424 }
12425}
12426
12427void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12428{
12429 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12430
12431 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12432
12433 netif_addr_lock_bh(bp->dev);
12434
12435 if (bp->dev->flags & IFF_PROMISC) {
12436 rx_mode = BNX2X_RX_MODE_PROMISC;
12437 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12438 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12439 CHIP_IS_E1(bp))) {
12440 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12441 } else {
12442 if (IS_PF(bp)) {
			/* configure the multicast list */
12444 if (bnx2x_set_mc_list(bp) < 0)
12445 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12446
			/* release bh lock, as bnx2x_set_uc_list might sleep */
12448 netif_addr_unlock_bh(bp->dev);
12449 if (bnx2x_set_uc_list(bp) < 0)
12450 rx_mode = BNX2X_RX_MODE_PROMISC;
12451 netif_addr_lock_bh(bp->dev);
12452 } else {
			/* configuring mcast to a vf involves sleeping (when
			 * we wait for the pf's response).
			 */
12456 bnx2x_schedule_sp_rtnl(bp,
12457 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12458 }
12459 }
12460
12461 bp->rx_mode = rx_mode;
12462
12463 if (IS_MF_ISCSI_ONLY(bp))
12464 bp->rx_mode = BNX2X_RX_MODE_NONE;
12465
	/* Schedule the rx_mode command */
12467 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12468 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12469 netif_addr_unlock_bh(bp->dev);
12470 return;
12471 }
12472
12473 if (IS_PF(bp)) {
12474 bnx2x_set_storm_rx_mode(bp);
12475 netif_addr_unlock_bh(bp->dev);
12476 } else {
		/* VF will need to request the PF to make this change, and so
		 * the VF needs to release the bottom-half lock prior to the
		 * request (as it will likely require sleep on the VF side)
		 */
12481 netif_addr_unlock_bh(bp->dev);
12482 bnx2x_vfpf_storm_rx_mode(bp);
12483 }
12484}
12485
/* called with rtnl_lock */
12487static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12488 int devad, u16 addr)
12489{
12490 struct bnx2x *bp = netdev_priv(netdev);
12491 u16 value;
12492 int rc;
12493
12494 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12495 prtad, devad, addr);
12496
	/* The HW expects different devad if CL22 is used */
12498 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12499
12500 bnx2x_acquire_phy_lock(bp);
12501 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12502 bnx2x_release_phy_lock(bp);
12503 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12504
12505 if (!rc)
12506 rc = value;
12507 return rc;
12508}
12509
/* called with rtnl_lock */
12511static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12512 u16 addr, u16 value)
12513{
12514 struct bnx2x *bp = netdev_priv(netdev);
12515 int rc;
12516
12517 DP(NETIF_MSG_LINK,
12518 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12519 prtad, devad, addr, value);
12520
	/* The HW expects different devad if CL22 is used */
12522 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12523
12524 bnx2x_acquire_phy_lock(bp);
12525 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12526 bnx2x_release_phy_lock(bp);
12527 return rc;
12528}
12529
/* called with rtnl_lock */
12531static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12532{
12533 struct bnx2x *bp = netdev_priv(dev);
12534 struct mii_ioctl_data *mdio = if_mii(ifr);
12535
12536 if (!netif_running(dev))
12537 return -EAGAIN;
12538
12539 switch (cmd) {
12540 case SIOCSHWTSTAMP:
12541 return bnx2x_hwtstamp_ioctl(bp, ifr);
12542 default:
12543 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12544 mdio->phy_id, mdio->reg_num, mdio->val_in);
12545 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12546 }
12547}
12548
12549#ifdef CONFIG_NET_POLL_CONTROLLER
12550static void poll_bnx2x(struct net_device *dev)
12551{
12552 struct bnx2x *bp = netdev_priv(dev);
12553 int i;
12554
12555 for_each_eth_queue(bp, i) {
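		/* kick NAPI on every ethernet queue */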
12556 struct bnx2x_fastpath *fp = &bp->fp[i];
12557 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12558 }
12559}
12560#endif
12561
12562static int bnx2x_validate_addr(struct net_device *dev)
12563{
12564 struct bnx2x *bp = netdev_priv(dev);
12565
	/* query the bulletin board for mac address configured by the PF */
12567 if (IS_VF(bp))
12568 bnx2x_sample_bulletin(bp);
12569
12570 if (!is_valid_ether_addr(dev->dev_addr)) {
12571 BNX2X_ERR("Non-valid Ethernet address\n");
12572 return -EADDRNOTAVAIL;
12573 }
12574 return 0;
12575}
12576
12577static int bnx2x_get_phys_port_id(struct net_device *netdev,
12578 struct netdev_phys_item_id *ppid)
12579{
12580 struct bnx2x *bp = netdev_priv(netdev);
12581
12582 if (!(bp->flags & HAS_PHYS_PORT_ID))
12583 return -EOPNOTSUPP;
12584
12585 ppid->id_len = sizeof(bp->phys_port_id);
12586 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12587
12588 return 0;
12589}
12590
12591static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12592 struct net_device *dev,
12593 netdev_features_t features)
12594{
12595 features = vlan_features_check(skb, features);
12596 return vxlan_features_check(skb, features);
12597}
12598
12599static const struct net_device_ops bnx2x_netdev_ops = {
12600 .ndo_open = bnx2x_open,
12601 .ndo_stop = bnx2x_close,
12602 .ndo_start_xmit = bnx2x_start_xmit,
12603 .ndo_select_queue = bnx2x_select_queue,
12604 .ndo_set_rx_mode = bnx2x_set_rx_mode,
12605 .ndo_set_mac_address = bnx2x_change_mac_addr,
12606 .ndo_validate_addr = bnx2x_validate_addr,
12607 .ndo_do_ioctl = bnx2x_ioctl,
12608 .ndo_change_mtu = bnx2x_change_mtu,
12609 .ndo_fix_features = bnx2x_fix_features,
12610 .ndo_set_features = bnx2x_set_features,
12611 .ndo_tx_timeout = bnx2x_tx_timeout,
12612#ifdef CONFIG_NET_POLL_CONTROLLER
12613 .ndo_poll_controller = poll_bnx2x,
12614#endif
12615 .ndo_setup_tc = bnx2x_setup_tc,
12616#ifdef CONFIG_BNX2X_SRIOV
12617 .ndo_set_vf_mac = bnx2x_set_vf_mac,
12618 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
12619 .ndo_get_vf_config = bnx2x_get_vf_config,
12620#endif
12621#ifdef NETDEV_FCOE_WWNN
12622 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
12623#endif
12624
12625#ifdef CONFIG_NET_RX_BUSY_POLL
12626 .ndo_busy_poll = bnx2x_low_latency_recv,
12627#endif
12628 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
12629 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
12630 .ndo_features_check = bnx2x_features_check,
12631};
12632
12633static int bnx2x_set_coherency_mask(struct bnx2x *bp)
12634{
12635 struct device *dev = &bp->pdev->dev;
12636
12637 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
12638 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
12639 dev_err(dev, "System does not support DMA, aborting\n");
12640 return -EIO;
12641 }
12642
12643 return 0;
12644}
12645
12646static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
12647{
12648 if (bp->flags & AER_ENABLED) {
12649 pci_disable_pcie_error_reporting(bp->pdev);
12650 bp->flags &= ~AER_ENABLED;
12651 }
12652}
12653
12654static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12655 struct net_device *dev, unsigned long board_type)
12656{
12657 int rc;
12658 u32 pci_cfg_dword;
12659 bool chip_is_e1x = (board_type == BCM57710 ||
12660 board_type == BCM57711 ||
12661 board_type == BCM57711E);
12662
12663 SET_NETDEV_DEV(dev, &pdev->dev);
12664
12665 bp->dev = dev;
12666 bp->pdev = pdev;
12667
12668 rc = pci_enable_device(pdev);
12669 if (rc) {
12670 dev_err(&bp->pdev->dev,
12671 "Cannot enable PCI device, aborting\n");
12672 goto err_out;
12673 }
12674
12675 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12676 dev_err(&bp->pdev->dev,
12677 "Cannot find PCI device base address, aborting\n");
12678 rc = -ENODEV;
12679 goto err_out_disable;
12680 }
12681
12682 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12683 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
12684 rc = -ENODEV;
12685 goto err_out_disable;
12686 }
12687
12688 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
12689 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
12690 PCICFG_REVESION_ID_ERROR_VAL) {
12691 pr_err("PCI device error, probably due to fan failure, aborting\n");
12692 rc = -ENODEV;
12693 goto err_out_disable;
12694 }
12695
12696 if (atomic_read(&pdev->enable_cnt) == 1) {
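		/* first enable of this device: claim its regions and turn on
		 * bus mastering
		 */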
12697 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12698 if (rc) {
12699 dev_err(&bp->pdev->dev,
12700 "Cannot obtain PCI resources, aborting\n");
12701 goto err_out_disable;
12702 }
12703
12704 pci_set_master(pdev);
12705 pci_save_state(pdev);
12706 }
12707
12708 if (IS_PF(bp)) {
12709 if (!pdev->pm_cap) {
12710 dev_err(&bp->pdev->dev,
12711 "Cannot find power management capability, aborting\n");
12712 rc = -EIO;
12713 goto err_out_release;
12714 }
12715 }
12716
12717 if (!pci_is_pcie(pdev)) {
12718 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
12719 rc = -EIO;
12720 goto err_out_release;
12721 }
12722
12723 rc = bnx2x_set_coherency_mask(bp);
12724 if (rc)
12725 goto err_out_release;
12726
12727 dev->mem_start = pci_resource_start(pdev, 0);
12728 dev->base_addr = dev->mem_start;
12729 dev->mem_end = pci_resource_end(pdev, 0);
12730
12731 dev->irq = pdev->irq;
12732
12733 bp->regview = pci_ioremap_bar(pdev, 0);
12734 if (!bp->regview) {
12735 dev_err(&bp->pdev->dev,
12736 "Cannot map register space, aborting\n");
12737 rc = -ENOMEM;
12738 goto err_out_release;
12739 }
12740
	/* In E1/E1H use pci device function given by kernel.
	 * In E2/E3 read physical function from ME register since these chips
	 * support Physical Device Assignment where kernel BDF maybe arbitrary
	 * (depending on hypervisor).
	 */
12746 if (chip_is_e1x) {
12747 bp->pf_num = PCI_FUNC(pdev->devfn);
12748 } else {
		/* read the absolute function number from the ME register */
12750 pci_read_config_dword(bp->pdev,
12751 PCICFG_ME_REGISTER, &pci_cfg_dword);
12752 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
12753 ME_REG_ABS_PF_NUM_SHIFT);
12754 }
12755 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
12756
	/* clean indirect addresses */
12758 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12759 PCICFG_VENDOR_ID_OFFSET);
12760
	/* Set PCIe reset type to fundamental for EEH recovery */
12762 pdev->needs_freset = 1;
12763
	/* AER (Advanced Error reporting) configuration */
12765 rc = pci_enable_pcie_error_reporting(pdev);
12766 if (!rc)
12767 bp->flags |= AER_ENABLED;
12768 else
12769 BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
12770
	/* Clean the following indirect addresses for all functions since
	 * they are not used by the driver.
	 */
12775 if (IS_PF(bp)) {
12776 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
12777 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
12778 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
12779 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
12780
12781 if (chip_is_e1x) {
12782 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
12783 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
12784 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
12785 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
12786 }
12787
		/* Enable internal target-read (in case we are probed after
		 * PF FLR). Must be done prior to any BAR read access. Only
		 * for 57712 and up.
		 */
12792 if (!chip_is_e1x)
12793 REG_WR(bp,
12794 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
12795 }
12796
12797 dev->watchdog_timeo = TX_TIMEOUT;
12798
12799 dev->netdev_ops = &bnx2x_netdev_ops;
12800 bnx2x_set_ethtool_ops(bp, dev);
12801
12802 dev->priv_flags |= IFF_UNICAST_FLT;
12803
12804 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12805 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12806 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
12807 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12808 if (!chip_is_e1x) {
12809 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
12810 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
12811 dev->hw_enc_features =
12812 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12813 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12814 NETIF_F_GSO_IPIP |
12815 NETIF_F_GSO_SIT |
12816 NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
12817 }
12818
12819 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12820 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12821
12822 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
12823 dev->features |= NETIF_F_HIGHDMA;
12824
	/* Add Loopback capability to the device */
12826 dev->hw_features |= NETIF_F_LOOPBACK;
12827
12828#ifdef BCM_DCBNL
12829 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
12830#endif
12831
	/* MDIO defaults; get_port_hwinfo() will set prtad to the right value */
12833 bp->mdio.prtad = MDIO_PRTAD_NONE;
12834 bp->mdio.mmds = 0;
12835 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12836 bp->mdio.dev = dev;
12837 bp->mdio.mdio_read = bnx2x_mdio_read;
12838 bp->mdio.mdio_write = bnx2x_mdio_write;
12839
12840 return 0;
12841
12842err_out_release:
12843 if (atomic_read(&pdev->enable_cnt) == 1)
12844 pci_release_regions(pdev);
12845
12846err_out_disable:
12847 pci_disable_device(pdev);
12848
12849err_out:
12850 return rc;
12851}
12852
12853static int bnx2x_check_firmware(struct bnx2x *bp)
12854{
12855 const struct firmware *firmware = bp->firmware;
12856 struct bnx2x_fw_file_hdr *fw_hdr;
12857 struct bnx2x_fw_file_section *sections;
12858 u32 offset, len, num_ops;
12859 __be16 *ops_offsets;
12860 int i;
12861 const u8 *fw_ver;
12862
12863 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
12864 BNX2X_ERR("Wrong FW size\n");
12865 return -EINVAL;
12866 }
12867
12868 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12869 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12870
	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
12873 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12874 offset = be32_to_cpu(sections[i].offset);
12875 len = be32_to_cpu(sections[i].len);
12876 if (offset + len > firmware->size) {
12877 BNX2X_ERR("Section %d length is out of bounds\n", i);
12878 return -EINVAL;
12879 }
12880 }
12881
	/* Likewise for the init ops offsets */
12883 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12884 ops_offsets = (__force __be16 *)(firmware->data + offset);
12885 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12886
12887 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12888 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12889 BNX2X_ERR("Section offset %d is out of bounds\n", i);
12890 return -EINVAL;
12891 }
12892 }
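
	/* Check FW version */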
12895 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12896 fw_ver = firmware->data + offset;
12897 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12898 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12899 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12900 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12901 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
12902 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
12903 BCM_5710_FW_MAJOR_VERSION,
12904 BCM_5710_FW_MINOR_VERSION,
12905 BCM_5710_FW_REVISION_VERSION,
12906 BCM_5710_FW_ENGINEERING_VERSION);
12907 return -EINVAL;
12908 }
12909
12910 return 0;
12911}
12912
12913static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12914{
12915 const __be32 *source = (const __be32 *)_source;
12916 u32 *target = (u32 *)_target;
12917 u32 i;
12918
12919 for (i = 0; i < n/4; i++)
12920 target[i] = be32_to_cpu(source[i]);
12921}
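
/* Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */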
12927static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12928{
12929 const __be32 *source = (const __be32 *)_source;
12930 struct raw_op *target = (struct raw_op *)_target;
12931 u32 i, j, tmp;
12932
12933 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12934 tmp = be32_to_cpu(source[j]);
12935 target[i].op = (tmp >> 24) & 0xff;
12936 target[i].offset = tmp & 0xffffff;
12937 target[i].raw_data = be32_to_cpu(source[j + 1]);
12938 }
12939}
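
/* IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 */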
12944static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
12945{
12946 const __be32 *source = (const __be32 *)_source;
12947 struct iro *target = (struct iro *)_target;
12948 u32 i, j, tmp;
12949
12950 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
12951 target[i].base = be32_to_cpu(source[j]);
12952 j++;
12953 tmp = be32_to_cpu(source[j]);
12954 target[i].m1 = (tmp >> 16) & 0xffff;
12955 target[i].m2 = tmp & 0xffff;
12956 j++;
12957 tmp = be32_to_cpu(source[j]);
12958 target[i].m3 = (tmp >> 16) & 0xffff;
12959 target[i].size = tmp & 0xffff;
12960 j++;
12961 }
12962}
12963
12964static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12965{
12966 const __be16 *source = (const __be16 *)_source;
12967 u16 *target = (u16 *)_target;
12968 u32 i;
12969
12970 for (i = 0; i < n/2; i++)
12971 target[i] = be16_to_cpu(source[i]);
12972}
12973
12974#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12975do { \
12976 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12977 bp->arr = kmalloc(len, GFP_KERNEL); \
12978 if (!bp->arr) \
12979 goto lbl; \
12980 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12981 (u8 *)bp->arr, len); \
12982} while (0)
12983
12984static int bnx2x_init_firmware(struct bnx2x *bp)
12985{
12986 const char *fw_file_name;
12987 struct bnx2x_fw_file_hdr *fw_hdr;
12988 int rc;
12989
12990 if (bp->firmware)
12991 return 0;
12992
12993 if (CHIP_IS_E1(bp))
12994 fw_file_name = FW_FILE_NAME_E1;
12995 else if (CHIP_IS_E1H(bp))
12996 fw_file_name = FW_FILE_NAME_E1H;
12997 else if (!CHIP_IS_E1x(bp))
12998 fw_file_name = FW_FILE_NAME_E2;
12999 else {
13000 BNX2X_ERR("Unsupported chip revision\n");
13001 return -EINVAL;
13002 }
13003 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13004
13005 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13006 if (rc) {
13007 BNX2X_ERR("Can't load firmware file %s\n",
13008 fw_file_name);
13009 goto request_firmware_exit;
13010 }
13011
13012 rc = bnx2x_check_firmware(bp);
13013 if (rc) {
13014 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13015 goto request_firmware_exit;
13016 }
13017
13018 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
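
	/* Initialize the pointers to the init arrays */
	/* Blob */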
13022 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13023
13024
13025 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13026
13027
13028 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13029 be16_to_cpu_n);
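
	/* STORMs firmware */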
13032 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13033 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13034 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13035 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13036 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13037 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13038 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13039 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13040 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13041 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13042 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13043 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13044 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13045 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13046 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13047 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13048
13049 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13050
13051 return 0;
13052
13053iro_alloc_err:
13054 kfree(bp->init_ops_offsets);
13055init_offsets_alloc_err:
13056 kfree(bp->init_ops);
13057init_ops_alloc_err:
13058 kfree(bp->init_data);
13059request_firmware_exit:
13060 release_firmware(bp->firmware);
13061 bp->firmware = NULL;
13062
13063 return rc;
13064}
13065
13066static void bnx2x_release_firmware(struct bnx2x *bp)
13067{
13068 kfree(bp->init_ops_offsets);
13069 kfree(bp->init_ops);
13070 kfree(bp->init_data);
13071 release_firmware(bp->firmware);
13072 bp->firmware = NULL;
13073}
13074
13075static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13076 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13077 .init_hw_cmn = bnx2x_init_hw_common,
13078 .init_hw_port = bnx2x_init_hw_port,
13079 .init_hw_func = bnx2x_init_hw_func,
13080
13081 .reset_hw_cmn = bnx2x_reset_common,
13082 .reset_hw_port = bnx2x_reset_port,
13083 .reset_hw_func = bnx2x_reset_func,
13084
13085 .gunzip_init = bnx2x_gunzip_init,
13086 .gunzip_end = bnx2x_gunzip_end,
13087
13088 .init_fw = bnx2x_init_firmware,
13089 .release_fw = bnx2x_release_firmware,
13090};
13091
13092void bnx2x__init_func_obj(struct bnx2x *bp)
13093{
13094
13095 bnx2x_setup_dmae(bp);
13096
13097 bnx2x_init_func_obj(bp, &bp->func_obj,
13098 bnx2x_sp(bp, func_rdata),
13099 bnx2x_sp_mapping(bp, func_rdata),
13100 bnx2x_sp(bp, func_afex_rdata),
13101 bnx2x_sp_mapping(bp, func_afex_rdata),
13102 &bnx2x_func_sp_drv);
13103}
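
/* must be called after sriov-enable */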
13106static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13107{
13108 int cid_count = BNX2X_L2_MAX_CID(bp);
13109
13110 if (IS_SRIOV(bp))
13111 cid_count += BNX2X_VF_CIDS;
13112
13113 if (CNIC_SUPPORT(bp))
13114 cid_count += CNIC_CID_MAX;
13115
13116 return roundup(cid_count, QM_CID_ROUND);
13117}
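
/* Determine the number of non-default status blocks the device supports,
 * based on the size of its MSI-X table.
 */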
13125static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13126{
13127 int index;
13128 u16 control = 0;
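
	/* If MSI-X is not supported - return number of SBs needed to support
	 * one fast path queue: one FP queue + SB for CNIC
	 */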
13134 if (!pdev->msix_cap) {
13135 dev_info(&pdev->dev, "no msix capability found\n");
13136 return 1 + cnic_cnt;
13137 }
13138 dev_info(&pdev->dev, "msix capability found\n");
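
	/* The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: number of all
	 * SBs without the default SB.
	 * For VFs there is no default SB, then we return (index+1).
	 */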
13147 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13148
13149 index = control & PCI_MSIX_FLAGS_QSIZE;
13150
13151 return index;
13152}
13153
13154static int set_max_cos_est(int chip_id)
13155{
13156 switch (chip_id) {
13157 case BCM57710:
13158 case BCM57711:
13159 case BCM57711E:
13160 return BNX2X_MULTI_TX_COS_E1X;
13161 case BCM57712:
13162 case BCM57712_MF:
13163 return BNX2X_MULTI_TX_COS_E2_E3A0;
13164 case BCM57800:
13165 case BCM57800_MF:
13166 case BCM57810:
13167 case BCM57810_MF:
13168 case BCM57840_4_10:
13169 case BCM57840_2_20:
13170 case BCM57840_O:
13171 case BCM57840_MFO:
13172 case BCM57840_MF:
13173 case BCM57811:
13174 case BCM57811_MF:
13175 return BNX2X_MULTI_TX_COS_E3B0;
13176 case BCM57712_VF:
13177 case BCM57800_VF:
13178 case BCM57810_VF:
13179 case BCM57840_VF:
13180 case BCM57811_VF:
13181 return 1;
13182 default:
13183 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13184 return -ENODEV;
13185 }
13186}
13187
13188static int set_is_vf(int chip_id)
13189{
13190 switch (chip_id) {
13191 case BCM57712_VF:
13192 case BCM57800_VF:
13193 case BCM57810_VF:
13194 case BCM57840_VF:
13195 case BCM57811_VF:
13196 return true;
13197 default:
13198 return false;
13199 }
13200}
13201
13202
13203#define tsgen_ctrl 0x0
13204#define tsgen_freecount 0x10
13205#define tsgen_synctime_t0 0x20
13206#define tsgen_offset_t0 0x28
13207#define tsgen_drift_t0 0x30
13208#define tsgen_synctime_t1 0x58
13209#define tsgen_offset_t1 0x60
13210#define tsgen_drift_t1 0x68
13211
13212
13213static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13214 int best_val, int best_period)
13215{
13216 struct bnx2x_func_state_params func_params = {NULL};
13217 struct bnx2x_func_set_timesync_params *set_timesync_params =
13218 &func_params.params.set_timesync;
13219
13220
13221 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13222 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13223
13224 func_params.f_obj = &bp->func_obj;
13225 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13226
13227
13228 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13229 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13230 set_timesync_params->add_sub_drift_adjust_value =
13231 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13232 set_timesync_params->drift_adjust_value = best_val;
13233 set_timesync_params->drift_adjust_period = best_period;
13234
13235 return bnx2x_func_state_change(bp, &func_params);
13236}
13237
13238static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13239{
13240 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13241 int rc;
13242 int drift_dir = 1;
13243 int val, period, period1, period2, dif, dif1, dif2;
13244 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13245
13246 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13247
13248 if (!netif_running(bp->dev)) {
13249 DP(BNX2X_MSG_PTP,
13250 "PTP adjfreq called while the interface is down\n");
13251 return -EFAULT;
13252 }
13253
13254 if (ppb < 0) {
13255 ppb = -ppb;
13256 drift_dir = 0;
13257 }
13258
13259 if (ppb == 0) {
13260 best_val = 1;
13261 best_period = 0x1FFFFFF;
13262 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13263 best_val = 31;
13264 best_period = 1;
13265 } else {
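		/* Search for the (val, period) pair that best approximates
		 * the requested drift; val values with all three LSBs clear
		 * are not usable by the HW and are skipped.
		 */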
13269 for (val = 0; val <= 31; val++) {
13270 if ((val & 0x7) == 0)
13271 continue;
13272 period1 = val * 1000000 / ppb;
13273 period2 = period1 + 1;
13274 if (period1 != 0)
13275 dif1 = ppb - (val * 1000000 / period1);
13276 else
13277 dif1 = BNX2X_MAX_PHC_DRIFT;
13278 if (dif1 < 0)
13279 dif1 = -dif1;
13280 dif2 = ppb - (val * 1000000 / period2);
13281 if (dif2 < 0)
13282 dif2 = -dif2;
13283 dif = (dif1 < dif2) ? dif1 : dif2;
13284 period = (dif1 < dif2) ? period1 : period2;
13285 if (dif < best_dif) {
13286 best_dif = dif;
13287 best_val = val;
13288 best_period = period;
13289 }
13290 }
13291 }
13292
13293 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13294 best_period);
13295 if (rc) {
13296 BNX2X_ERR("Failed to set drift\n");
13297 return -EFAULT;
13298 }
13299
13300 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13301 best_period);
13302
13303 return 0;
13304}
13305
13306static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13307{
13308 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13309
13310 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13311
13312 timecounter_adjtime(&bp->timecounter, delta);
13313
13314 return 0;
13315}
13316
13317static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13318{
13319 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13320 u64 ns;
13321
13322 ns = timecounter_read(&bp->timecounter);
13323
13324 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13325
13326 *ts = ns_to_timespec64(ns);
13327
13328 return 0;
13329}
13330
13331static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13332 const struct timespec64 *ts)
13333{
13334 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13335 u64 ns;
13336
13337 ns = timespec64_to_ns(ts);
13338
13339 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
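
	/* Re-init the timecounter */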
13342 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13343
13344 return 0;
13345}
13346
13347
13348static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13349 struct ptp_clock_request *rq, int on)
13350{
13351 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13352
13353 BNX2X_ERR("PHC ancillary features are not supported\n");
13354 return -ENOTSUPP;
13355}
13356
13357static void bnx2x_register_phc(struct bnx2x *bp)
13358{
13359
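	/* Fill the ptp_clock_info struct and register PTP clock */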
13360 bp->ptp_clock_info.owner = THIS_MODULE;
13361 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13362 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT;
13363 bp->ptp_clock_info.n_alarm = 0;
13364 bp->ptp_clock_info.n_ext_ts = 0;
13365 bp->ptp_clock_info.n_per_out = 0;
13366 bp->ptp_clock_info.pps = 0;
13367 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13368 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13369 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13370 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13371 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13372
13373 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13374 if (IS_ERR(bp->ptp_clock)) {
13375 bp->ptp_clock = NULL;
		BNX2X_ERR("PTP clock registration failed\n");
13377 }
13378}
13379
13380static int bnx2x_init_one(struct pci_dev *pdev,
13381 const struct pci_device_id *ent)
13382{
13383 struct net_device *dev = NULL;
13384 struct bnx2x *bp;
13385 enum pcie_link_width pcie_width;
13386 enum pci_bus_speed pcie_speed;
13387 int rc, max_non_def_sbs;
13388 int rx_count, tx_count, rss_count, doorbell_size;
13389 int max_cos_est;
13390 bool is_vf;
13391 int cnic_cnt;
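
	/* When probing in a kdump kernel, the device firmware may still be
	 * busy with a request from the crashed kernel; allow it at least
	 * 5 seconds from boot to settle.
	 */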
13396 if (is_kdump_kernel()) {
13397 ktime_t now = ktime_get_boottime();
13398 ktime_t fw_ready_time = ktime_set(5, 0);
13399
13400 if (ktime_before(now, fw_ready_time))
13401 msleep(ktime_ms_delta(fw_ready_time, now));
13402 }
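
	/* An estimated maximum supported CoS number according to the chip
	 * version. Used only to pre-size the Tx netdev_queue array; the
	 * accurate value is calculated later, in bnx2x_init_bp().
	 */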
13412 max_cos_est = set_max_cos_est(ent->driver_data);
13413 if (max_cos_est < 0)
13414 return max_cos_est;
13415 is_vf = set_is_vf(ent->driver_data);
13416 cnic_cnt = is_vf ? 0 : 1;
13417
13418 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13419
13420
13421 max_non_def_sbs += is_vf ? 1 : 0;
13422
13423
13424 rss_count = max_non_def_sbs - cnic_cnt;
13425
13426 if (rss_count < 1)
13427 return -EINVAL;
13428
13429
13430 rx_count = rss_count + cnic_cnt;
13431
13432
13433
13434
13435 tx_count = rss_count * max_cos_est + cnic_cnt;
13436
13437
13438 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13439 if (!dev)
13440 return -ENOMEM;
13441
13442 bp = netdev_priv(dev);
13443
13444 bp->flags = 0;
13445 if (is_vf)
13446 bp->flags |= IS_VF_FLAG;
13447
13448 bp->igu_sb_cnt = max_non_def_sbs;
13449 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
13450 bp->msg_enable = debug;
13451 bp->cnic_support = cnic_cnt;
13452 bp->cnic_probe = bnx2x_cnic_probe;
13453
13454 pci_set_drvdata(pdev, dev);
13455
13456 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
13457 if (rc < 0) {
13458 free_netdev(dev);
13459 return rc;
13460 }
13461
13462 BNX2X_DEV_INFO("This is a %s function\n",
13463 IS_PF(bp) ? "physical" : "virtual");
13464 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
13465 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
13466 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
13467 tx_count, rx_count);
13468
13469 rc = bnx2x_init_bp(bp);
13470 if (rc)
13471 goto init_one_exit;
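
	/* Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * L2 connections.
	 */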
13477 if (IS_VF(bp)) {
13478 bp->doorbells = bnx2x_vf_doorbells(bp);
13479 rc = bnx2x_vf_pci_alloc(bp);
13480 if (rc)
13481 goto init_one_exit;
13482 } else {
13483 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
13484 if (doorbell_size > pci_resource_len(pdev, 2)) {
13485 dev_err(&bp->pdev->dev,
13486 "Cannot map doorbells, bar size too small, aborting\n");
13487 rc = -ENOMEM;
13488 goto init_one_exit;
13489 }
13490 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
13491 doorbell_size);
13492 }
13493 if (!bp->doorbells) {
13494 dev_err(&bp->pdev->dev,
13495 "Cannot map doorbell space, aborting\n");
13496 rc = -ENOMEM;
13497 goto init_one_exit;
13498 }
13499
13500 if (IS_VF(bp)) {
13501 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13502 if (rc)
13503 goto init_one_exit;
13504 }
13505
13506
13507 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
13508 if (rc)
13509 goto init_one_exit;
13510
13511
13512 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
13513 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
13514
13515
13516 if (CHIP_IS_E1x(bp))
13517 bp->flags |= NO_FCOE_FLAG;
13518
13519
13520 bnx2x_set_num_queues(bp);
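
	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed.
	 */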
13525 rc = bnx2x_set_int_mode(bp);
13526 if (rc) {
13527 dev_err(&pdev->dev, "Cannot set interrupts\n");
13528 goto init_one_exit;
13529 }
13530 BNX2X_DEV_INFO("set interrupts successfully\n");
13531
13532
13533 rc = register_netdev(dev);
13534 if (rc) {
13535 dev_err(&pdev->dev, "Cannot register net device\n");
13536 goto init_one_exit;
13537 }
13538 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
13539
13540 if (!NO_FCOE(bp)) {
13541
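		/* Add storage MAC address */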
13542 rtnl_lock();
13543 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
13544 rtnl_unlock();
13545 }
13546 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
13547 pcie_speed == PCI_SPEED_UNKNOWN ||
13548 pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
13549 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
13550 else
13551 BNX2X_DEV_INFO(
13552 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
13553 board_info[ent->driver_data].name,
13554 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13555 pcie_width,
13556 pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
13557 pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
13558 pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
13559 "Unknown",
13560 dev->base_addr, bp->pdev->irq, dev->dev_addr);
13561
13562 bnx2x_register_phc(bp);
13563
13564 return 0;
13565
13566init_one_exit:
13567 bnx2x_disable_pcie_error_reporting(bp);
13568
13569 if (bp->regview)
13570 iounmap(bp->regview);
13571
13572 if (IS_PF(bp) && bp->doorbells)
13573 iounmap(bp->doorbells);
13574
13575 free_netdev(dev);
13576
13577 if (atomic_read(&pdev->enable_cnt) == 1)
13578 pci_release_regions(pdev);
13579
13580 pci_disable_device(pdev);
13581
13582 return rc;
13583}
13584
13585static void __bnx2x_remove(struct pci_dev *pdev,
13586 struct net_device *dev,
13587 struct bnx2x *bp,
13588 bool remove_netdev)
13589{
13590 if (bp->ptp_clock) {
13591 ptp_clock_unregister(bp->ptp_clock);
13592 bp->ptp_clock = NULL;
13593 }
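
	/* Delete storage MAC address */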
13596 if (!NO_FCOE(bp)) {
13597 rtnl_lock();
13598 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
13599 rtnl_unlock();
13600 }
13601
13602#ifdef BCM_DCBNL
13603
13604 bnx2x_dcbnl_update_applist(bp, true);
13605#endif
13606
13607 if (IS_PF(bp) &&
13608 !BP_NOMCP(bp) &&
13609 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
13610 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
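
	/* Close the interface - either directly or implicitly */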
13613 if (remove_netdev) {
13614 unregister_netdev(dev);
13615 } else {
13616 rtnl_lock();
13617 dev_close(dev);
13618 rtnl_unlock();
13619 }
13620
13621 bnx2x_iov_remove_one(bp);
13622
13623
13624 if (IS_PF(bp)) {
13625 bnx2x_set_power_state(bp, PCI_D0);
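
		/* Set endianity registers to reset values in case the next
		 * driver boots in a different endianity environment.
		 */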
13630 bnx2x_reset_endianity(bp);
13631 }
13632
13633
13634 bnx2x_disable_msi(bp);
13635
13636
13637 if (IS_PF(bp))
13638 bnx2x_set_power_state(bp, PCI_D3hot);
13639
13640
13641 cancel_delayed_work_sync(&bp->sp_rtnl_task);
13642
13643
13644 if (IS_VF(bp))
13645 bnx2x_vfpf_release(bp);
13646
13647
13648 if (system_state == SYSTEM_POWER_OFF) {
13649 pci_wake_from_d3(pdev, bp->wol);
13650 pci_set_power_state(pdev, PCI_D3hot);
13651 }
13652
13653 bnx2x_disable_pcie_error_reporting(bp);
13654 if (remove_netdev) {
13655 if (bp->regview)
13656 iounmap(bp->regview);
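
		/* For vfs, doorbells are part of the regview and were unmapped
		 * along with it. FW is only loaded by PF.
		 */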
13661 if (IS_PF(bp)) {
13662 if (bp->doorbells)
13663 iounmap(bp->doorbells);
13664
13665 bnx2x_release_firmware(bp);
13666 } else {
13667 bnx2x_vf_pci_dealloc(bp);
13668 }
13669 bnx2x_free_mem_bp(bp);
13670
13671 free_netdev(dev);
13672
13673 if (atomic_read(&pdev->enable_cnt) == 1)
13674 pci_release_regions(pdev);
13675
13676 pci_disable_device(pdev);
13677 }
13678}
13679
13680static void bnx2x_remove_one(struct pci_dev *pdev)
13681{
13682 struct net_device *dev = pci_get_drvdata(pdev);
13683 struct bnx2x *bp;
13684
13685 if (!dev) {
13686 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13687 return;
13688 }
13689 bp = netdev_priv(dev);
13690
13691 __bnx2x_remove(pdev, dev, bp, true);
13692}
13693
13694static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13695{
13696 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
13697
13698 bp->rx_mode = BNX2X_RX_MODE_NONE;
13699
13700 if (CNIC_LOADED(bp))
13701 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
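
	/* Stop Tx */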
13704 bnx2x_tx_disable(bp);
13705
13706 bnx2x_del_all_napi(bp);
13707 if (CNIC_LOADED(bp))
13708 bnx2x_del_all_napi_cnic(bp);
13709 netdev_reset_tc(bp->dev);
13710
13711 del_timer_sync(&bp->timer);
13712 cancel_delayed_work_sync(&bp->sp_task);
13713 cancel_delayed_work_sync(&bp->period_task);
13714
13715 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
13716 bp->stats_state = STATS_STATE_DISABLED;
13717 up(&bp->stats_lock);
13718 }
13719
13720 bnx2x_save_statistics(bp);
13721
13722 netif_carrier_off(bp->dev);
13723
13724 return 0;
13725}
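
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */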
13735static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13736 pci_channel_state_t state)
13737{
13738 struct net_device *dev = pci_get_drvdata(pdev);
13739 struct bnx2x *bp = netdev_priv(dev);
13740
13741 rtnl_lock();
13742
13743 BNX2X_ERR("IO error detected\n");
13744
13745 netif_device_detach(dev);
13746
13747 if (state == pci_channel_io_perm_failure) {
13748 rtnl_unlock();
13749 return PCI_ERS_RESULT_DISCONNECT;
13750 }
13751
13752 if (netif_running(dev))
13753 bnx2x_eeh_nic_unload(bp);
13754
13755 bnx2x_prev_path_mark_eeh(bp);
13756
13757 pci_disable_device(pdev);
13758
13759 rtnl_unlock();
13760
13761
13762 return PCI_ERS_RESULT_NEED_RESET;
13763}
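
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */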
13771static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13772{
13773 struct net_device *dev = pci_get_drvdata(pdev);
13774 struct bnx2x *bp = netdev_priv(dev);
13775 int i;
13776
13777 rtnl_lock();
13778 BNX2X_ERR("IO slot reset initializing...\n");
13779 if (pci_enable_device(pdev)) {
13780 dev_err(&pdev->dev,
13781 "Cannot re-enable PCI device after reset\n");
13782 rtnl_unlock();
13783 return PCI_ERS_RESULT_DISCONNECT;
13784 }
13785
13786 pci_set_master(pdev);
13787 pci_restore_state(pdev);
13788 pci_save_state(pdev);
13789
13790 if (netif_running(dev))
13791 bnx2x_set_power_state(bp, PCI_D0);
13792
13793 if (netif_running(dev)) {
13794 BNX2X_ERR("IO slot reset --> driver unload\n");
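
		/* MCP should have been reset; need to wait for validity */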
13797 bnx2x_init_shmem(bp);
13798
13799 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
13800 u32 v;
13801
13802 v = SHMEM2_RD(bp,
13803 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
13804 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
13805 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
13806 }
13807 bnx2x_drain_tx_queues(bp);
13808 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
13809 bnx2x_netif_stop(bp, 1);
13810 bnx2x_free_irq(bp);
13811
13812
13813 bnx2x_send_unload_done(bp, true);
13814
13815 bp->sp_state = 0;
13816 bp->port.pmf = 0;
13817
13818 bnx2x_prev_unload(bp);
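
		/* The engine should have been reset at this point, so it is
		 * fair to assume the FW will no longer write to the bnx2x
		 * driver's memory.
		 */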
13823 bnx2x_squeeze_objects(bp);
13824 bnx2x_free_skbs(bp);
13825 for_each_rx_queue(bp, i)
13826 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13827 bnx2x_free_fp_mem(bp);
13828 bnx2x_free_mem(bp);
13829
13830 bp->state = BNX2X_STATE_CLOSED;
13831 }
13832
13833 rtnl_unlock();
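
	/* If AER, perform cleanup of the PCIe registers */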
13836 if (bp->flags & AER_ENABLED) {
13837 if (pci_cleanup_aer_uncorrect_error_status(pdev))
13838 BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
13839 else
13840 DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
13841 }
13842
13843 return PCI_ERS_RESULT_RECOVERED;
13844}
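
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */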
13853static void bnx2x_io_resume(struct pci_dev *pdev)
13854{
13855 struct net_device *dev = pci_get_drvdata(pdev);
13856 struct bnx2x *bp = netdev_priv(dev);
13857
13858 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13859 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
13860 return;
13861 }
13862
13863 rtnl_lock();
13864
13865 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
13866 DRV_MSG_SEQ_NUMBER_MASK;
13867
13868 if (netif_running(dev))
13869 bnx2x_nic_load(bp, LOAD_NORMAL);
13870
13871 netif_device_attach(dev);
13872
13873 rtnl_unlock();
13874}
13875
13876static const struct pci_error_handlers bnx2x_err_handler = {
13877 .error_detected = bnx2x_io_error_detected,
13878 .slot_reset = bnx2x_io_slot_reset,
13879 .resume = bnx2x_io_resume,
13880};
13881
13882static void bnx2x_shutdown(struct pci_dev *pdev)
13883{
13884 struct net_device *dev = pci_get_drvdata(pdev);
13885 struct bnx2x *bp;
13886
13887 if (!dev)
13888 return;
13889
13890 bp = netdev_priv(dev);
13891 if (!bp)
13892 return;
13893
13894 rtnl_lock();
13895 netif_device_detach(dev);
13896 rtnl_unlock();
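
	/* Don't remove the netdevice, as there are scenarios which will cause
	 * the kernel to hang, e.g., when trying to remove bnx2i while the
	 * rebooting cycle is in progress.
	 */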
13902 __bnx2x_remove(pdev, dev, bp, false);
13903}
13904
13905static struct pci_driver bnx2x_pci_driver = {
13906 .name = DRV_MODULE_NAME,
13907 .id_table = bnx2x_pci_tbl,
13908 .probe = bnx2x_init_one,
13909 .remove = bnx2x_remove_one,
13910 .suspend = bnx2x_suspend,
13911 .resume = bnx2x_resume,
13912 .err_handler = &bnx2x_err_handler,
13913#ifdef CONFIG_BNX2X_SRIOV
13914 .sriov_configure = bnx2x_sriov_configure,
13915#endif
13916 .shutdown = bnx2x_shutdown,
13917};
13918
13919static int __init bnx2x_init(void)
13920{
13921 int ret;
13922
13923 pr_info("%s", version);
13924
13925 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13926 if (bnx2x_wq == NULL) {
13927 pr_err("Cannot create workqueue\n");
13928 return -ENOMEM;
13929 }
13930 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
13931 if (!bnx2x_iov_wq) {
13932 pr_err("Cannot create iov workqueue\n");
13933 destroy_workqueue(bnx2x_wq);
13934 return -ENOMEM;
13935 }
13936
13937 ret = pci_register_driver(&bnx2x_pci_driver);
13938 if (ret) {
13939 pr_err("Cannot register driver\n");
13940 destroy_workqueue(bnx2x_wq);
13941 destroy_workqueue(bnx2x_iov_wq);
13942 }
13943 return ret;
13944}
13945
13946static void __exit bnx2x_cleanup(void)
13947{
13948 struct list_head *pos, *q;
13949
13950 pci_unregister_driver(&bnx2x_pci_driver);
13951
13952 destroy_workqueue(bnx2x_wq);
13953 destroy_workqueue(bnx2x_iov_wq);
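
	/* Free globally allocated resources */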
13956 list_for_each_safe(pos, q, &bnx2x_prev_list) {
13957 struct bnx2x_prev_path_list *tmp =
13958 list_entry(pos, struct bnx2x_prev_path_list, list);
13959 list_del(pos);
13960 kfree(tmp);
13961 }
13962}
13963
13964void bnx2x_notify_link_changed(struct bnx2x *bp)
13965{
13966 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
13967}
13968
13969module_init(bnx2x_init);
13970module_exit(bnx2x_cleanup);
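
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */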
13981static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
13982{
13983 unsigned long ramrod_flags = 0;
13984
13985 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13986 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
13987 &bp->iscsi_l2_mac_obj, true,
13988 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
13989}
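
/* count denotes the number of new completions we have seen */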
13992static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13993{
13994 struct eth_spe *spe;
13995 int cxt_index, cxt_offset;
13996
13997#ifdef BNX2X_STOP_ON_ERROR
13998 if (unlikely(bp->panic))
13999 return;
14000#endif
14001
14002 spin_lock_bh(&bp->spq_lock);
14003 BUG_ON(bp->cnic_spq_pending < count);
14004 bp->cnic_spq_pending -= count;
14005
14006 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14007 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14008 & SPE_HDR_CONN_TYPE) >>
14009 SPE_HDR_CONN_TYPE_SHIFT;
14010 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14011 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14012
14013
14014
14015
14016 if (type == ETH_CONNECTION_TYPE) {
14017 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14018 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14019 ILT_PAGE_CIDS;
14020 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14021 (cxt_index * ILT_PAGE_CIDS);
14022 bnx2x_set_ctx_validation(bp,
14023 &bp->context[cxt_index].
14024 vcxt[cxt_offset].eth,
14025 BNX2X_ISCSI_ETH_CID(bp));
14026 }
14027 }
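
		/* There may be not more than 8 L2 and not more than 8 L5 SPEs
		 * in the air. We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */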
14035 if (type == ETH_CONNECTION_TYPE) {
14036 if (!atomic_read(&bp->cq_spq_left))
14037 break;
14038 else
14039 atomic_dec(&bp->cq_spq_left);
14040 } else if (type == NONE_CONNECTION_TYPE) {
14041 if (!atomic_read(&bp->eq_spq_left))
14042 break;
14043 else
14044 atomic_dec(&bp->eq_spq_left);
14045 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14046 (type == FCOE_CONNECTION_TYPE)) {
14047 if (bp->cnic_spq_pending >=
14048 bp->cnic_eth_dev.max_kwqe_pending)
14049 break;
14050 else
14051 bp->cnic_spq_pending++;
14052 } else {
14053 BNX2X_ERR("Unknown SPE type: %d\n", type);
14054 bnx2x_panic();
14055 break;
14056 }
14057
14058 spe = bnx2x_sp_get_next(bp);
14059 *spe = *bp->cnic_kwq_cons;
14060
14061 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14062 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14063
14064 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14065 bp->cnic_kwq_cons = bp->cnic_kwq;
14066 else
14067 bp->cnic_kwq_cons++;
14068 }
14069 bnx2x_sp_prod_update(bp);
14070 spin_unlock_bh(&bp->spq_lock);
14071}
14072
14073static int bnx2x_cnic_sp_queue(struct net_device *dev,
14074 struct kwqe_16 *kwqes[], u32 count)
14075{
14076 struct bnx2x *bp = netdev_priv(dev);
14077 int i;
14078
14079#ifdef BNX2X_STOP_ON_ERROR
14080 if (unlikely(bp->panic)) {
14081 BNX2X_ERR("Can't post to SP queue while panic\n");
14082 return -EIO;
14083 }
14084#endif
14085
14086 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14087 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14088 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14089 return -EAGAIN;
14090 }
14091
14092 spin_lock_bh(&bp->spq_lock);
14093
14094 for (i = 0; i < count; i++) {
14095 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14096
14097 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14098 break;
14099
14100 *bp->cnic_kwq_prod = *spe;
14101
14102 bp->cnic_kwq_pending++;
14103
14104 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14105 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14106 spe->data.update_data_addr.hi,
14107 spe->data.update_data_addr.lo,
14108 bp->cnic_kwq_pending);
14109
14110 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14111 bp->cnic_kwq_prod = bp->cnic_kwq;
14112 else
14113 bp->cnic_kwq_prod++;
14114 }
14115
14116 spin_unlock_bh(&bp->spq_lock);
14117
14118 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14119 bnx2x_cnic_sp_post(bp, 0);
14120
14121 return i;
14122}
14123
14124static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14125{
14126 struct cnic_ops *c_ops;
14127 int rc = 0;
14128
14129 mutex_lock(&bp->cnic_mutex);
14130 c_ops = rcu_dereference_protected(bp->cnic_ops,
14131 lockdep_is_held(&bp->cnic_mutex));
14132 if (c_ops)
14133 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14134 mutex_unlock(&bp->cnic_mutex);
14135
14136 return rc;
14137}
14138
14139static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14140{
14141 struct cnic_ops *c_ops;
14142 int rc = 0;
14143
14144 rcu_read_lock();
14145 c_ops = rcu_dereference(bp->cnic_ops);
14146 if (c_ops)
14147 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14148 rcu_read_unlock();
14149
14150 return rc;
14151}
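
/* for commands that have no data */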
14156int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14157{
14158 struct cnic_ctl_info ctl = {0};
14159
14160 ctl.cmd = cmd;
14161
14162 return bnx2x_cnic_ctl_send(bp, &ctl);
14163}
14164
14165static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14166{
14167 struct cnic_ctl_info ctl = {0};
14168
14169
14170 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14171 ctl.data.comp.cid = cid;
14172 ctl.data.comp.error = err;
14173
14174 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14175 bnx2x_cnic_sp_post(bp, 0);
14176}
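
/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */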
14183static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14184{
14185 unsigned long accept_flags = 0, ramrod_flags = 0;
14186 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14187 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14188
14189 if (start) {
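		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets (leading in our
		 * case).
		 */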
14196 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14197 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14198 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14199 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14200
14201
14202 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14203
14204 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14205 } else
14206
14207 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14208
14209 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14210 set_bit(sched_state, &bp->sp_state);
14211 else {
14212 __set_bit(RAMROD_RX, &ramrod_flags);
14213 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14214 ramrod_flags);
14215 }
14216}
14217
14218static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14219{
14220 struct bnx2x *bp = netdev_priv(dev);
14221 int rc = 0;
14222
14223 switch (ctl->cmd) {
14224 case DRV_CTL_CTXTBL_WR_CMD: {
14225 u32 index = ctl->data.io.offset;
14226 dma_addr_t addr = ctl->data.io.dma_addr;
14227
14228 bnx2x_ilt_wr(bp, index, addr);
14229 break;
14230 }
14231
14232 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14233 int count = ctl->data.credit.credit_count;
14234
14235 bnx2x_cnic_sp_post(bp, count);
14236 break;
14237 }
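
	/* rtnl_lock is held.  */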
14240 case DRV_CTL_START_L2_CMD: {
14241 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14242 unsigned long sp_bits = 0;
14243
14244
14245 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14246 cp->iscsi_l2_client_id,
14247 cp->iscsi_l2_cid, BP_FUNC(bp),
14248 bnx2x_sp(bp, mac_rdata),
14249 bnx2x_sp_mapping(bp, mac_rdata),
14250 BNX2X_FILTER_MAC_PENDING,
14251 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14252 &bp->macs_pool);
14253
14254
14255 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14256 if (rc)
14257 break;
14258
14259 mmiowb();
14260 barrier();
14261
14262
14263
14264 netif_addr_lock_bh(dev);
14265 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14266 netif_addr_unlock_bh(dev);
14267
14268
14269 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14270 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14271
14272 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14273 BNX2X_ERR("rx_mode completion timed out!\n");
14274
14275 break;
14276 }
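
	/* rtnl_lock is held.  */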
14279 case DRV_CTL_STOP_L2_CMD: {
14280 unsigned long sp_bits = 0;
14281
14282
14283 netif_addr_lock_bh(dev);
14284 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14285 netif_addr_unlock_bh(dev);
14286
14287
14288 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14289 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14290
14291 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14292 BNX2X_ERR("rx_mode completion timed out!\n");
14293
14294 mmiowb();
14295 barrier();
14296
14297
14298 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14299 BNX2X_ISCSI_ETH_MAC, true);
14300 break;
14301 }
14302 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14303 int count = ctl->data.credit.credit_count;
14304
14305 smp_mb__before_atomic();
14306 atomic_add(count, &bp->cq_spq_left);
14307 smp_mb__after_atomic();
14308 break;
14309 }
14310 case DRV_CTL_ULP_REGISTER_CMD: {
14311 int ulp_type = ctl->data.register_data.ulp_type;
14312
14313 if (CHIP_IS_E3(bp)) {
14314 int idx = BP_FW_MB_IDX(bp);
14315 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14316 int path = BP_PATH(bp);
14317 int port = BP_PORT(bp);
14318 int i;
14319 u32 scratch_offset;
14320 u32 *host_addr;
14321
14322
14323 if (ulp_type == CNIC_ULP_ISCSI)
14324 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14325 else if (ulp_type == CNIC_ULP_FCOE)
14326 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14327 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14328
14329 if ((ulp_type != CNIC_ULP_FCOE) ||
14330 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14331 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14332 break;
14333
14334
14335 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14336 if (!scratch_offset)
14337 break;
14338 scratch_offset += offsetof(struct glob_ncsi_oem_data,
14339 fcoe_features[path][port]);
14340 host_addr = (u32 *) &(ctl->data.register_data.
14341 fcoe_features);
14342 for (i = 0; i < sizeof(struct fcoe_capabilities);
14343 i += 4)
14344 REG_WR(bp, scratch_offset + i,
14345 *(host_addr + i/4));
14346 }
14347 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14348 break;
14349 }
14350
14351 case DRV_CTL_ULP_UNREGISTER_CMD: {
14352 int ulp_type = ctl->data.ulp_type;
14353
14354 if (CHIP_IS_E3(bp)) {
14355 int idx = BP_FW_MB_IDX(bp);
14356 u32 cap;
14357
14358 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14359 if (ulp_type == CNIC_ULP_ISCSI)
14360 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14361 else if (ulp_type == CNIC_ULP_FCOE)
14362 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14363 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14364 }
14365 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14366 break;
14367 }
14368
14369 default:
14370 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14371 rc = -EINVAL;
14372 }
14373
14374 return rc;
14375}
14376
14377void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
14378{
14379 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14380
14381 if (bp->flags & USING_MSIX_FLAG) {
14382 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
14383 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
14384 cp->irq_arr[0].vector = bp->msix_table[1].vector;
14385 } else {
14386 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
14387 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
14388 }
14389 if (!CHIP_IS_E1x(bp))
14390 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
14391 else
14392 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
14393
14394 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
14395 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
14396 cp->irq_arr[1].status_blk = bp->def_status_blk;
14397 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
14398 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
14399
14400 cp->num_irq = 2;
14401}
14402
14403void bnx2x_setup_cnic_info(struct bnx2x *bp)
14404{
14405 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14406
14407 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14408 bnx2x_cid_ilt_lines(bp);
14409 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14410 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14411 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14412
14413 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
14414 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
14415 cp->iscsi_l2_cid);
14416
14417 if (NO_ISCSI_OOO(bp))
14418 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14419}
14420
14421static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
14422 void *data)
14423{
14424 struct bnx2x *bp = netdev_priv(dev);
14425 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14426 int rc;
14427
14428 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
14429
14430 if (ops == NULL) {
14431 BNX2X_ERR("NULL ops received\n");
14432 return -EINVAL;
14433 }
14434
14435 if (!CNIC_SUPPORT(bp)) {
14436 BNX2X_ERR("Can't register CNIC when not supported\n");
14437 return -EOPNOTSUPP;
14438 }
14439
14440 if (!CNIC_LOADED(bp)) {
14441 rc = bnx2x_load_cnic(bp);
14442 if (rc) {
14443 BNX2X_ERR("CNIC-related load failed\n");
14444 return rc;
14445 }
14446 }
14447
14448 bp->cnic_enabled = true;
14449
14450 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
14451 if (!bp->cnic_kwq)
14452 return -ENOMEM;
14453
14454 bp->cnic_kwq_cons = bp->cnic_kwq;
14455 bp->cnic_kwq_prod = bp->cnic_kwq;
14456 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
14457
14458 bp->cnic_spq_pending = 0;
14459 bp->cnic_kwq_pending = 0;
14460
14461 bp->cnic_data = data;
14462
14463 cp->num_irq = 0;
14464 cp->drv_state |= CNIC_DRV_STATE_REGD;
14465 cp->iro_arr = bp->iro_arr;
14466
14467 bnx2x_setup_cnic_irq_info(bp);
14468
14469 rcu_assign_pointer(bp->cnic_ops, ops);
14470
14471
14472 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14473
14474 return 0;
14475}
14476
14477static int bnx2x_unregister_cnic(struct net_device *dev)
14478{
14479 struct bnx2x *bp = netdev_priv(dev);
14480 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14481
14482 mutex_lock(&bp->cnic_mutex);
14483 cp->drv_state = 0;
14484 RCU_INIT_POINTER(bp->cnic_ops, NULL);
14485 mutex_unlock(&bp->cnic_mutex);
14486 synchronize_rcu();
14487 bp->cnic_enabled = false;
14488 kfree(bp->cnic_kwq);
14489 bp->cnic_kwq = NULL;
14490
14491 return 0;
14492}
14493
14494static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
14495{
14496 struct bnx2x *bp = netdev_priv(dev);
14497 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
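
	/* If both iSCSI and FCoE are disabled - return NULL in order to
	 * indicate CNIC that it should not try to work with this device.
	 */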
14503 if (NO_ISCSI(bp) && NO_FCOE(bp))
14504 return NULL;
14505
14506 cp->drv_owner = THIS_MODULE;
14507 cp->chip_id = CHIP_ID(bp);
14508 cp->pdev = bp->pdev;
14509 cp->io_base = bp->regview;
14510 cp->io_base2 = bp->doorbells;
14511 cp->max_kwqe_pending = 8;
14512 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
14513 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14514 bnx2x_cid_ilt_lines(bp);
14515 cp->ctx_tbl_len = CNIC_ILT_LINES;
14516 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14517 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
14518 cp->drv_ctl = bnx2x_drv_ctl;
14519 cp->drv_register_cnic = bnx2x_register_cnic;
14520 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
14521 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14522 cp->iscsi_l2_client_id =
14523 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14524 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14525
14526 if (NO_ISCSI_OOO(bp))
14527 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14528
14529 if (NO_ISCSI(bp))
14530 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
14531
14532 if (NO_FCOE(bp))
14533 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
14534
14535 BNX2X_DEV_INFO(
14536 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
14537 cp->ctx_blk_size,
14538 cp->ctx_tbl_offset,
14539 cp->ctx_tbl_len,
14540 cp->starting_cid);
14541 return cp;
14542}
14543
14544static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
14545{
14546 struct bnx2x *bp = fp->bp;
14547 u32 offset = BAR_USTRORM_INTMEM;
14548
14549 if (IS_VF(bp))
14550 return bnx2x_vf_ustorm_prods_offset(bp, fp);
14551 else if (!CHIP_IS_E1x(bp))
14552 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
14553 else
14554 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
14555
14556 return offset;
14557}
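
/**
 * bnx2x_pretend_func - allow the function to pretend being another one.
 *
 * @bp:		driver handle
 * @pretend_func_val: function value to pretend
 */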
14564int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
14565{
14566 u32 pretend_reg;
14567
14568 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
14569 return -1;
14570
14571
14572 pretend_reg = bnx2x_get_pretend_reg(bp);
14573 REG_WR(bp, pretend_reg, pretend_func_val);
14574 REG_RD(bp, pretend_reg);
14575 return 0;
14576}
14577
14578static void bnx2x_ptp_task(struct work_struct *work)
14579{
14580 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
14581 int port = BP_PORT(bp);
14582 u32 val_seq;
14583 u64 timestamp, ns;
14584 struct skb_shared_hwtstamps shhwtstamps;
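
	/* Read Tx timestamp registers */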
14587 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
14588 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
14589 if (val_seq & 0x10000) {
14590
14591 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
14592 NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
14593 timestamp <<= 32;
14594 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
14595 NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
14596
14597 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
14598 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
14599 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
14600
14601 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
14602 shhwtstamps.hwtstamp = ns_to_ktime(ns);
14603 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
14604 dev_kfree_skb_any(bp->ptp_tx_skb);
14605 bp->ptp_tx_skb = NULL;
14606
14607 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
14608 timestamp, ns);
14609 } else {
14610 DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
14611
14612 schedule_work(&bp->ptp_task);
14613 }
14614}
14615
14616void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
14617{
14618 int port = BP_PORT(bp);
14619 u64 timestamp, ns;
14620
14621 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
14622 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
14623 timestamp <<= 32;
14624 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
14625 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
14626
14627
14628 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
14629 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
14630
14631 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
14632
14633 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
14634
14635 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
14636 timestamp, ns);
14637}
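
/* Read the PHC */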
14640static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
14641{
14642 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
14643 int port = BP_PORT(bp);
14644 u32 wb_data[2];
14645 u64 phc_cycles;
14646
14647 REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
14648 NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
14649 phc_cycles = wb_data[1];
14650 phc_cycles = (phc_cycles << 32) + wb_data[0];
14651
14652 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
14653
14654 return phc_cycles;
14655}
14656
14657static void bnx2x_init_cyclecounter(struct bnx2x *bp)
14658{
14659 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
14660 bp->cyclecounter.read = bnx2x_cyclecounter_read;
14661 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
14662 bp->cyclecounter.shift = 1;
14663 bp->cyclecounter.mult = 1;
14664}
14665
14666static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
14667{
14668 struct bnx2x_func_state_params func_params = {NULL};
14669 struct bnx2x_func_set_timesync_params *set_timesync_params =
14670 &func_params.params.set_timesync;
14671
14672
14673 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
14674 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
14675
14676 func_params.f_obj = &bp->func_obj;
14677 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
14678
14679
14680 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
14681 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
14682
14683 return bnx2x_func_state_change(bp, &func_params);
14684}
14685
14686static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
14687{
14688 struct bnx2x_queue_state_params q_params;
14689 int rc, i;
14690
14691
14692 memset(&q_params, 0, sizeof(q_params));
14693 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
14694 q_params.cmd = BNX2X_Q_CMD_UPDATE;
14695 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
14696 &q_params.params.update.update_flags);
14697 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
14698 &q_params.params.update.update_flags);
14699
14700
14701 for_each_eth_queue(bp, i) {
14702 struct bnx2x_fastpath *fp = &bp->fp[i];
14703
14704
14705 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
14706
14707
14708 rc = bnx2x_queue_state_change(bp, &q_params);
14709 if (rc) {
14710 BNX2X_ERR("Failed to enable PTP packets\n");
14711 return rc;
14712 }
14713 }
14714
14715 return 0;
14716}
14717
14718int bnx2x_configure_ptp_filters(struct bnx2x *bp)
14719{
14720 int port = BP_PORT(bp);
14721 int rc;
14722
14723 if (!bp->hwtstamp_ioctl_called)
14724 return 0;
14725
14726 switch (bp->tx_type) {
14727 case HWTSTAMP_TX_ON:
14728 bp->flags |= TX_TIMESTAMPING_EN;
14729 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
14730 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
14731 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
14732 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
14733 break;
14734 case HWTSTAMP_TX_ONESTEP_SYNC:
14735 BNX2X_ERR("One-step timestamping is not supported\n");
14736 return -ERANGE;
14737 }
14738
14739 switch (bp->rx_filter) {
14740 case HWTSTAMP_FILTER_NONE:
14741 break;
14742 case HWTSTAMP_FILTER_ALL:
14743 case HWTSTAMP_FILTER_SOME:
14744 bp->rx_filter = HWTSTAMP_FILTER_NONE;
14745 break;
14746 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
14747 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
14748 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
14749 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14750
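		/* Initialize PTP detection for UDP/IPv4 events */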
14751 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14752 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
14753 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14754 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
14755 break;
14756 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
14757 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
14758 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
14759 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14760
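		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */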
14761 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14762 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
14763 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14764 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
14765 break;
14766 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
14767 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
14768 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
14769 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14770
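		/* Initialize PTP detection for L2 events */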
14771 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14772 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
14773 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14774 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
14775
14776 break;
14777 case HWTSTAMP_FILTER_PTP_V2_EVENT:
14778 case HWTSTAMP_FILTER_PTP_V2_SYNC:
14779 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
14780 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14781
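		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6
		 * events
		 */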
14782 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14783 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
14784 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14785 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
14786 break;
14787 }
14788
14789
14790 rc = bnx2x_enable_ptp_packets(bp);
14791 if (rc)
14792 return rc;
14793
14794
14795 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
14796 NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
14797
14798 return 0;
14799}
14800
14801static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
14802{
14803 struct hwtstamp_config config;
14804 int rc;
14805
14806 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
14807
14808 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
14809 return -EFAULT;
14810
14811 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
14812 config.tx_type, config.rx_filter);
14813
14814 if (config.flags) {
14815 BNX2X_ERR("config.flags is reserved for future use\n");
14816 return -EINVAL;
14817 }
14818
14819 bp->hwtstamp_ioctl_called = 1;
14820 bp->tx_type = config.tx_type;
14821 bp->rx_filter = config.rx_filter;
14822
14823 rc = bnx2x_configure_ptp_filters(bp);
14824 if (rc)
14825 return rc;
14826
14827 config.rx_filter = bp->rx_filter;
14828
14829 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
14830 -EFAULT : 0;
14831}
14832
14833
14834static int bnx2x_configure_ptp(struct bnx2x *bp)
14835{
14836 int rc, port = BP_PORT(bp);
14837 u32 wb_data[2];
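
	/* Reset PTP event detection rules - will be configured in the
	 * IOCTL.
	 */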
14840 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14841 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
14842 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14843 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
14844 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
14845 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
14846 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
14847 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
14848
14849
14850 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
14851 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
14852
14853
14854 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
14855 NIG_REG_P0_PTP_EN, 0x3F);
14856
14857
14858 wb_data[0] = 0;
14859 wb_data[1] = 0;
14860 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
14861
14862
14863 rc = bnx2x_send_reset_timesync_ramrod(bp);
14864 if (rc) {
14865 BNX2X_ERR("Failed to reset PHC drift register\n");
14866 return -EFAULT;
14867 }
14868
14869
14870 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
14871 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
14872 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
14873 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
14874
14875 return 0;
14876}
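
/* Called during load, to initialize PTP-related stuff */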
14879void bnx2x_init_ptp(struct bnx2x *bp)
14880{
14881 int rc;
14882
14883
14884 rc = bnx2x_configure_ptp(bp);
14885 if (rc) {
14886 BNX2X_ERR("Stopping PTP initialization\n");
14887 return;
14888 }
14889
14890
14891 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
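
	/* Init cyclecounter and timecounter. This is done only in the first
	 * load. If done in every load, PTP application will fail when doing
	 * unload / load (e.g. MTU change) while it is running.
	 */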
14897 if (!bp->timecounter_init_done) {
14898 bnx2x_init_cyclecounter(bp);
14899 timecounter_init(&bp->timecounter, &bp->cyclecounter,
14900 ktime_to_ns(ktime_get_real()));
14901 bp->timecounter_init_done = 1;
14902 }
14903
14904 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
14905}
14906