/* bnx2x_main.c: Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet driver. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr[2];
	u32 umac_val[2];
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] = {
	[BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	[BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	[BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};

188
189#ifndef PCI_DEVICE_ID_NX2_57710
190#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
191#endif
192#ifndef PCI_DEVICE_ID_NX2_57711
193#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
194#endif
195#ifndef PCI_DEVICE_ID_NX2_57711E
196#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
197#endif
198#ifndef PCI_DEVICE_ID_NX2_57712
199#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
200#endif
201#ifndef PCI_DEVICE_ID_NX2_57712_MF
202#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
203#endif
204#ifndef PCI_DEVICE_ID_NX2_57712_VF
205#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
206#endif
207#ifndef PCI_DEVICE_ID_NX2_57800
208#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
209#endif
210#ifndef PCI_DEVICE_ID_NX2_57800_MF
211#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
212#endif
213#ifndef PCI_DEVICE_ID_NX2_57800_VF
214#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
215#endif
216#ifndef PCI_DEVICE_ID_NX2_57810
217#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
218#endif
219#ifndef PCI_DEVICE_ID_NX2_57810_MF
220#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
221#endif
222#ifndef PCI_DEVICE_ID_NX2_57840_O
223#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
224#endif
225#ifndef PCI_DEVICE_ID_NX2_57810_VF
226#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
227#endif
228#ifndef PCI_DEVICE_ID_NX2_57840_4_10
229#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
230#endif
231#ifndef PCI_DEVICE_ID_NX2_57840_2_20
232#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
233#endif
234#ifndef PCI_DEVICE_ID_NX2_57840_MFO
235#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
236#endif
237#ifndef PCI_DEVICE_ID_NX2_57840_MF
238#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
239#endif
240#ifndef PCI_DEVICE_ID_NX2_57840_VF
241#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
242#endif
243#ifndef PCI_DEVICE_ID_NX2_57811
244#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
245#endif
246#ifndef PCI_DEVICE_ID_NX2_57811_MF
247#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
248#endif
249#ifndef PCI_DEVICE_ID_NX2_57811_VF
250#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
251#endif
252
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/* Forward declarations */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

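/* Write a 64-bit DMA address into storm internal memory as two
 * 32-bit GRC writes (low dword first).
 */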
static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);

	REG_WR16(bp, addr, eq_prod);
}

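/* Indirect register access through the GRC address/data window in PCI
 * config space; used only while regular BAR access is unavailable.
 */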
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

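/* Copy the DMAE command into DMAE command memory and set the "go" bit
 * for the given channel.
 */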
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

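/* Issue a DMAE command over the init channel and wait for completion. */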
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the DMAE channel; BHs are disabled since this lock is also
	 * taken from softirq context.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);

	return rc;
}

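/* Write len32 dwords from a DMA-mapped buffer to GRC address dst_addr;
 * while DMAE is not ready, fall back to indirect or string writes.
 */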
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
	XSTORM,
	TSTORM,
	CSTORM,
	USTORM,
	MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
					      enum storms storm,
					      int entry)
{
	switch (storm) {
	case XSTORM:
		return XSTORM_ASSERT_LIST_OFFSET(entry);
	case TSTORM:
		return TSTORM_ASSERT_LIST_OFFSET(entry);
	case CSTORM:
		return CSTORM_ASSERT_LIST_OFFSET(entry);
	case USTORM:
		return USTORM_ASSERT_LIST_OFFSET(entry);
	case MAX_STORMS:
	default:
		BNX2X_ERR("unknown storm\n");
	}
	return -EINVAL;
}

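/* Scan each storm's assert list and log any valid entries; returns the
 * number of asserts found.
 */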
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	enum storms storm;
	u32 regs[REGS_IN_ENTRY];
	u32 bar_storm_intmem[STORMS_NUM] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};
	u32 storm_assert_list_index[STORMS_NUM] = {
		XSTORM_ASSERT_LIST_INDEX_OFFSET,
		TSTORM_ASSERT_LIST_INDEX_OFFSET,
		CSTORM_ASSERT_LIST_INDEX_OFFSET,
		USTORM_ASSERT_LIST_INDEX_OFFSET
	};
	char *storms_string[STORMS_NUM] = {
		"XSTORM",
		"TSTORM",
		"CSTORM",
		"USTORM"
	};

	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
				   storm_assert_list_index[storm]);
		if (last_idx)
			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
				  storms_string[storm], last_idx);

		/* print the asserts */
		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
			/* read a single assert entry */
			for (j = 0; j < REGS_IN_ENTRY; j++)
				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
					  bnx2x_get_assert_list_entry(bp,
								      storm,
								      i) +
					  sizeof(u32) * j);

			/* log entry if it contains a valid assert */
			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storms_string[storm], i, regs[3],
					  regs[2], regs[1], regs[0]);
				rc++;
			} else {
				break;
			}
		}
	}

	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
		  CHIP_IS_E1(bp) ? "everest1" :
		  CHIP_IS_E1H(bp) ? "everest1h" :
		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
		  BCM_5710_FW_MAJOR_VERSION,
		  BCM_5710_FW_MINOR_VERSION,
		  BCM_5710_FW_REVISION_VERSION);

	return rc;
}

#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

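/* Dump the MCP firmware trace buffer to the kernel log at level @lvl. */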
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		      (bp->common.bc_ver & 0xff0000) >> 16,
		      (bp->common.bc_ver & 0xff00) >> 8,
		      (bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	/* sanity: the trace buffer must sit inside the MCP scratchpad */
	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

	/* validate the trace buffer signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.\n");
		return;
	}

	/* read the cyclic buffer pointer (mark) */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability; disabling MSI_MSIX_INT_EN in the HC block
	 * is forbidden.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent the HC from sending
		 * interrupts after we exit this function.
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

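/* Dump driver and HW state to the log on a fatal error; on a PF,
 * interrupts can optionally be masked first.
 */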
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	if (IS_PF(bp) && disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	if (IS_PF(bp)) {
		struct host_sp_status_block *def_sb = bp->def_status_blk;
		int data_size, cstorm_offset;

		BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
			  bp->def_idx, bp->def_att_idx, bp->attn_state,
			  bp->spq_prod_idx, bp->stats_counter);
		BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
			  def_sb->atten_status_block.attn_bits,
			  def_sb->atten_status_block.attn_bits_ack,
			  def_sb->atten_status_block.status_block_id,
			  def_sb->atten_status_block.attn_bits_index);
		BNX2X_ERR(" def (");
		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
			pr_cont("0x%x%s",
				def_sb->sp_sb.index_values[i],
				(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

		data_size = sizeof(struct hc_sp_status_block_data) /
			    sizeof(u32);
		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
		for (i = 0; i < data_size; i++)
			*((u32 *)&sp_sb_data + i) =
				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
					   i * sizeof(u32));

		pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
			sp_sb_data.igu_sb_id,
			sp_sb_data.igu_seg_id,
			sp_sb_data.p_func.pf_id,
			sp_sb_data.p_func.vnic_id,
			sp_sb_data.p_func.vf_id,
			sp_sb_data.p_func.vf_valid,
			sp_sb_data.state);
	}

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos) {
			if (!fp->txdata_ptr[cos])
				break;

			txdata = *fp->txdata_ptr[cos];

			if (!txdata.tx_cons_sb)
				continue;

			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");

		/* VF cannot access the FW reflection of the status block */
		if (IS_VF(bp))
			continue;

		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (IS_PF(bp)) {
		/* event queue */
		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
		for (i = 0; i < NUM_EQ_DESC; i++) {
			u32 *data = (u32 *)&bp->eq_ring[i].message.data;

			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
				  i, bp->eq_ring[i].message.opcode,
				  bp->eq_ring[i].message.error);
			BNX2X_ERR("data: %x %x %x\n",
				  data[0], data[1], data[2]);
		}
	}

	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			if (!fp->txdata_ptr[cos])
				break;

			if (!txdata->tx_cons_sb)
				continue;

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	if (IS_PF(bp)) {
		bnx2x_fw_dump(bp);
		bnx2x_mc_assert(bp);
	}
	BNX2X_ERR("end crash dump -----------------\n");
}

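/*
 * FLR (Function Level Reset) support
 */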
#define FLR_WAIT_USEC		10000
#define FLR_WAIT_INTERVAL	50
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL)

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}

	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

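/* Poll the per-function HW usage counters (CFC, DQ, QM, Timers, DMAE)
 * until the function is quiescent.
 */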
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    CFC_REG_NUM_LCIDS_INSIDE_PF,
					    "CFC PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    DORQ_REG_PF_USAGE_CNT,
					    "DQ PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
					    "QM PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for Timers PF usage-counters to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
					    "Timers VNIC usage counter timed out",
					    poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
					    "Timers NUM_SCANS usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    dmae_reg_go_c[INIT_DMAE_C(bp)],
					    "DMAE command register timed out",
					    poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/* Master enable - must follow the cleanup above, since write-back
	 * DMAE transactions may still have been issued before this register
	 * is re-initialized as part of the regular function init.
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

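/* Enable interrupts through the HC block according to the active
 * MSI-X / single MSI-X / MSI / INTx configuration.
 */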
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	/* ensure that HC_CONFIG is written before the
	 * leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	/* clean previous status - need to configure igu prior to ack */
	if ((!msix) || single_msix) {
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
		bnx2x_ack_int(bp);
	}

	val |= IGU_PF_CONF_FUNC_EN;

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}

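/**
 * bnx2x_trylock_hw_lock - try to take a HW lock without waiting
 *
 * @bp:		driver handle
 * @resource:	resource bit to lock
 *
 * Returns true if the lock was taken, false otherwise.
 */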
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
			(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

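/* Returns the recovery leader HW lock resource id for this engine. */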
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

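/* Try to acquire the leader lock for the current engine. */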
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);

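/* Mark that a slow-path interrupt occurred and queue the sp task. */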
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* if interrupt_occurred is set, the sp task knows it must scan the
	 * default status block for slow-path events
	 */
	atomic_set(&bp->interrupt_occurred, 1);

	/* the sp_task must run only after this flag is set; make sure the
	 * store is visible before the work is queued
	 */
	smp_wmb();

	/* schedule sp_task to workqueue */
	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slow-path object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase bp->cq_spq_left
		 * because apparently we haven't sent this command in the
		 * first place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->cq_spq_left towards memory */
	smp_mb__after_atomic();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* If this queue-update completes the AFEX vif-set flow, MCP
		 * must be ACKed at the end.  Set the pending-ACK bit before
		 * clearing the update-pending bit so that the two are never
		 * both clear; the load/unload paths check that sp_state is
		 * cleared, and this ordering prevents races.
		 */
		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		/* schedule the sp task as MCP ack is required */
		bnx2x_schedule_sp_task(bp);
	}
}

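/* INTx/MSI interrupt handler: acks the IGU and dispatches fastpath NAPI,
 * CNIC and slow-path work according to the status bits.
 */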
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops && (bp->cnic_eth_dev.drv_state &
				      CNIC_DRV_STATE_HANDLES_IRQ))
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		/* schedule sp task to perform default status block work and
		 * ack attention interrupts if needed
		 */
		bnx2x_schedule_sp_task(bp);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

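/*
 * General service functions: HW lock acquire/release.
 */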
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		usleep_range(5000, 10000);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if the swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if the swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

2146
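/* Drive a mask of GPIO pins to output low, output high or input/hi-Z
 * under the GPIO hardware lock. Unlike bnx2x_set_gpio(), no port
 * swapping is applied here - the caller passes the final pin mask.
 */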
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
{
	u32 gpio_reg = 0;
	int rc = 0;

	/* Any port swapping should be handled by caller */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	/* read GPIO and mask except the float bits */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
		rc = -EINVAL;
		break;
	}

	if (rc == 0)
		REG_WR(bp, MISC_REG_GPIO, gpio_reg);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return rc;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK,
		   "Clear GPIO INT %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK,
		   "Set GPIO INT %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

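/* Configure one of the two configurable SPIO pins (SPIO4/SPIO5) to
 * output low, output high or input/hi-Z under the SPIO hardware lock.
 */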
static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
{
	u32 spio_reg;

	/* Only 2 SPIOs are configurable */
	if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
		BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

	switch (mode) {
	case MISC_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |= (spio << MISC_SPIO_CLR_POS);
		break;

	case MISC_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |= (spio << MISC_SPIO_SET_POS);
		break;

	case MISC_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
		/* set FLOAT */
		spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);

	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_set_requested_fc(struct bnx2x *bp)
{
	/* Initialize link parameters structure variables
	 * It is recommended to turn off RX FC for jumbo frames
	 * for better performance
	 */
	if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
		bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
	else
		bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
}

static void bnx2x_init_dropless_fc(struct bnx2x *bp)
{
	u32 pause_enabled = 0;

	if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			pause_enabled = 1;

		REG_WR(bp, BAR_USTRORM_INTMEM +
			   USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
		       pause_enabled);
	}

	DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
	   pause_enabled ? "enabled" : "disabled");
}

int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
	u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];

	if (!BP_NOMCP(bp)) {
		bnx2x_set_requested_fc(bp);
		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			struct link_params *lp = &bp->link_params;
			lp->loopback_mode = LOOPBACK_XGXS;
			/* Prefer doing PHY loopback at 10G speed, if
			 * possible; otherwise fall back to 1G.
			 */
			if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
				if (lp->speed_cap_mask[cfx_idx] &
				    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
					lp->req_line_speed[cfx_idx] =
					SPEED_10000;
				else
					lp->req_line_speed[cfx_idx] =
					SPEED_1000;
			}
		}

		if (load_mode == LOAD_LOOPBACK_EXT) {
			struct link_params *lp = &bp->link_params;
			lp->loopback_mode = LOOPBACK_EXT;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_init_dropless_fc(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_init_dropless_fc(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

void bnx2x_force_link_reset(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
	bnx2x_release_phy_lock(bp);
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

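/* Derive the per-vnic minimum (fairness) rates from the MF configuration.
 * Hidden vnics get a MIN rate of 0, and vnics with an unconfigured MIN get
 * DEF_MIN_RATE. If ETS is enabled, or every configured MIN rate is zero,
 * per-vnic fairness is disabled in the cmng flags.
 */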
static void bnx2x_calc_vn_min(struct bnx2x *bp,
			      struct cmng_init_input *input)
{
	int all_zero = 1;
	int vn;

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			vn_min_rate = 0;
		/* If min rate is zero - set it to 1 */
		else if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		input->vnic_min_rate[vn] = vn_min_rate;
	}

	/* if ETS or all min rate(s) are 0 - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP,
		   "All MIN values are zeroes fairness will be disabled\n");
	} else
		input->flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
			      struct cmng_init_input *input)
{
	u16 vn_max_rate;
	u32 vn_cfg = bp->mf_config[vn];

	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
		vn_max_rate = 0;
	else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		if (IS_MF_SI(bp)) {
			/* maxCfg in percents of linkspeed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		} else /* SD modes */
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

	input->vnic_max_rate[vn] = vn_max_rate;
}

static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);

	if (BP_NOMCP(bp))
		return;

	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	} else {
		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
		bp->flags &= ~MF_FUNC_DIS;
	}
}

static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{
	struct cmng_init_input input;
	memset(&input, 0, sizeof(struct cmng_init_input));

	input.port_rate = bp->link_vars.line_speed;

	if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
		int vn;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_min(bp, &input);

		/* calculate and set min-max rate for each vn */
		if (bp->port.pmf)
			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
				bnx2x_calc_vn_max(bp, vn, &input);

		/* always enable rate shaping and fairness */
		input.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

		bnx2x_init_cmng(&input, &bp->cmng);
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}

static void storm_memset_cmng(struct bnx2x *bp,
			      struct cmng_init *cmng,
			      u8 port)
{
	int vn;
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int func = func_by_vn(bp, vn);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct rate_shaping_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct fairness_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
	}
}

/* init cmng mode in HW according to local configuration */
void bnx2x_set_local_cmng(struct bnx2x *bp)
{
	int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

	if (cmng_fns != CMNG_FNS_NONE) {
		bnx2x_cmng_fns_init(bp, false, cmng_fns);
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode without fairness\n");
	}
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	bnx2x_init_dropless_fc(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old mac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	if (bp->link_vars.link_up && bp->link_vars.line_speed)
		bnx2x_set_local_cmng(bp);

	__bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	/* read updated dcb configuration */
	if (IS_PF(bp)) {
		bnx2x_dcbx_pmf_update(bp);
		bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
		if (bp->link_vars.link_up)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
		else
			bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		/* indicate link status */
		bnx2x_link_report(bp);

	} else { /* VF */
		bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_2500baseX_Full |
					  SUPPORTED_10000baseT_Full |
					  SUPPORTED_TP |
					  SUPPORTED_FIBRE |
					  SUPPORTED_Autoneg |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
		bp->port.advertising[0] = bp->port.supported[0];

		bp->link_params.bp = bp;
		bp->link_params.port = BP_PORT(bp);
		bp->link_params.req_duplex[0] = DUPLEX_FULL;
		bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
		bp->link_params.req_line_speed[0] = SPEED_10000;
		bp->link_params.speed_cap_mask[0] = 0x7f0000;
		bp->link_params.switch_cfg = SWITCH_CFG_10G;
		bp->link_vars.mac_type = MAC_TYPE_BMAC;
		bp->link_vars.line_speed = SPEED_10000;
		bp->link_vars.link_status =
			(LINK_STATUS_LINK_UP |
			 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
		bp->link_vars.link_up = 1;
		bp->link_vars.duplex = DUPLEX_FULL;
		bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
		__bnx2x_link_report(bp);

		bnx2x_sample_bulletin(bp);

		/* if bulletin board did not have an update for link status
		 * __bnx2x_link_report will report current status
		 * but it will NOT duplicate report in case of already reported
		 * during sampling bulletin board.
		 */
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}
}

static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
				  u16 vlan_val, u8 allowed_prio)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_update_params *f_update_params =
		&func_params.params.afex_update;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;

	/* no need to wait for RAMROD completion, so don't
	 * set RAMROD_COMP_WAIT flag
	 */
	f_update_params->vif_id = vifid;
	f_update_params->afex_default_vlan = vlan_val;
	f_update_params->allowed_priorities = allowed_prio;

	/* if ramrod can not be sent, respond to MCP immediately */
	if (bnx2x_func_state_change(bp, &func_params) < 0)
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);

	return 0;
}

static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
					  u16 vif_index, u8 func_bit_map)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_viflists_params *update_params =
		&func_params.params.afex_viflists;
	int rc;
	u32 drv_msg_code;

	/* validate only LIST_SET and LIST_GET are received from switch */
	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
			  cmd_type);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;

	/* set parameters according to cmd_type */
	update_params->afex_vif_list_command = cmd_type;
	update_params->vif_list_index = vif_index;
	update_params->func_bit_map =
		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
	update_params->func_to_clear = 0;
	drv_msg_code =
		(cmd_type == VIF_LIST_RULE_GET) ?
		DRV_MSG_CODE_AFEX_LISTGET_ACK :
		DRV_MSG_CODE_AFEX_LISTSET_ACK;

	/* if ramrod can not be sent, respond to MCP immediately for
	 * SET and GET requests (other are not triggered from MCP)
	 */
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc < 0)
		bnx2x_fw_command(bp, drv_msg_code, 0);

	return 0;
}

static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
{
	struct afex_stats afex_stats;
	u32 func = BP_ABS_FUNC(bp);
	u32 mf_config;
	u16 vlan_val;
	u32 vlan_prio;
	u16 vif_id;
	u8 allowed_prio;
	u8 vlan_mode;
	u32 addr_to_write, vifid, addrs, stats_type, i;

	if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
	}

	if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
		   vifid, addrs);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
					       addrs);
	}

	if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
		addr_to_write = SHMEM2_RD(bp,
			afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
		stats_type = SHMEM2_RD(bp,
			afex_param1_to_driver[BP_FW_MB_IDX(bp)]);

		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
		   addr_to_write);

		bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);

		/* write response to scratchpad, for MCP */
		for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
			REG_WR(bp, addr_to_write + i*sizeof(u32),
			       *(((u32 *)(&afex_stats))+i));

		/* send ack message to MCP */
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
	}

	if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
		mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
		bp->mf_config[BP_VN(bp)] = mf_config;
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
		   mf_config);

		/* if VIF_SET is "enabled" */
		if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
			/* set rate limit directly to internal RAM */
			struct cmng_init_input cmng_input;
			struct rate_shaping_vars_per_vn m_rs_vn;
			size_t size = sizeof(struct rate_shaping_vars_per_vn);
			u32 addr = BAR_XSTRORM_INTMEM +
			    XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));

			bp->mf_config[BP_VN(bp)] = mf_config;

			bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
			m_rs_vn.vn_counter.rate =
				cmng_input.vnic_max_rate[BP_VN(bp)];
			m_rs_vn.vn_counter.quota =
				(m_rs_vn.vn_counter.rate *
				 RS_PERIODIC_TIMEOUT_USEC) / 8;

			__storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);

			/* read relevant values from mf_cfg struct in shmem */
			vif_id =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
				FUNC_MF_CFG_E1HOV_TAG_SHIFT;
			vlan_val =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_SHIFT;
			vlan_prio = (mf_config &
				     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
				    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
			vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
			vlan_mode =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
			allowed_prio =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;

			/* send ramrod to FW, return in case of failure */
			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
						   allowed_prio))
				return;

			bp->afex_def_vlan_tag = vlan_val;
			bp->afex_vlan_mode = vlan_mode;
		} else {
			/* notify link down because BP->flags is disabled */
			bnx2x_link_report(bp);

			/* send INVALID VIF ramrod to FW */
			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);

			/* Reset the default afex VLAN */
			bp->afex_def_vlan_tag = -1;
		}
	}
}

static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
{
	struct bnx2x_func_switch_update_params *switch_update_params;
	struct bnx2x_func_state_params func_params;

	memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
	switch_update_params = &func_params.params.switch_update;
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;

	if (IS_MF_UFP(bp)) {
		int func = BP_ABS_FUNC(bp);
		u32 val;

		/* Re-learn the S-tag from shmem */
		val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			bp->mf_ov = val;
		} else {
			BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
			goto fail;
		}

		/* Configure new S-tag in LLH */
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
		       bp->mf_ov);

		/* Send Ramrod to update FW of change */
		__set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
			  &switch_update_params->changes);
		switch_update_params->vlan = bp->mf_ov;

		if (bnx2x_func_state_change(bp, &func_params) < 0) {
			BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
				  bp->mf_ov);
			goto fail;
		}

		DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);

		bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);

		return;
	}

	/* not supported by SW yet */
fail:
	bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);

	/*
	 * We need the mb() to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
	 */
	smp_mb();

	/* queue a periodic task */
	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);

	bnx2x_dcbx_pmf_update(bp);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

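/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* Send the MCP a request, block until there is a reply. The firmware
 * mailbox is polled for up to 5 seconds for a response carrying the same
 * sequence number; the masked response code is returned, or 0 on timeout.
 */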
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	seq = ++bp->fw_seq;
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
	   (command | seq), param);

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

static void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (CHIP_IS_E1x(bp)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

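/**
 * bnx2x_get_common_flags - Return flags that are common for all queue types
 *
 * @bp:		driver handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */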
static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp,
					    bool zero_stats)
{
	unsigned long flags = 0;

	/* PF driver will always initialize the Queue to an ACTIVE state */
	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);

	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
	 */
	__set_bit(BNX2X_Q_FLG_STATS, &flags);
	if (zero_stats)
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);

	if (bp->flags & TX_SWITCHING)
		__set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);

	__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
	__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);

#ifdef BNX2X_STOP_ON_ERROR
	__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
#endif

	return flags;
}

static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp,
				       bool leading)
{
	unsigned long flags = 0;

	/* calculate other queue flags */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, &flags);

	if (IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
		/* For FCoE - force usage of default priority (for afex) */
		__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
	}

	if (fp->mode != TPA_MODE_DISABLED) {
		__set_bit(BNX2X_Q_FLG_TPA, &flags);
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
		if (fp->mode == TPA_MODE_GRO)
			__set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
	}

	if (leading) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &flags);
	}

	/* Always set HW VLAN stripping */
	__set_bit(BNX2X_Q_FLG_VLAN, &flags);

	/* configure silent vlan removal */
	if (IS_MF_AFEX(bp))
		__set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);

	return flags | bnx2x_get_common_flags(bp, fp, true);
}

static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
	u8 cos)
{
	gen_init->stat_id = bnx2x_stats_id(fp);
	gen_init->spcl_id = fp->cl_id;

	/* Always use mini-jumbo MTU for FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
	else
		gen_init->mtu = bp->dev->mtu;

	gen_init->cos = cos;

	gen_init->fp_hsi = ETH_FP_HSI_VERSION;
}

static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_setup_params *rxq_init)
{
	u8 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	if (fp->mode != TPA_MODE_DISABLED) {
		pause->sge_th_lo = SGE_TH_LO(bp);
		pause->sge_th_hi = SGE_TH_HI(bp);

		/* validate SGE ring has enough to cross high threshold */
		WARN_ON(bp->dropless_fc &&
			pause->sge_th_hi + FW_PREFETCH_CNT >
			MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);

		tpa_agg_size = TPA_AGG_SIZE;
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_lo = BD_TH_LO(bp);
		pause->bd_th_hi = BD_TH_HI(bp);

		pause->rcq_th_lo = RCQ_TH_LO(bp);
		pause->rcq_th_hi = RCQ_TH_HI(bp);

		/* validate BD and RCQ rings have enough entries to cross
		 * their high thresholds
		 */
		WARN_ON(bp->dropless_fc &&
				pause->bd_th_hi + FW_PREFETCH_CNT >
				bp->rx_ring_size);
		WARN_ON(bp->dropless_fc &&
				pause->rcq_th_hi + FW_PREFETCH_CNT >
				NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);

		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;

	/* This should be a maximum number of data bytes that may be
	 * placed on the BD (not including paddings).
	 */
	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
			   BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;

	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->rss_engine_id = BP_FUNC(bp);
	rxq_init->mcast_engine_id = BP_FUNC(bp);

	/* Maximum number or simultaneous TPA aggregation for this Queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */
	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);

	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;

	/* configure silent vlan removal
	 * if multi function mode is afex, then mask default vlan
	 */
	if (IS_MF_AFEX(bp)) {
		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
		rxq_init->silent_removal_mask = VLAN_VID_MASK;
	}
}

static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
	u8 cos)
{
	txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;

	/*
	 * set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);

	if (IS_FCOE_FP(fp)) {
		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
	}
}

static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	if (!CHIP_IS_E1x(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* This flag is relevant for E1x only.
	 * E2 doesn't have a TPA configuration in a function level.
	 */
	flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link so initial link rate is set to 10 Gbps.
	 * When the link comes up the congestion management values are
	 * re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* init Event Queue - PCI bus guarantees correct endianity */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bnx2x_tx_disable(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);

	/* Tx queue should be only re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3

static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
	struct eth_stats_info *ether_stat =
		&bp->slowpath->drv_info_to_mcp.ether_stat;
	struct bnx2x_vlan_mac_obj *mac_obj =
		&bp->sp_objs->mac_obj;
	int i;

	strlcpy(ether_stat->version, DRV_MODULE_VERSION,
		ETH_STAT_INFO_VERSION_LEN);

	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
	 * mac_local field in ether_stat struct. The base address is offset by
	 * 2 bytes to account for the field being 8 bytes but a mac address is
	 * only 6 bytes. Likewise, the stride for the get_n_elements function
	 * is 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
	 * needed for the rest of each element.
	 */
	for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
		memset(ether_stat->mac_local + i, 0,
		       sizeof(ether_stat->mac_local[0]));
	mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
				DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
				ether_stat->mac_local + MAC_PAD, MAC_PAD,
				ETH_ALEN);
	ether_stat->mtu_size = bp->dev->mtu;
	if (bp->dev->features & NETIF_F_RXCSUM)
		ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
	if (bp->dev->features & NETIF_F_TSO)
		ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
	ether_stat->feature_flags |= bp->common.boot_mode;

	ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;

	ether_stat->txq_size = bp->tx_ring_size;
	ether_stat->rxq_size = bp->rx_ring_size;

#ifdef CONFIG_BNX2X_SRIOV
	ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
#endif
}

static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
{
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct fcoe_stats_info *fcoe_stat =
		&bp->slowpath->drv_info_to_mcp.fcoe_stat;

	if (!CNIC_LOADED(bp))
		return;

	memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);

	fcoe_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];

	/* insert FCoE stats from ramrod response */
	if (!NO_FCOE(bp)) {
		struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
			&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
			tstorm_queue_statistics;

		struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
			&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
			xstorm_queue_statistics;

		struct fcoe_statistics_params *fw_fcoe_stat =
			&bp->fw_stats_data->fcoe;

		ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
			  fcoe_stat->rx_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(fcoe_stat->rx_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  fcoe_stat->rx_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(fcoe_stat->rx_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  fcoe_stat->rx_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(fcoe_stat->rx_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  fcoe_stat->rx_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
			  fcoe_stat->rx_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
			  fcoe_stat->rx_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
			  fcoe_stat->rx_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
			  fcoe_stat->rx_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
			  fcoe_stat->tx_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(fcoe_stat->tx_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  fcoe_stat->tx_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(fcoe_stat->tx_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  fcoe_stat->tx_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(fcoe_stat->tx_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  fcoe_stat->tx_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
			  fcoe_stat->tx_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
			  fcoe_stat->tx_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
			  fcoe_stat->tx_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
			  fcoe_stat->tx_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);
	}

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
}

static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
{
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct iscsi_stats_info *iscsi_stat =
		&bp->slowpath->drv_info_to_mcp.iscsi_stat;

	if (!CNIC_LOADED(bp))
		return;

	memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
	       ETH_ALEN);

	iscsi_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
}

/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify others function about the change
 */
static void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}

static void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}

static void bnx2x_handle_eee_event(struct bnx2x *bp)
{
	DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
	bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}

#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH	(20)
#define BNX2X_UPDATE_DRV_INFO_IND_COUNT		(25)

static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
	enum drv_info_opcode op_code;
	u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
	bool release = false;
	int wait;

	/* if drv_info version supported by MFW doesn't match - send NACK */
	if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
		return;
	}

	op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
		  DRV_INFO_CONTROL_OP_CODE_SHIFT;

	/* Must prevent other flows from accessing drv_info_to_mcp */
	mutex_lock(&bp->drv_info_mutex);

	memset(&bp->slowpath->drv_info_to_mcp, 0,
	       sizeof(union drv_info_to_mcp));

	switch (op_code) {
	case ETH_STATS_OPCODE:
		bnx2x_drv_info_ether_stat(bp);
		break;
	case FCOE_STATS_OPCODE:
		bnx2x_drv_info_fcoe_stat(bp);
		break;
	case ISCSI_STATS_OPCODE:
		bnx2x_drv_info_iscsi_stat(bp);
		break;
	default:
		/* if op code isn't supported - send NACK */
		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
		goto out;
	}

	/* if we got drv_info attn from MFW then these fields are defined in
	 * shmem2 for sure
	 */
	SHMEM2_WR(bp, drv_info_host_addr_lo,
		  U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
	SHMEM2_WR(bp, drv_info_host_addr_hi,
		  U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));

	bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);

	/* Management may want to use the buffer as well; need to wait until
	 * it notifies us that it finished utilizing the buffer.
	 */
	if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
		DP(BNX2X_MSG_MCP, "Management does not support indication\n");
	} else if (!bp->drv_info_mng_owner) {
		u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));

		for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
			u32 indication = SHMEM2_RD(bp, mfw_drv_indication);

			/* Management is done; need to clear indication */
			if (indication & bit) {
				SHMEM2_WR(bp, mfw_drv_indication,
					  indication & ~bit);
				release = true;
				break;
			}

			msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
		}
	}
	if (!release) {
		DP(BNX2X_MSG_MCP, "Management did not release indication\n");
		bp->drv_info_mng_owner = true;
	}

out:
	mutex_unlock(&bp->drv_info_mutex);
}

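/* Pack a dotted version string into a 32-bit value, one byte per field.
 * For the bnx2x driver format ("1.D<num>.<num>.<num>") the leading "1."
 * is skipped and the first digit is converted from ASCII; any fields
 * that fail to parse are zeroed.
 */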
static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
{
	u8 vals[4];
	int i = 0;

	if (bnx2x_format) {
		i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
			   &vals[0], &vals[1], &vals[2], &vals[3]);
		if (i > 0)
			vals[0] -= '0';
	} else {
		i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
			   &vals[0], &vals[1], &vals[2], &vals[3]);
	}

	while (i < 4)
		vals[i++] = 0;

	return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
}

void bnx2x_update_mng_version(struct bnx2x *bp)
{
	u32 iscsiver = DRV_VER_NOT_LOADED;
	u32 fcoever = DRV_VER_NOT_LOADED;
	u32 ethver = DRV_VER_NOT_LOADED;
	int idx = BP_FW_MB_IDX(bp);
	u8 *version;

	if (!SHMEM2_HAS(bp, func_os_drv_ver))
		return;

	mutex_lock(&bp->drv_info_mutex);
	/* If management never released the drv_info buffer, don't touch it */
	if (bp->drv_info_mng_owner)
		goto out;

	if (bp->state != BNX2X_STATE_OPEN)
		goto out;

	/* Parse ethernet driver version */
	ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
	if (!CNIC_LOADED(bp))
		goto out;

	/* Try getting storage driver versions via CNIC */
	memset(&bp->slowpath->drv_info_to_mcp, 0,
	       sizeof(union drv_info_to_mcp));
	bnx2x_drv_info_iscsi_stat(bp);
	version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
	iscsiver = bnx2x_update_mng_version_utility(version, false);

	memset(&bp->slowpath->drv_info_to_mcp, 0,
	       sizeof(union drv_info_to_mcp));
	bnx2x_drv_info_fcoe_stat(bp);
	version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
	fcoever = bnx2x_update_mng_version_utility(version, false);

out:
	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);

	mutex_unlock(&bp->drv_info_mutex);

	DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
	   ethver, iscsiver, fcoever);
}

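/* Handle a DCC/OEM event from the MCP: both event families share the same
 * disable/enable-PF and bandwidth-allocation handling, and differ only in
 * the OK/FAILURE response codes sent back to the firmware.
 */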
static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
{
	u32 cmd_ok, cmd_fail;

	/* sanity - DCC and OEM events are mutually exclusive */
	if (event & DRV_STATUS_DCC_EVENT_MASK &&
	    event & DRV_STATUS_OEM_EVENT_MASK) {
		BNX2X_ERR("Received simultaneous events %08x\n", event);
		return;
	}

	if (event & DRV_STATUS_DCC_EVENT_MASK) {
		cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
		cmd_ok = DRV_MSG_CODE_DCC_OK;
	} else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
		cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
		cmd_ok = DRV_MSG_CODE_OEM_OK;
	}

	DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);

	if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
		     DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
		/* This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
			   DRV_STATUS_OEM_DISABLE_ENABLE_PF);
	}

	if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
		     DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
		bnx2x_config_mf_bw(bp);
		event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
			   DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
	}

	/* Report results to MCP */
	if (event)
		bnx2x_fw_command(bp, cmd_fail, 0);
	else
		bnx2x_fw_command(bp, cmd_ok, 0);
}

/* must be called under the spq lock */
static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(BNX2X_MSG_SP, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/*
	 * Make sure that BD data is updated before writing the producer:
	 * BD data is written to the memory, the producer is read from the
	 * memory, thus we need a full memory barrier to ensure the ordering.
	 */
	mb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}

/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command type
 */
static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
{
	if ((cmd_type == NONE_CONNECTION_TYPE) ||
	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
		return true;
	else
		return false;
}

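/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */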
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int cmd_type)
{
	struct eth_spe *spe;
	u16 type;
	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post SP when there is panic\n");
		return -EIO;
	}
#endif

	spin_lock_bh(&bp->spq_lock);

	if (common) {
		if (!atomic_read(&bp->eq_spq_left)) {
			BNX2X_ERR("BUG! EQ ring full!\n");
			spin_unlock_bh(&bp->spq_lock);
			bnx2x_panic();
			return -EBUSY;
		}
	} else if (!atomic_read(&bp->cq_spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded int it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	/* In some cases, type may already contain the func-id
	 * mainly in SRIOV related use cases, so we add it here only
	 * if it's not already set.
	 */
	if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
		type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
			SPE_HDR_CONN_TYPE;
		type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);
	} else {
		type = cmd_type;
	}

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/*
	 * It's ok if the actual decrement is issued towards the memory
	 * somewhere between the spin_lock and spin_unlock. Thus no
	 * more explicit memory barrier is needed.
	 */
	if (common)
		atomic_dec(&bp->eq_spq_left);
	else
		atomic_dec(&bp->cq_spq_left);

	DP(BNX2X_MSG_SP,
	   "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
	   HW_CID(bp, cid), data_hi, data_lo, type,
	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

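/* acquire split MCP access lock register */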
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
		val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
		if (val & MCPR_ACCESS_LOCK_LOCK)
			break;

		usleep_range(5000, 10000);
	}
	if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
}

#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002

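/* Compare the default status block indices against the driver's cached
 * copies and return a mask (BNX2X_DEF_SB_ATT_IDX / BNX2X_DEF_SB_IDX) of
 * the ones that changed, updating the cached copies along the way.
 */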
static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}

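/*
 * slow path service functions
 */

/* Handle newly asserted attention bits: mask them in the AEU, record them
 * in bp->attn_state, service the hard-wired sources (NIG link interrupt,
 * GPIOs, general attentions) and finally acknowledge them towards the
 * HC/IGU.
 */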
3968static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3969{
3970 int port = BP_PORT(bp);
3971 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3972 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3973 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3974 NIG_REG_MASK_INTERRUPT_PORT0;
3975 u32 aeu_mask;
3976 u32 nig_mask = 0;
3977 u32 reg_addr;
3978
3979 if (bp->attn_state & asserted)
3980 BNX2X_ERR("IGU ERROR\n");
3981
3982 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3983 aeu_mask = REG_RD(bp, aeu_addr);
3984
3985 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3986 aeu_mask, asserted);
3987 aeu_mask &= ~(asserted & 0x3ff);
3988 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3989
3990 REG_WR(bp, aeu_addr, aeu_mask);
3991 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3992
3993 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3994 bp->attn_state |= asserted;
3995 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3996
3997 if (asserted & ATTN_HARD_WIRED_MASK) {
3998 if (asserted & ATTN_NIG_FOR_FUNC) {
3999
4000 bnx2x_acquire_phy_lock(bp);
4001
4002
4003 nig_mask = REG_RD(bp, nig_int_mask_addr);
4004
4005
4006
4007
4008 if (nig_mask) {
4009 REG_WR(bp, nig_int_mask_addr, 0);
4010
4011 bnx2x_link_attn(bp);
4012 }
4013
4014
4015 }
4016 if (asserted & ATTN_SW_TIMER_4_FUNC)
4017 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4018
4019 if (asserted & GPIO_2_FUNC)
4020 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4021
4022 if (asserted & GPIO_3_FUNC)
4023 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4024
4025 if (asserted & GPIO_4_FUNC)
4026 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4027
4028 if (port == 0) {
4029 if (asserted & ATTN_GENERAL_ATTN_1) {
4030 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4031 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4032 }
4033 if (asserted & ATTN_GENERAL_ATTN_2) {
4034 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4035 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4036 }
4037 if (asserted & ATTN_GENERAL_ATTN_3) {
4038 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4039 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4040 }
4041 } else {
4042 if (asserted & ATTN_GENERAL_ATTN_4) {
4043 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4044 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4045 }
4046 if (asserted & ATTN_GENERAL_ATTN_5) {
4047 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4048 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4049 }
4050 if (asserted & ATTN_GENERAL_ATTN_6) {
4051 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4052 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4053 }
4054 }
4055
4056 }
4057
4058 if (bp->common.int_block == INT_BLOCK_HC)
4059 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4060 COMMAND_REG_ATTN_BITS_SET);
4061 else
4062 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4063
4064 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4065 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4066 REG_WR(bp, reg_addr, asserted);
4067
4068
4069 if (asserted & ATTN_NIG_FOR_FUNC) {
4070
4071
4072
4073 if (bp->common.int_block != INT_BLOCK_HC) {
4074 u32 cnt = 0, igu_acked;
4075 do {
4076 igu_acked = REG_RD(bp,
4077 IGU_REG_ATTENTION_ACK_BITS);
4078 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4079 (++cnt < MAX_IGU_ATTN_ACK_TO));
4080 if (!igu_acked)
4081 DP(NETIF_MSG_HW,
4082 "Failed to verify IGU ack on time\n");
4083 barrier();
4084 }
4085 REG_WR(bp, nig_int_mask_addr, nig_mask);
4086 bnx2x_release_phy_lock(bp);
4087 }
4088}
4089
4090static void bnx2x_fan_failure(struct bnx2x *bp)
4091{
4092 int port = BP_PORT(bp);
4093 u32 ext_phy_config;
4094
4095 ext_phy_config =
4096 SHMEM_RD(bp,
4097 dev_info.port_hw_config[port].external_phy_config);
4098
4099 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4100 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4101 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4102 ext_phy_config);
4103
4104
4105 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4106 "Please contact OEM Support for assistance\n");
4107
4108
4109
4110
4111
4112 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4113}
4114
4115static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4116{
4117 int port = BP_PORT(bp);
4118 int reg_offset;
4119 u32 val;
4120
4121 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4122 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4123
4124 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4125
4126 val = REG_RD(bp, reg_offset);
4127 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4128 REG_WR(bp, reg_offset, val);
4129
4130 BNX2X_ERR("SPIO5 hw attention\n");
4131
4132
4133 bnx2x_hw_reset_phy(&bp->link_params);
4134 bnx2x_fan_failure(bp);
4135 }
4136
4137 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4138 bnx2x_acquire_phy_lock(bp);
4139 bnx2x_handle_module_detect_int(&bp->link_params);
4140 bnx2x_release_phy_lock(bp);
4141 }
4142
4143 if (attn & HW_INTERRUT_ASSERT_SET_0) {
4144
4145 val = REG_RD(bp, reg_offset);
4146 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
4147 REG_WR(bp, reg_offset, val);
4148
4149 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4150 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
4151 bnx2x_panic();
4152 }
4153}
4154
4155static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4156{
4157 u32 val;
4158
4159 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4160
4161 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4162 BNX2X_ERR("DB hw attention 0x%x\n", val);
4163
4164 if (val & 0x2)
4165 BNX2X_ERR("FATAL error from DORQ\n");
4166 }
4167
4168 if (attn & HW_INTERRUT_ASSERT_SET_1) {
4169
4170 int port = BP_PORT(bp);
4171 int reg_offset;
4172
4173 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4174 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4175
4176 val = REG_RD(bp, reg_offset);
4177 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
4178 REG_WR(bp, reg_offset, val);
4179
4180 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4181 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
4182 bnx2x_panic();
4183 }
4184}
4185
4186static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4187{
4188 u32 val;
4189
4190 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4191
4192 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4193 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4194
4195 if (val & 0x2)
4196 BNX2X_ERR("FATAL error from CFC\n");
4197 }
4198
4199 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4200 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4201 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4202
4203 if (val & 0x18000)
4204 BNX2X_ERR("FATAL error from PXP\n");
4205
4206 if (!CHIP_IS_E1x(bp)) {
4207 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4208 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4209 }
4210 }
4211
4212 if (attn & HW_INTERRUT_ASSERT_SET_2) {
4213
4214 int port = BP_PORT(bp);
4215 int reg_offset;
4216
4217 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4218 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4219
4220 val = REG_RD(bp, reg_offset);
4221 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
4222 REG_WR(bp, reg_offset, val);
4223
4224 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4225 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
4226 bnx2x_panic();
4227 }
4228}
4229
4230static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4231{
4232 u32 val;
4233
4234 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4235
4236 if (attn & BNX2X_PMF_LINK_ASSERT) {
4237 int func = BP_FUNC(bp);
4238
4239 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4240 bnx2x_read_mf_cfg(bp);
4241 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4242 func_mf_config[BP_ABS_FUNC(bp)].config);
4243 val = SHMEM_RD(bp,
4244 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4245
4246 if (val & (DRV_STATUS_DCC_EVENT_MASK |
4247 DRV_STATUS_OEM_EVENT_MASK))
4248 bnx2x_oem_event(bp,
4249 (val & (DRV_STATUS_DCC_EVENT_MASK |
4250 DRV_STATUS_OEM_EVENT_MASK)));
4251
4252 if (val & DRV_STATUS_SET_MF_BW)
4253 bnx2x_set_mf_bw(bp);
4254
4255 if (val & DRV_STATUS_DRV_INFO_REQ)
4256 bnx2x_handle_drv_info_req(bp);
4257
4258 if (val & DRV_STATUS_VF_DISABLED)
4259 bnx2x_schedule_iov_task(bp,
4260 BNX2X_IOV_HANDLE_FLR);
4261
4262 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4263 bnx2x_pmf_update(bp);
4264
4265 if (bp->port.pmf &&
4266 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4267 bp->dcbx_enabled > 0)
4268
4269 bnx2x_dcbx_set_params(bp,
4270 BNX2X_DCBX_STATE_NEG_RECEIVED);
4271 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4272 bnx2x_handle_afex_cmd(bp,
4273 val & DRV_STATUS_AFEX_EVENT_MASK);
4274 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4275 bnx2x_handle_eee_event(bp);
4276
4277 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4278 bnx2x_handle_update_svid_cmd(bp);
4279
4280 if (bp->link_vars.periodic_flags &
4281 PERIODIC_FLAGS_LINK_EVENT) {
4282
4283 bnx2x_acquire_phy_lock(bp);
4284 bp->link_vars.periodic_flags &=
4285 ~PERIODIC_FLAGS_LINK_EVENT;
4286 bnx2x_release_phy_lock(bp);
4287 if (IS_MF(bp))
4288 bnx2x_link_sync_notify(bp);
4289 bnx2x_link_report(bp);
4290 }
4293
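			/* Always update the link status here;
			 * bnx2x_link_report() filters out duplicate
			 * indications.
			 */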
4294 bnx2x__link_status_update(bp);
4295 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4296
4297 BNX2X_ERR("MC assert!\n");
4298 bnx2x_mc_assert(bp);
4299 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4300 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4301 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4302 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4303 bnx2x_panic();
4304
4305 } else if (attn & BNX2X_MCP_ASSERT) {
4306
4307 BNX2X_ERR("MCP assert!\n");
4308 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4309 bnx2x_fw_dump(bp);
4310
4311 } else
4312 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4313 }
4314
4315 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4316 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4317 if (attn & BNX2X_GRC_TIMEOUT) {
4318 val = CHIP_IS_E1(bp) ? 0 :
4319 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4320 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4321 }
4322 if (attn & BNX2X_GRC_RSV) {
4323 val = CHIP_IS_E1(bp) ? 0 :
4324 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4325 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4326 }
4327 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4328 }
4329}
4344
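/* Layout of the recovery register (BNX2X_RECOVERY_GLOB_REG), as implied by
 * the masks below:
 *  bits 0-7  - path 0 PF load mask (one bit per PF)
 *  bits 8-15 - path 1 PF load mask
 *  bit 16    - path 0 RESET_IN_PROGRESS
 *  bit 17    - path 1 RESET_IN_PROGRESS
 *  bit 18    - GLOBAL_RESET (chip-wide recovery in progress)
 */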
4345#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4346
4347#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4348#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4349#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4350#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4351#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4352#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4353#define BNX2X_GLOBAL_RESET_BIT 0x00040000
4359
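/*
 * Sets the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */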
4360void bnx2x_set_reset_global(struct bnx2x *bp)
4361{
4362 u32 val;
4363 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4364 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4365 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4366 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4367}
4373
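/*
 * Clears the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */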
4374static void bnx2x_clear_reset_global(struct bnx2x *bp)
4375{
4376 u32 val;
4377 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4378 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4379 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4380 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4381}
4387
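/*
 * Checks the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */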
4388static bool bnx2x_reset_is_global(struct bnx2x *bp)
4389{
4390 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4391
4392 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4393 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4394}
4400
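/*
 * Clears the RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock.
 */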
4401static void bnx2x_set_reset_done(struct bnx2x *bp)
4402{
4403 u32 val;
4404 u32 bit = BP_PATH(bp) ?
4405 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4406 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4407 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4408
4409
4410 val &= ~bit;
4411 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4412
4413 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4414}
4420
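/*
 * Sets the RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock.
 */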
4421void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4422{
4423 u32 val;
4424 u32 bit = BP_PATH(bp) ?
4425 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4426 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4427 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4428
4429
4430 val |= bit;
4431 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4432 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4433}
4438
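/*
 * Returns true if no reset is currently in progress on the given engine.
 */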
4439bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4440{
4441 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4442 u32 bit = engine ?
4443 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4444
4445
4446 return (val & bit) ? false : true;
4447}
4453
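/*
 * Marks this PF as loaded by setting its bit in the load mask of its path.
 *
 * Should be run under rtnl lock.
 */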
4454void bnx2x_set_pf_load(struct bnx2x *bp)
4455{
4456 u32 val1, val;
4457 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4458 BNX2X_PATH0_LOAD_CNT_MASK;
4459 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4460 BNX2X_PATH0_LOAD_CNT_SHIFT;
4461
4462 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4463 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4464
4465 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4466
4467
4468 val1 = (val & mask) >> shift;
4469
4470
4471 val1 |= (1 << bp->pf_num);
4472
4473
4474 val &= ~mask;
4475
4476
4477 val |= ((val1 << shift) & mask);
4478
4479 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4480 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4481}
4491
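/*
 * Clears the bit of the current PF in the load mask of its path.
 *
 * Should be run under rtnl lock. Returns true if other functions are still
 * marked as loaded.
 */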
4492bool bnx2x_clear_pf_load(struct bnx2x *bp)
4493{
4494 u32 val1, val;
4495 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4496 BNX2X_PATH0_LOAD_CNT_MASK;
4497 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4498 BNX2X_PATH0_LOAD_CNT_SHIFT;
4499
4500 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4501 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4502 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4503
4504
4505 val1 = (val & mask) >> shift;
4506
4507
4508 val1 &= ~(1 << bp->pf_num);
4509
4510
4511 val &= ~mask;
4512
4513
4514 val |= ((val1 << shift) & mask);
4515
4516 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4517 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4518 return val1 != 0;
4519}
4525
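/*
 * Returns true if any function is marked as loaded on the given engine.
 */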
4526static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4527{
4528 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4529 BNX2X_PATH0_LOAD_CNT_MASK);
4530 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4531 BNX2X_PATH0_LOAD_CNT_SHIFT);
4532 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4533
4534 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4535
4536 val = (val & mask) >> shift;
4537
4538 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4539 engine, val);
4540
4541 return val != 0;
4542}
4543
4544static void _print_parity(struct bnx2x *bp, u32 reg)
4545{
4546 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4547}
4548
4549static void _print_next_block(int idx, const char *blk)
4550{
4551 pr_cont("%s%s", idx ? ", " : "", blk);
4552}
4553
4554static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4555 int *par_num, bool print)
4556{
4557 u32 cur_bit;
4558 bool res;
4559 int i;
4560
4561 res = false;
4562
4563 for (i = 0; sig; i++) {
4564 cur_bit = (0x1UL << i);
4565 if (sig & cur_bit) {
			res = true;
4567
4568 if (print) {
4569 switch (cur_bit) {
4570 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4571 _print_next_block((*par_num)++, "BRB");
4572 _print_parity(bp,
4573 BRB1_REG_BRB1_PRTY_STS);
4574 break;
4575 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4576 _print_next_block((*par_num)++,
4577 "PARSER");
4578 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4579 break;
4580 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4581 _print_next_block((*par_num)++, "TSDM");
4582 _print_parity(bp,
4583 TSDM_REG_TSDM_PRTY_STS);
4584 break;
4585 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4586 _print_next_block((*par_num)++,
4587 "SEARCHER");
4588 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4589 break;
4590 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4591 _print_next_block((*par_num)++, "TCM");
4592 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4593 break;
4594 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4595 _print_next_block((*par_num)++,
4596 "TSEMI");
4597 _print_parity(bp,
4598 TSEM_REG_TSEM_PRTY_STS_0);
4599 _print_parity(bp,
4600 TSEM_REG_TSEM_PRTY_STS_1);
4601 break;
4602 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4603 _print_next_block((*par_num)++, "XPB");
4604 _print_parity(bp, GRCBASE_XPB +
4605 PB_REG_PB_PRTY_STS);
4606 break;
4607 }
4608 }
4609
4610
4611 sig &= ~cur_bit;
4612 }
4613 }
4614
4615 return res;
4616}
4617
4618static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4619 int *par_num, bool *global,
4620 bool print)
4621{
4622 u32 cur_bit;
4623 bool res;
4624 int i;
4625
4626 res = false;
4627
4628 for (i = 0; sig; i++) {
4629 cur_bit = (0x1UL << i);
4630 if (sig & cur_bit) {
			res = true;
4632 switch (cur_bit) {
4633 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4634 if (print) {
4635 _print_next_block((*par_num)++, "PBF");
4636 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4637 }
4638 break;
4639 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4640 if (print) {
4641 _print_next_block((*par_num)++, "QM");
4642 _print_parity(bp, QM_REG_QM_PRTY_STS);
4643 }
4644 break;
4645 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4646 if (print) {
4647 _print_next_block((*par_num)++, "TM");
4648 _print_parity(bp, TM_REG_TM_PRTY_STS);
4649 }
4650 break;
4651 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4652 if (print) {
4653 _print_next_block((*par_num)++, "XSDM");
4654 _print_parity(bp,
4655 XSDM_REG_XSDM_PRTY_STS);
4656 }
4657 break;
4658 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4659 if (print) {
4660 _print_next_block((*par_num)++, "XCM");
4661 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4662 }
4663 break;
4664 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4665 if (print) {
4666 _print_next_block((*par_num)++,
4667 "XSEMI");
4668 _print_parity(bp,
4669 XSEM_REG_XSEM_PRTY_STS_0);
4670 _print_parity(bp,
4671 XSEM_REG_XSEM_PRTY_STS_1);
4672 }
4673 break;
4674 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4675 if (print) {
4676 _print_next_block((*par_num)++,
4677 "DOORBELLQ");
4678 _print_parity(bp,
4679 DORQ_REG_DORQ_PRTY_STS);
4680 }
4681 break;
4682 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4683 if (print) {
4684 _print_next_block((*par_num)++, "NIG");
4685 if (CHIP_IS_E1x(bp)) {
4686 _print_parity(bp,
4687 NIG_REG_NIG_PRTY_STS);
4688 } else {
4689 _print_parity(bp,
4690 NIG_REG_NIG_PRTY_STS_0);
4691 _print_parity(bp,
4692 NIG_REG_NIG_PRTY_STS_1);
4693 }
4694 }
4695 break;
4696 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4697 if (print)
4698 _print_next_block((*par_num)++,
4699 "VAUX PCI CORE");
4700 *global = true;
4701 break;
4702 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4703 if (print) {
4704 _print_next_block((*par_num)++,
4705 "DEBUG");
4706 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4707 }
4708 break;
4709 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4710 if (print) {
4711 _print_next_block((*par_num)++, "USDM");
4712 _print_parity(bp,
4713 USDM_REG_USDM_PRTY_STS);
4714 }
4715 break;
4716 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4717 if (print) {
4718 _print_next_block((*par_num)++, "UCM");
4719 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4720 }
4721 break;
4722 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4723 if (print) {
4724 _print_next_block((*par_num)++,
4725 "USEMI");
4726 _print_parity(bp,
4727 USEM_REG_USEM_PRTY_STS_0);
4728 _print_parity(bp,
4729 USEM_REG_USEM_PRTY_STS_1);
4730 }
4731 break;
4732 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4733 if (print) {
4734 _print_next_block((*par_num)++, "UPB");
4735 _print_parity(bp, GRCBASE_UPB +
4736 PB_REG_PB_PRTY_STS);
4737 }
4738 break;
4739 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4740 if (print) {
4741 _print_next_block((*par_num)++, "CSDM");
4742 _print_parity(bp,
4743 CSDM_REG_CSDM_PRTY_STS);
4744 }
4745 break;
4746 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4747 if (print) {
4748 _print_next_block((*par_num)++, "CCM");
4749 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4750 }
4751 break;
4752 }
4753
4754
4755 sig &= ~cur_bit;
4756 }
4757 }
4758
4759 return res;
4760}
4761
4762static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4763 int *par_num, bool print)
4764{
4765 u32 cur_bit;
4766 bool res;
4767 int i;
4768
4769 res = false;
4770
4771 for (i = 0; sig; i++) {
4772 cur_bit = (0x1UL << i);
4773 if (sig & cur_bit) {
4774 res = true;
4775 if (print) {
4776 switch (cur_bit) {
4777 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4778 _print_next_block((*par_num)++,
4779 "CSEMI");
4780 _print_parity(bp,
4781 CSEM_REG_CSEM_PRTY_STS_0);
4782 _print_parity(bp,
4783 CSEM_REG_CSEM_PRTY_STS_1);
4784 break;
4785 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4786 _print_next_block((*par_num)++, "PXP");
4787 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4788 _print_parity(bp,
4789 PXP2_REG_PXP2_PRTY_STS_0);
4790 _print_parity(bp,
4791 PXP2_REG_PXP2_PRTY_STS_1);
4792 break;
4793 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4794 _print_next_block((*par_num)++,
4795 "PXPPCICLOCKCLIENT");
4796 break;
4797 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4798 _print_next_block((*par_num)++, "CFC");
4799 _print_parity(bp,
4800 CFC_REG_CFC_PRTY_STS);
4801 break;
4802 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4803 _print_next_block((*par_num)++, "CDU");
4804 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4805 break;
4806 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4807 _print_next_block((*par_num)++, "DMAE");
4808 _print_parity(bp,
4809 DMAE_REG_DMAE_PRTY_STS);
4810 break;
4811 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4812 _print_next_block((*par_num)++, "IGU");
4813 if (CHIP_IS_E1x(bp))
4814 _print_parity(bp,
4815 HC_REG_HC_PRTY_STS);
4816 else
4817 _print_parity(bp,
4818 IGU_REG_IGU_PRTY_STS);
4819 break;
4820 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4821 _print_next_block((*par_num)++, "MISC");
4822 _print_parity(bp,
4823 MISC_REG_MISC_PRTY_STS);
4824 break;
4825 }
4826 }
4827
4828
4829 sig &= ~cur_bit;
4830 }
4831 }
4832
4833 return res;
4834}
4835
4836static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4837 int *par_num, bool *global,
4838 bool print)
4839{
4840 bool res = false;
4841 u32 cur_bit;
4842 int i;
4843
4844 for (i = 0; sig; i++) {
4845 cur_bit = (0x1UL << i);
4846 if (sig & cur_bit) {
4847 switch (cur_bit) {
4848 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4849 if (print)
4850 _print_next_block((*par_num)++,
4851 "MCP ROM");
4852 *global = true;
4853 res = true;
4854 break;
4855 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4856 if (print)
4857 _print_next_block((*par_num)++,
4858 "MCP UMP RX");
4859 *global = true;
4860 res = true;
4861 break;
4862 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4863 if (print)
4864 _print_next_block((*par_num)++,
4865 "MCP UMP TX");
4866 *global = true;
4867 res = true;
4868 break;
4869 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4870 if (print)
4871 _print_next_block((*par_num)++,
4872 "MCP SCPAD");
4873
4874 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4875 1UL << 10);
4876 break;
4877 }
4878
4879
4880 sig &= ~cur_bit;
4881 }
4882 }
4883
4884 return res;
4885}
4886
4887static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4888 int *par_num, bool print)
4889{
4890 u32 cur_bit;
4891 bool res;
4892 int i;
4893
4894 res = false;
4895
4896 for (i = 0; sig; i++) {
4897 cur_bit = (0x1UL << i);
4898 if (sig & cur_bit) {
4899 res = true;
4900 if (print) {
4901 switch (cur_bit) {
4902 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4903 _print_next_block((*par_num)++,
4904 "PGLUE_B");
4905 _print_parity(bp,
4906 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4907 break;
4908 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4909 _print_next_block((*par_num)++, "ATC");
4910 _print_parity(bp,
4911 ATC_REG_ATC_PRTY_STS);
4912 break;
4913 }
4914 }
4915
4916 sig &= ~cur_bit;
4917 }
4918 }
4919
4920 return res;
4921}
4922
4923static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4924 u32 *sig)
4925{
4926 bool res = false;
4927
4928 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4929 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4930 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4931 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4932 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4933 int par_num = 0;
4934 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4935 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4936 sig[0] & HW_PRTY_ASSERT_SET_0,
4937 sig[1] & HW_PRTY_ASSERT_SET_1,
4938 sig[2] & HW_PRTY_ASSERT_SET_2,
4939 sig[3] & HW_PRTY_ASSERT_SET_3,
4940 sig[4] & HW_PRTY_ASSERT_SET_4);
4941 if (print)
4942 netdev_err(bp->dev,
4943 "Parity errors detected in blocks: ");
4944 res |= bnx2x_check_blocks_with_parity0(bp,
4945 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4946 res |= bnx2x_check_blocks_with_parity1(bp,
4947 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4948 res |= bnx2x_check_blocks_with_parity2(bp,
4949 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4950 res |= bnx2x_check_blocks_with_parity3(bp,
4951 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4952 res |= bnx2x_check_blocks_with_parity4(bp,
4953 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4954
4955 if (print)
4956 pr_cont("\n");
4957 }
4958
4959 return res;
4960}
4968
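/* bnx2x_chk_parity_attn - checks for parity attentions.
 *
 * Reads the after-invert attention registers and returns true if a parity
 * attention is pending. @global is set when a global (e.g. MCP) parity
 * attention was detected; @print controls syslog output.
 */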
4969bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4970{
4971 struct attn_route attn = { {0} };
4972 int port = BP_PORT(bp);
4973
4974 attn.sig[0] = REG_RD(bp,
4975 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4976 port*4);
4977 attn.sig[1] = REG_RD(bp,
4978 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4979 port*4);
4980 attn.sig[2] = REG_RD(bp,
4981 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4982 port*4);
4983 attn.sig[3] = REG_RD(bp,
4984 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4985 port*4);
4988
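	/* MCP parity attentions can't be masked inside the block itself, so
	 * honour only the bits currently enabled in the AEU for them.
	 */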
4989 attn.sig[3] &= ((REG_RD(bp,
4990 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4991 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4992 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4993 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
4994
4995 if (!CHIP_IS_E1x(bp))
4996 attn.sig[4] = REG_RD(bp,
4997 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4998 port*4);
4999
5000 return bnx2x_parity_attn(bp, global, print, attn.sig);
5001}
5002
5003static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5004{
5005 u32 val;
5006 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5007
5008 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5009 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5010 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5011 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5012 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5013 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5014 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5015 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5016 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5017 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5018 if (val &
5019 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5020 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5021 if (val &
5022 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5023 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5024 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5025 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5026 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5027 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5028 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5029 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5030 }
5031 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5032 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5033 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5034 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5035 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5036 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5037 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5038 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5039 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5040 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5041 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5042 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5043 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5044 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5045 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5046 }
5047
5048 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5049 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5050 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5051 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5052 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5053 }
5054}
5055
5056static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5057{
5058 struct attn_route attn, *group_mask;
5059 int port = BP_PORT(bp);
5060 int index;
5061 u32 reg_addr;
5062 u32 val;
5063 u32 aeu_mask;
5064 bool global = false;
5067
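	/* need to take the HW lock: the MCP or the other port may also be
	 * handling this event
	 */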
5068 bnx2x_acquire_alr(bp);
5069
5070 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5071#ifndef BNX2X_STOP_ON_ERROR
5072 bp->recovery_state = BNX2X_RECOVERY_INIT;
5073 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5074
5075 bnx2x_int_disable(bp);
5078
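		/* Skip further attention handling so that the other
		 * functions still "see" the parity error.
		 */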
5079#else
5080 bnx2x_panic();
5081#endif
5082 bnx2x_release_alr(bp);
5083 return;
5084 }
5085
5086 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5087 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5088 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5089 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5090 if (!CHIP_IS_E1x(bp))
5091 attn.sig[4] =
5092 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5093 else
5094 attn.sig[4] = 0;
5095
5096 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5097 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5098
5099 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5100 if (deasserted & (1 << index)) {
5101 group_mask = &bp->attn_group[index];
5102
5103 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5104 index,
5105 group_mask->sig[0], group_mask->sig[1],
5106 group_mask->sig[2], group_mask->sig[3],
5107 group_mask->sig[4]);
5108
5109 bnx2x_attn_int_deasserted4(bp,
5110 attn.sig[4] & group_mask->sig[4]);
5111 bnx2x_attn_int_deasserted3(bp,
5112 attn.sig[3] & group_mask->sig[3]);
5113 bnx2x_attn_int_deasserted1(bp,
5114 attn.sig[1] & group_mask->sig[1]);
5115 bnx2x_attn_int_deasserted2(bp,
5116 attn.sig[2] & group_mask->sig[2]);
5117 bnx2x_attn_int_deasserted0(bp,
5118 attn.sig[0] & group_mask->sig[0]);
5119 }
5120 }
5121
5122 bnx2x_release_alr(bp);
5123
5124 if (bp->common.int_block == INT_BLOCK_HC)
5125 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5126 COMMAND_REG_ATTN_BITS_CLR);
5127 else
5128 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5129
5130 val = ~deasserted;
5131 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5132 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5133 REG_WR(bp, reg_addr, val);
5134
5135 if (~bp->attn_state & deasserted)
5136 BNX2X_ERR("IGU ERROR\n");
5137
5138 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5139 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5140
5141 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5142 aeu_mask = REG_RD(bp, reg_addr);
5143
5144 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
5145 aeu_mask, deasserted);
5146 aeu_mask |= (deasserted & 0x3ff);
5147 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5148
5149 REG_WR(bp, reg_addr, aeu_mask);
5150 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5151
5152 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5153 bp->attn_state &= ~deasserted;
5154 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5155}
5156
5157static void bnx2x_attn_int(struct bnx2x *bp)
5158{
5159
5160 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5161 attn_bits);
5162 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5163 attn_bits_ack);
5164 u32 attn_state = bp->attn_state;
5166
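	/* look for changed bits */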
5167 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
5168 u32 deasserted = ~attn_bits & attn_ack & attn_state;
5169
5170 DP(NETIF_MSG_HW,
5171 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
5172 attn_bits, attn_ack, asserted, deasserted);
5173
5174 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5175 BNX2X_ERR("BAD attention state\n");
5176
5177
5178 if (asserted)
5179 bnx2x_attn_int_asserted(bp, asserted);
5180
5181 if (deasserted)
5182 bnx2x_attn_int_deasserted(bp, deasserted);
5183}
5184
5185void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5186 u16 index, u8 op, u8 update)
5187{
5188 u32 igu_addr = bp->igu_base_addr;
5189 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5190 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5191 igu_addr);
5192}
5193
5194static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5195{
5196
5197 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5198 mmiowb();
5199}
5200
5201static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5202 union event_ring_elem *elem)
5203{
5204 u8 err = elem->message.error;
5205
5206 if (!bp->cnic_eth_dev.starting_cid ||
5207 (cid < bp->cnic_eth_dev.starting_cid &&
5208 cid != bp->cnic_eth_dev.iscsi_l2_cid))
5209 return 1;
5210
5211 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5212
5213 if (unlikely(err)) {
5214
5215 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5216 cid);
5217 bnx2x_panic_dump(bp, false);
5218 }
5219 bnx2x_cnic_cfc_comp(bp, cid, err);
5220 return 0;
5221}
5222
5223static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5224{
5225 struct bnx2x_mcast_ramrod_params rparam;
5226 int rc;
5227
5228 memset(&rparam, 0, sizeof(rparam));
5229
5230 rparam.mcast_obj = &bp->mcast_obj;
5231
5232 netif_addr_lock_bh(bp->dev);
5233
5234
5235 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5237
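	/* If there are pending mcast commands - send them */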
5238 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5239 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5240 if (rc < 0)
5241 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5242 rc);
5243 }
5244
5245 netif_addr_unlock_bh(bp->dev);
5246}
5247
5248static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5249 union event_ring_elem *elem)
5250{
5251 unsigned long ramrod_flags = 0;
5252 int rc = 0;
5253 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
5254 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5255
5256
5257 __set_bit(RAMROD_CONT, &ramrod_flags);
5258
5259 switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
5260 >> BNX2X_SWCID_SHIFT) {
5261 case BNX2X_FILTER_MAC_PENDING:
5262 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5263 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5264 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5265 else
5266 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5267
5268 break;
5269 case BNX2X_FILTER_MCAST_PENDING:
5270 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5271
5272
5273
5274 bnx2x_handle_mcast_eqe(bp);
5275 return;
5276 default:
5277 BNX2X_ERR("Unsupported classification command: %d\n",
5278 elem->message.data.eth_event.echo);
5279 return;
5280 }
5281
5282 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5283
5284 if (rc < 0)
5285 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5286 else if (rc > 0)
5287 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5288}
5289
5290static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5291
5292static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5293{
5294 netif_addr_lock_bh(bp->dev);
5295
5296 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5298
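	/* Send the rx_mode command again if it was requested while pending */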
5299 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5300 bnx2x_set_storm_rx_mode(bp);
5301 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5302 &bp->sp_state))
5303 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5304 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5305 &bp->sp_state))
5306 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5307
5308 netif_addr_unlock_bh(bp->dev);
5309}
5310
5311static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5312 union event_ring_elem *elem)
5313{
5314 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5315 DP(BNX2X_MSG_SP,
5316 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5317 elem->message.data.vif_list_event.func_bit_map);
5318 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5319 elem->message.data.vif_list_event.func_bit_map);
5320 } else if (elem->message.data.vif_list_event.echo ==
5321 VIF_LIST_RULE_SET) {
5322 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5323 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5324 }
5325}
5327
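/* Called after an AFEX FUNCTION_UPDATE: reconfigure silent VLAN removal on
 * all eth queues (and the FCoE queue, if present) and ACK the MCP.
 */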
5328static void bnx2x_after_function_update(struct bnx2x *bp)
5329{
5330 int q, rc;
5331 struct bnx2x_fastpath *fp;
5332 struct bnx2x_queue_state_params queue_params = {NULL};
5333 struct bnx2x_queue_update_params *q_update_params =
5334 &queue_params.params.update;
5335
5336
5337 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5338
5339
5340 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5341 &q_update_params->update_flags);
5342 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5343 &q_update_params->update_flags);
5344 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5346
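	/* set the silent vlan removal values according to the vlan mode */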
5347 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5348 q_update_params->silent_removal_value = 0;
5349 q_update_params->silent_removal_mask = 0;
5350 } else {
5351 q_update_params->silent_removal_value =
5352 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5353 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5354 }
5355
5356 for_each_eth_queue(bp, q) {
5357
5358 fp = &bp->fp[q];
5359 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5360
5361
5362 rc = bnx2x_queue_state_change(bp, &queue_params);
5363 if (rc < 0)
5364 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5365 q);
5366 }
5367
5368 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5369 fp = &bp->fp[FCOE_IDX(bp)];
5370 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5371
5372
5373 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5374
5375
5376 smp_mb__before_atomic();
5377 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5378 smp_mb__after_atomic();
5379
5380
5381 rc = bnx2x_queue_state_change(bp, &queue_params);
5382 if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for FCoE Q %d\n",
				  FCOE_IDX(bp));
5385 } else {
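		/* If there is no FCoE ring - ACK MCP now */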
5387 bnx2x_link_report(bp);
5388 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5389 }
5390}
5391
5392static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5393 struct bnx2x *bp, u32 cid)
5394{
5395 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5396
5397 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5398 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5399 else
5400 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5401}
5402
5403static void bnx2x_eq_int(struct bnx2x *bp)
5404{
5405 u16 hw_cons, sw_cons, sw_prod;
5406 union event_ring_elem *elem;
5407 u8 echo;
5408 u32 cid;
5409 u8 opcode;
5410 int rc, spqe_cnt = 0;
5411 struct bnx2x_queue_sp_obj *q_obj;
5412 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5413 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5414
5415 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5421
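	/* The last element of each EQ page is a next-page pointer, so step
	 * the consumer index over it.
	 */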
5422 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5423 hw_cons++;
5428
5429 sw_cons = bp->eq_cons;
5430 sw_prod = bp->eq_prod;
5431
5432 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5433 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5434
5435 for (; sw_cons != hw_cons;
5436 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5437
5438 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5439
5440 rc = bnx2x_iov_eq_sp_event(bp, elem);
5441 if (!rc) {
5442 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5443 rc);
5444 goto next_spqe;
5445 }
5446
5447
5448 cid = SW_CID((__force __le32)
5449 elem->message.data.cfc_del_event.cid);
5450 opcode = elem->message.opcode;
5451
5452
5453 switch (opcode) {
5454 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5455 bnx2x_vf_mbx_schedule(bp,
5456 &elem->message.data.vf_pf_event);
5457 continue;
5458
5459 case EVENT_RING_OPCODE_STAT_QUERY:
5460 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5461 "got statistics comp event %d\n",
5462 bp->stats_comp++);
5463
5464 goto next_spqe;
5465
5466 case EVENT_RING_OPCODE_CFC_DEL:
5467
5468
5469
5470
5471
5472 DP(BNX2X_MSG_SP,
5473 "got delete ramrod for MULTI[%d]\n", cid);
5474
5475 if (CNIC_LOADED(bp) &&
5476 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5477 goto next_spqe;
5478
5479 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5480
5481 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5482 break;
5483
5484 goto next_spqe;
5485
5486 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5487 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5488 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5489 if (f_obj->complete_cmd(bp, f_obj,
5490 BNX2X_F_CMD_TX_STOP))
5491 break;
5492 goto next_spqe;
5493
5494 case EVENT_RING_OPCODE_START_TRAFFIC:
5495 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5496 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5497 if (f_obj->complete_cmd(bp, f_obj,
5498 BNX2X_F_CMD_TX_START))
5499 break;
5500 goto next_spqe;
5501
5502 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5503 echo = elem->message.data.function_update_event.echo;
5504 if (echo == SWITCH_UPDATE) {
5505 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5506 "got FUNC_SWITCH_UPDATE ramrod\n");
5507 if (f_obj->complete_cmd(
5508 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5509 break;
5510
5511 } else {
5512 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5513
5514 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5515 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5516 f_obj->complete_cmd(bp, f_obj,
5517 BNX2X_F_CMD_AFEX_UPDATE);
5522
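				/* Queue SP operations should run under
				 * rtnl_lock, so defer the update to the
				 * sp_rtnl task.
				 */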
5523 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5524 }
5525
5526 goto next_spqe;
5527
5528 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5529 f_obj->complete_cmd(bp, f_obj,
5530 BNX2X_F_CMD_AFEX_VIFLISTS);
5531 bnx2x_after_afex_vif_lists(bp, elem);
5532 goto next_spqe;
5533 case EVENT_RING_OPCODE_FUNCTION_START:
5534 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5535 "got FUNC_START ramrod\n");
5536 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5537 break;
5538
5539 goto next_spqe;
5540
5541 case EVENT_RING_OPCODE_FUNCTION_STOP:
5542 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5543 "got FUNC_STOP ramrod\n");
5544 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5545 break;
5546
5547 goto next_spqe;
5548
5549 case EVENT_RING_OPCODE_SET_TIMESYNC:
5550 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5551 "got set_timesync ramrod completion\n");
5552 if (f_obj->complete_cmd(bp, f_obj,
5553 BNX2X_F_CMD_SET_TIMESYNC))
5554 break;
5555 goto next_spqe;
5556 }
5557
5558 switch (opcode | bp->state) {
5559 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5560 BNX2X_STATE_OPEN):
5561 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5562 BNX2X_STATE_OPENING_WAIT4_PORT):
5563 cid = elem->message.data.eth_event.echo &
5564 BNX2X_SWCID_MASK;
5565 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5566 cid);
5567 rss_raw->clear_pending(rss_raw);
5568 break;
5569
5570 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5571 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5572 case (EVENT_RING_OPCODE_SET_MAC |
5573 BNX2X_STATE_CLOSING_WAIT4_HALT):
5574 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5575 BNX2X_STATE_OPEN):
5576 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5577 BNX2X_STATE_DIAG):
5578 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5579 BNX2X_STATE_CLOSING_WAIT4_HALT):
5580 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
5581 bnx2x_handle_classification_eqe(bp, elem);
5582 break;
5583
5584 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5585 BNX2X_STATE_OPEN):
5586 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5587 BNX2X_STATE_DIAG):
5588 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5589 BNX2X_STATE_CLOSING_WAIT4_HALT):
5590 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5591 bnx2x_handle_mcast_eqe(bp);
5592 break;
5593
5594 case (EVENT_RING_OPCODE_FILTERS_RULES |
5595 BNX2X_STATE_OPEN):
5596 case (EVENT_RING_OPCODE_FILTERS_RULES |
5597 BNX2X_STATE_DIAG):
5598 case (EVENT_RING_OPCODE_FILTERS_RULES |
5599 BNX2X_STATE_CLOSING_WAIT4_HALT):
5600 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5601 bnx2x_handle_rx_mode_eqe(bp);
5602 break;
5603 default:
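			/* unknown event - log and continue */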
5605 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5606 elem->message.opcode, bp->state);
5607 }
5608next_spqe:
5609 spqe_cnt++;
5610 }
5611
5612 smp_mb__before_atomic();
5613 atomic_add(spqe_cnt, &bp->eq_spq_left);
5614
5615 bp->eq_cons = sw_cons;
5616 bp->eq_prod = sw_prod;
5617
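	/* make sure the writes above are issued towards memory before the
	 * producer is updated
	 */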
5618 smp_wmb();
5619
5620
5621 bnx2x_update_eq_prod(bp, bp->eq_prod);
5622}
5623
5624static void bnx2x_sp_task(struct work_struct *work)
5625{
5626 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5627
5628 DP(BNX2X_MSG_SP, "sp task invoked\n");
5629
5630
5631 smp_rmb();
5632 if (atomic_read(&bp->interrupt_occurred)) {
5633
5634
5635 u16 status = bnx2x_update_dsb_idx(bp);
5636
5637 DP(BNX2X_MSG_SP, "status %x\n", status);
5638 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5639 atomic_set(&bp->interrupt_occurred, 0);
5640
5641
5642 if (status & BNX2X_DEF_SB_ATT_IDX) {
5643 bnx2x_attn_int(bp);
5644 status &= ~BNX2X_DEF_SB_ATT_IDX;
5645 }
5646
5647
5648 if (status & BNX2X_DEF_SB_IDX) {
5649 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5650
5651 if (FCOE_INIT(bp) &&
5652 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5655
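				/* Prevent local bottom-halves from running as
				 * we are going to change the local NAPI list.
				 */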
5656 local_bh_disable();
5657 napi_schedule(&bnx2x_fcoe(bp, napi));
5658 local_bh_enable();
5659 }
5660
5661
5662 bnx2x_eq_int(bp);
5663 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5664 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5665
5666 status &= ~BNX2X_DEF_SB_IDX;
5667 }
5668
5669
5670 if (unlikely(status))
5671 DP(BNX2X_MSG_SP,
5672 "got an unknown interrupt! (status 0x%x)\n", status);
5673
5674
5675 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5676 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5677 }
5678
5679
5680 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5681 &bp->sp_state)) {
5682 bnx2x_link_report(bp);
5683 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5684 }
5685}
5686
5687irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5688{
5689 struct net_device *dev = dev_instance;
5690 struct bnx2x *bp = netdev_priv(dev);
5691
5692 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5693 IGU_INT_DISABLE, 0);
5694
5695#ifdef BNX2X_STOP_ON_ERROR
5696 if (unlikely(bp->panic))
5697 return IRQ_HANDLED;
5698#endif
5699
5700 if (CNIC_LOADED(bp)) {
5701 struct cnic_ops *c_ops;
5702
5703 rcu_read_lock();
5704 c_ops = rcu_dereference(bp->cnic_ops);
5705 if (c_ops)
5706 c_ops->cnic_handler(bp->cnic_data, NULL);
5707 rcu_read_unlock();
5708 }
5712
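	/* schedule the sp task to perform the default status block work, ACK
	 * attentions and enable interrupts.
	 */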
5713 bnx2x_schedule_sp_task(bp);
5714
5715 return IRQ_HANDLED;
5716}
5717
5718
5719
5720void bnx2x_drv_pulse(struct bnx2x *bp)
5721{
5722 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5723 bp->fw_drv_pulse_wr_seq);
5724}
5725
5726static void bnx2x_timer(unsigned long data)
5727{
5728 struct bnx2x *bp = (struct bnx2x *) data;
5729
5730 if (!netif_running(bp->dev))
5731 return;
5732
5733 if (IS_PF(bp) &&
5734 !BP_NOMCP(bp)) {
5735 int mb_idx = BP_FW_MB_IDX(bp);
5736 u16 drv_pulse;
5737 u16 mcp_pulse;
5738
5739 ++bp->fw_drv_pulse_wr_seq;
5740 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5741 drv_pulse = bp->fw_drv_pulse_wr_seq;
5742 bnx2x_drv_pulse(bp);
5743
5744 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5745 MCP_PULSE_SEQ_MASK);
5750
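		/* The delta between the driver pulse and the MCP response
		 * should stay small; more than 5 pulses behind suggests the
		 * MFW is stuck.
		 */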
5751 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5752 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5753 drv_pulse, mcp_pulse);
5754 }
5755
5756 if (bp->state == BNX2X_STATE_OPEN)
5757 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5758
5759
5760 if (IS_VF(bp))
5761 bnx2x_timer_sriov(bp);
5762
5763 mod_timer(&bp->timer, jiffies + bp->current_interval);
5764}
5773
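/* nic init service functions */

/* Fill a region of device memory with a value: 32-bit writes when both the
 * address and the length are DWORD-aligned, byte writes otherwise.
 */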
5774static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5775{
5776 u32 i;
5777 if (!(len%4) && !(addr%4))
5778 for (i = 0; i < len; i += 4)
5779 REG_WR(bp, addr + i, fill);
5780 else
5781 for (i = 0; i < len; i++)
5782 REG_WR8(bp, addr + i, fill);
5783}
5784
5785
5786static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5787 int fw_sb_id,
5788 u32 *sb_data_p,
5789 u32 data_size)
5790{
5791 int index;
5792 for (index = 0; index < data_size; index++)
5793 REG_WR(bp, BAR_CSTRORM_INTMEM +
5794 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5795 sizeof(u32)*index,
5796 *(sb_data_p + index));
5797}
5798
5799static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5800{
5801 u32 *sb_data_p;
5802 u32 data_size = 0;
5803 struct hc_status_block_data_e2 sb_data_e2;
5804 struct hc_status_block_data_e1x sb_data_e1x;
5805
5806
5807 if (!CHIP_IS_E1x(bp)) {
5808 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5809 sb_data_e2.common.state = SB_DISABLED;
5810 sb_data_e2.common.p_func.vf_valid = false;
5811 sb_data_p = (u32 *)&sb_data_e2;
5812 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5813 } else {
5814 memset(&sb_data_e1x, 0,
5815 sizeof(struct hc_status_block_data_e1x));
5816 sb_data_e1x.common.state = SB_DISABLED;
5817 sb_data_e1x.common.p_func.vf_valid = false;
5818 sb_data_p = (u32 *)&sb_data_e1x;
5819 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5820 }
5821 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5822
5823 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5824 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5825 CSTORM_STATUS_BLOCK_SIZE);
5826 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5827 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5828 CSTORM_SYNC_BLOCK_SIZE);
5829}
5830
5831
5832static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5833 struct hc_sp_status_block_data *sp_sb_data)
5834{
5835 int func = BP_FUNC(bp);
5836 int i;
5837 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5838 REG_WR(bp, BAR_CSTRORM_INTMEM +
5839 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5840 i*sizeof(u32),
5841 *((u32 *)sp_sb_data + i));
5842}
5843
5844static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5845{
5846 int func = BP_FUNC(bp);
5847 struct hc_sp_status_block_data sp_sb_data;
5848 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5849
5850 sp_sb_data.state = SB_DISABLED;
5851 sp_sb_data.p_func.vf_valid = false;
5852
5853 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5854
5855 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5856 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5857 CSTORM_SP_STATUS_BLOCK_SIZE);
5858 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5859 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5860 CSTORM_SP_SYNC_BLOCK_SIZE);
5861}
5862
5863static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5864 int igu_sb_id, int igu_seg_id)
5865{
5866 hc_sm->igu_sb_id = igu_sb_id;
5867 hc_sm->igu_seg_id = igu_seg_id;
5868 hc_sm->timer_value = 0xFF;
5869 hc_sm->time_to_expire = 0xFFFFFFFF;
5870}
5871
5872
5873static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
{
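	/* clear the current state-machine mapping of all indices */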
5877 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5878
5879
5880 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5881 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5882 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5883 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5886
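	/* map the RX index to the RX state machine and the TX indices to
	 * the TX state machine
	 */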
5887 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5888 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5889
5890
5891 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5892 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5893 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5894 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5895 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5896 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5897 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5898 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5899}
5900
5901void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5902 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5903{
5904 int igu_seg_id;
5905
5906 struct hc_status_block_data_e2 sb_data_e2;
5907 struct hc_status_block_data_e1x sb_data_e1x;
5908 struct hc_status_block_sm *hc_sm_p;
5909 int data_size;
5910 u32 *sb_data_p;
5911
5912 if (CHIP_INT_MODE_IS_BC(bp))
5913 igu_seg_id = HC_SEG_ACCESS_NORM;
5914 else
5915 igu_seg_id = IGU_SEG_ACCESS_NORM;
5916
5917 bnx2x_zero_fp_sb(bp, fw_sb_id);
5918
5919 if (!CHIP_IS_E1x(bp)) {
5920 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5921 sb_data_e2.common.state = SB_ENABLED;
5922 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5923 sb_data_e2.common.p_func.vf_id = vfid;
5924 sb_data_e2.common.p_func.vf_valid = vf_valid;
5925 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5926 sb_data_e2.common.same_igu_sb_1b = true;
5927 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5928 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5929 hc_sm_p = sb_data_e2.common.state_machine;
5930 sb_data_p = (u32 *)&sb_data_e2;
5931 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5932 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5933 } else {
5934 memset(&sb_data_e1x, 0,
5935 sizeof(struct hc_status_block_data_e1x));
5936 sb_data_e1x.common.state = SB_ENABLED;
5937 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5938 sb_data_e1x.common.p_func.vf_id = 0xff;
5939 sb_data_e1x.common.p_func.vf_valid = false;
5940 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5941 sb_data_e1x.common.same_igu_sb_1b = true;
5942 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5943 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5944 hc_sm_p = sb_data_e1x.common.state_machine;
5945 sb_data_p = (u32 *)&sb_data_e1x;
5946 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5947 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5948 }
5949
5950 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5951 igu_sb_id, igu_seg_id);
5952 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5953 igu_sb_id, igu_seg_id);
5954
5955 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5956
5957
5958 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5959}
5960
5961static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5962 u16 tx_usec, u16 rx_usec)
5963{
5964 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5965 false, rx_usec);
5966 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5967 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5968 tx_usec);
5969 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5970 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5971 tx_usec);
5972 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5973 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5974 tx_usec);
5975}
5976
5977static void bnx2x_init_def_sb(struct bnx2x *bp)
5978{
5979 struct host_sp_status_block *def_sb = bp->def_status_blk;
5980 dma_addr_t mapping = bp->def_status_blk_mapping;
5981 int igu_sp_sb_index;
5982 int igu_seg_id;
5983 int port = BP_PORT(bp);
5984 int func = BP_FUNC(bp);
5985 int reg_offset, reg_offset_en5;
5986 u64 section;
5987 int index;
5988 struct hc_sp_status_block_data sp_sb_data;
5989 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5990
5991 if (CHIP_INT_MODE_IS_BC(bp)) {
5992 igu_sp_sb_index = DEF_SB_IGU_ID;
5993 igu_seg_id = HC_SEG_ACCESS_DEF;
5994 } else {
5995 igu_sp_sb_index = bp->igu_dsb_id;
5996 igu_seg_id = IGU_SEG_ACCESS_DEF;
5997 }
5998
5999
6000 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6001 atten_status_block);
6002 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6003
6004 bp->attn_state = 0;
6005
6006 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6007 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6008 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6009 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6010 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6011 int sindex;
6012
6013 for (sindex = 0; sindex < 4; sindex++)
6014 bp->attn_group[index].sig[sindex] =
6015 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6016
6017 if (!CHIP_IS_E1x(bp))
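			/* enable5 sits apart from the other enable
			 * registers, so the stride between attention
			 * groups is 4 rather than 16 here.
			 */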
6023 bp->attn_group[index].sig[4] = REG_RD(bp,
6024 reg_offset_en5 + 0x4*index);
6025 else
6026 bp->attn_group[index].sig[4] = 0;
6027 }
6028
6029 if (bp->common.int_block == INT_BLOCK_HC) {
6030 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6031 HC_REG_ATTN_MSG0_ADDR_L);
6032
6033 REG_WR(bp, reg_offset, U64_LO(section));
6034 REG_WR(bp, reg_offset + 4, U64_HI(section));
6035 } else if (!CHIP_IS_E1x(bp)) {
6036 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6037 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6038 }
6039
6040 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6041 sp_sb);
6042
6043 bnx2x_zero_sp_sb(bp);
6044
6045
6046 sp_sb_data.state = SB_ENABLED;
6047 sp_sb_data.host_sb_addr.lo = U64_LO(section);
6048 sp_sb_data.host_sb_addr.hi = U64_HI(section);
6049 sp_sb_data.igu_sb_id = igu_sp_sb_index;
6050 sp_sb_data.igu_seg_id = igu_seg_id;
6051 sp_sb_data.p_func.pf_id = func;
6052 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6053 sp_sb_data.p_func.vf_id = 0xff;
6054
6055 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6056
6057 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6058}
6059
6060void bnx2x_update_coalesce(struct bnx2x *bp)
6061{
6062 int i;
6063
6064 for_each_eth_queue(bp, i)
6065 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6066 bp->tx_ticks, bp->rx_ticks);
6067}
6068
6069static void bnx2x_init_sp_ring(struct bnx2x *bp)
6070{
6071 spin_lock_init(&bp->spq_lock);
6072 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6073
6074 bp->spq_prod_idx = 0;
6075 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6076 bp->spq_prod_bd = bp->spq;
6077 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6078}
6079
6080static void bnx2x_init_eq_ring(struct bnx2x *bp)
6081{
6082 int i;
6083 for (i = 1; i <= NUM_EQ_PAGES; i++) {
6084 union event_ring_elem *elem =
6085 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6086
6087 elem->next_page.addr.hi =
6088 cpu_to_le32(U64_HI(bp->eq_mapping +
6089 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6090 elem->next_page.addr.lo =
6091 cpu_to_le32(U64_LO(bp->eq_mapping +
6092 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6093 }
6094 bp->eq_cons = 0;
6095 bp->eq_prod = NUM_EQ_DESC;
6096 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6097
6098 atomic_set(&bp->eq_spq_left,
6099 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6100}
6102
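/* called with netif_addr_lock_bh() */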
6103static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6104 unsigned long rx_mode_flags,
6105 unsigned long rx_accept_flags,
6106 unsigned long tx_accept_flags,
6107 unsigned long ramrod_flags)
6108{
6109 struct bnx2x_rx_mode_ramrod_params ramrod_param;
6110 int rc;
6111
6112 memset(&ramrod_param, 0, sizeof(ramrod_param));
6114
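	/* Prepare ramrod parameters */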
6115 ramrod_param.cid = 0;
6116 ramrod_param.cl_id = cl_id;
6117 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6118 ramrod_param.func_id = BP_FUNC(bp);
6119
6120 ramrod_param.pstate = &bp->sp_state;
6121 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6122
6123 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6124 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6125
6126 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6127
6128 ramrod_param.ramrod_flags = ramrod_flags;
6129 ramrod_param.rx_mode_flags = rx_mode_flags;
6130
6131 ramrod_param.rx_accept_flags = rx_accept_flags;
6132 ramrod_param.tx_accept_flags = tx_accept_flags;
6133
6134 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6135 if (rc < 0) {
6136 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6137 return rc;
6138 }
6139
6140 return 0;
6141}
6142
6143static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6144 unsigned long *rx_accept_flags,
6145 unsigned long *tx_accept_flags)
6146{
6147
6148 *rx_accept_flags = 0;
6149 *tx_accept_flags = 0;
6150
6151 switch (rx_mode) {
6152 case BNX2X_RX_MODE_NONE:
6156
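		/* 'drop all' supersedes any accept flags that may have been
		 * passed to the function
		 */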
6157 break;
6158 case BNX2X_RX_MODE_NORMAL:
6159 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6160 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6161 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6163
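		/* internal switching mode */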
6164 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6165 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6166 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6167
6168 break;
6169 case BNX2X_RX_MODE_ALLMULTI:
6170 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6171 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6172 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6174
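		/* internal switching mode */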
6175 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6176 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6177 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6178
6179 break;
6180 case BNX2X_RX_MODE_PROMISC:
6184
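		/* According to the definition of SI mode, an interface in
		 * promisc mode should receive matched and unmatched (in the
		 * resolution of the port) unicast packets.
		 */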
6185 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6186 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6187 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6188 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6190
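		/* internal switching mode */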
6191 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6192 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6193
6194 if (IS_MF_SI(bp))
6195 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6196 else
6197 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6198
6199 break;
6200 default:
6201 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6202 return -EINVAL;
6203 }
6205
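	/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */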
6206 if (rx_mode != BNX2X_RX_MODE_NONE) {
6207 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6208 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6209 }
6210
6211 return 0;
6212}
6214
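/* called with rtnl_lock */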
6215static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6216{
6217 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6218 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6219 int rc;
6220
6221 if (!NO_FCOE(bp))
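		/* Configure the rx_mode of the FCoE Queue */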
6223 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6224
6225 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6226 &tx_accept_flags);
6227 if (rc)
6228 return rc;
6229
6230 __set_bit(RAMROD_RX, &ramrod_flags);
6231 __set_bit(RAMROD_TX, &ramrod_flags);
6232
6233 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6234 rx_accept_flags, tx_accept_flags,
6235 ramrod_flags);
6236}
6237
6238static void bnx2x_init_internal_common(struct bnx2x *bp)
6239{
6240 int i;
6243
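	/* Zero this manually as its initialization is currently missing in
	 * the initTool.
	 */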
6244 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6245 REG_WR(bp, BAR_USTRORM_INTMEM +
6246 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6247 if (!CHIP_IS_E1x(bp)) {
6248 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6249 CHIP_INT_MODE_IS_BC(bp) ?
6250 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6251 }
6252}
6253
6254static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6255{
6256 switch (load_code) {
6257 case FW_MSG_CODE_DRV_LOAD_COMMON:
6258 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6259 bnx2x_init_internal_common(bp);
6261
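		/* fall through */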
6262 case FW_MSG_CODE_DRV_LOAD_PORT:
6265
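		/* nothing to do - fall through */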
6266 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6268
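		/* internal memory per function is initialized inside
		 * bnx2x_pf_init
		 */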
6269 break;
6270
6271 default:
6272 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6273 break;
6274 }
6275}

static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
}

static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
}

static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return BP_L_ID(fp->bp) + fp->index;
	else
		return bnx2x_fp_igu_sb_id(fp);
}

static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
	u8 cos;
	unsigned long q_type = 0;
	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };

	fp->rx_queue = fp_idx;
	fp->cid = fp_idx;
	fp->cl_id = bnx2x_fp_cl_id(fp);
	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);

	/* init shortcut */
	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);

	/* init tx data */
	for_each_cos_in_tx_queue(fp, cos) {
		bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
				  FP_COS_TO_TXQ(fp, cos, bp),
				  BNX2X_TX_SB_INDEX_BASE + cos, fp);
		cids[cos] = fp->txdata_ptr[cos]->cid;
	}

	/* nothing more for vf to do here */
	if (IS_VF(bp))
		return;

	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_update_fpsb_idx(fp);
	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
			     fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	/*
	 * Configure classification DBs: Always enable Tx switching
	 */
	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	/* link the last BD of each page to the first BD of the next page */
	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	*txdata->tx_cons_sb = cpu_to_le16(0);

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}
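
/* Illustration (derived from the loop above): with NUM_TX_RINGS pages of
 * TX_DESC_CNT descriptors each, the last BD of page i (1 <= i <= NUM_TX_RINGS)
 * is programmed with the DMA address of page (i % NUM_TX_RINGS), so the final
 * page links back to page 0 and the TX descriptor chain forms a ring.
 */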

static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i)
		bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
}

static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_eth_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}

static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
			  fp);

	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings_cnic(bp);
	bnx2x_init_tx_rings_cnic(bp);

	/* flush all */
	mb();
	mmiowb();
}

void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
	int i;

	/* Setup NIC internals and enable interrupts */
	for_each_eth_queue(bp, i)
		bnx2x_init_eth_fp(bp, i);

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);

	if (IS_PF(bp)) {
		/* Initialize MOD_ABS interrupts */
		bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
				       bp->common.shmem_base,
				       bp->common.shmem2_base, BP_PORT(bp));

		/* initialize the default status block and sp ring */
		bnx2x_init_def_sb(bp);
		bnx2x_update_dsb_idx(bp);
		bnx2x_init_sp_ring(bp);
	} else {
		bnx2x_memset_stats(bp);
	}
}

void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
{
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_stats_init(bp);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
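
/* Note on the split above (summarized from the two functions themselves):
 * bnx2x_pre_irq_nic_init() prepares the fastpath rings and the default
 * status block so that an early interrupt finds consistent state, while
 * bnx2x_post_irq_nic_init() finishes slow-path and statistics setup and only
 * then unmasks interrupts via bnx2x_int_enable().
 */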

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	if (bp->strm) {
		vfree(bp->strm->workspace);
		kfree(bp->strm);
		bp->strm = NULL;
	}

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev,
			   "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
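
/* For reference (gzip framing as consumed above; offsets per RFC 1952):
 * bytes 0..1  magic 0x1f 0x8b
 * byte  2     compression method, 8 = deflate (Z_DEFLATED)
 * byte  3     flags; bit 3 (FNAME, 0x8) means a NUL-terminated original
 *             file name follows the fixed 10-byte header
 * bytes 4..9  mtime/XFL/OS - skipped, hence n = 10 above
 * zlib_inflateInit2() is called with -MAX_WBITS so zlib processes the raw
 * deflate stream without expecting a zlib header.
 */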

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	 * packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		usleep_range(10000, 20000);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);

	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0;
}
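
/* The magic numbers checked above, spelled out (derived from the code): each
 * NIG loopback debug packet is 0x10 bytes long, so part 1 waits for a BRB
 * octet count of 1 * 0x10 = 0x10 and part 2 for (10 + 1) * 0x10 = 0xb0.
 * The PRS packet counter is likewise expected to step 1 -> 2 -> 3, the last
 * step happening only once the CFC search credit is restored.
 */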

static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	u32 val;

	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);

	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);

	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);

	val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
		PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
		PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
	if (!CHIP_IS_E1x(bp))
		val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
			PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
	REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);

	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);

	if (!CHIP_IS_E1x(bp))
		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);

	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);

	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
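
/* Convention visible in the writes above: writing 0 to a *_INT_MASK register
 * unmasks every attention bit of that block, while the non-zero values
 * (BRB1 0xFC00, TSEM 0x07ff, PBF 0x18) deliberately keep masked the bits the
 * comments describe as legal or irrelevant error sources.
 */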

static void bnx2x_reset_common(struct bnx2x *bp)
{
	u32 val = 0x1400;

	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);

	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
}

static void bnx2x_setup_dmae(struct bnx2x *bp)
{
	bp->dmae_ready = 0;
	spin_lock_init(&bp->dmae_lock);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= MISC_SPIO_SPIO5;
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

static void bnx2x__common_init_phy(struct bnx2x *bp)
{
	u32 shmem_base[2], shmem2_base[2];

	/* Avoid common init in case MFW supports LFA */
	if (SHMEM2_RD(bp, size) >
	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
		return;
	shmem_base[0] = bp->common.shmem_base;
	shmem2_base[0] = bp->common.shmem2_base;
	if (!CHIP_IS_E1x(bp)) {
		shmem_base[1] =
			SHMEM2_RD(bp, other_shmem_base_addr);
		shmem2_base[1] =
			SHMEM2_RD(bp, other_shmem2_base_addr);
	}
	bnx2x_acquire_phy_lock(bp);
	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
			      bp->common.chip_id);
	bnx2x_release_phy_lock(bp);
}

static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
{
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);

	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
}

static void bnx2x_set_endianity(struct bnx2x *bp)
{
#ifdef __BIG_ENDIAN
	bnx2x_config_endianity(bp, 1);
#else
	bnx2x_config_endianity(bp, 0);
#endif
}

static void bnx2x_reset_endianity(struct bnx2x *bp)
{
	bnx2x_config_endianity(bp, 0);
}
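
/* Endianness handling, summarized from the helpers above: on big-endian hosts
 * every PXP2 RQ endian-mode and RD swap-mode register is set to 1, on
 * little-endian hosts to 0 (HC endianness is always forced to 0), and
 * bnx2x_reset_endianity() unconditionally restores the little-endian setting.
 */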

/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));

	/*
	 * take the RESET lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);

	val = 0xfffc;
	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		u8 abs_func_id;

		/**
		 * 4-port mode or 2-port mode we need to turn off master-enable
		 * for everyone, after that, turn it back on for self. So, we
		 * disregard multi-function or not, and always disable for all
		 * functions on the given path, this means 0,2,4,6 for path 0
		 * and 1,3,5,7 for path 1
		 */
		for (abs_func_id = BP_PATH(bp);
		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
			if (abs_func_id == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, abs_func_id);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
	bnx2x_init_pxp(bp);
	bnx2x_set_endianity(bp);
	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on. This needs to be
	 * done by the first PF that is loaded in a path (i.e. common phase)
	 */
	if (!CHIP_IS_E1x(bp)) {
		/* In E2 there is a bug in the timers block that can cause
		 * function 6 / 7 (i.e. vnic3) to start even if it is marked
		 * as "scan-off" - for example when a driver is being
		 * load-unloaded while func6,7 are down. The timers block then
		 * accesses the ILT, translates to a logical address and sends
		 * a read/write request; since the ILT of the function that is
		 * down is not valid, this causes an unrecoverable translation
		 * error. To make sure nothing fatal occurs when this happens,
		 * the first PF loaded on a path disables master-enable/CFC
		 * for the other functions (see the pretend loop above),
		 * writes zero+valid to the entire ILT and points the timers
		 * range of vnic3 at the whole ILT so no ILT range error can
		 * occur. Any later PF load only updates its own ILT lines,
		 * waits 20msec so the PXP internal queues drain of "old" ILT
		 * addresses, and then re-enables itself in PGLUE-B and CFC.
		 */
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its brother are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (!CHIP_IS_E1x(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);

		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);

		/* let the HW do it's magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);

	bnx2x_iov_init_dmae(bp);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);

	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

	if (CNIC_SUPPORT(bp))
		bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);

	if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);

	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);

	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);

	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * received in afex mode
			 */
			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
			REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
			REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
			REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
		} else {
			/* Bit-map indicating which L2 hdrs may appear
			 * after the basic Ethernet header
			 */
			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}

	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		/* reset VFC memories */
		REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);
		REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);

		msleep(20);
	}

	bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * sent in afex mode
			 */
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
			REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
			REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
			REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
			REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
		} else {
			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
			       bp->path_has_ovlan ? 7 : 6);
		}
	}

	REG_WR(bp, SRC_REG_SOFT_RST, 1);

	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);

	if (CNIC_SUPPORT(bp)) {
		REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
		REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
		REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
		REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
		REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
		REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
		REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
		REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
		REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
		REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
	}
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev,
			  "please adjust the size of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
	if (!CHIP_IS_E1(bp)) {
		/* in E3 this done in per-port section */
		if (!CHIP_IS_E3(bp))
			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E1H(bp))
		/* not applicable for E2 (and above ...) */
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		if (CHIP_IS_E1x(bp))
			bnx2x__common_init_phy(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
{
	int rc = bnx2x_init_hw_common(bp);

	if (rc)
		return rc;

	/* In E2 2-PORT mode, same ext phy is used for the two paths */
	if (!BP_NOMCP(bp))
		bnx2x__common_init_phy(bp);

	return 0;
}
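
/* Ordering note (inferred from the FW_MSG_CODE_DRV_LOAD_* handling in
 * bnx2x_init_internal() above): the MCP elects one function per chip/path to
 * run the COMMON phase; each port then runs the PORT phase and each function
 * the FUNCTION phase, which is why bnx2x_init_hw_port() and
 * bnx2x_init_hw_func() below only touch per-port and per-function resources.
 */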

static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
	u32 low, high;
	u32 val, reg;

	DP(NETIF_MSG_HW, "starting port init port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

	if (CNIC_SUPPORT(bp)) {
		bnx2x_init_block(bp, BLOCK_TM, init_phase);
		REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
		REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
	}

	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);

	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {

		if (IS_MF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) +
				      ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		REG_WR(bp, (BP_PORT(bp) ?
			    BRB1_REG_MAC_GUARANTIED_1 :
			    BRB1_REG_MAC_GUARANTIED_0), 40);

	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	if (CHIP_IS_E3B0(bp)) {
		if (IS_MF_AFEX(bp)) {
			/* configure headers for AFEX mode */
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
			       PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
			       PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_MUST_HAVE_HDRS_PORT_1 :
			       PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
		} else {
			/* Ovlan exists only if we are in multi-function +
			 * switch-dependent mode, in switch-independent there
			 * is no ovlan headers
			 */
			REG_WR(bp, BP_PORT(bp) ?
			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
			       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
			       (bp->path_has_ovlan ? 7 : 6));
		}
	}

	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);

	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);

	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
	bnx2x_init_block(bp, BLOCK_XPB, init_phase);

	bnx2x_init_block(bp, BLOCK_PBF, init_phase);

	if (CHIP_IS_E1x(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

	if (CNIC_SUPPORT(bp))
		bnx2x_init_block(bp, BLOCK_SRC, init_phase);

	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
	bnx2x_init_block(bp, BLOCK_CFC, init_phase);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, BLOCK_HC, init_phase);

	bnx2x_init_block(bp, BLOCK_IGU, init_phase);

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	/* SCPAD_PARITY should NOT trigger close the gates */
	reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
	REG_WR(bp, reg,
	       REG_RD(bp, reg) &
	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);

	reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
	REG_WR(bp, reg,
	       REG_RD(bp, reg) &
	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);

	bnx2x_init_block(bp, BLOCK_NIG, init_phase);

	if (!CHIP_IS_E1x(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		 * basic Ethernet header
		 */
		if (IS_MF_AFEX(bp))
			REG_WR(bp, BP_PORT(bp) ?
			       NIG_REG_P1_HDRS_AFTER_BASIC :
			       NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
		else
			REG_WR(bp, BP_PORT(bp) ?
			       NIG_REG_P1_HDRS_AFTER_BASIC :
			       NIG_REG_P0_HDRS_AFTER_BASIC,
			       IS_MF_SD(bp) ? 7 : 6);

		if (CHIP_IS_E3(bp))
			REG_WR(bp, BP_PORT(bp) ?
			       NIG_REG_LLH1_MF_MODE :
			       NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (!CHIP_IS_E3(bp))
		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (!CHIP_IS_E1x(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
			case MULTI_FUNCTION_AFEX:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* If SPIO5 is set to generate interrupts, enable it for this port */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	if (val & MISC_SPIO_SPIO5) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}

	return 0;
}

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;
	u32 wb_write[2];

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	wb_write[0] = ONCHIP_ADDR1(addr);
	wb_write[1] = ONCHIP_ADDR2(addr);
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW,
		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}
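
/* Layout of the IGU cleanup command assembled above (field positions given by
 * the IGU_* shift constants): 'data' carries the cleanup type plus the
 * CLEANUP_SET and BCLEANUP bits, while 'ctl' packs the command address
 * (IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id), the function id with its is-PF bit,
 * and a write command type. Both are posted through the GRC, after which the
 * matching bit in the cstorm type-0 cleanup register is polled for up to
 * 100 * 20 ms.
 */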

static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
}

static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_init_searcher(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
}

static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
{
	int rc;
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_switch_update_params *switch_update_params =
		&func_params.params.switch_update;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;

	/* Function parameters */
	__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
		  &switch_update_params->changes);
	if (suspend)
		__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
			  &switch_update_params->changes);

	rc = bnx2x_func_state_change(bp, &func_params);

	return rc;
}

static int bnx2x_reset_nic_mode(struct bnx2x *bp)
{
	int rc, i, port = BP_PORT(bp);
	int vlan_en = 0, mac_en[NUM_MACS];

	/* Close input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 0);
	} else {
		vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
				 NIG_REG_LLH0_FUNC_EN);
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			   NIG_REG_LLH0_FUNC_EN, 0);
		for (i = 0; i < NUM_MACS; i++) {
			mac_en[i] = REG_RD(bp, port ?
					   (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					    4 * i) :
					   (NIG_REG_LLH0_FUNC_MEM_ENABLE +
					    4 * i));
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
		}
	}

	/* Close BMC to host */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);

	/* Suspend Tx switching to the PF. Completion of this ramrod
	 * further guarantees that all the packets of that PF / child
	 * VFs in BRB were processed by the Parser, so it is safe to
	 * change the NIC_MODE register.
	 */
	rc = bnx2x_func_switch_update(bp, 1);
	if (rc) {
		BNX2X_ERR("Can't suspend tx-switching!\n");
		return rc;
	}

	/* Change NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 0);

	/* Open input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 1);
	} else {
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			   NIG_REG_LLH0_FUNC_EN, vlan_en);
		for (i = 0; i < NUM_MACS; i++) {
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
				   mac_en[i]);
		}
	}

	/* Re-open BMC to host */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);

	/* Resume Tx switching to the PF */
	rc = bnx2x_func_switch_update(bp, 0);
	if (rc) {
		BNX2X_ERR("Can't resume tx-switching!\n");
		return rc;
	}

	DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	return 0;
}
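
/* bnx2x_reset_nic_mode() above is the inverse of the PRS_REG_NIC_MODE = 1
 * setup done at init: it first quiesces traffic (rx filters or per-function
 * NIG LLH enables, the BMC-to-host path, and a tx-switching suspend ramrod),
 * flips PRS_REG_NIC_MODE to 0 so the parser consults the searcher configured
 * in bnx2x_init_searcher(), and then restores everything it saved.
 */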

int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
{
	int rc;

	bnx2x_ilt_init_op_cnic(bp, INITOP_SET);

	if (CONFIGURE_NIC_MODE(bp)) {
		/* Configure searcher as part of function hw init */
		bnx2x_init_searcher(bp);

		/* Reset NIC mode */
		rc = bnx2x_reset_nic_mode(bp);
		if (rc)
			BNX2X_ERR("Can't change NIC mode!\n");
		return rc;
	}

	return 0;
}

/* A previous driver DMAE transaction may have occurred when the pre-boot
 * stage ended and boot began, or when a kdump kernel was loaded. Either case
 * would invalidate the addresses of the transaction, resulting in a was-error
 * bit set in the PCI block that causes all hw-to-host PCIe transactions to
 * time out. If this happened we want to clear the interrupt which detected
 * this from the PGLUE-B block.
 */
static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
{
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
		       1 << BP_ABS_FUNC(bp));
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int init_phase = PHASE_PF0 + func;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width, rc;

	DP(NETIF_MSG_HW, "starting func init func %d\n", func);

	/* FLR cleanup */
	if (!CHIP_IS_E1x(bp)) {
		rc = bnx2x_pf_flr_clnup(bp);
		if (rc) {
			bnx2x_fw_dump(bp);
			return rc;
		}
	}

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	if (IS_SRIOV(bp))
		cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
	cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);

	/* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
	 * those of the VFs, so start line should be reset
	 */
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context[i].cxt_mapping;
		ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
	}

	bnx2x_ilt_init_op(bp, INITOP_SET);

	if (!CONFIGURE_NIC_MODE(bp)) {
		bnx2x_init_searcher(bp);
		REG_WR(bp, PRS_REG_NIC_MODE, 0);
		DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	} else {
		/* Set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);
		DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
	}

	if (!CHIP_IS_E1x(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);

	bnx2x_clean_pglue_errors(bp);

	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
	}
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TM, init_phase);
	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
	REG_WR(bp, DORQ_REG_MODE_ACT, 1);	/* no dpm */

	bnx2x_iov_init_dq(bp);

	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, BLOCK_CDU, init_phase);

	bnx2x_init_block(bp, BLOCK_CFC, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
			REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
			REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
			       bp->mf_ov);
		}
	}

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, BLOCK_HC, init_phase);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (!CHIP_IS_E1x(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, BLOCK_IGU, init_phase);

		if (!CHIP_IS_E1x(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_VN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * does not matters what is the current chip mode
			 */
			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with value 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! These should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(NETIF_MSG_HW,
			   "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Enable STORMs SP logging */
	REG_WR8(bp, BAR_USTRORM_INTMEM +
	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
#endif

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

void bnx2x_free_mem_cnic(struct bnx2x *bp)
{
	bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);

	if (!CHIP_IS_E1x(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
}

void bnx2x_free_mem(struct bnx2x *bp)
{
	int i;

	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	if (IS_VF(bp))
		return;

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	for (i = 0; i < L2_ILT_LINES(bp); i++)
		BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
			       bp->context[i].size);
	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);

	bnx2x_iov_free_mem(bp);
}

int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
	if (!CHIP_IS_E1x(bp)) {
		/* size = the status block + ramrod buffers */
		bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
						    sizeof(struct host_hc_status_block_e2));
		if (!bp->cnic_sb.e2_sb)
			goto alloc_mem_err;
	} else {
		bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
						     sizeof(struct host_hc_status_block_e1x));
		if (!bp->cnic_sb.e1x_sb)
			goto alloc_mem_err;
	}

	if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
		/* allocate searcher T2 table, as it wasn't allocated before */
		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
		if (!bp->t2)
			goto alloc_mem_err;
	}

	/* write address to which L5 driver should insert its values */
	bp->cnic_eth_dev.addr_drv_info_to_mcp =
		&bp->slowpath->drv_info_to_mcp;

	if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2x_free_mem_cnic(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
	int i, allocated, context_size;

	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
		/* allocate searcher T2 table */
		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
		if (!bp->t2)
			goto alloc_mem_err;
	}

	bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
					     sizeof(struct host_sp_status_block));
	if (!bp->def_status_blk)
		goto alloc_mem_err;

	bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
				       sizeof(struct bnx2x_slowpath));
	if (!bp->slowpath)
		goto alloc_mem_err;

	/* Allocate memory for CDU context.
	 * This memory is allocated separately and not in the generic ILT
	 * functions because CDU differs in a few aspects: multiple entities
	 * (regular L2, CNIC and SRIOV) allocate context memory and each
	 * separately controls its own set of ILT lines, so the buffer is
	 * carved out in CDU_ILT_PAGE_SZ chunks, the last chunk possibly
	 * smaller than the rest.
	 */
	context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);

	for (i = 0, allocated = 0; allocated < context_size; i++) {
		bp->context[i].size = min(CDU_ILT_PAGE_SZ,
					  (context_size - allocated));
		bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
						      bp->context[i].size);
		if (!bp->context[i].vcxt)
			goto alloc_mem_err;
		allocated += bp->context[i].size;
	}
	bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
				 GFP_KERNEL);
	if (!bp->ilt->lines)
		goto alloc_mem_err;

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	if (bnx2x_iov_alloc_mem(bp))
		goto alloc_mem_err;

	/* Slow path ring */
	bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
	if (!bp->spq)
		goto alloc_mem_err;

	/* EQ */
	bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
				      BCM_PAGE_SIZE * NUM_EQ_PAGES);
	if (!bp->eq_ring)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}
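
/* Error handling pattern of the two allocators above: every failure jumps to
 * alloc_mem_err, which calls the matching bnx2x_free_mem*() routine; the free
 * helpers tolerate partially-completed allocations (BNX2X_PCI_FREE is a no-op
 * for a NULL pointer), so the caller only ever sees a clean -ENOMEM.
 */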

/*
 * Init service functions
 */

int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
		      struct bnx2x_vlan_mac_obj *obj, bool set,
		      int mac_type, unsigned long *ramrod_flags)
{
	int rc;
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Fill general parameters */
	ramrod_param.vlan_mac_obj = obj;
	ramrod_param.ramrod_flags = *ramrod_flags;

	/* Fill a user request section if needed */
	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

		/* Set the command: ADD or DEL */
		if (set)
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		else
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
	}

	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);

	if (rc == -EEXIST) {
		DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
		/* do not treat adding same MAC as error */
		rc = 0;
	} else if (rc < 0)
		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));

	return rc;
}

int bnx2x_del_all_macs(struct bnx2x *bp,
		       struct bnx2x_vlan_mac_obj *mac_obj,
		       int mac_type, bool wait_for_comp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;

	/* Wait for completion of requested */
	if (wait_for_comp)
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	/* Set the mac type of addresses we want to clear */
	__set_bit(mac_type, &vlan_mac_flags);

	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
	if (rc < 0)
		BNX2X_ERR("Failed to delete MACs: %d\n", rc);

	return rc;
}

int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
	if (IS_PF(bp)) {
		unsigned long ramrod_flags = 0;

		DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
					 &bp->sp_objs->mac_obj, set,
					 BNX2X_ETH_MAC, &ramrod_flags);
	} else { /* vf */
		return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
					     bp->fp->index, true);
	}
}

int bnx2x_setup_leading(struct bnx2x *bp)
{
	if (IS_PF(bp))
		return bnx2x_setup_queue(bp, &bp->fp[0], true);
	else /* VF */
		return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
}

/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 */
int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
		BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
		return -EINVAL;
	}

	switch (int_mode) {
	case BNX2X_INT_MODE_MSIX:
		/* attempt to enable msix */
		rc = bnx2x_enable_msix(bp);

		/* msix attained */
		if (!rc)
			return 0;

		/* vfs use only msix */
		if (rc && IS_VF(bp))
			return rc;

		/* failed to enable multiple MSI-X */
		BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
			       bp->num_queues,
			       1 + bp->num_cnic_queues);

		/* no break */
	case BNX2X_INT_MODE_MSI:
		bnx2x_enable_msi(bp);

		/* no break */
	case BNX2X_INT_MODE_INTX:
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
		BNX2X_DEV_INFO("set number of queues to 1\n");
		break;
	default:
		BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
		return -EINVAL;
	}
	return 0;
}
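
/* Fallback ladder implemented by the switch above: MSI-X is attempted first;
 * on failure a PF falls through to MSI and, if that is unavailable too, to
 * INT#x, where the driver drops to a single ethernet queue. VFs are the
 * exception - they must use MSI-X or fail the load.
 */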

/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	if (IS_SRIOV(bp))
		return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
	return L2_ILT_LINES(bp);
}

void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += bnx2x_cid_ilt_lines(bp);

	if (CNIC_SUPPORT(bp))
		line += CNIC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));
	}

	if (CNIC_SUPPORT(bp)) {
		/* SRC */
		ilt_client = &ilt->clients[ILT_CLIENT_SRC];
		ilt_client->client_num = ILT_CLIENT_SRC;
		ilt_client->page_size = SRC_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;
		line += SRC_ILT_LINES;
		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

		/* TM */
		ilt_client = &ilt->clients[ILT_CLIENT_TM];
		ilt_client->client_num = ILT_CLIENT_TM;
		ilt_client->page_size = TM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;
		line += TM_ILT_LINES;
		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));
	}

	BUG_ON(line > ILT_MAX_LINES);
}
8594
/**
 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @init_params: pointer to parameters structure
 *
 * parameters configured:
 *      - HC configuration
 *      - Queue's max_cos count
 */
static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
{
	u8 cos;
	int cxt_index, cxt_offset;

	/* FCoE HC index is only for RX queue */
	if (!IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);

		/* If HC is supported, enable host coalescing in the
		 * transition to INIT state.
		 */
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);

		/* HC rate */
		init_params->rx.hc_rate = bp->rx_ticks ?
			(1000000 / bp->rx_ticks) : 0;
		init_params->tx.hc_rate = bp->tx_ticks ?
			(1000000 / bp->tx_ticks) : 0;

		/* FW SB ID */
		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
			fp->fw_sb_id;

		/* CQ index among the SB indices: FCoE clients use the
		 * default SB, therefore it's different.
		 */
		init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
		init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
	}

	/* set maximum number of COSs supported by this queue */
	init_params->max_cos = fp->max_cos;

	DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
	   fp->index, init_params->max_cos);

	/* set the context pointers queue object */
	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
		cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
		cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
				ILT_PAGE_CIDS);
		init_params->cxts[cos] =
			&bp->context[cxt_index].vcxt[cxt_offset].eth;
	}
}
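/* hc_rate above converts the coalescing interval into an event rate:
 * bp->rx_ticks/tx_ticks hold microseconds, so an (illustrative) rx_ticks of
 * 50 gives 1000000 / 50 == 20000 coalescing events per second, while 0
 * disables the timer-based coalescing altogether.
 */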
8656
static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			struct bnx2x_queue_state_params *q_params,
			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
			int tx_index, bool leading)
{
	memset(tx_only_params, 0, sizeof(*tx_only_params));

	/* Send the SETUP_TX_ONLY command */
	q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;

	/* prepare (CLIENT_SETUP) parameters for tx only queue */
	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);

	/* choose the index of the cid to send the slow path on */
	tx_only_params->cid_index = tx_index;

	/* Set general TX_ONLY_SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);

	/* Set Tx TX_ONLY_SETUP parameters */
	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);

	DP(NETIF_MSG_IFUP,
	   "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
	   tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
	   q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
	   tx_only_params->gen_params.spcl_id, tx_only_params->flags);

	/* send the ramrod */
	return bnx2x_queue_state_change(bp, q_params);
}
8688
/**
 * bnx2x_setup_queue - setup queue
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	is leading
 *
 * This function performs 2 steps in a Queue state machine
 *      actually: 1) RESET->INIT 2) INIT->SETUP
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_queue_setup_params *setup_params =
						&q_params.params.setup;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
						&q_params.params.tx_only;
	int rc;
	u8 tx_index;

	DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);

	/* reset IGU state; skip FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* Prepare the INIT parameters */
	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_INIT;

	/* Change the state to INIT */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "init complete\n");

	/* Now move the Queue to the SETUP state */
	memset(setup_params, 0, sizeof(*setup_params));

	/* Set QUEUE flags */
	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);

	/* Set general SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
				FIRST_TX_COS_INDEX);

	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
			   &setup_params->rxq_params);

	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
			   FIRST_TX_COS_INDEX);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_SETUP;

	if (IS_FCOE_FP(fp))
		bp->fcoe_init = true;

	/* Change the state to SETUP */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
		return rc;
	}

	/* loop through the relevant tx-only indices */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	      tx_index < fp->max_cos;
	      tx_index++) {

		/* prepare and send tx-only ramrod */
		rc = bnx2x_setup_tx_only(bp, fp, &q_params,
					 tx_only_params, tx_index, leading);
		if (rc) {
			BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
				  fp->index, tx_index);
			return rc;
		}
	}

	return rc;
}
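/* To summarize, bringing up a fastpath queue walks the queue state machine
 * RESET -> INIT -> SETUP and then issues one SETUP_TX_ONLY transition per
 * additional traffic class, so a queue with max_cos == 3 ends up with one
 * full Rx/Tx connection plus two tx-only connections.
 */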
8783
8784static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8785{
8786 struct bnx2x_fastpath *fp = &bp->fp[index];
8787 struct bnx2x_fp_txdata *txdata;
8788 struct bnx2x_queue_state_params q_params = {NULL};
8789 int rc, tx_index;
8790
8791 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8792
8793 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8794
8795 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8796
8797
8798 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8799 tx_index < fp->max_cos;
8800 tx_index++){
8801
8802
8803 txdata = fp->txdata_ptr[tx_index];
8804
8805 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8806 txdata->txq_index);
8807
8808
8809 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8810 memset(&q_params.params.terminate, 0,
8811 sizeof(q_params.params.terminate));
8812 q_params.params.terminate.cid_index = tx_index;
8813
8814 rc = bnx2x_queue_state_change(bp, &q_params);
8815 if (rc)
8816 return rc;
8817
8818
8819 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8820 memset(&q_params.params.cfc_del, 0,
8821 sizeof(q_params.params.cfc_del));
8822 q_params.params.cfc_del.cid_index = tx_index;
8823 rc = bnx2x_queue_state_change(bp, &q_params);
8824 if (rc)
8825 return rc;
8826 }
8827
8828
8829 q_params.cmd = BNX2X_Q_CMD_HALT;
8830 rc = bnx2x_queue_state_change(bp, &q_params);
8831 if (rc)
8832 return rc;
8833
8834
8835 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8836 memset(&q_params.params.terminate, 0,
8837 sizeof(q_params.params.terminate));
8838 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8839 rc = bnx2x_queue_state_change(bp, &q_params);
8840 if (rc)
8841 return rc;
8842
8843 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8844 memset(&q_params.params.cfc_del, 0,
8845 sizeof(q_params.params.cfc_del));
8846 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8847 return bnx2x_queue_state_change(bp, &q_params);
8848}
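/* Teardown mirrors bnx2x_setup_queue() in reverse: every tx-only connection
 * is TERMINATEd and CFC-deleted first, and only then is the primary
 * connection HALTed, TERMINATEd and removed from the CFC.
 */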
8849
8850static void bnx2x_reset_func(struct bnx2x *bp)
8851{
8852 int port = BP_PORT(bp);
8853 int func = BP_FUNC(bp);
8854 int i;
8855
8856
8857 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8858 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8859 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8860 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8861
8862
8863 for_each_eth_queue(bp, i) {
8864 struct bnx2x_fastpath *fp = &bp->fp[i];
8865 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8866 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8867 SB_DISABLED);
8868 }
8869
8870 if (CNIC_LOADED(bp))
8871
8872 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8873 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8874 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8875
	/* SP SB */
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
		SB_DISABLED);

	/* Clear the function's SPQ data, one 32-bit word per iteration */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func) +
		       i * 4, 0);
8884
8885
8886 if (bp->common.int_block == INT_BLOCK_HC) {
8887 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8888 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8889 } else {
8890 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8891 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8892 }
8893
8894 if (CNIC_LOADED(bp)) {
8895
8896 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8897
8898
8899
8900
8901 for (i = 0; i < 200; i++) {
8902 usleep_range(10000, 20000);
8903 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8904 break;
8905 }
8906 }
8907
8908 bnx2x_clear_func_ilt(bp, func);
8909
8910
8911
8912
8913 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8914 struct ilt_client_info ilt_cli;
8915
8916 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8917 ilt_cli.start = 0;
8918 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8919 ilt_cli.client_num = ILT_CLIENT_TM;
8920
8921 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8922 }
8923
8924
8925 if (!CHIP_IS_E1x(bp))
8926 bnx2x_pf_disable(bp);
8927
8928 bp->dmae_ready = 0;
8929}
8930
8931static void bnx2x_reset_port(struct bnx2x *bp)
8932{
8933 int port = BP_PORT(bp);
8934 u32 val;
8935
8936
8937 bnx2x__link_reset(bp);
8938
8939 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8940
8941
8942 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8943
8944 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8945 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8946
8947
8948 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8949
8950 msleep(100);
8951
8952 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8953 if (val)
8954 DP(NETIF_MSG_IFDOWN,
8955 "BRB1 is not empty %d blocks are occupied\n", val);
8956
8957
8958}
8959
8960static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
8961{
8962 struct bnx2x_func_state_params func_params = {NULL};
8963
8964
8965 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8966
8967 func_params.f_obj = &bp->func_obj;
8968 func_params.cmd = BNX2X_F_CMD_HW_RESET;
8969
8970 func_params.params.hw_init.load_phase = load_code;
8971
8972 return bnx2x_func_state_change(bp, &func_params);
8973}
8974
8975static int bnx2x_func_stop(struct bnx2x *bp)
8976{
8977 struct bnx2x_func_state_params func_params = {NULL};
8978 int rc;
8979
8980
8981 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8982 func_params.f_obj = &bp->func_obj;
8983 func_params.cmd = BNX2X_F_CMD_STOP;
8984
8985
8986
8987
8988
8989
8990
8991 rc = bnx2x_func_state_change(bp, &func_params);
8992 if (rc) {
8993#ifdef BNX2X_STOP_ON_ERROR
8994 return rc;
8995#else
8996 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
8997 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
8998 return bnx2x_func_state_change(bp, &func_params);
8999#endif
9000 }
9001
9002 return 0;
9003}
9004
/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
{
	u32 reset_code = 0;
	int port = BP_PORT(bp);

	/* Select the UNLOAD request mode */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		struct pci_dev *pdev = bp->pdev;
		u32 val;
		u16 pmc;

		/* The mac address is written to entries 1-4 to
		 * preserve entry 0 which is used by the PMF
		 */
		u8 entry = (BP_VN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		/* Enable the PME and clear the status */
		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
		pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Send the request to the MCP */
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code, 0);
	else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
		   bnx2x_load_count[path][2]);
		bnx2x_load_count[path][0]--;
		bnx2x_load_count[path][1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
		   bnx2x_load_count[path][2]);
		if (bnx2x_load_count[path][0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (bnx2x_load_count[path][1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	return reset_code;
}
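/* Illustration of the MAC-match layout used above (example address only):
 * for dev_addr 00:11:22:33:44:55 the first EMAC_WR() packs 0x0011 (bytes
 * 0-1) and the second packs 0x22334455 (bytes 2-5) into the adjacent word,
 * each match entry being 8 bytes apart ((BP_VN(bp) + 1) * 8).
 */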
9078
/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */
void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
{
	u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
}
9093
9094static int bnx2x_func_wait_started(struct bnx2x *bp)
9095{
9096 int tout = 50;
9097 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9098
9099 if (!bp->port.pmf)
9100 return 0;
9101
	/* Quiesce anything that may still be processing slow-path events:
	 * let any in-flight interrupt handler finish and drain the driver
	 * workqueues before polling the function state below.
	 */
9117 if (msix)
9118 synchronize_irq(bp->msix_table[0].vector);
9119 else
9120 synchronize_irq(bp->pdev->irq);
9121
9122 flush_workqueue(bnx2x_wq);
9123 flush_workqueue(bnx2x_iov_wq);
9124
9125 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9126 BNX2X_F_STATE_STARTED && tout--)
9127 msleep(20);
9128
9129 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9130 BNX2X_F_STATE_STARTED) {
9131#ifdef BNX2X_STOP_ON_ERROR
9132 BNX2X_ERR("Wrong function state\n");
9133 return -EBUSY;
9134#else
9135
9136
9137
9138
9139 struct bnx2x_func_state_params func_params = {NULL};
9140
9141 DP(NETIF_MSG_IFDOWN,
9142 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9143
9144 func_params.f_obj = &bp->func_obj;
9145 __set_bit(RAMROD_DRV_CLR_ONLY,
9146 &func_params.ramrod_flags);
9147
9148
9149 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9150 bnx2x_func_state_change(bp, &func_params);
9151
9152
9153 func_params.cmd = BNX2X_F_CMD_TX_START;
9154 return bnx2x_func_state_change(bp, &func_params);
9155#endif
9156 }
9157
9158 return 0;
9159}
9160
9161static void bnx2x_disable_ptp(struct bnx2x *bp)
9162{
9163 int port = BP_PORT(bp);
9164
9165
9166 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9167 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9168
9169
9170 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9171 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9172 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9173 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9174 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9175 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9176 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9177 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9178
9179
9180 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9181 NIG_REG_P0_PTP_EN, 0x0);
9182}
9183
9184
9185static void bnx2x_stop_ptp(struct bnx2x *bp)
9186{
	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling of the PTP work.
	 */
	cancel_work_sync(&bp->ptp_task);
9191
9192 if (bp->ptp_tx_skb) {
9193 dev_kfree_skb_any(bp->ptp_tx_skb);
9194 bp->ptp_tx_skb = NULL;
9195 }
9196
9197
9198 bnx2x_disable_ptp(bp);
9199
9200 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9201}
9202
9203void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9204{
9205 int port = BP_PORT(bp);
9206 int i, rc = 0;
9207 u8 cos;
9208 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9209 u32 reset_code;
9210
9211
9212 for_each_tx_queue(bp, i) {
9213 struct bnx2x_fastpath *fp = &bp->fp[i];
9214
9215 for_each_cos_in_tx_queue(fp, cos)
9216 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9217#ifdef BNX2X_STOP_ON_ERROR
9218 if (rc)
9219 return;
9220#endif
9221 }
9222
9223
9224 usleep_range(1000, 2000);
9225
9226
9227 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9228 false);
9229 if (rc < 0)
9230 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9231
9232
9233 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9234 true);
9235 if (rc < 0)
9236 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9237 rc);
9238
9239
9240 if (!CHIP_IS_E1(bp))
9241 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9242
9243
9244
9245
9246
9247 netif_addr_lock_bh(bp->dev);
9248
9249 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9250 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9251 else
9252 bnx2x_set_storm_rx_mode(bp);
9253
9254
9255 rparam.mcast_obj = &bp->mcast_obj;
9256 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9257 if (rc < 0)
9258 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9259
9260 netif_addr_unlock_bh(bp->dev);
9261
9262 bnx2x_iov_chip_cleanup(bp);
9263
9264
9265
9266
9267
9268
9269 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9270
9271
9272
9273
9274
9275 rc = bnx2x_func_wait_started(bp);
9276 if (rc) {
9277 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9278#ifdef BNX2X_STOP_ON_ERROR
9279 return;
9280#endif
9281 }
9282
9283
9284
9285
9286 for_each_eth_queue(bp, i)
9287 if (bnx2x_stop_queue(bp, i))
9288#ifdef BNX2X_STOP_ON_ERROR
9289 return;
9290#else
9291 goto unload_error;
9292#endif
9293
9294 if (CNIC_LOADED(bp)) {
9295 for_each_cnic_queue(bp, i)
9296 if (bnx2x_stop_queue(bp, i))
9297#ifdef BNX2X_STOP_ON_ERROR
9298 return;
9299#else
9300 goto unload_error;
9301#endif
9302 }
9303
9304
9305
9306
9307 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9308 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9309
9310#ifndef BNX2X_STOP_ON_ERROR
9311unload_error:
9312#endif
9313 rc = bnx2x_func_stop(bp);
9314 if (rc) {
9315 BNX2X_ERR("Function stop failed!\n");
9316#ifdef BNX2X_STOP_ON_ERROR
9317 return;
9318#endif
9319 }
9320
9321
9322
9323
9324
9325
9326 bnx2x_stop_ptp(bp);
9327
9328
9329 bnx2x_netif_stop(bp, 1);
9330
9331 bnx2x_del_all_napi(bp);
9332 if (CNIC_LOADED(bp))
9333 bnx2x_del_all_napi_cnic(bp);
9334
9335
9336 bnx2x_free_irq(bp);
9337
9338
9339 rc = bnx2x_reset_hw(bp, reset_code);
9340 if (rc)
9341 BNX2X_ERR("HW_RESET failed\n");
9342
9343
9344 bnx2x_send_unload_done(bp, keep_link);
9345}
9346
9347void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9348{
9349 u32 val;
9350
9351 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9352
9353 if (CHIP_IS_E1(bp)) {
9354 int port = BP_PORT(bp);
9355 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9356 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9357
9358 val = REG_RD(bp, addr);
9359 val &= ~(0x300);
9360 REG_WR(bp, addr, val);
9361 } else {
9362 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9363 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9364 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9365 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9366 }
9367}
9368
9369
9370static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9371{
9372 u32 val;
9373
9374
9375 if (!CHIP_IS_E1(bp)) {
9376
9377 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9378
9379 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9380 }
9381
9382
9383 if (CHIP_IS_E1x(bp)) {
9384
9385 val = REG_RD(bp, HC_REG_CONFIG_1);
9386 REG_WR(bp, HC_REG_CONFIG_1,
9387 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9388 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9389
9390 val = REG_RD(bp, HC_REG_CONFIG_0);
9391 REG_WR(bp, HC_REG_CONFIG_0,
9392 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9393 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9394 } else {
9395
9396 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9397
9398 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9399 (!close) ?
9400 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9401 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9402 }
9403
9404 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9405 close ? "closing" : "opening");
9406 mmiowb();
9407}
9408
9409#define SHARED_MF_CLP_MAGIC 0x80000000
9410
9411static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9412{
9413
9414 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9415 *magic_val = val & SHARED_MF_CLP_MAGIC;
9416 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9417}
9418
9419
9420
9421
9422
9423
9424
9425static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9426{
9427
9428 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9429 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9430 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9431}
9432
9433
9434
9435
9436
9437
9438
9439
9440
9441static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9442{
9443 u32 shmem;
9444 u32 validity_offset;
9445
9446 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9447
9448
9449 if (!CHIP_IS_E1(bp))
9450 bnx2x_clp_reset_prep(bp, magic_val);
9451
9452
9453 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9454 validity_offset =
9455 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9456
9457
9458 if (shmem > 0)
9459 REG_WR(bp, shmem + validity_offset, 0);
9460}
9461
9462#define MCP_TIMEOUT 5000
9463#define MCP_ONE_TIMEOUT 100
9464
9465
9466
9467
9468
9469
9470static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9471{
9472
9473
9474 if (CHIP_REV_IS_SLOW(bp))
9475 msleep(MCP_ONE_TIMEOUT*10);
9476 else
9477 msleep(MCP_ONE_TIMEOUT);
9478}
9479
9480
9481
9482
9483static int bnx2x_init_shmem(struct bnx2x *bp)
9484{
9485 int cnt = 0;
9486 u32 val = 0;
9487
9488 do {
9489 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9490 if (bp->common.shmem_base) {
9491 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9492 if (val & SHR_MEM_VALIDITY_MB)
9493 return 0;
9494 }
9495
9496 bnx2x_mcp_wait_one(bp);
9497
9498 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9499
9500 BNX2X_ERR("BAD MCP validity signature\n");
9501
9502 return -ENODEV;
9503}
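/* Worst-case wait above: MCP_TIMEOUT / MCP_ONE_TIMEOUT == 50 polls of
 * 100 ms each, i.e. about 5 seconds on real silicon (ten times that on
 * slow emulation, where bnx2x_mcp_wait_one() sleeps 1 s per poll).
 */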
9504
9505static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9506{
9507 int rc = bnx2x_init_shmem(bp);
9508
9509
9510 if (!CHIP_IS_E1(bp))
9511 bnx2x_clp_reset_done(bp, magic_val);
9512
9513 return rc;
9514}
9515
9516static void bnx2x_pxp_prep(struct bnx2x *bp)
9517{
9518 if (!CHIP_IS_E1(bp)) {
9519 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9520 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9521 mmiowb();
9522 }
9523}
9524
9525
9526
9527
9528
9529
9530
9531
9532
9533
9534
9535static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9536{
9537 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9538 u32 global_bits2, stay_reset2;
9539
9540
9541
9542
9543
9544 global_bits2 =
9545 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9546 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9547
9548
9549
9550
9551
9552
9553 not_reset_mask1 =
9554 MISC_REGISTERS_RESET_REG_1_RST_HC |
9555 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9556 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9557
9558 not_reset_mask2 =
9559 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9560 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9561 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9562 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9563 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9564 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9565 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9566 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9567 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9568 MISC_REGISTERS_RESET_REG_2_PGLC |
9569 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9570 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9571 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9572 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9573 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9574 MISC_REGISTERS_RESET_REG_2_UMAC1;
9575
9576
9577
9578
9579
9580 stay_reset2 =
9581 MISC_REGISTERS_RESET_REG_2_XMAC |
9582 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9583
9584
9585 reset_mask1 = 0xffffffff;
9586
9587 if (CHIP_IS_E1(bp))
9588 reset_mask2 = 0xffff;
9589 else if (CHIP_IS_E1H(bp))
9590 reset_mask2 = 0x1ffff;
9591 else if (CHIP_IS_E2(bp))
9592 reset_mask2 = 0xfffff;
9593 else
9594 reset_mask2 = 0x3ffffff;
9595
9596
9597 if (!global)
9598 reset_mask2 &= ~global_bits2;
9599
9600
9601
9602
9603
9604
9605
9606
9607
9608
9609
9610
9611
9612
9613
9614 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9615 reset_mask2 & (~not_reset_mask2));
9616
9617 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9618 reset_mask1 & (~not_reset_mask1));
9619
9620 barrier();
9621 mmiowb();
9622
9623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9624 reset_mask2 & (~stay_reset2));
9625
9626 barrier();
9627 mmiowb();
9628
9629 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9630 mmiowb();
9631}
9632
/**
 * bnx2x_er_poll_igu_vq - poll the IGU pending-writes status.
 *
 * @bp:	driver handle
 *
 * Polls for up to one to two seconds; returns 0 once the IGU reports no
 * more pending (virtual) writes, -EBUSY otherwise.
 */
static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
{
	u32 cnt = 1000;
	u32 pend_bits = 0;

	do {
		pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);

		if (pend_bits == 0)
			break;

		usleep_range(1000, 2000);
	} while (cnt-- > 0);

	/* timed out while requests were still pending */
	if (pend_bits) {
		BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
			  pend_bits);
		return -EBUSY;
	}

	return 0;
}
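/* Polling budget above: up to 1000 iterations of usleep_range(1000, 2000),
 * i.e. the recovery flow waits roughly one to two seconds before giving up
 * on the IGU.
 */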
9664
9665static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9666{
9667 int cnt = 1000;
9668 u32 val = 0;
9669 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9670 u32 tags_63_32 = 0;
9671
9672
9673 do {
9674 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9675 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9676 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9677 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9678 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9679 if (CHIP_IS_E3(bp))
9680 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9681
9682 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9683 ((port_is_idle_0 & 0x1) == 0x1) &&
9684 ((port_is_idle_1 & 0x1) == 0x1) &&
9685 (pgl_exp_rom2 == 0xffffffff) &&
9686 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9687 break;
9688 usleep_range(1000, 2000);
9689 } while (cnt-- > 0);
9690
9691 if (cnt <= 0) {
9692 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9693 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9694 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9695 pgl_exp_rom2);
9696 return -EAGAIN;
9697 }
9698
9699 barrier();
9700
9701
9702 bnx2x_set_234_gates(bp, true);
9703
9704
9705 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9706 return -EAGAIN;
9707
9708
9709
9710
9711 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9712 barrier();
9713
9714
9715 mmiowb();
9716
9717
9718
9719
9720 usleep_range(1000, 2000);
9721
9722
9723
9724 if (global)
9725 bnx2x_reset_mcp_prep(bp, &val);
9726
9727
9728 bnx2x_pxp_prep(bp);
9729 barrier();
9730
9731
9732 bnx2x_process_kill_chip_reset(bp, global);
9733 barrier();
9734
9735
9736 if (!CHIP_IS_E1x(bp))
9737 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9738
9739
9740
9741 if (global && bnx2x_reset_mcp_comp(bp, val))
9742 return -EAGAIN;
9743
9744
9745
9746
9747 bnx2x_set_234_gates(bp, false);
9748
9749
9750
9751
9752 return 0;
9753}
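/* The "process kill" above is the global recovery hammer. In order it
 * (1) waits for the PCIe glue ("tetris buffer") to drain, (2) closes the
 * doorbell/interrupt gates #2-#4, (3) for a global reset also invalidates
 * the shmem validity map so the MCP gets reset, (4) pulls the chip-wide
 * reset bits, and (5) reopens the gates once the MCP is back.
 */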
9754
9755static int bnx2x_leader_reset(struct bnx2x *bp)
9756{
9757 int rc = 0;
9758 bool global = bnx2x_reset_is_global(bp);
9759 u32 load_code;
9760
9761
9762
9763
9764 if (!global && !BP_NOMCP(bp)) {
9765 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9766 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9767 if (!load_code) {
9768 BNX2X_ERR("MCP response failure, aborting\n");
9769 rc = -EAGAIN;
9770 goto exit_leader_reset;
9771 }
9772 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9773 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9774 BNX2X_ERR("MCP unexpected resp, aborting\n");
9775 rc = -EAGAIN;
9776 goto exit_leader_reset2;
9777 }
9778 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9779 if (!load_code) {
9780 BNX2X_ERR("MCP response failure, aborting\n");
9781 rc = -EAGAIN;
9782 goto exit_leader_reset2;
9783 }
9784 }
9785
9786
9787 if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad happened on engine %d! Aii!\n",
			  BP_PATH(bp));
9790 rc = -EAGAIN;
9791 goto exit_leader_reset2;
9792 }
9793
9794
9795
9796
9797
9798 bnx2x_set_reset_done(bp);
9799 if (global)
9800 bnx2x_clear_reset_global(bp);
9801
9802exit_leader_reset2:
9803
9804 if (!global && !BP_NOMCP(bp)) {
9805 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9806 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9807 }
9808exit_leader_reset:
9809 bp->is_leader = 0;
9810 bnx2x_release_leader_lock(bp);
9811 smp_mb();
9812 return rc;
9813}
9814
9815static void bnx2x_recovery_failed(struct bnx2x *bp)
9816{
9817 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9818
9819
9820 netif_device_detach(bp->dev);
9821
9822
9823
9824
9825
9826 bnx2x_set_reset_in_progress(bp);
9827
9828
9829 bnx2x_set_power_state(bp, PCI_D3hot);
9830
9831 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9832
9833 smp_mb();
9834}
9835
9836
9837
9838
9839
9840
9841static void bnx2x_parity_recover(struct bnx2x *bp)
9842{
9843 bool global = false;
9844 u32 error_recovered, error_unrecovered;
9845 bool is_parity;
9846
9847 DP(NETIF_MSG_HW, "Handling parity\n");
9848 while (1) {
9849 switch (bp->recovery_state) {
9850 case BNX2X_RECOVERY_INIT:
9851 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9852 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9853 WARN_ON(!is_parity);
9854
9855
9856 if (bnx2x_trylock_leader_lock(bp)) {
9857 bnx2x_set_reset_in_progress(bp);
9858
9859
9860
9861
9862
9863
9864 if (global)
9865 bnx2x_set_reset_global(bp);
9866
9867 bp->is_leader = 1;
9868 }
9869
9870
9871
9872 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9873 return;
9874
9875 bp->recovery_state = BNX2X_RECOVERY_WAIT;
9876
9877
9878
9879
9880
9881 smp_mb();
9882 break;
9883
9884 case BNX2X_RECOVERY_WAIT:
9885 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9886 if (bp->is_leader) {
9887 int other_engine = BP_PATH(bp) ? 0 : 1;
9888 bool other_load_status =
9889 bnx2x_get_load_status(bp, other_engine);
9890 bool load_status =
9891 bnx2x_get_load_status(bp, BP_PATH(bp));
9892 global = bnx2x_reset_is_global(bp);
9893
9894
9895
9896
9897
9898
9899
9900
9901
9902 if (load_status ||
9903 (global && other_load_status)) {
9904
9905
9906
9907 schedule_delayed_work(&bp->sp_rtnl_task,
9908 HZ/10);
9909 return;
9910 } else {
9911
9912
9913
9914
9915
9916 if (bnx2x_leader_reset(bp)) {
9917 bnx2x_recovery_failed(bp);
9918 return;
9919 }
9920
9921
9922
9923
9924
9925
9926 break;
9927 }
9928 } else {
9929 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
9930
9931
9932
9933
9934
9935
9936 if (bnx2x_trylock_leader_lock(bp)) {
9937
9938
9939
9940 bp->is_leader = 1;
9941 break;
9942 }
9943
9944 schedule_delayed_work(&bp->sp_rtnl_task,
9945 HZ/10);
9946 return;
9947
9948 } else {
9949
9950
9951
9952
9953 if (bnx2x_reset_is_global(bp)) {
9954 schedule_delayed_work(
9955 &bp->sp_rtnl_task,
9956 HZ/10);
9957 return;
9958 }
9959
9960 error_recovered =
9961 bp->eth_stats.recoverable_error;
9962 error_unrecovered =
9963 bp->eth_stats.unrecoverable_error;
9964 bp->recovery_state =
9965 BNX2X_RECOVERY_NIC_LOADING;
9966 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
9967 error_unrecovered++;
9968 netdev_err(bp->dev,
9969 "Recovery failed. Power cycle needed\n");
9970
9971 netif_device_detach(bp->dev);
9972
9973 bnx2x_set_power_state(
9974 bp, PCI_D3hot);
9975 smp_mb();
9976 } else {
9977 bp->recovery_state =
9978 BNX2X_RECOVERY_DONE;
9979 error_recovered++;
9980 smp_mb();
9981 }
9982 bp->eth_stats.recoverable_error =
9983 error_recovered;
9984 bp->eth_stats.unrecoverable_error =
9985 error_unrecovered;
9986
9987 return;
9988 }
9989 }
9990 default:
9991 return;
9992 }
9993 }
9994}
9995
9996static int bnx2x_close(struct net_device *dev);
9997
9998
9999
10000
10001static void bnx2x_sp_rtnl_task(struct work_struct *work)
10002{
10003 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10004
10005 rtnl_lock();
10006
10007 if (!netif_running(bp->dev)) {
10008 rtnl_unlock();
10009 return;
10010 }
10011
10012 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10013#ifdef BNX2X_STOP_ON_ERROR
10014 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10015 "you will need to reboot when done\n");
10016 goto sp_rtnl_not_reset;
10017#endif
10018
10019
10020
10021
10022 bp->sp_rtnl_state = 0;
10023 smp_mb();
10024
10025 bnx2x_parity_recover(bp);
10026
10027 rtnl_unlock();
10028 return;
10029 }
10030
10031 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10032#ifdef BNX2X_STOP_ON_ERROR
10033 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10034 "you will need to reboot when done\n");
10035 goto sp_rtnl_not_reset;
10036#endif
10037
10038
10039
10040
10041
10042 bp->sp_rtnl_state = 0;
10043 smp_mb();
10044
10045 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10046 bnx2x_nic_load(bp, LOAD_NORMAL);
10047
10048 rtnl_unlock();
10049 return;
10050 }
10051#ifdef BNX2X_STOP_ON_ERROR
10052sp_rtnl_not_reset:
10053#endif
10054 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10055 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10056 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10057 bnx2x_after_function_update(bp);
10058
10059
10060
10061
10062
10063 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10064 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10065 netif_device_detach(bp->dev);
10066 bnx2x_close(bp->dev);
10067 rtnl_unlock();
10068 return;
10069 }
10070
10071 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10072 DP(BNX2X_MSG_SP,
10073 "sending set mcast vf pf channel message from rtnl sp-task\n");
10074 bnx2x_vfpf_set_mcast(bp->dev);
10075 }
10076 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10077 &bp->sp_rtnl_state)){
10078 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
10079 bnx2x_tx_disable(bp);
			BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10081 }
10082 }
10083
10084 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10085 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10086 bnx2x_set_rx_mode_inner(bp);
10087 }
10088
10089 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10090 &bp->sp_rtnl_state))
10091 bnx2x_pf_set_vfs_vlan(bp);
10092
10093 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10094 bnx2x_dcbx_stop_hw_tx(bp);
10095 bnx2x_dcbx_resume_hw_tx(bp);
10096 }
10097
10098 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10099 &bp->sp_rtnl_state))
10100 bnx2x_update_mng_version(bp);
10101
10102
10103
10104
10105 rtnl_unlock();
10106
10107
10108 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10109 &bp->sp_rtnl_state)) {
10110 bnx2x_disable_sriov(bp);
10111 bnx2x_enable_sriov(bp);
10112 }
10113}
10114
10115static void bnx2x_period_task(struct work_struct *work)
10116{
10117 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10118
10119 if (!netif_running(bp->dev))
10120 goto period_task_exit;
10121
10122 if (CHIP_REV_IS_SLOW(bp)) {
10123 BNX2X_ERR("period task called on emulation, ignoring\n");
10124 goto period_task_exit;
10125 }
10126
10127 bnx2x_acquire_phy_lock(bp);
10128
10129
10130
10131
10132
10133 smp_mb();
10134 if (bp->port.pmf) {
10135 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10136
10137
10138 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10139 }
10140
10141 bnx2x_release_phy_lock(bp);
10142period_task_exit:
10143 return;
10144}
10145
10146
10147
10148
10149
10150static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10151{
10152 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10153 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10154 return base + (BP_ABS_FUNC(bp)) * stride;
10155}
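/* The two PGL "pretend" registers sit at a fixed per-function stride, so
 * any absolute function's register is derived as base + abs_func * stride
 * (stride computed from the F1/F0 pair) instead of being looked up in a
 * table.
 */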
10156
10157static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10158 u8 port, u32 reset_reg,
10159 struct bnx2x_mac_vals *vals)
10160{
10161 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10162 u32 base_addr;
10163
10164 if (!(mask & reset_reg))
10165 return false;
10166
10167 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10168 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10169 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10170 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10171 REG_WR(bp, vals->umac_addr[port], 0);
10172
10173 return true;
10174}
10175
10176static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10177 struct bnx2x_mac_vals *vals)
10178{
10179 u32 val, base_addr, offset, mask, reset_reg;
10180 bool mac_stopped = false;
10181 u8 port = BP_PORT(bp);
10182
	/* reset addresses as they also mark which values were changed */
	memset(vals, 0, sizeof(*vals));
10185
10186 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10187
10188 if (!CHIP_IS_E3(bp)) {
10189 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10190 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10191 if ((mask & reset_reg) && val) {
10192 u32 wb_data[2];
10193 BNX2X_DEV_INFO("Disable bmac Rx\n");
10194 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10195 : NIG_REG_INGRESS_BMAC0_MEM;
10196 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10197 : BIGMAC_REGISTER_BMAC_CONTROL;

			/* use rd/wr since we cannot use dmae. This is safe
			 * since MCP won't access the bus due to the request
			 * to unload, and no function on the path can be
			 * loaded at this time.
			 */
10205 wb_data[0] = REG_RD(bp, base_addr + offset);
10206 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10207 vals->bmac_addr = base_addr + offset;
10208 vals->bmac_val[0] = wb_data[0];
10209 vals->bmac_val[1] = wb_data[1];
10210 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10211 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10212 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10213 }
10214 BNX2X_DEV_INFO("Disable emac Rx\n");
10215 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10216 vals->emac_val = REG_RD(bp, vals->emac_addr);
10217 REG_WR(bp, vals->emac_addr, 0);
10218 mac_stopped = true;
10219 } else {
10220 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10221 BNX2X_DEV_INFO("Disable xmac Rx\n");
10222 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10223 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10224 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10225 val & ~(1 << 1));
10226 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10227 val | (1 << 1));
10228 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10229 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10230 REG_WR(bp, vals->xmac_addr, 0);
10231 mac_stopped = true;
10232 }
10233
10234 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10235 reset_reg, vals);
10236 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10237 reset_reg, vals);
10238 }
10239
10240 if (mac_stopped)
10241 msleep(20);
10242}
10243
10244#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10245#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10246 0x1848 + ((f) << 4))
10247#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10248#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10249#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
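/* Round-trip illustration of the producer packing above (example value
 * only): for val == 0x00200010, BNX2X_PREV_UNDI_RCQ(val) yields 0x0010 and
 * BNX2X_PREV_UNDI_BD(val) yields 0x0020; BNX2X_PREV_UNDI_PROD(0x0010,
 * 0x0020) then reassembles the original word, BD producer in the high
 * half-word and RCQ producer in the low one.
 */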
10250
10251#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10252#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10253#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10254
10255static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10256{
10257
10258
10259
10260 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10261 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10262 return false;
10263
10264 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10265 BNX2X_DEV_INFO("UNDI previously loaded\n");
10266 return true;
10267 }
10268
10269 return false;
10270}
10271
10272static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10273{
10274 u16 rcq, bd;
10275 u32 addr, tmp_reg;
10276
10277 if (BP_FUNC(bp) < 2)
10278 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10279 else
10280 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10281
10282 tmp_reg = REG_RD(bp, addr);
10283 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10284 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10285
10286 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10287 REG_WR(bp, addr, tmp_reg);
10288
10289 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10290 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10291}
10292
10293static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10294{
10295 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10296 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10297 if (!rc) {
10298 BNX2X_ERR("MCP response failure, aborting\n");
10299 return -EBUSY;
10300 }
10301
10302 return 0;
10303}
10304
10305static struct bnx2x_prev_path_list *
10306 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10307{
10308 struct bnx2x_prev_path_list *tmp_list;
10309
10310 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10311 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10312 bp->pdev->bus->number == tmp_list->bus &&
10313 BP_PATH(bp) == tmp_list->path)
10314 return tmp_list;
10315
10316 return NULL;
10317}
10318
10319static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10320{
10321 struct bnx2x_prev_path_list *tmp_list;
10322 int rc;
10323
10324 rc = down_interruptible(&bnx2x_prev_sem);
10325 if (rc) {
10326 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10327 return rc;
10328 }
10329
10330 tmp_list = bnx2x_prev_path_get_entry(bp);
10331 if (tmp_list) {
10332 tmp_list->aer = 1;
10333 rc = 0;
10334 } else {
		BNX2X_ERR("path %d: Entry does not exist for eeh; does this flow run before the initial insmod has finished?\n",
			  BP_PATH(bp));
10337 }
10338
10339 up(&bnx2x_prev_sem);
10340
10341 return rc;
10342}
10343
10344static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10345{
10346 struct bnx2x_prev_path_list *tmp_list;
10347 bool rc = false;
10348
10349 if (down_trylock(&bnx2x_prev_sem))
10350 return false;
10351
10352 tmp_list = bnx2x_prev_path_get_entry(bp);
10353 if (tmp_list) {
10354 if (tmp_list->aer) {
10355 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10356 BP_PATH(bp));
10357 } else {
10358 rc = true;
10359 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10360 BP_PATH(bp));
10361 }
10362 }
10363
10364 up(&bnx2x_prev_sem);
10365
10366 return rc;
10367}
10368
10369bool bnx2x_port_after_undi(struct bnx2x *bp)
10370{
10371 struct bnx2x_prev_path_list *entry;
10372 bool val;
10373
10374 down(&bnx2x_prev_sem);
10375
10376 entry = bnx2x_prev_path_get_entry(bp);
10377 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10378
10379 up(&bnx2x_prev_sem);
10380
10381 return val;
10382}
10383
10384static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10385{
10386 struct bnx2x_prev_path_list *tmp_list;
10387 int rc;
10388
10389 rc = down_interruptible(&bnx2x_prev_sem);
10390 if (rc) {
10391 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10392 return rc;
10393 }
10394
10395
10396 tmp_list = bnx2x_prev_path_get_entry(bp);
10397 if (tmp_list) {
10398 if (!tmp_list->aer) {
10399 BNX2X_ERR("Re-Marking the path.\n");
10400 } else {
10401 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10402 BP_PATH(bp));
10403 tmp_list->aer = 0;
10404 }
10405 up(&bnx2x_prev_sem);
10406 return 0;
10407 }
10408 up(&bnx2x_prev_sem);
10409
10410
10411 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10412 if (!tmp_list) {
10413 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10414 return -ENOMEM;
10415 }
10416
10417 tmp_list->bus = bp->pdev->bus->number;
10418 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10419 tmp_list->path = BP_PATH(bp);
10420 tmp_list->aer = 0;
10421 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10422
10423 rc = down_interruptible(&bnx2x_prev_sem);
10424 if (rc) {
10425 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10426 kfree(tmp_list);
10427 } else {
10428 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10429 BP_PATH(bp));
10430 list_add(&tmp_list->list, &bnx2x_prev_list);
10431 up(&bnx2x_prev_sem);
10432 }
10433
10434 return rc;
10435}
10436
10437static int bnx2x_do_flr(struct bnx2x *bp)
10438{
10439 struct pci_dev *dev = bp->pdev;
10440
10441 if (CHIP_IS_E1x(bp)) {
10442 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10443 return -EINVAL;
10444 }
10445
10446
10447 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10448 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10449 bp->common.bc_ver);
10450 return -EINVAL;
10451 }
10452
10453 if (!pci_wait_for_pending_transaction(dev))
10454 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10455
10456 BNX2X_DEV_INFO("Initiating FLR\n");
10457 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10458
10459 return 0;
10460}
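/* Note the FLR is initiated through the management firmware
 * (DRV_MSG_CODE_INITIATE_FLR) rather than via PCIe config space, which is
 * why the bootcode version gate above is required before attempting it.
 */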
10461
10462static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10463{
10464 int rc;
10465
10466 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10467
10468
10469 if (bnx2x_prev_is_path_marked(bp))
10470 return bnx2x_prev_mcp_done(bp);
10471
10472 BNX2X_DEV_INFO("Path is unmarked\n");
10473
10474
10475 if (bnx2x_prev_is_after_undi(bp))
10476 goto out;
10477
10478
10479
10480
10481
10482 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10483
10484 if (!rc) {
10485
10486 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10487 rc = bnx2x_do_flr(bp);
10488 }
10489
10490 if (!rc) {
10491
10492 BNX2X_DEV_INFO("FLR successful\n");
10493 return 0;
10494 }
10495
10496 BNX2X_DEV_INFO("Could not FLR\n");
10497
10498out:
10499
10500 rc = bnx2x_prev_mcp_done(bp);
10501 if (!rc)
10502 rc = BNX2X_PREV_WAIT_NEEDED;
10503
10504 return rc;
10505}
10506
10507static int bnx2x_prev_unload_common(struct bnx2x *bp)
10508{
10509 u32 reset_reg, tmp_reg = 0, rc;
10510 bool prev_undi = false;
10511 struct bnx2x_mac_vals mac_vals;
10512
	/* It is possible a previous function received 'common' answer,
	 * but hasn't loaded yet, therefore creating a scenario of
	 * multiple functions receiving 'common' on the same path.
	 */
10517 BNX2X_DEV_INFO("Common unload Flow\n");
10518
10519 memset(&mac_vals, 0, sizeof(mac_vals));
10520
10521 if (bnx2x_prev_is_path_marked(bp))
10522 return bnx2x_prev_mcp_done(bp);
10523
10524 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10525
10526
10527 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10528 u32 timer_count = 1000;
10529
10530
10531 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10532
10533
10534 bnx2x_set_rx_filter(&bp->link_params, 0);
10535 bp->link_params.port ^= 1;
10536 bnx2x_set_rx_filter(&bp->link_params, 0);
10537 bp->link_params.port ^= 1;
10538
10539
10540 if (bnx2x_prev_is_after_undi(bp)) {
10541 prev_undi = true;
10542
10543 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10544
10545 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10546 }
10547 if (!CHIP_IS_E1x(bp))
10548
10549 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10550
10551
10552 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10553 while (timer_count) {
10554 u32 prev_brb = tmp_reg;
10555
10556 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10557 if (!tmp_reg)
10558 break;
10559
10560 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10561
10562
10563 if (prev_brb > tmp_reg)
10564 timer_count = 1000;
10565 else
10566 timer_count--;
10567
10568
10569 if (prev_undi)
10570 bnx2x_prev_unload_undi_inc(bp, 1);
10571
10572 udelay(10);
10573 }
10574
10575 if (!timer_count)
10576 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10577 }
10578
10579
10580 bnx2x_reset_common(bp);
10581
10582 if (mac_vals.xmac_addr)
10583 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10584 if (mac_vals.umac_addr[0])
10585 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10586 if (mac_vals.umac_addr[1])
10587 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10588 if (mac_vals.emac_addr)
10589 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10590 if (mac_vals.bmac_addr) {
10591 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10592 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10593 }
10594
10595 rc = bnx2x_prev_mark_path(bp, prev_undi);
10596 if (rc) {
10597 bnx2x_prev_mcp_done(bp);
10598 return rc;
10599 }
10600
10601 return bnx2x_prev_mcp_done(bp);
10602}
10603
static int bnx2x_prev_unload(struct bnx2x *bp)
{
	int time_counter = 10;
	int rc = 0;
	u32 fw, hw_lock_reg, hw_lock_val;

	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10609
	/* Clear HW from errors which may have resulted from an interrupted
	 * DMAE transaction.
	 */
	bnx2x_clean_pglue_errors(bp);
10614
	/* Release previously held locks */
	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10619
10620 hw_lock_val = REG_RD(bp, hw_lock_reg);
10621 if (hw_lock_val) {
10622 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10623 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10624 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10625 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10626 }
10627
10628 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10629 REG_WR(bp, hw_lock_reg, 0xffffffff);
10630 } else
10631 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10632
10633 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10634 BNX2X_DEV_INFO("Release previously held alr\n");
10635 bnx2x_release_alr(bp);
10636 }
10637
10638 do {
10639 int aer = 0;
10640
10641 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10642 if (!fw) {
10643 BNX2X_ERR("MCP response failure, aborting\n");
10644 rc = -EBUSY;
10645 break;
10646 }
10647
10648 rc = down_interruptible(&bnx2x_prev_sem);
10649 if (rc) {
10650 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10651 rc);
10652 } else {
10653
10654 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10655 bnx2x_prev_path_get_entry(bp)->aer);
10656 up(&bnx2x_prev_sem);
10657 }
10658
10659 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10660 rc = bnx2x_prev_unload_common(bp);
10661 break;
10662 }
10663
10664
10665 rc = bnx2x_prev_unload_uncommon(bp);
10666 if (rc != BNX2X_PREV_WAIT_NEEDED)
10667 break;
10668
10669 msleep(20);
10670 } while (--time_counter);
10671
	if (!time_counter || rc) {
		BNX2X_DEV_INFO("Unloading previous driver did not occur; possibly due to MF UNDI\n");
		rc = -EPROBE_DEFER;
	}
10676
10677
10678 if (bnx2x_port_after_undi(bp))
10679 bp->link_params.feature_config_flags |=
10680 FEATURE_CONFIG_BOOT_FROM_SAN;
10681
10682 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10683
10684 return rc;
10685}
10686
10687static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10688{
10689 u32 val, val2, val3, val4, id, boot_mode;
10690 u16 pmc;
10691
10692
	/* Get the chip revision info */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
10695 id = ((val & 0xffff) << 16);
10696 val = REG_RD(bp, MISC_REG_CHIP_REV);
10697 id |= ((val & 0xf) << 12);
10698
	/* Metal is read from PCI regs, but we can't access >=0x400 from
	 * the configuration space (so we need to reg_rd)
	 */
	val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10703 id |= (((val >> 24) & 0xf) << 4);
10704 val = REG_RD(bp, MISC_REG_BOND_ID);
10705 id |= (val & 0xf);
10706 bp->common.chip_id = id;
10707
10708
10709 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10710 if (CHIP_IS_57810(bp))
10711 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10712 (bp->common.chip_id & 0x0000FFFF);
10713 else if (CHIP_IS_57810_MF(bp))
10714 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10715 (bp->common.chip_id & 0x0000FFFF);
10716 bp->common.chip_id |= 0x1;
10717 }
10718
10719
10720 bp->db_size = (1 << BNX2X_DB_SHIFT);
10721
10722 if (!CHIP_IS_E1x(bp)) {
10723 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10724 if ((val & 1) == 0)
10725 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10726 else
10727 val = (val >> 1) & 1;
10728 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10729 "2_PORT_MODE");
10730 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10731 CHIP_2_PORT_MODE;
10732
10733 if (CHIP_MODE_IS_4_PORT(bp))
10734 bp->pfid = (bp->pf_num >> 1);
10735 else
10736 bp->pfid = (bp->pf_num & 0x6);
10737 } else {
10738 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
10739 bp->pfid = bp->pf_num;
10740 }
10741
	BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
10743
10744 bp->link_params.chip_id = bp->common.chip_id;
10745 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10746
10747 val = (REG_RD(bp, 0x2874) & 0x55);
10748 if ((bp->common.chip_id & 0x1) ||
10749 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10750 bp->flags |= ONE_PORT_FLAG;
10751 BNX2X_DEV_INFO("single port device\n");
10752 }
10753
10754 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
10755 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
10756 (val & MCPR_NVM_CFG4_FLASH_SIZE));
10757 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10758 bp->common.flash_size, bp->common.flash_size);
10759
10760 bnx2x_init_shmem(bp);
10761
10762 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10763 MISC_REG_GENERIC_CR_1 :
10764 MISC_REG_GENERIC_CR_0));
10765
10766 bp->link_params.shmem_base = bp->common.shmem_base;
10767 bp->link_params.shmem2_base = bp->common.shmem2_base;
10768 if (SHMEM2_RD(bp, size) >
10769 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
10770 bp->link_params.lfa_base =
10771 REG_RD(bp, bp->common.shmem2_base +
10772 (u32)offsetof(struct shmem2_region,
10773 lfa_host_addr[BP_PORT(bp)]));
10774 else
10775 bp->link_params.lfa_base = 0;
10776 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
10777 bp->common.shmem_base, bp->common.shmem2_base);
10778
10779 if (!bp->common.shmem_base) {
10780 BNX2X_DEV_INFO("MCP not active\n");
10781 bp->flags |= NO_MCP_FLAG;
10782 return;
10783 }
10784
10785 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
10786 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
10787
10788 bp->link_params.hw_led_mode = ((bp->common.hw_config &
10789 SHARED_HW_CFG_LED_MODE_MASK) >>
10790 SHARED_HW_CFG_LED_MODE_SHIFT);
10791
10792 bp->link_params.feature_config_flags = 0;
10793 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
10794 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
10795 bp->link_params.feature_config_flags |=
10796 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10797 else
10798 bp->link_params.feature_config_flags &=
10799 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10800
10801 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
10802 bp->common.bc_ver = val;
10803 BNX2X_DEV_INFO("bc_ver %X\n", val);
10804 if (val < BNX2X_BC_VER) {
10805
10806
10807 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
10808 BNX2X_BC_VER, val);
10809 }
10810 bp->link_params.feature_config_flags |=
10811 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
10812 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
10813
10814 bp->link_params.feature_config_flags |=
10815 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
10816 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
10817 bp->link_params.feature_config_flags |=
10818 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
10819 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
10820 bp->link_params.feature_config_flags |=
10821 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
10822 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
10823
10824 bp->link_params.feature_config_flags |=
10825 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
10826 FEATURE_CONFIG_MT_SUPPORT : 0;
10827
10828 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
10829 BC_SUPPORTS_PFC_STATS : 0;
10830
10831 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
10832 BC_SUPPORTS_FCOE_FEATURES : 0;
10833
10834 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10835 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
10836
10837 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
10838 BC_SUPPORTS_RMMOD_CMD : 0;
10839
10840 boot_mode = SHMEM_RD(bp,
10841 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10842 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
10843 switch (boot_mode) {
10844 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
10845 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
10846 break;
10847 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
10848 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
10849 break;
10850 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
10851 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
10852 break;
10853 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
10854 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
10855 break;
10856 }
10857
10858 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
10859 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10860
10861 BNX2X_DEV_INFO("%sWoL capable\n",
10862 (bp->flags & NO_WOL_FLAG) ? "not " : "");
10863
10864 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
10865 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
10866 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
10867 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
10868
10869 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
10870 val, val2, val3, val4);
10871}
10872
10873#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
10874#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
10875
10876static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
10877{
10878 int pfid = BP_FUNC(bp);
10879 int igu_sb_id;
10880 u32 val;
10881 u8 fid, igu_sb_cnt = 0;
10882
10883 bp->igu_base_sb = 0xff;
10884 if (CHIP_INT_MODE_IS_BC(bp)) {
10885 int vn = BP_VN(bp);
10886 igu_sb_cnt = bp->igu_sb_cnt;
10887 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
10888 FP_SB_MAX_E1x;
10889
10890 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
10891 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
10892
10893 return 0;
10894 }
10895
	/* IGU in normal mode - read CAM */
10897 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
10898 igu_sb_id++) {
10899 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
10900 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
10901 continue;
10902 fid = IGU_FID(val);
10903 if ((fid & IGU_FID_ENCODE_IS_PF)) {
10904 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10905 continue;
10906 if (IGU_VEC(val) == 0)
				/* default status block */
10908 bp->igu_dsb_id = igu_sb_id;
10909 else {
10910 if (bp->igu_base_sb == 0xff)
10911 bp->igu_base_sb = igu_sb_id;
10912 igu_sb_cnt++;
10913 }
10914 }
10915 }
10916
10917#ifdef CONFIG_PCI_MSI
	/* The number of CAM entries the MFW allocates may be smaller than
	 * the value advertised in PCI; use the minimum of the two as the
	 * actual status block count.
	 */
10924 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
10925#endif
10926
10927 if (igu_sb_cnt == 0) {
10928 BNX2X_ERR("CAM configuration error\n");
10929 return -EINVAL;
10930 }
10931
10932 return 0;
10933}
10934
10935static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
10936{
10937 int cfg_size = 0, idx, port = BP_PORT(bp);
10938
	/* Aggregation of supported attributes of all external phys */
10940 bp->port.supported[0] = 0;
10941 bp->port.supported[1] = 0;
10942 switch (bp->link_params.num_phys) {
10943 case 1:
10944 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10945 cfg_size = 1;
10946 break;
10947 case 2:
10948 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10949 cfg_size = 1;
10950 break;
10951 case 3:
10952 if (bp->link_params.multi_phy_config &
10953 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10954 bp->port.supported[1] =
10955 bp->link_params.phy[EXT_PHY1].supported;
10956 bp->port.supported[0] =
10957 bp->link_params.phy[EXT_PHY2].supported;
10958 } else {
10959 bp->port.supported[0] =
10960 bp->link_params.phy[EXT_PHY1].supported;
10961 bp->port.supported[1] =
10962 bp->link_params.phy[EXT_PHY2].supported;
10963 }
10964 cfg_size = 2;
10965 break;
10966 }
10967
10968 if (!(bp->port.supported[0] || bp->port.supported[1])) {
10969 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
10970 SHMEM_RD(bp,
10971 dev_info.port_hw_config[port].external_phy_config),
10972 SHMEM_RD(bp,
10973 dev_info.port_hw_config[port].external_phy_config2));
10974 return;
10975 }
10976
10977 if (CHIP_IS_E3(bp))
10978 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10979 else {
10980 switch (switch_cfg) {
10981 case SWITCH_CFG_1G:
10982 bp->port.phy_addr = REG_RD(
10983 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10984 break;
10985 case SWITCH_CFG_10G:
10986 bp->port.phy_addr = REG_RD(
10987 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10988 break;
10989 default:
10990 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
10991 bp->port.link_config[0]);
10992 return;
10993 }
10994 }
10995 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
10996
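	/* mask what we support according to speed_cap_mask per configuration */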
10997 for (idx = 0; idx < cfg_size; idx++) {
10998 if (!(bp->link_params.speed_cap_mask[idx] &
10999 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11000 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11001
11002 if (!(bp->link_params.speed_cap_mask[idx] &
11003 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11004 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11005
11006 if (!(bp->link_params.speed_cap_mask[idx] &
11007 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11008 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11009
11010 if (!(bp->link_params.speed_cap_mask[idx] &
11011 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11012 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11013
11014 if (!(bp->link_params.speed_cap_mask[idx] &
11015 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11016 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11017 SUPPORTED_1000baseT_Full);
11018
11019 if (!(bp->link_params.speed_cap_mask[idx] &
11020 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11021 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11022
11023 if (!(bp->link_params.speed_cap_mask[idx] &
11024 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11025 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11026
11027 if (!(bp->link_params.speed_cap_mask[idx] &
11028 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11029 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11030 }
11031
11032 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11033 bp->port.supported[1]);
11034}
11035
11036static void bnx2x_link_settings_requested(struct bnx2x *bp)
11037{
	u32 link_config, idx, cfg_size = 0;

11039 bp->port.advertising[0] = 0;
11040 bp->port.advertising[1] = 0;
11041 switch (bp->link_params.num_phys) {
11042 case 1:
11043 case 2:
11044 cfg_size = 1;
11045 break;
11046 case 3:
11047 cfg_size = 2;
11048 break;
11049 }
11050 for (idx = 0; idx < cfg_size; idx++) {
11051 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11052 link_config = bp->port.link_config[idx];
11053 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11054 case PORT_FEATURE_LINK_SPEED_AUTO:
11055 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11056 bp->link_params.req_line_speed[idx] =
11057 SPEED_AUTO_NEG;
11058 bp->port.advertising[idx] |=
11059 bp->port.supported[idx];
11060 if (bp->link_params.phy[EXT_PHY1].type ==
11061 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11062 bp->port.advertising[idx] |=
11063 (SUPPORTED_100baseT_Half |
11064 SUPPORTED_100baseT_Full);
11065 } else {
				/* force 10G, no AN */
11067 bp->link_params.req_line_speed[idx] =
11068 SPEED_10000;
11069 bp->port.advertising[idx] |=
11070 (ADVERTISED_10000baseT_Full |
11071 ADVERTISED_FIBRE);
11072 continue;
11073 }
11074 break;
11075
11076 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11077 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11078 bp->link_params.req_line_speed[idx] =
11079 SPEED_10;
11080 bp->port.advertising[idx] |=
11081 (ADVERTISED_10baseT_Full |
11082 ADVERTISED_TP);
11083 } else {
11084 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11085 link_config,
11086 bp->link_params.speed_cap_mask[idx]);
11087 return;
11088 }
11089 break;
11090
11091 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11092 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11093 bp->link_params.req_line_speed[idx] =
11094 SPEED_10;
11095 bp->link_params.req_duplex[idx] =
11096 DUPLEX_HALF;
11097 bp->port.advertising[idx] |=
11098 (ADVERTISED_10baseT_Half |
11099 ADVERTISED_TP);
11100 } else {
11101 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11102 link_config,
11103 bp->link_params.speed_cap_mask[idx]);
11104 return;
11105 }
11106 break;
11107
11108 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11109 if (bp->port.supported[idx] &
11110 SUPPORTED_100baseT_Full) {
11111 bp->link_params.req_line_speed[idx] =
11112 SPEED_100;
11113 bp->port.advertising[idx] |=
11114 (ADVERTISED_100baseT_Full |
11115 ADVERTISED_TP);
11116 } else {
11117 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11118 link_config,
11119 bp->link_params.speed_cap_mask[idx]);
11120 return;
11121 }
11122 break;
11123
11124 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11125 if (bp->port.supported[idx] &
11126 SUPPORTED_100baseT_Half) {
11127 bp->link_params.req_line_speed[idx] =
11128 SPEED_100;
11129 bp->link_params.req_duplex[idx] =
11130 DUPLEX_HALF;
11131 bp->port.advertising[idx] |=
11132 (ADVERTISED_100baseT_Half |
11133 ADVERTISED_TP);
11134 } else {
11135 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11136 link_config,
11137 bp->link_params.speed_cap_mask[idx]);
11138 return;
11139 }
11140 break;
11141
11142 case PORT_FEATURE_LINK_SPEED_1G:
11143 if (bp->port.supported[idx] &
11144 SUPPORTED_1000baseT_Full) {
11145 bp->link_params.req_line_speed[idx] =
11146 SPEED_1000;
11147 bp->port.advertising[idx] |=
11148 (ADVERTISED_1000baseT_Full |
11149 ADVERTISED_TP);
11150 } else {
11151 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11152 link_config,
11153 bp->link_params.speed_cap_mask[idx]);
11154 return;
11155 }
11156 break;
11157
11158 case PORT_FEATURE_LINK_SPEED_2_5G:
11159 if (bp->port.supported[idx] &
11160 SUPPORTED_2500baseX_Full) {
11161 bp->link_params.req_line_speed[idx] =
11162 SPEED_2500;
11163 bp->port.advertising[idx] |=
11164 (ADVERTISED_2500baseX_Full |
11165 ADVERTISED_TP);
11166 } else {
11167 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11168 link_config,
11169 bp->link_params.speed_cap_mask[idx]);
11170 return;
11171 }
11172 break;
11173
11174 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11175 if (bp->port.supported[idx] &
11176 SUPPORTED_10000baseT_Full) {
11177 bp->link_params.req_line_speed[idx] =
11178 SPEED_10000;
11179 bp->port.advertising[idx] |=
11180 (ADVERTISED_10000baseT_Full |
11181 ADVERTISED_FIBRE);
11182 } else {
11183 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11184 link_config,
11185 bp->link_params.speed_cap_mask[idx]);
11186 return;
11187 }
11188 break;
11189 case PORT_FEATURE_LINK_SPEED_20G:
11190 bp->link_params.req_line_speed[idx] = SPEED_20000;
11191
11192 break;
11193 default:
11194 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11195 link_config);
11196 bp->link_params.req_line_speed[idx] =
11197 SPEED_AUTO_NEG;
11198 bp->port.advertising[idx] =
11199 bp->port.supported[idx];
11200 break;
11201 }
11202
11203 bp->link_params.req_flow_ctrl[idx] = (link_config &
11204 PORT_FEATURE_FLOW_CONTROL_MASK);
11205 if (bp->link_params.req_flow_ctrl[idx] ==
11206 BNX2X_FLOW_CTRL_AUTO) {
11207 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11208 bp->link_params.req_flow_ctrl[idx] =
11209 BNX2X_FLOW_CTRL_NONE;
11210 else
11211 bnx2x_set_requested_fc(bp);
11212 }
11213
11214 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11215 bp->link_params.req_line_speed[idx],
11216 bp->link_params.req_duplex[idx],
11217 bp->link_params.req_flow_ctrl[idx],
11218 bp->port.advertising[idx]);
11219 }
11220}
11221
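/* Pack a MAC address, read from shmem as a 16-bit high word and a 32-bit
 * low word, into a 6-byte network-order buffer.
 */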
11222static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11223{
11224 __be16 mac_hi_be = cpu_to_be16(mac_hi);
11225 __be32 mac_lo_be = cpu_to_be32(mac_lo);
11226 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11227 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11228}
11229
11230static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11231{
11232 int port = BP_PORT(bp);
11233 u32 config;
11234 u32 ext_phy_type, ext_phy_config, eee_mode;
11235
11236 bp->link_params.bp = bp;
11237 bp->link_params.port = port;
11238
11239 bp->link_params.lane_config =
11240 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11241
11242 bp->link_params.speed_cap_mask[0] =
11243 SHMEM_RD(bp,
11244 dev_info.port_hw_config[port].speed_capability_mask) &
11245 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11246 bp->link_params.speed_cap_mask[1] =
11247 SHMEM_RD(bp,
11248 dev_info.port_hw_config[port].speed_capability_mask2) &
11249 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11250 bp->port.link_config[0] =
11251 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11252
11253 bp->port.link_config[1] =
11254 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11255
11256 bp->link_params.multi_phy_config =
11257 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11258
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
11261 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11262 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11263 (config & PORT_FEATURE_WOL_ENABLED));
11264
11265 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11266 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11267 bp->flags |= NO_ISCSI_FLAG;
11268 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11269 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11270 bp->flags |= NO_FCOE_FLAG;
11271
11272 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
11273 bp->link_params.lane_config,
11274 bp->link_params.speed_cap_mask[0],
11275 bp->port.link_config[0]);
11276
11277 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11278 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11279 bnx2x_phy_probe(&bp->link_params);
11280 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11281
11282 bnx2x_link_settings_requested(bp);
11283
	/* If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
11288 ext_phy_config =
11289 SHMEM_RD(bp,
11290 dev_info.port_hw_config[port].external_phy_config);
11291 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11292 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11293 bp->mdio.prtad = bp->port.phy_addr;
11294
11295 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11296 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11297 bp->mdio.prtad =
11298 XGXS_EXT_PHY_ADDR(ext_phy_config);
11299
	/* Configure link feature according to nvram value */
11301 eee_mode = (((SHMEM_RD(bp, dev_info.
11302 port_feature_config[port].eee_power_mode)) &
11303 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11304 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11305 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11306 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11307 EEE_MODE_ENABLE_LPI |
11308 EEE_MODE_OUTPUT_TIME;
11309 } else {
11310 bp->link_params.eee_mode = 0;
11311 }
11312}
11313
11314void bnx2x_get_iscsi_info(struct bnx2x *bp)
11315{
11316 u32 no_flags = NO_ISCSI_FLAG;
11317 int port = BP_PORT(bp);
11318 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11319 drv_lic_key[port].max_iscsi_conn);
11320
11321 if (!CNIC_SUPPORT(bp)) {
11322 bp->flags |= no_flags;
11323 return;
11324 }
11325
	/* Get the number of maximum allowed iSCSI connections */
11327 bp->cnic_eth_dev.max_iscsi_conn =
11328 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11329 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11330
11331 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11332 bp->cnic_eth_dev.max_iscsi_conn);
11333
	/* If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
11338 if (!bp->cnic_eth_dev.max_iscsi_conn)
11339 bp->flags |= no_flags;
11340}
11341
11342static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11343{
	/* Port info */
11345 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11346 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11347 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11348 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11349
	/* Node info */
11351 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11352 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11353 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11354 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11355}
11356
11357static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11358{
11359 u8 count = 0;
11360
11361 if (IS_MF(bp)) {
11362 u8 fid;
11363
		/* iterate over absolute function ids for this path: */
11365 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11366 if (IS_MF_SD(bp)) {
11367 u32 cfg = MF_CFG_RD(bp,
11368 func_mf_config[fid].config);
11369
11370 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11371 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11372 FUNC_MF_CFG_PROTOCOL_FCOE))
11373 count++;
11374 } else {
11375 u32 cfg = MF_CFG_RD(bp,
11376 func_ext_config[fid].
11377 func_cfg);
11378
11379 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11380 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11381 count++;
11382 }
11383 }
11384 } else {
11385 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11386
11387 for (port = 0; port < port_cnt; port++) {
11388 u32 lic = SHMEM_RD(bp,
11389 drv_lic_key[port].max_fcoe_conn) ^
11390 FW_ENCODE_32BIT_PATTERN;
11391 if (lic)
11392 count++;
11393 }
11394 }
11395
11396 return count;
11397}
11398
11399static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11400{
11401 int port = BP_PORT(bp);
11402 int func = BP_ABS_FUNC(bp);
11403 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11404 drv_lic_key[port].max_fcoe_conn);
11405 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11406
11407 if (!CNIC_SUPPORT(bp)) {
11408 bp->flags |= NO_FCOE_FLAG;
11409 return;
11410 }
11411
	/* Get the number of maximum allowed FCoE connections */
11413 bp->cnic_eth_dev.max_fcoe_conn =
11414 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11415 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11416
	/* Calculate the number of maximum allowed FCoE tasks */
11418 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11419
	/* check if FCoE resources must be shared between different functions */
11421 if (num_fcoe_func)
11422 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11423
	/* Read the WWN: */
11425 if (!IS_MF(bp)) {
		/* Port info */
11427 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11428 SHMEM_RD(bp,
11429 dev_info.port_hw_config[port].
11430 fcoe_wwn_port_name_upper);
11431 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11432 SHMEM_RD(bp,
11433 dev_info.port_hw_config[port].
11434 fcoe_wwn_port_name_lower);
11435
		/* Node info */
11437 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11438 SHMEM_RD(bp,
11439 dev_info.port_hw_config[port].
11440 fcoe_wwn_node_name_upper);
11441 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11442 SHMEM_RD(bp,
11443 dev_info.port_hw_config[port].
11444 fcoe_wwn_node_name_lower);
11445 } else if (!IS_MF_SD(bp)) {
		/* Read the WWN info only if the FCoE feature is enabled for
		 * this function.
		 */
11449 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11450 bnx2x_get_ext_wwn_info(bp, func);
11451 } else {
11452 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11453 bnx2x_get_ext_wwn_info(bp, func);
11454 }
11455
11456 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11457
	/* If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
11462 if (!bp->cnic_eth_dev.max_fcoe_conn)
11463 bp->flags |= NO_FCOE_FLAG;
11464}
11465
11466static void bnx2x_get_cnic_info(struct bnx2x *bp)
11467{
	/* iSCSI may be dynamically disabled, but reading the info here
	 * lets the driver decrease memory usage if the feature is
	 * disabled for good.
	 */
11473 bnx2x_get_iscsi_info(bp);
11474 bnx2x_get_fcoe_info(bp);
11475}
11476
11477static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11478{
11479 u32 val, val2;
11480 int func = BP_ABS_FUNC(bp);
11481 int port = BP_PORT(bp);
11482 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11483 u8 *fip_mac = bp->fip_mac;
11484
11485 if (IS_MF(bp)) {
		/* iSCSI and FCoE NPAR MACs: if there is no iSCSI or FCoE MAC
		 * then the appropriate feature should be disabled.
		 * In non-SD mode the features configuration comes from struct
		 * func_ext_config.
		 */
11491 if (!IS_MF_SD(bp)) {
11492 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11493 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11494 val2 = MF_CFG_RD(bp, func_ext_config[func].
11495 iscsi_mac_addr_upper);
11496 val = MF_CFG_RD(bp, func_ext_config[func].
11497 iscsi_mac_addr_lower);
11498 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11499 BNX2X_DEV_INFO
11500 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11501 } else {
11502 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11503 }
11504
11505 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11506 val2 = MF_CFG_RD(bp, func_ext_config[func].
11507 fcoe_mac_addr_upper);
11508 val = MF_CFG_RD(bp, func_ext_config[func].
11509 fcoe_mac_addr_lower);
11510 bnx2x_set_mac_buf(fip_mac, val, val2);
11511 BNX2X_DEV_INFO
11512 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11513 } else {
11514 bp->flags |= NO_FCOE_FLAG;
11515 }
11516
11517 bp->mf_ext_config = cfg;
11518
11519 } else {
11520 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
				/* use primary mac as iscsi mac */
11522 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11523
11524 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11525 BNX2X_DEV_INFO
11526 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11527 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
				/* use primary mac as fip mac */
11529 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11530 BNX2X_DEV_INFO("SD FCoE MODE\n");
11531 BNX2X_DEV_INFO
11532 ("Read FIP MAC: %pM\n", fip_mac);
11533 }
11534 }
11535
		/* If this is a storage-only interface, use SAN mac as
		 * primary MAC. Notice that for SD this is already the case,
		 * as the SAN mac was copied from the primary MAC.
		 */
11540 if (IS_MF_FCOE_AFEX(bp))
11541 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11542 } else {
11543 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11544 iscsi_mac_upper);
11545 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11546 iscsi_mac_lower);
11547 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11548
11549 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11550 fcoe_fip_mac_upper);
11551 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11552 fcoe_fip_mac_lower);
11553 bnx2x_set_mac_buf(fip_mac, val, val2);
11554 }
11555
	/* Disable iSCSI OOO if MAC configuration is invalid. */
11557 if (!is_valid_ether_addr(iscsi_mac)) {
11558 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11559 eth_zero_addr(iscsi_mac);
11560 }
11561
	/* Disable FCoE if MAC configuration is invalid. */
11563 if (!is_valid_ether_addr(fip_mac)) {
11564 bp->flags |= NO_FCOE_FLAG;
11565 eth_zero_addr(bp->fip_mac);
11566 }
11567}
11568
11569static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11570{
11571 u32 val, val2;
11572 int func = BP_ABS_FUNC(bp);
11573 int port = BP_PORT(bp);
11574
	/* Zero primary MAC configuration */
11576 eth_zero_addr(bp->dev->dev_addr);
11577
11578 if (BP_NOMCP(bp)) {
11579 BNX2X_ERROR("warning: random MAC workaround active\n");
11580 eth_hw_addr_random(bp->dev);
11581 } else if (IS_MF(bp)) {
11582 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11583 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11584 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11585 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11586 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11587
11588 if (CNIC_SUPPORT(bp))
11589 bnx2x_get_cnic_mac_hwinfo(bp);
11590 } else {
		/* in SF read MACs from port configuration */
11592 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11593 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11594 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11595
11596 if (CNIC_SUPPORT(bp))
11597 bnx2x_get_cnic_mac_hwinfo(bp);
11598 }
11599
11600 if (!BP_NOMCP(bp)) {
		/* Read physical port identifier from shmem */
11602 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11603 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11604 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11605 bp->flags |= HAS_PHYS_PORT_ID;
11606 }
11607
11608 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11609
11610 if (!is_valid_ether_addr(bp->dev->dev_addr))
11611 dev_err(&bp->pdev->dev,
11612 "bad Ethernet MAC address configuration: %pM\n"
11613 "change it manually before bringing up the appropriate network interface\n",
11614 bp->dev->dev_addr);
11615}
11616
11617static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11618{
11619 int tmp;
11620 u32 cfg;
11621
11622 if (IS_VF(bp))
11623 return false;
11624
11625 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
		/* Take function: tmp = func */
11627 tmp = BP_ABS_FUNC(bp);
11628 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11629 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11630 } else {
		/* Take port: tmp = port */
11632 tmp = BP_PORT(bp);
11633 cfg = SHMEM_RD(bp,
11634 dev_info.port_hw_config[tmp].generic_features);
11635 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11636 }
11637 return cfg;
11638}
11639
11640static void validate_set_si_mode(struct bnx2x *bp)
11641{
11642 u8 func = BP_ABS_FUNC(bp);
11643 u32 val;
11644
11645 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11646
	/* check for legal mac (upper bytes) */
11648 if (val != 0xffff) {
11649 bp->mf_mode = MULTI_FUNCTION_SI;
11650 bp->mf_config[BP_VN(bp)] =
11651 MF_CFG_RD(bp, func_mf_config[func].config);
11652 } else
11653 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11654}
11655
11656static int bnx2x_get_hwinfo(struct bnx2x *bp)
11657{
11658 int func = BP_ABS_FUNC(bp);
11659 int vn;
11660 u32 val = 0, val2 = 0;
11661 int rc = 0;
11662
	/* Validate that chip access is feasible */
11664 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11665 dev_err(&bp->pdev->dev,
11666 "Chip read returns all Fs. Preventing probe from continuing\n");
11667 return -EINVAL;
11668 }
11669
11670 bnx2x_get_common_hwinfo(bp);
11671
	/* initialize IGU parameters */
11675 if (CHIP_IS_E1x(bp)) {
11676 bp->common.int_block = INT_BLOCK_HC;
11677
11678 bp->igu_dsb_id = DEF_SB_IGU_ID;
11679 bp->igu_base_sb = 0;
11680 } else {
11681 bp->common.int_block = INT_BLOCK_IGU;
11682
		/* do not allow device reset during IGU info processing */
11684 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11685
11686 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11687
11688 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11689 int tout = 5000;
11690
11691 BNX2X_DEV_INFO("FORCING Normal Mode\n");
11692
11693 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11694 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11695 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11696
11697 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11698 tout--;
11699 usleep_range(1000, 2000);
11700 }
11701
11702 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11703 dev_err(&bp->pdev->dev,
11704 "FORCING Normal Mode failed!!!\n");
11705 bnx2x_release_hw_lock(bp,
11706 HW_LOCK_RESOURCE_RESET);
11707 return -EPERM;
11708 }
11709 }
11710
11711 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11712 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11713 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11714 } else
11715 BNX2X_DEV_INFO("IGU Normal Mode\n");
11716
11717 rc = bnx2x_get_igu_cam_info(bp);
11718 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11719 if (rc)
11720 return rc;
11721 }
11722
	/* set base FW non-default (fast path) status block id; this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
11728 if (CHIP_IS_E1x(bp))
11729 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11730 else
		/* On non-E1x chips FW and IGU status blocks map 1:1 (Rx and
		 * Tx of the same queue are indicated on the same IGU SB), so
		 * use the same value for both.
		 */
11735 bp->base_fw_ndsb = bp->igu_base_sb;
11736
11737 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
11738 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11739 bp->igu_sb_cnt, bp->base_fw_ndsb);
11740
	/* Initialize MF configuration */
11745 bp->mf_ov = 0;
11746 bp->mf_mode = 0;
11747 bp->mf_sub_mode = 0;
11748 vn = BP_VN(bp);
11749
11750 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
11751 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
11752 bp->common.shmem2_base, SHMEM2_RD(bp, size),
11753 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
11754
11755 if (SHMEM2_HAS(bp, mf_cfg_addr))
11756 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
11757 else
11758 bp->common.mf_cfg_base = bp->common.shmem_base +
11759 offsetof(struct shmem_region, func_mb) +
11760 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
11761
		/* get mf configuration:
		 * 1. Existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
11769 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
11771 val = SHMEM_RD(bp,
11772 dev_info.shared_feature_config.config);
11773 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
11774
11775 switch (val) {
11776 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
11777 validate_set_si_mode(bp);
11778 break;
11779 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
11780 if ((!CHIP_IS_E1x(bp)) &&
11781 (MF_CFG_RD(bp, func_mf_config[func].
11782 mac_upper) != 0xffff) &&
11783 (SHMEM2_HAS(bp,
11784 afex_driver_support))) {
11785 bp->mf_mode = MULTI_FUNCTION_AFEX;
11786 bp->mf_config[vn] = MF_CFG_RD(bp,
11787 func_mf_config[func].config);
11788 } else {
11789 BNX2X_DEV_INFO("can not configure afex mode\n");
11790 }
11791 break;
11792 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
11794 val = MF_CFG_RD(bp,
11795 func_mf_config[FUNC_0].e1hov_tag);
11796 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
11797
11798 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11799 bp->mf_mode = MULTI_FUNCTION_SD;
11800 bp->mf_config[vn] = MF_CFG_RD(bp,
11801 func_mf_config[func].config);
11802 } else
11803 BNX2X_DEV_INFO("illegal OV for SD\n");
11804 break;
11805 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
11806 bp->mf_mode = MULTI_FUNCTION_SD;
11807 bp->mf_sub_mode = SUB_MF_MODE_UFP;
11808 bp->mf_config[vn] =
11809 MF_CFG_RD(bp,
11810 func_mf_config[func].config);
11811 break;
11812 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
11813 bp->mf_config[vn] = 0;
11814 break;
11815 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
11816 val2 = SHMEM_RD(bp,
11817 dev_info.shared_hw_config.config_3);
11818 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
11819 switch (val2) {
11820 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
11821 validate_set_si_mode(bp);
11822 bp->mf_sub_mode =
11823 SUB_MF_MODE_NPAR1_DOT_5;
11824 break;
11825 default:
					/* Unknown configuration: reset mf_config */
					bp->mf_config[vn] = 0;
					BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
						       val2);
11830 }
11831 break;
11832 default:
			/* Unknown configuration: reset mf_config */
11834 bp->mf_config[vn] = 0;
11835 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
11836 }
11837 }
11838
11839 BNX2X_DEV_INFO("%s function mode\n",
11840 IS_MF(bp) ? "multi" : "single");
11841
11842 switch (bp->mf_mode) {
11843 case MULTI_FUNCTION_SD:
11844 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
11845 FUNC_MF_CFG_E1HOV_TAG_MASK;
11846 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11847 bp->mf_ov = val;
11848 bp->path_has_ovlan = true;
11849
11850 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
11851 func, bp->mf_ov, bp->mf_ov);
11852 } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
11853 dev_err(&bp->pdev->dev,
11854 "Unexpected - no valid MF OV for func %d in UFP mode\n",
11855 func);
11856 bp->path_has_ovlan = true;
11857 } else {
11858 dev_err(&bp->pdev->dev,
11859 "No valid MF OV for func %d, aborting\n",
11860 func);
11861 return -EPERM;
11862 }
11863 break;
11864 case MULTI_FUNCTION_AFEX:
11865 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
11866 break;
11867 case MULTI_FUNCTION_SI:
11868 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
11869 func);
11870 break;
11871 default:
11872 if (vn) {
11873 dev_err(&bp->pdev->dev,
11874 "VN %d is in a single function mode, aborting\n",
11875 vn);
11876 return -EPERM;
11877 }
11878 break;
11879 }
11880
		/* The MF configuration is shared between the two ports of a
		 * path; check whether the other port on this path needs an
		 * outer VLAN as well.
		 */
11886 if (CHIP_MODE_IS_4_PORT(bp) &&
11887 !bp->path_has_ovlan &&
11888 !IS_MF(bp) &&
11889 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11890 u8 other_port = !BP_PORT(bp);
11891 u8 other_func = BP_PATH(bp) + 2*other_port;
11892 val = MF_CFG_RD(bp,
11893 func_mf_config[other_func].e1hov_tag);
11894 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
11895 bp->path_has_ovlan = true;
11896 }
11897 }
11898
	/* adjust igu_sb_cnt to MF for E1H */
11900 if (CHIP_IS_E1H(bp) && IS_MF(bp))
11901 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
11902
	/* port info */
11904 bnx2x_get_port_hwinfo(bp);
11905
	/* Get MAC addresses */
11907 bnx2x_get_mac_hwinfo(bp);
11908
11909 bnx2x_get_cnic_info(bp);
11910
11911 return rc;
11912}
11913
11914static void bnx2x_read_fwinfo(struct bnx2x *bp)
11915{
11916 int cnt, i, block_end, rodi;
11917 char vpd_start[BNX2X_VPD_LEN+1];
11918 char str_id_reg[VENDOR_ID_LEN+1];
11919 char str_id_cap[VENDOR_ID_LEN+1];
11920 char *vpd_data;
11921 char *vpd_extended_data = NULL;
11922 u8 len;
11923
11924 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
11925 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
11926
11927 if (cnt < BNX2X_VPD_LEN)
11928 goto out_not_found;
11929
	/* VPD RO tag should be first tag after identifier string, hence
	 * we should be able to find it in first BNX2X_VPD_LEN chars
	 */
11933 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
11934 PCI_VPD_LRDT_RO_DATA);
11935 if (i < 0)
11936 goto out_not_found;
11937
11938 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
11939 pci_vpd_lrdt_size(&vpd_start[i]);
11940
11941 i += PCI_VPD_LRDT_TAG_SIZE;
11942
11943 if (block_end > BNX2X_VPD_LEN) {
11944 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
11945 if (vpd_extended_data == NULL)
11946 goto out_not_found;
11947
		/* read rest of vpd image into vpd_extended_data */
11949 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
11950 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
11951 block_end - BNX2X_VPD_LEN,
11952 vpd_extended_data + BNX2X_VPD_LEN);
11953 if (cnt < (block_end - BNX2X_VPD_LEN))
11954 goto out_not_found;
11955 vpd_data = vpd_extended_data;
11956 } else
11957 vpd_data = vpd_start;
11958
	/* now vpd_data holds the full VPD content in either case */
11961 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11962 PCI_VPD_RO_KEYWORD_MFR_ID);
11963 if (rodi < 0)
11964 goto out_not_found;
11965
11966 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11967
11968 if (len != VENDOR_ID_LEN)
11969 goto out_not_found;
11970
11971 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11972
	/* vendor specific info */
11974 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
11975 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
11976 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
11977 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
11978
11979 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11980 PCI_VPD_RO_KEYWORD_VENDOR0);
11981 if (rodi >= 0) {
11982 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11983
11984 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11985
11986 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
11987 memcpy(bp->fw_ver, &vpd_data[rodi], len);
11988 bp->fw_ver[len] = ' ';
11989 }
11990 }
11991 kfree(vpd_extended_data);
11992 return;
11993 }
11994out_not_found:
11995 kfree(vpd_extended_data);
11996 return;
11997}
11998
11999static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12000{
12001 u32 flags = 0;
12002
12003 if (CHIP_REV_IS_FPGA(bp))
12004 SET_FLAGS(flags, MODE_FPGA);
12005 else if (CHIP_REV_IS_EMUL(bp))
12006 SET_FLAGS(flags, MODE_EMUL);
12007 else
12008 SET_FLAGS(flags, MODE_ASIC);
12009
12010 if (CHIP_MODE_IS_4_PORT(bp))
12011 SET_FLAGS(flags, MODE_PORT4);
12012 else
12013 SET_FLAGS(flags, MODE_PORT2);
12014
12015 if (CHIP_IS_E2(bp))
12016 SET_FLAGS(flags, MODE_E2);
12017 else if (CHIP_IS_E3(bp)) {
12018 SET_FLAGS(flags, MODE_E3);
12019 if (CHIP_REV(bp) == CHIP_REV_Ax)
12020 SET_FLAGS(flags, MODE_E3_A0);
12021 else
12022 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12023 }
12024
12025 if (IS_MF(bp)) {
12026 SET_FLAGS(flags, MODE_MF);
12027 switch (bp->mf_mode) {
12028 case MULTI_FUNCTION_SD:
12029 SET_FLAGS(flags, MODE_MF_SD);
12030 break;
12031 case MULTI_FUNCTION_SI:
12032 SET_FLAGS(flags, MODE_MF_SI);
12033 break;
12034 case MULTI_FUNCTION_AFEX:
12035 SET_FLAGS(flags, MODE_MF_AFEX);
12036 break;
12037 }
12038 } else
12039 SET_FLAGS(flags, MODE_SF);
12040
12041#if defined(__LITTLE_ENDIAN)
12042 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12043#else
12044 SET_FLAGS(flags, MODE_BIG_ENDIAN);
12045#endif
12046 INIT_MODE_FLAGS(bp) = flags;
12047}
12048
12049static int bnx2x_init_bp(struct bnx2x *bp)
12050{
12051 int func;
12052 int rc;
12053
12054 mutex_init(&bp->port.phy_mutex);
12055 mutex_init(&bp->fw_mb_mutex);
12056 mutex_init(&bp->drv_info_mutex);
12057 sema_init(&bp->stats_lock, 1);
12058 bp->drv_info_mng_owner = false;
12059
12060 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12061 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12062 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12063 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12064 if (IS_PF(bp)) {
12065 rc = bnx2x_get_hwinfo(bp);
12066 if (rc)
12067 return rc;
12068 } else {
12069 eth_zero_addr(bp->dev->dev_addr);
12070 }
12071
12072 bnx2x_set_modes_bitmap(bp);
12073
12074 rc = bnx2x_alloc_mem_bp(bp);
12075 if (rc)
12076 return rc;
12077
12078 bnx2x_read_fwinfo(bp);
12079
12080 func = BP_FUNC(bp);
12081
	/* need to reset chip if undi was active */
12083 if (IS_PF(bp) && !BP_NOMCP(bp)) {
		/* init fw_seq */
12085 bp->fw_seq =
12086 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12087 DRV_MSG_SEQ_NUMBER_MASK;
12088 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12089
12090 rc = bnx2x_prev_unload(bp);
12091 if (rc) {
12092 bnx2x_free_mem_bp(bp);
12093 return rc;
12094 }
12095 }
12096
12097 if (CHIP_REV_IS_FPGA(bp))
12098 dev_err(&bp->pdev->dev, "FPGA detected\n");
12099
12100 if (BP_NOMCP(bp) && (func == 0))
12101 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12102
12103 bp->disable_tpa = disable_tpa;
12104 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);

	/* Reduce memory usage in kdump environment by disabling TPA */
12106 bp->disable_tpa |= is_kdump_kernel();
12107
	/* Set TPA flags */
12109 if (bp->disable_tpa) {
12110 bp->dev->hw_features &= ~NETIF_F_LRO;
12111 bp->dev->features &= ~NETIF_F_LRO;
12112 }
12113
12114 if (CHIP_IS_E1(bp))
12115 bp->dropless_fc = 0;
12116 else
12117 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12118
12119 bp->mrrs = mrrs;
12120
12121 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12122 if (IS_VF(bp))
12123 bp->rx_ring_size = MAX_RX_AVAIL;
12124
	/* make sure that the numbers are in the right granularity */
12126 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12127 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12128
12129 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12130
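	/* Set up the periodic driver timer */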
12131 init_timer(&bp->timer);
12132 bp->timer.expires = jiffies + bp->current_interval;
12133 bp->timer.data = (unsigned long) bp;
12134 bp->timer.function = bnx2x_timer;
12135
12136 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12137 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12138 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12139 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
12140 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12141 bnx2x_dcbx_init_params(bp);
12142 } else {
12143 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12144 }
12145
12146 if (CHIP_IS_E1x(bp))
12147 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12148 else
12149 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12150
	/* Set the number of CoS (traffic classes) supported by the chip */
12152 if (IS_VF(bp))
12153 bp->max_cos = 1;
12154 else if (CHIP_IS_E1x(bp))
12155 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12156 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12157 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12158 else if (CHIP_IS_E3B0(bp))
12159 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12160 else
12161 BNX2X_ERR("unknown chip %x revision %x\n",
12162 CHIP_NUM(bp), CHIP_REV(bp));
12163 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12164
	/* We need at least one default status block for slow-path events,
	 * second status block for the L2 queue, and a third status block for
	 * CNIC if supported.
	 */
12169 if (IS_VF(bp))
12170 bp->min_msix_vec_cnt = 1;
12171 else if (CNIC_SUPPORT(bp))
12172 bp->min_msix_vec_cnt = 3;
12173 else
12174 bp->min_msix_vec_cnt = 2;
	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
12176
12177 bp->dump_preset_idx = 1;
12178
12179 if (CHIP_IS_E3B0(bp))
12180 bp->flags |= PTP_SUPPORTED;
12181
12182 return rc;
12183}
12184
/****************************************************************************
* General service functions
****************************************************************************/

/* net_device service functions */

/* called with rtnl_lock */
12194static int bnx2x_open(struct net_device *dev)
12195{
12196 struct bnx2x *bp = netdev_priv(dev);
12197 int rc;
12198
12199 bp->stats_init = true;
12200
12201 netif_carrier_off(dev);
12202
12203 bnx2x_set_power_state(bp, PCI_D0);
12204
	/* If parity happened during the unload, then attentions and/or
	 * RECOVERY_IN_PROGRESS may still be set. In this case we want the
	 * first function loaded on the current engine to complete the
	 * recovery. Parity recovery is only relevant for the PF driver.
	 */
12211 if (IS_PF(bp)) {
12212 int other_engine = BP_PATH(bp) ? 0 : 1;
12213 bool other_load_status, load_status;
12214 bool global = false;
12215
12216 other_load_status = bnx2x_get_load_status(bp, other_engine);
12217 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12218 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12219 bnx2x_chk_parity_attn(bp, &global, true)) {
12220 do {
				/* If there are attentions and they are in
				 * global blocks, set the GLOBAL_RESET bit
				 * regardless whether it will be this function
				 * that will complete the recovery or not.
				 */
12226 if (global)
12227 bnx2x_set_reset_global(bp);
12228
				/* Only the first function on the current
				 * engine should try to recover in open. In case
				 * of attentions in global blocks only the first
				 * in the chip should try to recover.
				 */
12234 if ((!load_status &&
12235 (!global || !other_load_status)) &&
12236 bnx2x_trylock_leader_lock(bp) &&
12237 !bnx2x_leader_reset(bp)) {
12238 netdev_info(bp->dev,
12239 "Recovered in open\n");
12240 break;
12241 }
12242
				/* recovery has failed... */
12244 bnx2x_set_power_state(bp, PCI_D3hot);
12245 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12246
12247 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12248 "If you still see this message after a few retries then power cycle is required.\n");
12249
12250 return -EAGAIN;
12251 } while (0);
12252 }
12253 }
12254
12255 bp->recovery_state = BNX2X_RECOVERY_DONE;
12256 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12257 if (rc)
12258 return rc;
12259 return 0;
12260}
12261
12262
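/* called with rtnl_lock */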
12263static int bnx2x_close(struct net_device *dev)
12264{
12265 struct bnx2x *bp = netdev_priv(dev);
12266
	/* Unload the driver, release IRQs */
12268 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12269
12270 return 0;
12271}
12272
12273static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12274 struct bnx2x_mcast_ramrod_params *p)
12275{
12276 int mc_count = netdev_mc_count(bp->dev);
12277 struct bnx2x_mcast_list_elem *mc_mac =
12278 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
12279 struct netdev_hw_addr *ha;
12280
12281 if (!mc_mac)
12282 return -ENOMEM;
12283
12284 INIT_LIST_HEAD(&p->mcast_list);
12285
12286 netdev_for_each_mc_addr(ha, bp->dev) {
12287 mc_mac->mac = bnx2x_mc_addr(ha);
12288 list_add_tail(&mc_mac->link, &p->mcast_list);
12289 mc_mac++;
12290 }
12291
12292 p->mcast_list_len = mc_count;
12293
12294 return 0;
12295}
12296
12297static void bnx2x_free_mcast_macs_list(
12298 struct bnx2x_mcast_ramrod_params *p)
12299{
12300 struct bnx2x_mcast_list_elem *mc_mac =
12301 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
12302 link);
12303
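	/* The elements were allocated as one kcalloc() block in
	 * bnx2x_init_mcast_macs_list(), so freeing the first element
	 * releases the entire list.
	 */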
12304 WARN_ON(!mc_mac);
12305 kfree(mc_mac);
12306}
12307
/**
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 */
12315static int bnx2x_set_uc_list(struct bnx2x *bp)
12316{
12317 int rc;
12318 struct net_device *dev = bp->dev;
12319 struct netdev_hw_addr *ha;
12320 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12321 unsigned long ramrod_flags = 0;
12322
	/* first schedule a cleanup up of old configuration */
12324 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12325 if (rc < 0) {
12326 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12327 return rc;
12328 }
12329
12330 netdev_for_each_uc_addr(ha, dev) {
12331 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12332 BNX2X_UC_LIST_MAC, &ramrod_flags);
12333 if (rc == -EEXIST) {
12334 DP(BNX2X_MSG_SP,
12335 "Failed to schedule ADD operations: %d\n", rc);
			/* do not treat adding same MAC as error */
12337 rc = 0;
12338
12339 } else if (rc < 0) {
12340
12341 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12342 rc);
12343 return rc;
12344 }
12345 }
12346
	/* Execute the pending commands */
12348 __set_bit(RAMROD_CONT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
				 BNX2X_UC_LIST_MAC, &ramrod_flags);
12351}
12352
12353static int bnx2x_set_mc_list(struct bnx2x *bp)
12354{
12355 struct net_device *dev = bp->dev;
12356 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12357 int rc = 0;
12358
12359 rparam.mcast_obj = &bp->mcast_obj;
12360
	/* first, clear all configured multicast MACs */
12362 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12363 if (rc < 0) {
12364 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12365 return rc;
12366 }
12367
	/* then, configure a new MACs list */
12369 if (netdev_mc_count(dev)) {
12370 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
12371 if (rc) {
12372 BNX2X_ERR("Failed to create multicast MACs list: %d\n",
12373 rc);
12374 return rc;
12375 }
12376
		/* Now add the new MACs */
12378 rc = bnx2x_config_mcast(bp, &rparam,
12379 BNX2X_MCAST_CMD_ADD);
12380 if (rc < 0)
12381 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12382 rc);
12383
12384 bnx2x_free_mcast_macs_list(&rparam);
12385 }
12386
12387 return rc;
12388}
12389
12390
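/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */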
12391static void bnx2x_set_rx_mode(struct net_device *dev)
12392{
12393 struct bnx2x *bp = netdev_priv(dev);
12394
12395 if (bp->state != BNX2X_STATE_OPEN) {
12396 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12397 return;
12398 } else {
		/* Schedule an SP task to handle rest of change */
12400 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12401 NETIF_MSG_IFUP);
12402 }
12403}
12404
12405void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12406{
12407 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12408
12409 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12410
12411 netif_addr_lock_bh(bp->dev);
12412
12413 if (bp->dev->flags & IFF_PROMISC) {
12414 rx_mode = BNX2X_RX_MODE_PROMISC;
12415 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12416 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12417 CHIP_IS_E1(bp))) {
12418 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12419 } else {
12420 if (IS_PF(bp)) {
			/* some multicasts */
12422 if (bnx2x_set_mc_list(bp) < 0)
12423 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12424
			/* release bh lock, as bnx2x_set_uc_list might sleep */
12426 netif_addr_unlock_bh(bp->dev);
12427 if (bnx2x_set_uc_list(bp) < 0)
12428 rx_mode = BNX2X_RX_MODE_PROMISC;
12429 netif_addr_lock_bh(bp->dev);
12430 } else {
			/* configuring mcast to a vf involves sleeping (when we
			 * wait for the pf's response).
			 */
12434 bnx2x_schedule_sp_rtnl(bp,
12435 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12436 }
12437 }
12438
12439 bp->rx_mode = rx_mode;
12440
12441 if (IS_MF_ISCSI_ONLY(bp))
12442 bp->rx_mode = BNX2X_RX_MODE_NONE;
12443
	/* Schedule the rx_mode command */
12445 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12446 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12447 netif_addr_unlock_bh(bp->dev);
12448 return;
12449 }
12450
12451 if (IS_PF(bp)) {
12452 bnx2x_set_storm_rx_mode(bp);
12453 netif_addr_unlock_bh(bp->dev);
12454 } else {
		/* VF will need to request the PF to make this change, and so
		 * the VF needs to release the bottom-half lock prior to the
		 * request (as it will likely require sleep on the VF side)
		 */
12459 netif_addr_unlock_bh(bp->dev);
12460 bnx2x_vfpf_storm_rx_mode(bp);
12461 }
12462}
12463
12464
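/* called with rtnl_lock */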
12465static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12466 int devad, u16 addr)
12467{
12468 struct bnx2x *bp = netdev_priv(netdev);
12469 u16 value;
12470 int rc;
12471
12472 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12473 prtad, devad, addr);
12474
	/* The HW expects different devad if CL22 is used */
12476 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12477
12478 bnx2x_acquire_phy_lock(bp);
12479 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12480 bnx2x_release_phy_lock(bp);
12481 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12482
12483 if (!rc)
12484 rc = value;
12485 return rc;
12486}
12487
12488
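/* called with rtnl_lock */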
12489static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12490 u16 addr, u16 value)
12491{
12492 struct bnx2x *bp = netdev_priv(netdev);
12493 int rc;
12494
12495 DP(NETIF_MSG_LINK,
12496 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12497 prtad, devad, addr, value);
12498
	/* The HW expects different devad if CL22 is used */
12500 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12501
12502 bnx2x_acquire_phy_lock(bp);
12503 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12504 bnx2x_release_phy_lock(bp);
12505 return rc;
12506}
12507
12508
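/* called with rtnl_lock */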
12509static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12510{
12511 struct bnx2x *bp = netdev_priv(dev);
12512 struct mii_ioctl_data *mdio = if_mii(ifr);
12513
12514 if (!netif_running(dev))
12515 return -EAGAIN;
12516
12517 switch (cmd) {
12518 case SIOCSHWTSTAMP:
12519 return bnx2x_hwtstamp_ioctl(bp, ifr);
12520 default:
12521 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12522 mdio->phy_id, mdio->reg_num, mdio->val_in);
12523 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12524 }
12525}
12526
12527#ifdef CONFIG_NET_POLL_CONTROLLER
12528static void poll_bnx2x(struct net_device *dev)
12529{
12530 struct bnx2x *bp = netdev_priv(dev);
12531 int i;
12532
12533 for_each_eth_queue(bp, i) {
12534 struct bnx2x_fastpath *fp = &bp->fp[i];
12535 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12536 }
12537}
12538#endif
12539
12540static int bnx2x_validate_addr(struct net_device *dev)
12541{
12542 struct bnx2x *bp = netdev_priv(dev);
12543
	/* query the bulletin board for mac address configured by the PF */
12545 if (IS_VF(bp))
12546 bnx2x_sample_bulletin(bp);
12547
12548 if (!is_valid_ether_addr(dev->dev_addr)) {
12549 BNX2X_ERR("Non-valid Ethernet address\n");
12550 return -EADDRNOTAVAIL;
12551 }
12552 return 0;
12553}
12554
12555static int bnx2x_get_phys_port_id(struct net_device *netdev,
12556 struct netdev_phys_item_id *ppid)
12557{
12558 struct bnx2x *bp = netdev_priv(netdev);
12559
12560 if (!(bp->flags & HAS_PHYS_PORT_ID))
12561 return -EOPNOTSUPP;
12562
12563 ppid->id_len = sizeof(bp->phys_port_id);
12564 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12565
12566 return 0;
12567}
12568
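/* Drop offload features that cannot be used with this particular skb,
 * e.g. an unsupported VLAN or tunnel encapsulation.
 */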
12569static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12570 struct net_device *dev,
12571 netdev_features_t features)
12572{
12573 features = vlan_features_check(skb, features);
12574 return vxlan_features_check(skb, features);
12575}
12576
12577static const struct net_device_ops bnx2x_netdev_ops = {
12578 .ndo_open = bnx2x_open,
12579 .ndo_stop = bnx2x_close,
12580 .ndo_start_xmit = bnx2x_start_xmit,
12581 .ndo_select_queue = bnx2x_select_queue,
12582 .ndo_set_rx_mode = bnx2x_set_rx_mode,
12583 .ndo_set_mac_address = bnx2x_change_mac_addr,
12584 .ndo_validate_addr = bnx2x_validate_addr,
12585 .ndo_do_ioctl = bnx2x_ioctl,
12586 .ndo_change_mtu = bnx2x_change_mtu,
12587 .ndo_fix_features = bnx2x_fix_features,
12588 .ndo_set_features = bnx2x_set_features,
12589 .ndo_tx_timeout = bnx2x_tx_timeout,
12590#ifdef CONFIG_NET_POLL_CONTROLLER
12591 .ndo_poll_controller = poll_bnx2x,
12592#endif
12593 .ndo_setup_tc = bnx2x_setup_tc,
12594#ifdef CONFIG_BNX2X_SRIOV
12595 .ndo_set_vf_mac = bnx2x_set_vf_mac,
12596 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
12597 .ndo_get_vf_config = bnx2x_get_vf_config,
12598#endif
12599#ifdef NETDEV_FCOE_WWNN
12600 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
12601#endif
12602
12603#ifdef CONFIG_NET_RX_BUSY_POLL
12604 .ndo_busy_poll = bnx2x_low_latency_recv,
12605#endif
12606 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
12607 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
12608 .ndo_features_check = bnx2x_features_check,
12609};
12610
12611static int bnx2x_set_coherency_mask(struct bnx2x *bp)
12612{
12613 struct device *dev = &bp->pdev->dev;
12614
12615 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
12616 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
12617 dev_err(dev, "System does not support DMA, aborting\n");
12618 return -EIO;
12619 }
12620
12621 return 0;
12622}
12623
12624static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
12625{
12626 if (bp->flags & AER_ENABLED) {
12627 pci_disable_pcie_error_reporting(bp->pdev);
12628 bp->flags &= ~AER_ENABLED;
12629 }
12630}
12631
12632static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12633 struct net_device *dev, unsigned long board_type)
12634{
12635 int rc;
12636 u32 pci_cfg_dword;
12637 bool chip_is_e1x = (board_type == BCM57710 ||
12638 board_type == BCM57711 ||
12639 board_type == BCM57711E);
12640
12641 SET_NETDEV_DEV(dev, &pdev->dev);
12642
12643 bp->dev = dev;
12644 bp->pdev = pdev;
12645
12646 rc = pci_enable_device(pdev);
12647 if (rc) {
12648 dev_err(&bp->pdev->dev,
12649 "Cannot enable PCI device, aborting\n");
12650 goto err_out;
12651 }
12652
12653 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12654 dev_err(&bp->pdev->dev,
12655 "Cannot find PCI device base address, aborting\n");
12656 rc = -ENODEV;
12657 goto err_out_disable;
12658 }
12659
12660 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12661 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
12662 rc = -ENODEV;
12663 goto err_out_disable;
12664 }
12665
12666 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
12667 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
12668 PCICFG_REVESION_ID_ERROR_VAL) {
12669 pr_err("PCI device error, probably due to fan failure, aborting\n");
12670 rc = -ENODEV;
12671 goto err_out_disable;
12672 }
12673
12674 if (atomic_read(&pdev->enable_cnt) == 1) {
12675 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12676 if (rc) {
12677 dev_err(&bp->pdev->dev,
12678 "Cannot obtain PCI resources, aborting\n");
12679 goto err_out_disable;
12680 }
12681
12682 pci_set_master(pdev);
12683 pci_save_state(pdev);
12684 }
12685
12686 if (IS_PF(bp)) {
12687 if (!pdev->pm_cap) {
12688 dev_err(&bp->pdev->dev,
12689 "Cannot find power management capability, aborting\n");
12690 rc = -EIO;
12691 goto err_out_release;
12692 }
12693 }
12694
12695 if (!pci_is_pcie(pdev)) {
12696 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
12697 rc = -EIO;
12698 goto err_out_release;
12699 }
12700
12701 rc = bnx2x_set_coherency_mask(bp);
12702 if (rc)
12703 goto err_out_release;
12704
12705 dev->mem_start = pci_resource_start(pdev, 0);
12706 dev->base_addr = dev->mem_start;
12707 dev->mem_end = pci_resource_end(pdev, 0);
12708
12709 dev->irq = pdev->irq;
12710
12711 bp->regview = pci_ioremap_bar(pdev, 0);
12712 if (!bp->regview) {
12713 dev_err(&bp->pdev->dev,
12714 "Cannot map register space, aborting\n");
12715 rc = -ENOMEM;
12716 goto err_out_release;
12717 }
12718
	/* In E1/E1H use the pci device function given by the kernel.
	 * In E2/E3 read the physical function from the ME register, since
	 * these chips support Physical Device Assignment where the kernel
	 * BDF may be arbitrary (depending on the hypervisor).
	 */
12724 if (chip_is_e1x) {
12725 bp->pf_num = PCI_FUNC(pdev->devfn);
12726 } else {
		/* chip is E2/E3 */
12728 pci_read_config_dword(bp->pdev,
12729 PCICFG_ME_REGISTER, &pci_cfg_dword);
12730 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
12731 ME_REG_ABS_PF_NUM_SHIFT);
12732 }
12733 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
12734
	/* clean indirect addresses */
12736 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12737 PCICFG_VENDOR_ID_OFFSET);
12738
	/* EEH recovery requires a fundamental reset for this device */
12740 pdev->needs_freset = 1;
12741
	/* AER (Advanced Error reporting) configuration */
12743 rc = pci_enable_pcie_error_reporting(pdev);
12744 if (!rc)
12745 bp->flags |= AER_ENABLED;
12746 else
12747 BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
12748
	/* Clean the following indirect addresses for all functions since
	 * they are not used by the driver.
	 */
12753 if (IS_PF(bp)) {
12754 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
12755 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
12756 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
12757 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
12758
12759 if (chip_is_e1x) {
12760 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
12761 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
12762 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
12763 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
12764 }
12765
		/* Enable internal target-read (in case we are probed after PF
		 * FLR). Must be done prior to any BAR read access. Only for
		 * 57712 and up.
		 */
12770 if (!chip_is_e1x)
12771 REG_WR(bp,
12772 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
12773 }
12774
12775 dev->watchdog_timeo = TX_TIMEOUT;
12776
12777 dev->netdev_ops = &bnx2x_netdev_ops;
12778 bnx2x_set_ethtool_ops(bp, dev);
12779
12780 dev->priv_flags |= IFF_UNICAST_FLT;
12781
12782 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12783 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12784 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
12785 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12786 if (!chip_is_e1x) {
12787 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
12788 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
12789 dev->hw_enc_features =
12790 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12791 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12792 NETIF_F_GSO_IPIP |
12793 NETIF_F_GSO_SIT |
12794 NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
12795 }
12796
12797 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12798 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12799
12800 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
12801 dev->features |= NETIF_F_HIGHDMA;
12802
	/* Add Loopback capability to the device */
12804 dev->hw_features |= NETIF_F_LOOPBACK;
12805
12806#ifdef BCM_DCBNL
12807 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
12808#endif
12809
12810
12811 bp->mdio.prtad = MDIO_PRTAD_NONE;
12812 bp->mdio.mmds = 0;
12813 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12814 bp->mdio.dev = dev;
12815 bp->mdio.mdio_read = bnx2x_mdio_read;
12816 bp->mdio.mdio_write = bnx2x_mdio_write;
12817
12818 return 0;
12819
12820err_out_release:
12821 if (atomic_read(&pdev->enable_cnt) == 1)
12822 pci_release_regions(pdev);
12823
12824err_out_disable:
12825 pci_disable_device(pdev);
12826
12827err_out:
12828 return rc;
12829}
12830
12831static int bnx2x_check_firmware(struct bnx2x *bp)
12832{
12833 const struct firmware *firmware = bp->firmware;
12834 struct bnx2x_fw_file_hdr *fw_hdr;
12835 struct bnx2x_fw_file_section *sections;
12836 u32 offset, len, num_ops;
12837 __be16 *ops_offsets;
12838 int i;
12839 const u8 *fw_ver;
12840
12841 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
12842 BNX2X_ERR("Wrong FW size\n");
12843 return -EINVAL;
12844 }
12845
12846 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12847 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12848
	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
12851 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12852 offset = be32_to_cpu(sections[i].offset);
12853 len = be32_to_cpu(sections[i].len);
12854 if (offset + len > firmware->size) {
12855 BNX2X_ERR("Section %d length is out of bounds\n", i);
12856 return -EINVAL;
12857 }
12858 }
12859
	/* Likewise for the init_ops offsets */
12861 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12862 ops_offsets = (__force __be16 *)(firmware->data + offset);
12863 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12864
12865 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12866 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12867 BNX2X_ERR("Section offset %d is out of bounds\n", i);
12868 return -EINVAL;
12869 }
12870 }
12871
	/* Check FW version */
12873 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12874 fw_ver = firmware->data + offset;
12875 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12876 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12877 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12878 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12879 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
12880 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
12881 BCM_5710_FW_MAJOR_VERSION,
12882 BCM_5710_FW_MINOR_VERSION,
12883 BCM_5710_FW_REVISION_VERSION,
12884 BCM_5710_FW_ENGINEERING_VERSION);
12885 return -EINVAL;
12886 }
12887
12888 return 0;
12889}
12890
12891static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12892{
12893 const __be32 *source = (const __be32 *)_source;
12894 u32 *target = (u32 *)_target;
12895 u32 i;
12896
12897 for (i = 0; i < n/4; i++)
12898 target[i] = be32_to_cpu(source[i]);
12899}
12900
/* Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
12905static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12906{
12907 const __be32 *source = (const __be32 *)_source;
12908 struct raw_op *target = (struct raw_op *)_target;
12909 u32 i, j, tmp;
12910
12911 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12912 tmp = be32_to_cpu(source[j]);
12913 target[i].op = (tmp >> 24) & 0xff;
12914 target[i].offset = tmp & 0xffffff;
12915 target[i].raw_data = be32_to_cpu(source[j + 1]);
12916 }
12917}
12918
/* IRO array is stored in the following format:
 * {base(24bit):pad(8bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 */
12922static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
12923{
12924 const __be32 *source = (const __be32 *)_source;
12925 struct iro *target = (struct iro *)_target;
12926 u32 i, j, tmp;
12927
12928 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
12929 target[i].base = be32_to_cpu(source[j]);
12930 j++;
12931 tmp = be32_to_cpu(source[j]);
12932 target[i].m1 = (tmp >> 16) & 0xffff;
12933 target[i].m2 = tmp & 0xffff;
12934 j++;
12935 tmp = be32_to_cpu(source[j]);
12936 target[i].m3 = (tmp >> 16) & 0xffff;
12937 target[i].size = tmp & 0xffff;
12938 j++;
12939 }
12940}
12941
12942static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12943{
12944 const __be16 *source = (const __be16 *)_source;
12945 u16 *target = (u16 *)_target;
12946 u32 i;
12947
12948 for (i = 0; i < n/2; i++)
12949 target[i] = be16_to_cpu(source[i]);
12950}
12951
12952#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12953do { \
12954 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12955 bp->arr = kmalloc(len, GFP_KERNEL); \
12956 if (!bp->arr) \
12957 goto lbl; \
12958 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12959 (u8 *)bp->arr, len); \
12960} while (0)
12961
12962static int bnx2x_init_firmware(struct bnx2x *bp)
12963{
12964 const char *fw_file_name;
12965 struct bnx2x_fw_file_hdr *fw_hdr;
12966 int rc;
12967
12968 if (bp->firmware)
12969 return 0;
12970
12971 if (CHIP_IS_E1(bp))
12972 fw_file_name = FW_FILE_NAME_E1;
12973 else if (CHIP_IS_E1H(bp))
12974 fw_file_name = FW_FILE_NAME_E1H;
12975 else if (!CHIP_IS_E1x(bp))
12976 fw_file_name = FW_FILE_NAME_E2;
12977 else {
12978 BNX2X_ERR("Unsupported chip revision\n");
12979 return -EINVAL;
12980 }
12981 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
12982
12983 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
12984 if (rc) {
12985 BNX2X_ERR("Can't load firmware file %s\n",
12986 fw_file_name);
12987 goto request_firmware_exit;
12988 }
12989
12990 rc = bnx2x_check_firmware(bp);
12991 if (rc) {
12992 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
12993 goto request_firmware_exit;
12994 }
12995
12996 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12997
	/* Initialize the pointers to the init arrays */
	/* Blob */
13000 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13001
	/* Opcodes */
13003 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13004
	/* Offsets */
13006 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13007 be16_to_cpu_n);
13008
	/* STORMs firmware */
13010 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13011 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13012 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13013 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13014 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13015 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13016 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13017 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13018 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13019 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13020 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13021 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13022 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13023 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13024 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13025 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13026
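	/* IRO */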
13027 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13028
13029 return 0;
13030
13031iro_alloc_err:
13032 kfree(bp->init_ops_offsets);
13033init_offsets_alloc_err:
13034 kfree(bp->init_ops);
13035init_ops_alloc_err:
13036 kfree(bp->init_data);
13037request_firmware_exit:
13038 release_firmware(bp->firmware);
13039 bp->firmware = NULL;
13040
13041 return rc;
13042}
13043
13044static void bnx2x_release_firmware(struct bnx2x *bp)
13045{
13046 kfree(bp->init_ops_offsets);
13047 kfree(bp->init_ops);
13048 kfree(bp->init_data);
13049 release_firmware(bp->firmware);
13050 bp->firmware = NULL;
13051}
13052
13053static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13054 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13055 .init_hw_cmn = bnx2x_init_hw_common,
13056 .init_hw_port = bnx2x_init_hw_port,
13057 .init_hw_func = bnx2x_init_hw_func,
13058
13059 .reset_hw_cmn = bnx2x_reset_common,
13060 .reset_hw_port = bnx2x_reset_port,
13061 .reset_hw_func = bnx2x_reset_func,
13062
13063 .gunzip_init = bnx2x_gunzip_init,
13064 .gunzip_end = bnx2x_gunzip_end,
13065
13066 .init_fw = bnx2x_init_firmware,
13067 .release_fw = bnx2x_release_firmware,
13068};
13069
13070void bnx2x__init_func_obj(struct bnx2x *bp)
13071{
	/* Prepare DMAE related driver resources */
13073 bnx2x_setup_dmae(bp);
13074
13075 bnx2x_init_func_obj(bp, &bp->func_obj,
13076 bnx2x_sp(bp, func_rdata),
13077 bnx2x_sp_mapping(bp, func_rdata),
13078 bnx2x_sp(bp, func_afex_rdata),
13079 bnx2x_sp_mapping(bp, func_afex_rdata),
13080 &bnx2x_func_sp_drv);
13081}
13082
13083
13084static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13085{
13086 int cid_count = BNX2X_L2_MAX_CID(bp);
13087
13088 if (IS_SRIOV(bp))
13089 cid_count += BNX2X_VF_CIDS;
13090
13091 if (CNIC_SUPPORT(bp))
13092 cid_count += CNIC_CID_MAX;
13093
13094 return roundup(cid_count, QM_CID_ROUND);
13095}
13096
/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 * @pdev: pci device
 * @cnic_cnt: number of SBs reserved for CNIC
 */
13103static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13104{
13105 int index;
13106 u16 control = 0;
13107
	/*
	 * If MSI-X is not supported - return number of SBs needed to support
	 * one fast path queue: one FP queue + SB for CNIC
	 */
13112 if (!pdev->msix_cap) {
13113 dev_info(&pdev->dev, "no msix capability found\n");
13114 return 1 + cnic_cnt;
13115 }
13116 dev_info(&pdev->dev, "msix capability found\n");
13117
	/*
	 * The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: number of all SBs
	 * without the default SB.
	 */
13125 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13126
13127 index = control & PCI_MSIX_FLAGS_QSIZE;
13128
13129 return index;
13130}
13131
13132static int set_max_cos_est(int chip_id)
13133{
13134 switch (chip_id) {
13135 case BCM57710:
13136 case BCM57711:
13137 case BCM57711E:
13138 return BNX2X_MULTI_TX_COS_E1X;
13139 case BCM57712:
13140 case BCM57712_MF:
13141 return BNX2X_MULTI_TX_COS_E2_E3A0;
13142 case BCM57800:
13143 case BCM57800_MF:
13144 case BCM57810:
13145 case BCM57810_MF:
13146 case BCM57840_4_10:
13147 case BCM57840_2_20:
13148 case BCM57840_O:
13149 case BCM57840_MFO:
13150 case BCM57840_MF:
13151 case BCM57811:
13152 case BCM57811_MF:
13153 return BNX2X_MULTI_TX_COS_E3B0;
13154 case BCM57712_VF:
13155 case BCM57800_VF:
13156 case BCM57810_VF:
13157 case BCM57840_VF:
13158 case BCM57811_VF:
13159 return 1;
13160 default:
13161 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13162 return -ENODEV;
13163 }
13164}
13165
13166static int set_is_vf(int chip_id)
13167{
13168 switch (chip_id) {
13169 case BCM57712_VF:
13170 case BCM57800_VF:
13171 case BCM57810_VF:
13172 case BCM57840_VF:
13173 case BCM57811_VF:
13174 return true;
13175 default:
13176 return false;
13177 }
13178}
13179
13180
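/* Timesync (tsgen) register offsets within the NIG_REG_TIMESYNC_GEN_REG
 * block, as used by the REG_RD_DMAE/REG_WR_DMAE accesses below.
 */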
13181#define tsgen_ctrl 0x0
13182#define tsgen_freecount 0x10
13183#define tsgen_synctime_t0 0x20
13184#define tsgen_offset_t0 0x28
13185#define tsgen_drift_t0 0x30
13186#define tsgen_synctime_t1 0x58
13187#define tsgen_offset_t1 0x60
13188#define tsgen_drift_t1 0x68
13189
13190
13191static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13192 int best_val, int best_period)
13193{
13194 struct bnx2x_func_state_params func_params = {NULL};
13195 struct bnx2x_func_set_timesync_params *set_timesync_params =
13196 &func_params.params.set_timesync;
13197
	/* Prepare parameters for function state transitions */
13199 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13200 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13201
13202 func_params.f_obj = &bp->func_obj;
13203 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13204
	/* Function parameters */
13206 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13207 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13208 set_timesync_params->add_sub_drift_adjust_value =
13209 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13210 set_timesync_params->drift_adjust_value = best_val;
13211 set_timesync_params->drift_adjust_period = best_period;
13212
13213 return bnx2x_func_state_change(bp, &func_params);
13214}
13215
13216static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13217{
13218 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13219 int rc;
13220 int drift_dir = 1;
13221 int val, period, period1, period2, dif, dif1, dif2;
13222 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13223
13224 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13225
13226 if (!netif_running(bp->dev)) {
13227 DP(BNX2X_MSG_PTP,
13228 "PTP adjfreq called while the interface is down\n");
13229 return -EFAULT;
13230 }
13231
13232 if (ppb < 0) {
13233 ppb = -ppb;
13234 drift_dir = 0;
13235 }
13236
13237 if (ppb == 0) {
13238 best_val = 1;
13239 best_period = 0x1FFFFFF;
13240 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13241 best_val = 31;
13242 best_period = 1;
13243 } else {
		/* Find the (val, period) pair minimizing the difference
		 * between the requested ppb and the effective adjustment
		 * val * 1000000 / period; candidates of val whose three
		 * LSBs are all zero are skipped.
		 */
13247 for (val = 0; val <= 31; val++) {
13248 if ((val & 0x7) == 0)
13249 continue;
13250 period1 = val * 1000000 / ppb;
13251 period2 = period1 + 1;
13252 if (period1 != 0)
13253 dif1 = ppb - (val * 1000000 / period1);
13254 else
13255 dif1 = BNX2X_MAX_PHC_DRIFT;
13256 if (dif1 < 0)
13257 dif1 = -dif1;
13258 dif2 = ppb - (val * 1000000 / period2);
13259 if (dif2 < 0)
13260 dif2 = -dif2;
13261 dif = (dif1 < dif2) ? dif1 : dif2;
13262 period = (dif1 < dif2) ? period1 : period2;
13263 if (dif < best_dif) {
13264 best_dif = dif;
13265 best_val = val;
13266 best_period = period;
13267 }
13268 }
13269 }
13270
13271 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13272 best_period);
13273 if (rc) {
13274 BNX2X_ERR("Failed to set drift\n");
13275 return -EFAULT;
13276 }
13277
13278 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13279 best_period);
13280
13281 return 0;
13282}
13283
13284static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13285{
13286 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13287
13288 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13289
13290 timecounter_adjtime(&bp->timecounter, delta);
13291
13292 return 0;
13293}
13294
13295static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13296{
13297 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13298 u64 ns;
13299
13300 ns = timecounter_read(&bp->timecounter);
13301
13302 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13303
13304 *ts = ns_to_timespec64(ns);
13305
13306 return 0;
13307}
13308
13309static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13310 const struct timespec64 *ts)
13311{
13312 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13313 u64 ns;
13314
13315 ns = timespec64_to_ns(ts);
13316
13317 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13318
	/* Re-init the timecounter */
13320 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13321
13322 return 0;
13323}
13324
/* Enable (or disable) ancillary features of the PHC subsystem */
13326static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13327 struct ptp_clock_request *rq, int on)
13328{
13329 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13330
13331 BNX2X_ERR("PHC ancillary features are not supported\n");
13332 return -ENOTSUPP;
13333}
13334
13335static void bnx2x_register_phc(struct bnx2x *bp)
13336{
	/* Fill the ptp_clock_info struct and register PTP clock */
13338 bp->ptp_clock_info.owner = THIS_MODULE;
13339 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13340 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT;
13341 bp->ptp_clock_info.n_alarm = 0;
13342 bp->ptp_clock_info.n_ext_ts = 0;
13343 bp->ptp_clock_info.n_per_out = 0;
13344 bp->ptp_clock_info.pps = 0;
13345 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13346 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13347 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13348 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13349 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13350
13351 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13352 if (IS_ERR(bp->ptp_clock)) {
13353 bp->ptp_clock = NULL;
		BNX2X_ERR("PTP clock registration failed\n");
13355 }
13356}
13357
13358static int bnx2x_init_one(struct pci_dev *pdev,
13359 const struct pci_device_id *ent)
13360{
13361 struct net_device *dev = NULL;
13362 struct bnx2x *bp;
13363 enum pcie_link_width pcie_width;
13364 enum pci_bus_speed pcie_speed;
13365 int rc, max_non_def_sbs;
13366 int rx_count, tx_count, rss_count, doorbell_size;
13367 int max_cos_est;
13368 bool is_vf;
13369 int cnic_cnt;
13370
13371
	/* Management FW 'remembers' living interfaces. Allow it some time
	 * to forget previously living interfaces, allowing a proper re-load.
	 */
13374 if (is_kdump_kernel()) {
13375 ktime_t now = ktime_get_boottime();
13376 ktime_t fw_ready_time = ktime_set(5, 0);
13377
13378 if (ktime_before(now, fw_ready_time))
13379 msleep(ktime_ms_delta(fw_ready_time, now));
13380 }
13381

	/* An estimated maximum supported CoS number according to the chip
	 * version; the number of Tx queues allocated below is derived
	 * from it.
	 */
13390 max_cos_est = set_max_cos_est(ent->driver_data);
13391 if (max_cos_est < 0)
13392 return max_cos_est;
13393 is_vf = set_is_vf(ent->driver_data);
13394 cnic_cnt = is_vf ? 0 : 1;
13395
13396 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13397
13398
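	/* add another SB for VF as it has no default SB */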
13399 max_non_def_sbs += is_vf ? 1 : 0;
13400
13401
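	/* Maximum number of RSS queues: one IGU SB goes to CNIC */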
13402 rss_count = max_non_def_sbs - cnic_cnt;
13403
13404 if (rss_count < 1)
13405 return -EINVAL;
13406
13407
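	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */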
13408 rx_count = rss_count + cnic_cnt;
13409
13410
	/* Maximum number of netdev Tx queues:
	 * Maximum TSS queues * max supported number of CoS + FCoE L2
	 */
13413 tx_count = rss_count * max_cos_est + cnic_cnt;
13414
13415
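	/* dev zeroed in init_etherdev */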
13416 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13417 if (!dev)
13418 return -ENOMEM;
13419
13420 bp = netdev_priv(dev);
13421
13422 bp->flags = 0;
13423 if (is_vf)
13424 bp->flags |= IS_VF_FLAG;
13425
13426 bp->igu_sb_cnt = max_non_def_sbs;
13427 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
13428 bp->msg_enable = debug;
13429 bp->cnic_support = cnic_cnt;
13430 bp->cnic_probe = bnx2x_cnic_probe;
13431
13432 pci_set_drvdata(pdev, dev);
13433
13434 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
13435 if (rc < 0) {
13436 free_netdev(dev);
13437 return rc;
13438 }
13439
13440 BNX2X_DEV_INFO("This is a %s function\n",
13441 IS_PF(bp) ? "physical" : "virtual");
13442 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
13443 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
13444 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
13445 tx_count, rx_count);
13446
13447 rc = bnx2x_init_bp(bp);
13448 if (rc)
13449 goto init_one_exit;
13450
	/* Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * L2 connections.
	 */
13455 if (IS_VF(bp)) {
13456 bp->doorbells = bnx2x_vf_doorbells(bp);
13457 rc = bnx2x_vf_pci_alloc(bp);
13458 if (rc)
13459 goto init_one_exit;
13460 } else {
13461 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
13462 if (doorbell_size > pci_resource_len(pdev, 2)) {
13463 dev_err(&bp->pdev->dev,
13464 "Cannot map doorbells, bar size too small, aborting\n");
13465 rc = -ENOMEM;
13466 goto init_one_exit;
13467 }
13468 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
13469 doorbell_size);
13470 }
13471 if (!bp->doorbells) {
13472 dev_err(&bp->pdev->dev,
13473 "Cannot map doorbell space, aborting\n");
13474 rc = -ENOMEM;
13475 goto init_one_exit;
13476 }
13477
13478 if (IS_VF(bp)) {
13479 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13480 if (rc)
13481 goto init_one_exit;
13482 }
13483
	/* Enable SRIOV if capability found in configuration space */
13485 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
13486 if (rc)
13487 goto init_one_exit;
13488
13489
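	/* calc qm_cid_count */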
13490 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
13491 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
13492
	/* disable FCOE L2 queue for E1x */
13494 if (CHIP_IS_E1x(bp))
13495 bp->flags |= NO_FCOE_FLAG;
13496
	/* Set bp->num_queues for MSI-X mode */
13498 bnx2x_set_num_queues(bp);
13499
	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed.
	 */
13503 rc = bnx2x_set_int_mode(bp);
13504 if (rc) {
13505 dev_err(&pdev->dev, "Cannot set interrupts\n");
13506 goto init_one_exit;
13507 }
13508 BNX2X_DEV_INFO("set interrupts successfully\n");
13509
13510
13511 rc = register_netdev(dev);
13512 if (rc) {
13513 dev_err(&pdev->dev, "Cannot register net device\n");
13514 goto init_one_exit;
13515 }
13516 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
13517
13518 if (!NO_FCOE(bp)) {
13519
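		/* Add storage MAC address */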
13520 rtnl_lock();
13521 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
13522 rtnl_unlock();
13523 }
13524 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
13525 pcie_speed == PCI_SPEED_UNKNOWN ||
13526 pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
13527 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
13528 else
13529 BNX2X_DEV_INFO(
13530 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
13531 board_info[ent->driver_data].name,
13532 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13533 pcie_width,
13534 pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
13535 pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
13536 pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
13537 "Unknown",
13538 dev->base_addr, bp->pdev->irq, dev->dev_addr);
13539
13540 bnx2x_register_phc(bp);
13541
13542 return 0;
13543
13544init_one_exit:
13545 bnx2x_disable_pcie_error_reporting(bp);
13546
13547 if (bp->regview)
13548 iounmap(bp->regview);
13549
13550 if (IS_PF(bp) && bp->doorbells)
13551 iounmap(bp->doorbells);
13552
13553 free_netdev(dev);
13554
13555 if (atomic_read(&pdev->enable_cnt) == 1)
13556 pci_release_regions(pdev);
13557
13558 pci_disable_device(pdev);
13559
13560 return rc;
13561}
13562
13563static void __bnx2x_remove(struct pci_dev *pdev,
13564 struct net_device *dev,
13565 struct bnx2x *bp,
13566 bool remove_netdev)
13567{
13568 if (bp->ptp_clock) {
13569 ptp_clock_unregister(bp->ptp_clock);
13570 bp->ptp_clock = NULL;
13571 }
13572
	/* Delete storage MAC address */
13574 if (!NO_FCOE(bp)) {
13575 rtnl_lock();
13576 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
13577 rtnl_unlock();
13578 }
13579
13580#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
13582 bnx2x_dcbnl_update_applist(bp, true);
13583#endif
13584
13585 if (IS_PF(bp) &&
13586 !BP_NOMCP(bp) &&
13587 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
13588 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
13589
13590
13591 if (remove_netdev) {
13592 unregister_netdev(dev);
13593 } else {
13594 rtnl_lock();
13595 dev_close(dev);
13596 rtnl_unlock();
13597 }
13598
13599 bnx2x_iov_remove_one(bp);
13600
	/* Power on: we can't let PCI layer write to us while we are in D3 */
13602 if (IS_PF(bp)) {
13603 bnx2x_set_power_state(bp, PCI_D0);
13604
		/* Set endianness registers to reset values in case the next
		 * driver boots in a different endianness environment.
		 */
13608 bnx2x_reset_endianity(bp);
13609 }
13610
	/* Disable MSI/MSI-X */
13612 bnx2x_disable_msi(bp);
13613
13614
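	/* Power off */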
13615 if (IS_PF(bp))
13616 bnx2x_set_power_state(bp, PCI_D3hot);
13617
	/* Make sure RESET task is not scheduled before continuing */
13619 cancel_delayed_work_sync(&bp->sp_rtnl_task);
13620
	/* send message via vfpf channel to release the resources of this vf */
13622 if (IS_VF(bp))
13623 bnx2x_vfpf_release(bp);
13624
	/* Assumes no further PCIe PM changes will occur */
13626 if (system_state == SYSTEM_POWER_OFF) {
13627 pci_wake_from_d3(pdev, bp->wol);
13628 pci_set_power_state(pdev, PCI_D3hot);
13629 }
13630
13631 bnx2x_disable_pcie_error_reporting(bp);
13632 if (remove_netdev) {
13633 if (bp->regview)
13634 iounmap(bp->regview);
13635
		/* For vfs, doorbells are part of the regview and were unmapped
		 * along with it. FW is only loaded by PF.
		 */
13639 if (IS_PF(bp)) {
13640 if (bp->doorbells)
13641 iounmap(bp->doorbells);
13642
13643 bnx2x_release_firmware(bp);
13644 } else {
13645 bnx2x_vf_pci_dealloc(bp);
13646 }
13647 bnx2x_free_mem_bp(bp);
13648
13649 free_netdev(dev);
13650
13651 if (atomic_read(&pdev->enable_cnt) == 1)
13652 pci_release_regions(pdev);
13653
13654 pci_disable_device(pdev);
13655 }
13656}
13657
13658static void bnx2x_remove_one(struct pci_dev *pdev)
13659{
13660 struct net_device *dev = pci_get_drvdata(pdev);
13661 struct bnx2x *bp;
13662
13663 if (!dev) {
13664 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13665 return;
13666 }
13667 bp = netdev_priv(dev);
13668
13669 __bnx2x_remove(pdev, dev, bp, true);
13670}
13671
13672static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13673{
13674 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
13675
13676 bp->rx_mode = BNX2X_RX_MODE_NONE;
13677
13678 if (CNIC_LOADED(bp))
13679 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
13680
13681
13682 bnx2x_tx_disable(bp);
13683
13684 bnx2x_del_all_napi(bp);
13685 if (CNIC_LOADED(bp))
13686 bnx2x_del_all_napi_cnic(bp);
13687 netdev_reset_tc(bp->dev);
13688
13689 del_timer_sync(&bp->timer);
13690 cancel_delayed_work_sync(&bp->sp_task);
13691 cancel_delayed_work_sync(&bp->period_task);
13692
13693 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
13694 bp->stats_state = STATS_STATE_DISABLED;
13695 up(&bp->stats_lock);
13696 }
13697
13698 bnx2x_save_statistics(bp);
13699
13700 netif_carrier_off(bp->dev);
13701
13702 return 0;
13703}
13704
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
13713static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13714 pci_channel_state_t state)
13715{
13716 struct net_device *dev = pci_get_drvdata(pdev);
13717 struct bnx2x *bp = netdev_priv(dev);
13718
13719 rtnl_lock();
13720
13721 BNX2X_ERR("IO error detected\n");
13722
13723 netif_device_detach(dev);
13724
13725 if (state == pci_channel_io_perm_failure) {
13726 rtnl_unlock();
13727 return PCI_ERS_RESULT_DISCONNECT;
13728 }
13729
13730 if (netif_running(dev))
13731 bnx2x_eeh_nic_unload(bp);
13732
13733 bnx2x_prev_path_mark_eeh(bp);
13734
13735 pci_disable_device(pdev);
13736
13737 rtnl_unlock();
13738
13739
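	/* Request a slot reset */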
13740 return PCI_ERS_RESULT_NEED_RESET;
13741}
13742

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
13749static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13750{
13751 struct net_device *dev = pci_get_drvdata(pdev);
13752 struct bnx2x *bp = netdev_priv(dev);
13753 int i;
13754
13755 rtnl_lock();
13756 BNX2X_ERR("IO slot reset initializing...\n");
13757 if (pci_enable_device(pdev)) {
13758 dev_err(&pdev->dev,
13759 "Cannot re-enable PCI device after reset\n");
13760 rtnl_unlock();
13761 return PCI_ERS_RESULT_DISCONNECT;
13762 }
13763
13764 pci_set_master(pdev);
13765 pci_restore_state(pdev);
13766 pci_save_state(pdev);
13767
13768 if (netif_running(dev))
13769 bnx2x_set_power_state(bp, PCI_D0);
13770
13771 if (netif_running(dev)) {
13772 BNX2X_ERR("IO slot reset --> driver unload\n");
13773
		/* MCP should have been reset; Need to wait for validity */
13775 bnx2x_init_shmem(bp);
13776
13777 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
13778 u32 v;
13779
13780 v = SHMEM2_RD(bp,
13781 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
13782 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
13783 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
13784 }
13785 bnx2x_drain_tx_queues(bp);
13786 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
13787 bnx2x_netif_stop(bp, 1);
13788 bnx2x_free_irq(bp);
13789
		/* Report UNLOAD_DONE to MCP */
13791 bnx2x_send_unload_done(bp, true);
13792
13793 bp->sp_state = 0;
13794 bp->port.pmf = 0;
13795
13796 bnx2x_prev_unload(bp);
13797
		/* Release remaining driver resources: SP objects, skbs,
		 * rx SGE ranges, fastpath and driver memory.
		 */
13801 bnx2x_squeeze_objects(bp);
13802 bnx2x_free_skbs(bp);
13803 for_each_rx_queue(bp, i)
13804 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13805 bnx2x_free_fp_mem(bp);
13806 bnx2x_free_mem(bp);
13807
13808 bp->state = BNX2X_STATE_CLOSED;
13809 }
13810
13811 rtnl_unlock();
13812
13813
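	/* If AER, perform cleanup of the PCIe registers */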
13814 if (bp->flags & AER_ENABLED) {
13815 if (pci_cleanup_aer_uncorrect_error_status(pdev))
13816 BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
13817 else
13818 DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
13819 }
13820
13821 return PCI_ERS_RESULT_RECOVERED;
13822}
13823
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
13831static void bnx2x_io_resume(struct pci_dev *pdev)
13832{
13833 struct net_device *dev = pci_get_drvdata(pdev);
13834 struct bnx2x *bp = netdev_priv(dev);
13835
13836 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13837 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
13838 return;
13839 }
13840
13841 rtnl_lock();
13842
13843 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
13844 DRV_MSG_SEQ_NUMBER_MASK;
13845
13846 if (netif_running(dev))
13847 bnx2x_nic_load(bp, LOAD_NORMAL);
13848
13849 netif_device_attach(dev);
13850
13851 rtnl_unlock();
13852}
13853
13854static const struct pci_error_handlers bnx2x_err_handler = {
13855 .error_detected = bnx2x_io_error_detected,
13856 .slot_reset = bnx2x_io_slot_reset,
13857 .resume = bnx2x_io_resume,
13858};
13859
13860static void bnx2x_shutdown(struct pci_dev *pdev)
13861{
13862 struct net_device *dev = pci_get_drvdata(pdev);
13863 struct bnx2x *bp;
13864
13865 if (!dev)
13866 return;
13867
13868 bp = netdev_priv(dev);
13869 if (!bp)
13870 return;
13871
13872 rtnl_lock();
13873 netif_device_detach(dev);
13874 rtnl_unlock();
13875
	/* Don't remove the netdevice, as there are scenarios which will cause
	 * the kernel to hang, e.g., when trying to remove bnx2i while the
	 * rmmod sequence is in progress, etc.
	 */
13880 __bnx2x_remove(pdev, dev, bp, false);
13881}
13882
13883static struct pci_driver bnx2x_pci_driver = {
13884 .name = DRV_MODULE_NAME,
13885 .id_table = bnx2x_pci_tbl,
13886 .probe = bnx2x_init_one,
13887 .remove = bnx2x_remove_one,
13888 .suspend = bnx2x_suspend,
13889 .resume = bnx2x_resume,
13890 .err_handler = &bnx2x_err_handler,
13891#ifdef CONFIG_BNX2X_SRIOV
13892 .sriov_configure = bnx2x_sriov_configure,
13893#endif
13894 .shutdown = bnx2x_shutdown,
13895};
13896
13897static int __init bnx2x_init(void)
13898{
13899 int ret;
13900
13901 pr_info("%s", version);
13902
13903 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13904 if (bnx2x_wq == NULL) {
13905 pr_err("Cannot create workqueue\n");
13906 return -ENOMEM;
13907 }
13908 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
13909 if (!bnx2x_iov_wq) {
13910 pr_err("Cannot create iov workqueue\n");
13911 destroy_workqueue(bnx2x_wq);
13912 return -ENOMEM;
13913 }
13914
13915 ret = pci_register_driver(&bnx2x_pci_driver);
13916 if (ret) {
13917 pr_err("Cannot register driver\n");
13918 destroy_workqueue(bnx2x_wq);
13919 destroy_workqueue(bnx2x_iov_wq);
13920 }
13921 return ret;
13922}
13923
13924static void __exit bnx2x_cleanup(void)
13925{
13926 struct list_head *pos, *q;
13927
13928 pci_unregister_driver(&bnx2x_pci_driver);
13929
13930 destroy_workqueue(bnx2x_wq);
13931 destroy_workqueue(bnx2x_iov_wq);
13932
	/* Free globally allocated resources */
13934 list_for_each_safe(pos, q, &bnx2x_prev_list) {
13935 struct bnx2x_prev_path_list *tmp =
13936 list_entry(pos, struct bnx2x_prev_path_list, list);
13937 list_del(pos);
13938 kfree(tmp);
13939 }
13940}
13941
13942void bnx2x_notify_link_changed(struct bnx2x *bp)
13943{
13944 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
13945}
13946
13947module_init(bnx2x_init);
13948module_exit(bnx2x_cleanup);
13949
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
13959static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
13960{
13961 unsigned long ramrod_flags = 0;
13962
13963 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13964 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
13965 &bp->iscsi_l2_mac_obj, true,
13966 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
13967}
13968
/* count denotes the number of new completions we have seen */
13970static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13971{
13972 struct eth_spe *spe;
13973 int cxt_index, cxt_offset;
13974
13975#ifdef BNX2X_STOP_ON_ERROR
13976 if (unlikely(bp->panic))
13977 return;
13978#endif
13979
13980 spin_lock_bh(&bp->spq_lock);
13981 BUG_ON(bp->cnic_spq_pending < count);
13982 bp->cnic_spq_pending -= count;
13983
13984 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
13985 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
13986 & SPE_HDR_CONN_TYPE) >>
13987 SPE_HDR_CONN_TYPE_SHIFT;
13988 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
13989 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
13990
		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod.
		 */
13994 if (type == ETH_CONNECTION_TYPE) {
13995 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
13996 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
13997 ILT_PAGE_CIDS;
13998 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
13999 (cxt_index * ILT_PAGE_CIDS);
14000 bnx2x_set_ctx_validation(bp,
14001 &bp->context[cxt_index].
14002 vcxt[cxt_offset].eth,
14003 BNX2X_ISCSI_ETH_CID(bp));
14004 }
14005 }
14006
		/* There may be not more than 8 L2, not more than 8 L5 SPEs
		 * in the air. We also check that number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
14013 if (type == ETH_CONNECTION_TYPE) {
14014 if (!atomic_read(&bp->cq_spq_left))
14015 break;
14016 else
14017 atomic_dec(&bp->cq_spq_left);
14018 } else if (type == NONE_CONNECTION_TYPE) {
14019 if (!atomic_read(&bp->eq_spq_left))
14020 break;
14021 else
14022 atomic_dec(&bp->eq_spq_left);
14023 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14024 (type == FCOE_CONNECTION_TYPE)) {
14025 if (bp->cnic_spq_pending >=
14026 bp->cnic_eth_dev.max_kwqe_pending)
14027 break;
14028 else
14029 bp->cnic_spq_pending++;
14030 } else {
14031 BNX2X_ERR("Unknown SPE type: %d\n", type);
14032 bnx2x_panic();
14033 break;
14034 }
14035
14036 spe = bnx2x_sp_get_next(bp);
14037 *spe = *bp->cnic_kwq_cons;
14038
14039 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14040 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14041
14042 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14043 bp->cnic_kwq_cons = bp->cnic_kwq;
14044 else
14045 bp->cnic_kwq_cons++;
14046 }
14047 bnx2x_sp_prod_update(bp);
14048 spin_unlock_bh(&bp->spq_lock);
14049}
14050
14051static int bnx2x_cnic_sp_queue(struct net_device *dev,
14052 struct kwqe_16 *kwqes[], u32 count)
14053{
14054 struct bnx2x *bp = netdev_priv(dev);
14055 int i;
14056
14057#ifdef BNX2X_STOP_ON_ERROR
14058 if (unlikely(bp->panic)) {
14059 BNX2X_ERR("Can't post to SP queue while panic\n");
14060 return -EIO;
14061 }
14062#endif
14063
14064 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14065 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14066 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14067 return -EAGAIN;
14068 }
14069
14070 spin_lock_bh(&bp->spq_lock);
14071
14072 for (i = 0; i < count; i++) {
14073 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14074
14075 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14076 break;
14077
14078 *bp->cnic_kwq_prod = *spe;
14079
14080 bp->cnic_kwq_pending++;
14081
14082 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14083 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14084 spe->data.update_data_addr.hi,
14085 spe->data.update_data_addr.lo,
14086 bp->cnic_kwq_pending);
14087
14088 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14089 bp->cnic_kwq_prod = bp->cnic_kwq;
14090 else
14091 bp->cnic_kwq_prod++;
14092 }
14093
14094 spin_unlock_bh(&bp->spq_lock);
14095
14096 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14097 bnx2x_cnic_sp_post(bp, 0);
14098
14099 return i;
14100}
14101
14102static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14103{
14104 struct cnic_ops *c_ops;
14105 int rc = 0;
14106
14107 mutex_lock(&bp->cnic_mutex);
14108 c_ops = rcu_dereference_protected(bp->cnic_ops,
14109 lockdep_is_held(&bp->cnic_mutex));
14110 if (c_ops)
14111 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14112 mutex_unlock(&bp->cnic_mutex);
14113
14114 return rc;
14115}
14116
14117static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14118{
14119 struct cnic_ops *c_ops;
14120 int rc = 0;
14121
14122 rcu_read_lock();
14123 c_ops = rcu_dereference(bp->cnic_ops);
14124 if (c_ops)
14125 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14126 rcu_read_unlock();
14127
14128 return rc;
14129}
14130

/* for commands that have no data */
14134int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14135{
14136 struct cnic_ctl_info ctl = {0};
14137
14138 ctl.cmd = cmd;
14139
14140 return bnx2x_cnic_ctl_send(bp, &ctl);
14141}
14142
14143static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14144{
14145 struct cnic_ctl_info ctl = {0};
14146
	/* first we tell CNIC and only then we count this as a completion */
14148 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14149 ctl.data.comp.cid = cid;
14150 ctl.data.comp.error = err;
14151
14152 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14153 bnx2x_cnic_sp_post(bp, 0);
14154}
14155
14156
/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
14161static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14162{
14163 unsigned long accept_flags = 0, ramrod_flags = 0;
14164 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14165 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14166
14167 if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
14174 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14175 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14176 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14177 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14178
		/* Clear STOP_PENDING bit if START is requested */
14180 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14181
14182 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14183 } else
		/* Clear START_PENDING bit if STOP is requested */
14185 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14186
14187 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14188 set_bit(sched_state, &bp->sp_state);
14189 else {
14190 __set_bit(RAMROD_RX, &ramrod_flags);
14191 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14192 ramrod_flags);
14193 }
14194}
14195
14196static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14197{
14198 struct bnx2x *bp = netdev_priv(dev);
14199 int rc = 0;
14200
14201 switch (ctl->cmd) {
14202 case DRV_CTL_CTXTBL_WR_CMD: {
14203 u32 index = ctl->data.io.offset;
14204 dma_addr_t addr = ctl->data.io.dma_addr;
14205
14206 bnx2x_ilt_wr(bp, index, addr);
14207 break;
14208 }
14209
14210 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14211 int count = ctl->data.credit.credit_count;
14212
14213 bnx2x_cnic_sp_post(bp, count);
14214 break;
14215 }
14216
	/* rtnl_lock is held.  */
14218 case DRV_CTL_START_L2_CMD: {
14219 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14220 unsigned long sp_bits = 0;
14221
		/* Initialize the iSCSI L2 MAC object */
14223 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14224 cp->iscsi_l2_client_id,
14225 cp->iscsi_l2_cid, BP_FUNC(bp),
14226 bnx2x_sp(bp, mac_rdata),
14227 bnx2x_sp_mapping(bp, mac_rdata),
14228 BNX2X_FILTER_MAC_PENDING,
14229 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14230 &bp->macs_pool);
14231
		/* Set iSCSI MAC address */
14233 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14234 if (rc)
14235 break;
14236
14237 mmiowb();
14238 barrier();
14239
		/* Start accepting on iSCSI L2 ring */
14242 netif_addr_lock_bh(dev);
14243 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14244 netif_addr_unlock_bh(dev);
14245
		/* bits to wait on */
14247 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14248 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14249
14250 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14251 BNX2X_ERR("rx_mode completion timed out!\n");
14252
14253 break;
14254 }
14255
	/* rtnl_lock is held.  */
14257 case DRV_CTL_STOP_L2_CMD: {
14258 unsigned long sp_bits = 0;
14259
		/* Stop accepting on iSCSI L2 ring */
14261 netif_addr_lock_bh(dev);
14262 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14263 netif_addr_unlock_bh(dev);
14264
		/* bits to wait on */
14266 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14267 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14268
14269 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14270 BNX2X_ERR("rx_mode completion timed out!\n");
14271
14272 mmiowb();
14273 barrier();
14274
		/* Unset iSCSI L2 MAC */
14276 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14277 BNX2X_ISCSI_ETH_MAC, true);
14278 break;
14279 }
14280 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14281 int count = ctl->data.credit.credit_count;
14282
14283 smp_mb__before_atomic();
14284 atomic_add(count, &bp->cq_spq_left);
14285 smp_mb__after_atomic();
14286 break;
14287 }
14288 case DRV_CTL_ULP_REGISTER_CMD: {
14289 int ulp_type = ctl->data.register_data.ulp_type;
14290
14291 if (CHIP_IS_E3(bp)) {
14292 int idx = BP_FW_MB_IDX(bp);
14293 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14294 int path = BP_PATH(bp);
14295 int port = BP_PORT(bp);
14296 int i;
14297 u32 scratch_offset;
14298 u32 *host_addr;
14299
			/* first write capability to shmem2 */
14301 if (ulp_type == CNIC_ULP_ISCSI)
14302 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14303 else if (ulp_type == CNIC_ULP_FCOE)
14304 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14305 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14306
14307 if ((ulp_type != CNIC_ULP_FCOE) ||
14308 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14309 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14310 break;
14311
			/* if reached here - should write fcoe capabilities */
14313 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14314 if (!scratch_offset)
14315 break;
14316 scratch_offset += offsetof(struct glob_ncsi_oem_data,
14317 fcoe_features[path][port]);
14318 host_addr = (u32 *) &(ctl->data.register_data.
14319 fcoe_features);
14320 for (i = 0; i < sizeof(struct fcoe_capabilities);
14321 i += 4)
14322 REG_WR(bp, scratch_offset + i,
14323 *(host_addr + i/4));
14324 }
14325 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14326 break;
14327 }
14328
14329 case DRV_CTL_ULP_UNREGISTER_CMD: {
14330 int ulp_type = ctl->data.ulp_type;
14331
14332 if (CHIP_IS_E3(bp)) {
14333 int idx = BP_FW_MB_IDX(bp);
14334 u32 cap;
14335
14336 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14337 if (ulp_type == CNIC_ULP_ISCSI)
14338 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14339 else if (ulp_type == CNIC_ULP_FCOE)
14340 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14341 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14342 }
14343 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14344 break;
14345 }
14346
14347 default:
14348 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14349 rc = -EINVAL;
14350 }
14351
14352 return rc;
14353}
14354
14355void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
14356{
14357 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14358
14359 if (bp->flags & USING_MSIX_FLAG) {
14360 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
14361 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
14362 cp->irq_arr[0].vector = bp->msix_table[1].vector;
14363 } else {
14364 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
14365 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
14366 }
14367 if (!CHIP_IS_E1x(bp))
14368 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
14369 else
14370 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
14371
14372 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
14373 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
14374 cp->irq_arr[1].status_blk = bp->def_status_blk;
14375 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
14376 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
14377
14378 cp->num_irq = 2;
14379}
14380
14381void bnx2x_setup_cnic_info(struct bnx2x *bp)
14382{
14383 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14384
14385 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14386 bnx2x_cid_ilt_lines(bp);
14387 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14388 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14389 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14390
14391 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
14392 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
14393 cp->iscsi_l2_cid);
14394
14395 if (NO_ISCSI_OOO(bp))
14396 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14397}
14398
14399static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
14400 void *data)
14401{
14402 struct bnx2x *bp = netdev_priv(dev);
14403 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14404 int rc;
14405
14406 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
14407
14408 if (ops == NULL) {
14409 BNX2X_ERR("NULL ops received\n");
14410 return -EINVAL;
14411 }
14412
14413 if (!CNIC_SUPPORT(bp)) {
14414 BNX2X_ERR("Can't register CNIC when not supported\n");
14415 return -EOPNOTSUPP;
14416 }
14417
14418 if (!CNIC_LOADED(bp)) {
14419 rc = bnx2x_load_cnic(bp);
14420 if (rc) {
14421 BNX2X_ERR("CNIC-related load failed\n");
14422 return rc;
14423 }
14424 }
14425
14426 bp->cnic_enabled = true;
14427
14428 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
14429 if (!bp->cnic_kwq)
14430 return -ENOMEM;
14431
14432 bp->cnic_kwq_cons = bp->cnic_kwq;
14433 bp->cnic_kwq_prod = bp->cnic_kwq;
14434 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
14435
14436 bp->cnic_spq_pending = 0;
14437 bp->cnic_kwq_pending = 0;
14438
14439 bp->cnic_data = data;
14440
14441 cp->num_irq = 0;
14442 cp->drv_state |= CNIC_DRV_STATE_REGD;
14443 cp->iro_arr = bp->iro_arr;
14444
14445 bnx2x_setup_cnic_irq_info(bp);
14446
14447 rcu_assign_pointer(bp->cnic_ops, ops);
14448
	/* Schedule driver to read CNIC driver versions */
14450 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14451
14452 return 0;
14453}
14454
14455static int bnx2x_unregister_cnic(struct net_device *dev)
14456{
14457 struct bnx2x *bp = netdev_priv(dev);
14458 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14459
14460 mutex_lock(&bp->cnic_mutex);
14461 cp->drv_state = 0;
14462 RCU_INIT_POINTER(bp->cnic_ops, NULL);
14463 mutex_unlock(&bp->cnic_mutex);
14464 synchronize_rcu();
14465 bp->cnic_enabled = false;
14466 kfree(bp->cnic_kwq);
14467 bp->cnic_kwq = NULL;
14468
14469 return 0;
14470}
14471
14472static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
14473{
14474 struct bnx2x *bp = netdev_priv(dev);
14475 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14476
	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate CNIC that it should not try to work
	 * with this device.
	 */
14481 if (NO_ISCSI(bp) && NO_FCOE(bp))
14482 return NULL;
14483
14484 cp->drv_owner = THIS_MODULE;
14485 cp->chip_id = CHIP_ID(bp);
14486 cp->pdev = bp->pdev;
14487 cp->io_base = bp->regview;
14488 cp->io_base2 = bp->doorbells;
14489 cp->max_kwqe_pending = 8;
14490 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
14491 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14492 bnx2x_cid_ilt_lines(bp);
14493 cp->ctx_tbl_len = CNIC_ILT_LINES;
14494 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14495 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
14496 cp->drv_ctl = bnx2x_drv_ctl;
14497 cp->drv_register_cnic = bnx2x_register_cnic;
14498 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
14499 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14500 cp->iscsi_l2_client_id =
14501 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14502 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14503
14504 if (NO_ISCSI_OOO(bp))
14505 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14506
14507 if (NO_ISCSI(bp))
14508 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
14509
14510 if (NO_FCOE(bp))
14511 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
14512
14513 BNX2X_DEV_INFO(
14514 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
14515 cp->ctx_blk_size,
14516 cp->ctx_tbl_offset,
14517 cp->ctx_tbl_len,
14518 cp->starting_cid);
14519 return cp;
14520}
14521
14522static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
14523{
14524 struct bnx2x *bp = fp->bp;
14525 u32 offset = BAR_USTRORM_INTMEM;
14526
14527 if (IS_VF(bp))
14528 return bnx2x_vf_ustorm_prods_offset(bp, fp);
14529 else if (!CHIP_IS_E1x(bp))
14530 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
14531 else
14532 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
14533
14534 return offset;
14535}
14536
/**
 * bnx2x_pretend_func - pretend to be another function
 *
 * @bp:		driver handle
 * @pretend_func_val: pretend value
 *
 * Writes the pretend value to the pretend register and reads it back to
 * flush the write. Returns -1 if the value is out of range for E1H.
 */
14542int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
14543{
14544 u32 pretend_reg;
14545
14546 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
14547 return -1;
14548
14549
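	/* get my own pretend register */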
14550 pretend_reg = bnx2x_get_pretend_reg(bp);
14551 REG_WR(bp, pretend_reg, pretend_func_val);
14552 REG_RD(bp, pretend_reg);
14553 return 0;
14554}
14555
14556static void bnx2x_ptp_task(struct work_struct *work)
14557{
14558 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
14559 int port = BP_PORT(bp);
14560 u32 val_seq;
14561 u64 timestamp, ns;
14562 struct skb_shared_hwtstamps shhwtstamps;
14563
	/* Read Tx timestamp registers */
14565 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
14566 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
14567 if (val_seq & 0x10000) {
		/* There is a valid Tx timestamp value */
14569 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
14570 NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
14571 timestamp <<= 32;
14572 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
14573 NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
		/* Reset timestamp register to allow new timestamp */
14575 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
14576 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
14577 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
14578
14579 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
14580 shhwtstamps.hwtstamp = ns_to_ktime(ns);
14581 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
14582 dev_kfree_skb_any(bp->ptp_tx_skb);
14583 bp->ptp_tx_skb = NULL;
14584
14585 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
14586 timestamp, ns);
14587 } else {
14588 DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
		/* Reschedule to keep checking for a valid timestamp value */
14590 schedule_work(&bp->ptp_task);
14591 }
14592}
14593
14594void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
14595{
14596 int port = BP_PORT(bp);
14597 u64 timestamp, ns;
14598
14599 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
14600 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
14601 timestamp <<= 32;
14602 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
14603 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
14604
	/* Reset timestamp register to allow new timestamp */
14606 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
14607 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
14608
14609 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
14610
14611 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
14612
14613 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
14614 timestamp, ns);
14615}
14616
/* Read the PHC */
14618static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
14619{
14620 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
14621 int port = BP_PORT(bp);
14622 u32 wb_data[2];
14623 u64 phc_cycles;
14624
14625 REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
14626 NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
14627 phc_cycles = wb_data[1];
14628 phc_cycles = (phc_cycles << 32) + wb_data[0];
14629
14630 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
14631
14632 return phc_cycles;
14633}
14634
14635static void bnx2x_init_cyclecounter(struct bnx2x *bp)
14636{
14637 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
14638 bp->cyclecounter.read = bnx2x_cyclecounter_read;
14639 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
14640 bp->cyclecounter.shift = 1;
14641 bp->cyclecounter.mult = 1;
14642}
14643
14644static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
14645{
14646 struct bnx2x_func_state_params func_params = {NULL};
14647 struct bnx2x_func_set_timesync_params *set_timesync_params =
14648 &func_params.params.set_timesync;
14649
	/* Prepare parameters for function state transitions */
14651 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
14652 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
14653
14654 func_params.f_obj = &bp->func_obj;
14655 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
14656
	/* Function parameters */
14658 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
14659 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
14660
14661 return bnx2x_func_state_change(bp, &func_params);
14662}
14663
14664static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
14665{
14666 struct bnx2x_queue_state_params q_params;
14667 int rc, i;
14668
	/* send queue update ramrod to enable PTP packets */
14670 memset(&q_params, 0, sizeof(q_params));
14671 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
14672 q_params.cmd = BNX2X_Q_CMD_UPDATE;
14673 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
14674 &q_params.params.update.update_flags);
14675 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
14676 &q_params.params.update.update_flags);
14677
	/* send the ramrod on all the queues of the PF */
14679 for_each_eth_queue(bp, i) {
14680 struct bnx2x_fastpath *fp = &bp->fp[i];
14681
		/* Set the appropriate Queue object */
14683 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
14684
		/* Update the Queue state */
14686 rc = bnx2x_queue_state_change(bp, &q_params);
14687 if (rc) {
14688 BNX2X_ERR("Failed to enable PTP packets\n");
14689 return rc;
14690 }
14691 }
14692
14693 return 0;
14694}
14695
14696int bnx2x_configure_ptp_filters(struct bnx2x *bp)
14697{
14698 int port = BP_PORT(bp);
14699 int rc;
14700
14701 if (!bp->hwtstamp_ioctl_called)
14702 return 0;
14703
14704 switch (bp->tx_type) {
14705 case HWTSTAMP_TX_ON:
14706 bp->flags |= TX_TIMESTAMPING_EN;
14707 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
14708 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
14709 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
14710 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
14711 break;
14712 case HWTSTAMP_TX_ONESTEP_SYNC:
14713 BNX2X_ERR("One-step timestamping is not supported\n");
14714 return -ERANGE;
14715 }
14716
14717 switch (bp->rx_filter) {
14718 case HWTSTAMP_FILTER_NONE:
14719 break;
14720 case HWTSTAMP_FILTER_ALL:
14721 case HWTSTAMP_FILTER_SOME:
14722 bp->rx_filter = HWTSTAMP_FILTER_NONE;
14723 break;
14724 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
14725 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
14726 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
14727 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
14729 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14730 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
14731 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14732 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
14733 break;
14734 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
14735 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
14736 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
14737 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
14739 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14740 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
14741 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14742 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
14743 break;
14744 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
14745 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
14746 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
14747 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection L2 events */
14749 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14750 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
14751 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14752 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
14753
14754 break;
14755 case HWTSTAMP_FILTER_PTP_V2_EVENT:
14756 case HWTSTAMP_FILTER_PTP_V2_SYNC:
14757 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
14758 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
14760 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14761 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
14762 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14763 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
14764 break;
14765 }
14766
14767
14768 rc = bnx2x_enable_ptp_packets(bp);
14769 if (rc)
14770 return rc;
14771
	/* Configure HW to route PTP packets to the host buffer */
14773 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
14774 NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
14775
14776 return 0;
14777}
14778
14779static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
14780{
14781 struct hwtstamp_config config;
14782 int rc;
14783
14784 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
14785
14786 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
14787 return -EFAULT;
14788
14789 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
14790 config.tx_type, config.rx_filter);
14791
14792 if (config.flags) {
14793 BNX2X_ERR("config.flags is reserved for future use\n");
14794 return -EINVAL;
14795 }
14796
14797 bp->hwtstamp_ioctl_called = 1;
14798 bp->tx_type = config.tx_type;
14799 bp->rx_filter = config.rx_filter;
14800
14801 rc = bnx2x_configure_ptp_filters(bp);
14802 if (rc)
14803 return rc;
14804
14805 config.rx_filter = bp->rx_filter;
14806
14807 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
14808 -EFAULT : 0;
14809}
14810
/* Configure HW for PTP */
14812static int bnx2x_configure_ptp(struct bnx2x *bp)
14813{
14814 int rc, port = BP_PORT(bp);
14815 u32 wb_data[2];
14816
	/* Reset PTP event detection rules - will be configured in the IOCTL */
14818 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14819 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
14820 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14821 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
14822 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
14823 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
14824 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
14825 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
14826
	/* Disable PTP packets to host - will be configured in the IOCTL */
14828 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
14829 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
14830
	/* Enable the PTP feature */
14832 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
14833 NIG_REG_P0_PTP_EN, 0x3F);
14834
	/* Enable the free-running counter */
14836 wb_data[0] = 0;
14837 wb_data[1] = 0;
14838 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
14839
	/* Reset drift register (offset register is not reset) */
14841 rc = bnx2x_send_reset_timesync_ramrod(bp);
14842 if (rc) {
14843 BNX2X_ERR("Failed to reset PHC drift register\n");
14844 return -EFAULT;
14845 }
14846
	/* Reset possibly old timestamps */
14848 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
14849 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
14850 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
14851 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
14852
14853 return 0;
14854}
14855
/* Called during load, to initialize PTP-related stuff */
14857void bnx2x_init_ptp(struct bnx2x *bp)
14858{
14859 int rc;
14860
	/* Configure PTP in HW */
14862 rc = bnx2x_configure_ptp(bp);
14863 if (rc) {
14864 BNX2X_ERR("Stopping PTP initialization\n");
14865 return;
14866 }
14867
	/* Init work queue for Tx timestamping */
14869 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
14870
	/* Init cyclecounter and timecounter. This is done only in the first
	 * load. If done in every load, PTP application will fail when doing
	 * unload / load (e.g. MTU change) while it is running.
	 */
14875 if (!bp->timecounter_init_done) {
14876 bnx2x_init_cyclecounter(bp);
14877 timecounter_init(&bp->timecounter, &bp->cyclecounter,
14878 ktime_to_ns(ktime_get_real()));
14879 bp->timecounter_init_done = 1;
14880 }
14881
14882 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
14883}
14884