/* bnx2x_main.c: QLogic Everest network driver.
 *
 * Written by: Eliezer Tamir
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
#include <net/geneve.h>
#endif
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] =
	"QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
		 "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr[2];
	u32 umac_val[2];
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] = {
	[BCM57710]	= { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711]	= { "QLogic BCM57711 10 Gigabit PCIe" },
	[BCM57711E]	= { "QLogic BCM57711E 10 Gigabit PCIe" },
	[BCM57712]	= { "QLogic BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF]	= { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF]	= { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800]	= { "QLogic BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF]	= { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF]	= { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810]	= { "QLogic BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF]	= { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF]	= { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10]	= { "QLogic BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20]	= { "QLogic BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811]	= { "QLogic BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF]	= { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O]	= { "QLogic BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF]	= { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/* Forward declaration */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

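/* General service functions: the storm_memset_*() helpers below write
 * driver state into the STORM processors' internal memories. A DMA
 * address is written as two 32-bit halves, low dword first.
 */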
static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

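/* Indirect register access through the PCI config space GRC window: program
 * the target GRC address, transfer the data dword, then restore the window
 * to PCICFG_VENDOR_ID_OFFSET.
 */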
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

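/* Copy a DMAE command into the DMAE command memory and write 1 to the
 * corresponding "GO" register to start the transfer. Callers are expected
 * to serialize access to the channel (bnx2x_issue_dmae_with_comp() below
 * does so via bp->dmae_lock).
 */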
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

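/* Post a DMAE command and busy-wait for its completion write-back in *comp.
 * Returns 0 on success, DMAE_TIMEOUT or DMAE_PCI_ERROR otherwise.
 */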
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * as long as this code is called both from syscall context and
	 * from ndo_set_rx_mode() flow that may be called from BH.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);

	return rc;
}

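/* DMA len32 dwords from host memory (dma_addr) to the GRC address dst_addr.
 * While DMAE is not ready yet, falls back to indirect/string register
 * writes of the prepared wb_data buffer.
 */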
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

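/* DMA len32 dwords from the GRC address src_addr into the slowpath wb_data
 * buffer. While DMAE is not ready yet, falls back to plain register reads.
 */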
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

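/* Write a buffer of arbitrary length via DMAE, splitting the transfer into
 * chunks of at most DMAE_LEN32_WR_MAX dwords.
 */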
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
	XSTORM,
	TSTORM,
	CSTORM,
	USTORM,
	MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
					      enum storms storm,
					      int entry)
{
	switch (storm) {
	case XSTORM:
		return XSTORM_ASSERT_LIST_OFFSET(entry);
	case TSTORM:
		return TSTORM_ASSERT_LIST_OFFSET(entry);
	case CSTORM:
		return CSTORM_ASSERT_LIST_OFFSET(entry);
	case USTORM:
		return USTORM_ASSERT_LIST_OFFSET(entry);
	case MAX_STORMS:
	default:
		BNX2X_ERR("unknown storm\n");
	}
	return -EINVAL;
}

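/* Scan the assert lists of all four STORMs and log every valid entry;
 * returns the number of asserts found.
 */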
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	enum storms storm;
	u32 regs[REGS_IN_ENTRY];
	u32 bar_storm_intmem[STORMS_NUM] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};
	u32 storm_assert_list_index[STORMS_NUM] = {
		XSTORM_ASSERT_LIST_INDEX_OFFSET,
		TSTORM_ASSERT_LIST_INDEX_OFFSET,
		CSTORM_ASSERT_LIST_INDEX_OFFSET,
		USTORM_ASSERT_LIST_INDEX_OFFSET
	};
	char *storms_string[STORMS_NUM] = {
		"XSTORM",
		"TSTORM",
		"CSTORM",
		"USTORM"
	};

	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
				   storm_assert_list_index[storm]);
		if (last_idx)
			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
				  storms_string[storm], last_idx);

		/* print the asserts */
		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
			/* read a single assert entry */
			for (j = 0; j < REGS_IN_ENTRY; j++)
				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
					  bnx2x_get_assert_list_entry(bp,
								      storm,
								      i) +
					  sizeof(u32) * j);

			/* log entry if it contains a valid assert */
			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storms_string[storm], i, regs[3],
					  regs[2], regs[1], regs[0]);
				rc++;
			} else {
				break;
			}
		}
	}

	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
		  CHIP_IS_E1(bp) ? "everest1" :
		  CHIP_IS_E1H(bp) ? "everest1h" :
		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
		  BCM_5710_FW_MAJOR_VERSION,
		  BCM_5710_FW_MINOR_VERSION,
		  BCM_5710_FW_REVISION_VERSION);

	return rc;
}

#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

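/* Dump the MCP (management CPU) trace buffer at the given printk level:
 * the buffer is cyclic, so the region after the current mark is printed
 * first, then the region before it.
 */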
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		      (bp->common.bc_ver & 0xff0000) >> 16,
		      (bp->common.bc_ver & 0xff00) >> 8,
		      (bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	/* sanity */
	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* In E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability; it's forbidden to disable
	 * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the
		 * mask register to prevent HC from sending interrupts after
		 * we exit the function.
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

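/* Dump driver, status block and ring state for debugging a fatal error.
 * When disable_int is true, interrupts are masked first so the dumped
 * state does not change underneath us.
 */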
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	if (IS_PF(bp) && disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	if (IS_PF(bp)) {
		struct host_sp_status_block *def_sb = bp->def_status_blk;
		int data_size, cstorm_offset;

		BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
			  bp->def_idx, bp->def_att_idx, bp->attn_state,
			  bp->spq_prod_idx, bp->stats_counter);
		BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
			  def_sb->atten_status_block.attn_bits,
			  def_sb->atten_status_block.attn_bits_ack,
			  def_sb->atten_status_block.status_block_id,
			  def_sb->atten_status_block.attn_bits_index);
		BNX2X_ERR(" def (");
		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
			pr_cont("0x%x%s",
				def_sb->sp_sb.index_values[i],
				(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

		data_size = sizeof(struct hc_sp_status_block_data) /
			    sizeof(u32);
		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
		for (i = 0; i < data_size; i++)
			*((u32 *)&sp_sb_data + i) =
				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
				       i * sizeof(u32));

		pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
			sp_sb_data.igu_sb_id,
			sp_sb_data.igu_seg_id,
			sp_sb_data.p_func.pf_id,
			sp_sb_data.p_func.vnic_id,
			sp_sb_data.p_func.vf_id,
			sp_sb_data.p_func.vf_valid,
			sp_sb_data.state);
	}

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos) {
			if (!fp->txdata_ptr[cos])
				break;

			txdata = *fp->txdata_ptr[cos];

			if (!txdata.tx_cons_sb)
				continue;

			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");

		/* VF cannot access FW reflection of the status block */
		if (IS_VF(bp))
			continue;

		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;

		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indexes data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (IS_PF(bp)) {
		/* event queue */
		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
		for (i = 0; i < NUM_EQ_DESC; i++) {
			u32 *data = (u32 *)&bp->eq_ring[i].message.data;

			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
				  i, bp->eq_ring[i].message.opcode,
				  bp->eq_ring[i].message.error);
			BNX2X_ERR("data: %x %x %x\n",
				  data[0], data[1], data[2]);
		}
	}

	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			if (!fp->txdata_ptr[cos])
				break;

			if (!txdata->tx_cons_sb)
				continue;

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	if (IS_PF(bp)) {
		bnx2x_fw_dump(bp);
		bnx2x_mc_assert(bp);
	}
	BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000
#define FLR_WAIT_INTERVAL	50
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL)

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

/* Common routines with VF FLR cleanup */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

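/* Send the FW a "final cleanup" aggregated interrupt command for clnup_func
 * and poll the completion flag in CSTORM internal memory.
 * Returns 0 on success, 1 on failure.
 */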
int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}

	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    CFC_REG_NUM_LCIDS_INSIDE_PF,
					    "CFC PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    DORQ_REG_PF_USAGE_CNT,
					    "DQ PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
					    "QM PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for Timers VNIC usage-counters to zero (until TM cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
					    "Timers VNIC usage counter timed out",
					    poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
					    "Timers NUM_SCANS usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    dmae_reg_go_c[INIT_DMAE_C(bp)],
					    "DMAE command register timed out",
					    poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

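/* Clean up after a PF FLR (Function Level Reset): re-enable target reads,
 * wait for the HW usage counters to drop to zero, issue the FW final
 * cleanup, verify the transmit buffers were flushed and no PCIe
 * transactions are pending, then re-enable the PF as master.
 */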
static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/* Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	/* ensure that HC_CONFIG is written before leading/trailing edge
	 * config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	/* Clean previous status - need to configure igu prior to ack */
	if ((!msix) || single_msix) {
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
		bnx2x_ack_int(bp);
	}

	val |= IGU_PF_CONF_FUNC_EN;

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}

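/**
 * bnx2x_trylock_hw_lock - try to take a HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 *
 * Returns true if it managed to take the lock, false otherwise.
 */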
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

/* Returns the recovery leader resource id according to the engine this
 * function belongs to.
 */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

/* Tries to take the leader lock for the current engine */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);

/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* mark with 1 (to the sp_task) that an interrupt occurred */
	atomic_set(&bp->interrupt_occurred, 1);

	/* The sp_task must execute only after this flag is written: make
	 * sure the write is visible before the work is queued.
	 */
	smp_wmb();

	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

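/* Handle a slow-path (ramrod) completion reported on an Rx CQE: map the FW
 * command to a driver queue command, complete it on the queue state object
 * and release the slow-path queue credit.
 */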
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slow-path object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command the first
		 * place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end.
		 *
		 * Mark pending ACK to MCP bit before clearing the update
		 * pending bit, so that both bits are never clear at once;
		 * at the end of load/unload the driver checks that sp_state
		 * is cleared, and this order prevents races.
		 */
		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		/* schedule the sp task as mcp ack is required */
		bnx2x_schedule_sp_task(bp);
	}

	return;
}

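/* Legacy INTx/MSI interrupt handler: demultiplexes the status bits to the
 * per-queue NAPI contexts, CNIC and the slow-path task.
 */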
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
		if (status & mask) {
			/* Handle Rx/Tx according to SB id */
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops && (bp->cnic_eth_dev.drv_state &
				      CNIC_DRV_STATE_HANDLES_IRQ))
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		/* schedule sp task to perform default status block work, ack
		 * attentions and enable interrupts.
		 */
		bnx2x_schedule_sp_task(bp);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

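/* Acquire a HW lock, retrying for roughly 5 seconds (1000 iterations of
 * usleep_range(5000, 10000)) before giving up with -EAGAIN.
 */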
1988int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1989{
1990 u32 lock_status;
1991 u32 resource_bit = (1 << resource);
1992 int func = BP_FUNC(bp);
1993 u32 hw_lock_control_reg;
1994 int cnt;
1995
1996
1997 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1998 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1999 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2000 return -EINVAL;
2001 }
2002
2003 if (func <= 5) {
2004 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2005 } else {
2006 hw_lock_control_reg =
2007 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2008 }
2009
2010
2011 lock_status = REG_RD(bp, hw_lock_control_reg);
2012 if (lock_status & resource_bit) {
2013 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
2014 lock_status, resource_bit);
2015 return -EEXIST;
2016 }
2017
2018
2019 for (cnt = 0; cnt < 1000; cnt++) {
2020
2021 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2022 lock_status = REG_RD(bp, hw_lock_control_reg);
2023 if (lock_status & resource_bit)
2024 return 0;
2025
2026 usleep_range(5000, 10000);
2027 }
2028 BNX2X_ERR("Timeout\n");
2029 return -EAGAIN;
2030}
2031
2032int bnx2x_release_leader_lock(struct bnx2x *bp)
2033{
2034 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2035}
2036
2037int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2038{
2039 u32 lock_status;
2040 u32 resource_bit = (1 << resource);
2041 int func = BP_FUNC(bp);
2042 u32 hw_lock_control_reg;
2043
2044
2045 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2046 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2047 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2048 return -EINVAL;
2049 }
2050
2051 if (func <= 5) {
2052 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2053 } else {
2054 hw_lock_control_reg =
2055 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2056 }
2057
2058
2059 lock_status = REG_RD(bp, hw_lock_control_reg);
2060 if (!(lock_status & resource_bit)) {
2061 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2062 lock_status, resource_bit);
2063 return -EFAULT;
2064 }
2065
2066 REG_WR(bp, hw_lock_control_reg, resource_bit);
2067 return 0;
2068}
2069
2070int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2071{
2072
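/* The GPIO should be swapped if the swap register is set and active */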
2073 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2074 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2075 int gpio_shift = gpio_num +
2076 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2077 u32 gpio_mask = (1 << gpio_shift);
2078 u32 gpio_reg;
2079 int value;
2080
2081 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2082 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2083 return -EINVAL;
2084 }
2085
2086
2087 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2088
2089
2090 if ((gpio_reg & gpio_mask) == gpio_mask)
2091 value = 1;
2092 else
2093 value = 0;
2094
2095 return value;
2096}
2097
2098int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2099{
2100
2101 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2102 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2103 int gpio_shift = gpio_num +
2104 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2105 u32 gpio_mask = (1 << gpio_shift);
2106 u32 gpio_reg;
2107
2108 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2109 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2110 return -EINVAL;
2111 }
2112
2113 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2114
2115 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2116
2117 switch (mode) {
2118 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2119 DP(NETIF_MSG_LINK,
2120 "Set GPIO %d (shift %d) -> output low\n",
2121 gpio_num, gpio_shift);
2122
2123 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2124 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2125 break;
2126
2127 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2128 DP(NETIF_MSG_LINK,
2129 "Set GPIO %d (shift %d) -> output high\n",
2130 gpio_num, gpio_shift);
2131
2132 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2133 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2134 break;
2135
2136 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2137 DP(NETIF_MSG_LINK,
2138 "Set GPIO %d (shift %d) -> input\n",
2139 gpio_num, gpio_shift);
2140
2141 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2142 break;
2143
2144 default:
2145 break;
2146 }
2147
2148 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2149 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2150
2151 return 0;
2152}
2153
2154int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2155{
2156 u32 gpio_reg = 0;
2157 int rc = 0;
2158
2159
2160
2161 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2162
2163 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2164 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2165 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2166 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2167
2168 switch (mode) {
2169 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2170 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2171
2172 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2173 break;
2174
2175 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2176 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2177
2178 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2179 break;
2180
2181 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2182 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2183
2184 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2185 break;
2186
2187 default:
2188 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2189 rc = -EINVAL;
2190 break;
2191 }
2192
2193 if (rc == 0)
2194 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2195
2196 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2197
2198 return rc;
2199}
2200
2201int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2202{
2203
2204 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2205 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2206 int gpio_shift = gpio_num +
2207 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2208 u32 gpio_mask = (1 << gpio_shift);
2209 u32 gpio_reg;
2210
2211 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2212 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2213 return -EINVAL;
2214 }
2215
2216 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2217
2218 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2219
2220 switch (mode) {
2221 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2222 DP(NETIF_MSG_LINK,
2223 "Clear GPIO INT %d (shift %d) -> output low\n",
2224 gpio_num, gpio_shift);
2225
2226 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2227 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2228 break;
2229
2230 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2231 DP(NETIF_MSG_LINK,
2232 "Set GPIO INT %d (shift %d) -> output high\n",
2233 gpio_num, gpio_shift);
2234
2235 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2236 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2237 break;
2238
2239 default:
2240 break;
2241 }
2242
2243 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2244 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2245
2246 return 0;
2247}
2248
2249static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2250{
2251 u32 spio_reg;
2252
2253
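/* Only 2 SPIOs are configurable */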
2254 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2255 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2256 return -EINVAL;
2257 }
2258
2259 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2260
2261 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2262
2263 switch (mode) {
2264 case MISC_SPIO_OUTPUT_LOW:
2265 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2266
2267 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2268 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2269 break;
2270
2271 case MISC_SPIO_OUTPUT_HIGH:
2272 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2273
2274 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2275 spio_reg |= (spio << MISC_SPIO_SET_POS);
2276 break;
2277
2278 case MISC_SPIO_INPUT_HI_Z:
2279 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2280
2281 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2282 break;
2283
2284 default:
2285 break;
2286 }
2287
2288 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2289 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2290
2291 return 0;
2292}
2293
2294void bnx2x_calc_fc_adv(struct bnx2x *bp)
2295{
2296 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2297
2298 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2299 ADVERTISED_Pause);
2300 switch (bp->link_vars.ieee_fc &
2301 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2302 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2303 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2304 ADVERTISED_Pause);
2305 break;
2306
2307 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2308 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2309 break;
2310
2311 default:
2312 break;
2313 }
2314}
2315
2316static void bnx2x_set_requested_fc(struct bnx2x *bp)
2317{
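/* Initialize link parameters structure variables.
 * It is recommended to turn off RX FC for jumbo frames
 * for better performance.
 */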
2322 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2323 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2324 else
2325 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2326}
2327
2328static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2329{
2330 u32 pause_enabled = 0;
2331
2332 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2333 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2334 pause_enabled = 1;
2335
2336 REG_WR(bp, BAR_USTRORM_INTMEM +
2337 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2338 pause_enabled);
2339 }
2340
2341 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2342 pause_enabled ? "enabled" : "disabled");
2343}
2344
2345int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2346{
2347 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2348 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2349
2350 if (!BP_NOMCP(bp)) {
2351 bnx2x_set_requested_fc(bp);
2352 bnx2x_acquire_phy_lock(bp);
2353
2354 if (load_mode == LOAD_DIAG) {
2355 struct link_params *lp = &bp->link_params;
2356 lp->loopback_mode = LOOPBACK_XGXS;
2357
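/* Prefer doing PHY loopback at the highest configured speed */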
2358 if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2359 if (lp->speed_cap_mask[cfx_idx] &
2360 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2361 lp->req_line_speed[cfx_idx] =
2362 SPEED_20000;
2363 else if (lp->speed_cap_mask[cfx_idx] &
2364 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2365 lp->req_line_speed[cfx_idx] =
2366 SPEED_10000;
2367 else
2368 lp->req_line_speed[cfx_idx] =
2369 SPEED_1000;
2370 }
2371 }
2372
2373 if (load_mode == LOAD_LOOPBACK_EXT) {
2374 struct link_params *lp = &bp->link_params;
2375 lp->loopback_mode = LOOPBACK_EXT;
2376 }
2377
2378 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2379
2380 bnx2x_release_phy_lock(bp);
2381
2382 bnx2x_init_dropless_fc(bp);
2383
2384 bnx2x_calc_fc_adv(bp);
2385
2386 if (bp->link_vars.link_up) {
2387 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2388 bnx2x_link_report(bp);
2389 }
2390 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2391 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2392 return rc;
2393 }
2394 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2395 return -EINVAL;
2396}
2397
2398void bnx2x_link_set(struct bnx2x *bp)
2399{
2400 if (!BP_NOMCP(bp)) {
2401 bnx2x_acquire_phy_lock(bp);
2402 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2403 bnx2x_release_phy_lock(bp);
2404
2405 bnx2x_init_dropless_fc(bp);
2406
2407 bnx2x_calc_fc_adv(bp);
2408 } else
2409 BNX2X_ERR("Bootcode is missing - can not set link\n");
2410}
2411
2412static void bnx2x__link_reset(struct bnx2x *bp)
2413{
2414 if (!BP_NOMCP(bp)) {
2415 bnx2x_acquire_phy_lock(bp);
2416 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2417 bnx2x_release_phy_lock(bp);
2418 } else
2419 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2420}
2421
2422void bnx2x_force_link_reset(struct bnx2x *bp)
2423{
2424 bnx2x_acquire_phy_lock(bp);
2425 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2426 bnx2x_release_phy_lock(bp);
2427}
2428
2429u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2430{
2431 u8 rc = 0;
2432
2433 if (!BP_NOMCP(bp)) {
2434 bnx2x_acquire_phy_lock(bp);
2435 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2436 is_serdes);
2437 bnx2x_release_phy_lock(bp);
2438 } else
2439 BNX2X_ERR("Bootcode is missing - can not test link\n");
2440
2441 return rc;
2442}
2443
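/* Calculate each vNIC's min rate and decide whether per-port fairness
 * should be enabled: hidden vNICs get a min rate of 0, vNICs with no
 * configured minimum get DEF_MIN_RATE, and fairness is disabled when
 * ETS is enabled or when all configured min rates are zero.
 */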
2453static void bnx2x_calc_vn_min(struct bnx2x *bp,
2454 struct cmng_init_input *input)
2455{
2456 int all_zero = 1;
2457 int vn;
2458
2459 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2460 u32 vn_cfg = bp->mf_config[vn];
2461 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2462 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2463
2464
2465 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2466 vn_min_rate = 0;
2467
2468 else if (!vn_min_rate)
2469 vn_min_rate = DEF_MIN_RATE;
2470 else
2471 all_zero = 0;
2472
2473 input->vnic_min_rate[vn] = vn_min_rate;
2474 }
2475
2476
2477 if (BNX2X_IS_ETS_ENABLED(bp)) {
2478 input->flags.cmng_enables &=
2479 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2480 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2481 } else if (all_zero) {
2482 input->flags.cmng_enables &=
2483 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2484 DP(NETIF_MSG_IFUP,
2485 "All MIN values are zeroes, fairness will be disabled\n");
2486 } else
2487 input->flags.cmng_enables |=
2488 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2489}
2490
2491static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2492 struct cmng_init_input *input)
2493{
2494 u16 vn_max_rate;
2495 u32 vn_cfg = bp->mf_config[vn];
2496
2497 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2498 vn_max_rate = 0;
2499 else {
2500 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2501
2502 if (IS_MF_PERCENT_BW(bp)) {
2503
2504 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2505 } else
2506
2507 vn_max_rate = maxCfg * 100;
2508 }
2509
2510 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2511
2512 input->vnic_max_rate[vn] = vn_max_rate;
2513}
2514
2515static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2516{
2517 if (CHIP_REV_IS_SLOW(bp))
2518 return CMNG_FNS_NONE;
2519 if (IS_MF(bp))
2520 return CMNG_FNS_MINMAX;
2521
2522 return CMNG_FNS_NONE;
2523}
2524
2525void bnx2x_read_mf_cfg(struct bnx2x *bp)
2526{
2527 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2528
2529 if (BP_NOMCP(bp))
2530 return;
2531
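/* The absolute function number per path is:
 * 2-port mode: abs_func = 2 * vn + BP_PORT + BP_PATH (4 functions per port)
 * 4-port mode: abs_func = 4 * vn + 2 * BP_PORT + BP_PATH (2 functions per port)
 */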
2543 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2544 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2545
2546 if (func >= E1H_FUNC_MAX)
2547 break;
2548
2549 bp->mf_config[vn] =
2550 MF_CFG_RD(bp, func_mf_config[func].config);
2551 }
2552 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2553 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2554 bp->flags |= MF_FUNC_DIS;
2555 } else {
2556 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2557 bp->flags &= ~MF_FUNC_DIS;
2558 }
2559}
2560
2561static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2562{
2563 struct cmng_init_input input;
2564 memset(&input, 0, sizeof(struct cmng_init_input));
2565
2566 input.port_rate = bp->link_vars.line_speed;
2567
2568 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2569 int vn;
2570
2571
2572 if (read_cfg)
2573 bnx2x_read_mf_cfg(bp);
2574
2575
2576 bnx2x_calc_vn_min(bp, &input);
2577
2578
2579 if (bp->port.pmf)
2580 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2581 bnx2x_calc_vn_max(bp, vn, &input);
2582
2583
2584 input.flags.cmng_enables |=
2585 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2586
2587 bnx2x_init_cmng(&input, &bp->cmng);
2588 return;
2589 }
2590
2591
2592 DP(NETIF_MSG_IFUP,
2593 "rate shaping and fairness are disabled\n");
2594}
2595
2596static void storm_memset_cmng(struct bnx2x *bp,
2597 struct cmng_init *cmng,
2598 u8 port)
2599{
2600 int vn;
2601 size_t size = sizeof(struct cmng_struct_per_port);
2602
2603 u32 addr = BAR_XSTRORM_INTMEM +
2604 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2605
2606 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2607
2608 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2609 int func = func_by_vn(bp, vn);
2610
2611 addr = BAR_XSTRORM_INTMEM +
2612 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2613 size = sizeof(struct rate_shaping_vars_per_vn);
2614 __storm_memset_struct(bp, addr, size,
2615 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2616
2617 addr = BAR_XSTRORM_INTMEM +
2618 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2619 size = sizeof(struct fairness_vars_per_vn);
2620 __storm_memset_struct(bp, addr, size,
2621 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2622 }
2623}
2624
2625
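/* Init congestion management in HW according to the local configuration */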
2626void bnx2x_set_local_cmng(struct bnx2x *bp)
2627{
2628 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2629
2630 if (cmng_fns != CMNG_FNS_NONE) {
2631 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2632 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2633 } else {
2634
2635 DP(NETIF_MSG_IFUP,
2636 "single function mode without fairness\n");
2637 }
2638}
2639
2640
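/* Called upon link interrupt */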
2641static void bnx2x_link_attn(struct bnx2x *bp)
2642{
2643
2644 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2645
2646 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2647
2648 bnx2x_init_dropless_fc(bp);
2649
2650 if (bp->link_vars.link_up) {
2651
2652 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2653 struct host_port_stats *pstats;
2654
2655 pstats = bnx2x_sp(bp, port_stats);
2656
2657 memset(&(pstats->mac_stx[0]), 0,
2658 sizeof(struct mac_stx));
2659 }
2660 if (bp->state == BNX2X_STATE_OPEN)
2661 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2662 }
2663
2664 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2665 bnx2x_set_local_cmng(bp);
2666
2667 __bnx2x_link_report(bp);
2668
2669 if (IS_MF(bp))
2670 bnx2x_link_sync_notify(bp);
2671}
2672
2673void bnx2x__link_status_update(struct bnx2x *bp)
2674{
2675 if (bp->state != BNX2X_STATE_OPEN)
2676 return;
2677
2678
2679 if (IS_PF(bp)) {
2680 bnx2x_dcbx_pmf_update(bp);
2681 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2682 if (bp->link_vars.link_up)
2683 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2684 else
2685 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2686
2687 bnx2x_link_report(bp);
2688
2689 } else {
2690 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2691 SUPPORTED_10baseT_Full |
2692 SUPPORTED_100baseT_Half |
2693 SUPPORTED_100baseT_Full |
2694 SUPPORTED_1000baseT_Full |
2695 SUPPORTED_2500baseX_Full |
2696 SUPPORTED_10000baseT_Full |
2697 SUPPORTED_TP |
2698 SUPPORTED_FIBRE |
2699 SUPPORTED_Autoneg |
2700 SUPPORTED_Pause |
2701 SUPPORTED_Asym_Pause);
2702 bp->port.advertising[0] = bp->port.supported[0];
2703
2704 bp->link_params.bp = bp;
2705 bp->link_params.port = BP_PORT(bp);
2706 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2707 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2708 bp->link_params.req_line_speed[0] = SPEED_10000;
2709 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2710 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2711 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2712 bp->link_vars.line_speed = SPEED_10000;
2713 bp->link_vars.link_status =
2714 (LINK_STATUS_LINK_UP |
2715 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2716 bp->link_vars.link_up = 1;
2717 bp->link_vars.duplex = DUPLEX_FULL;
2718 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2719 __bnx2x_link_report(bp);
2720
2721 bnx2x_sample_bulletin(bp);
2722
2723
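/* If the bulletin board did not have an update for the link status,
 * __bnx2x_link_report will report the current status but will NOT
 * duplicate a report that was already issued while sampling the
 * bulletin board.
 */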
2728 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2729 }
2730}
2731
2732static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2733 u16 vlan_val, u8 allowed_prio)
2734{
2735 struct bnx2x_func_state_params func_params = {NULL};
2736 struct bnx2x_func_afex_update_params *f_update_params =
2737 &func_params.params.afex_update;
2738
2739 func_params.f_obj = &bp->func_obj;
2740 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2741
2742
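/* No need to wait for RAMROD completion, so don't
 * set the RAMROD_COMP_WAIT flag
 */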
2746 f_update_params->vif_id = vifid;
2747 f_update_params->afex_default_vlan = vlan_val;
2748 f_update_params->allowed_priorities = allowed_prio;
2749
2750
2751 if (bnx2x_func_state_change(bp, &func_params) < 0)
2752 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2753
2754 return 0;
2755}
2756
2757static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2758 u16 vif_index, u8 func_bit_map)
2759{
2760 struct bnx2x_func_state_params func_params = {NULL};
2761 struct bnx2x_func_afex_viflists_params *update_params =
2762 &func_params.params.afex_viflists;
2763 int rc;
2764 u32 drv_msg_code;
2765
2766
2767 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2768 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2769 cmd_type);
2770
2771 func_params.f_obj = &bp->func_obj;
2772 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2773
2774
2775 update_params->afex_vif_list_command = cmd_type;
2776 update_params->vif_list_index = vif_index;
2777 update_params->func_bit_map =
2778 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2779 update_params->func_to_clear = 0;
2780 drv_msg_code =
2781 (cmd_type == VIF_LIST_RULE_GET) ?
2782 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2783 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2784
2785
2786
2787
2788 rc = bnx2x_func_state_change(bp, &func_params);
2789 if (rc < 0)
2790 bnx2x_fw_command(bp, drv_msg_code, 0);
2791
2792 return 0;
2793}
2794
2795static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2796{
2797 struct afex_stats afex_stats;
2798 u32 func = BP_ABS_FUNC(bp);
2799 u32 mf_config;
2800 u16 vlan_val;
2801 u32 vlan_prio;
2802 u16 vif_id;
2803 u8 allowed_prio;
2804 u8 vlan_mode;
2805 u32 addr_to_write, vifid, addrs, stats_type, i;
2806
2807 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2808 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2809 DP(BNX2X_MSG_MCP,
2810 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2811 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2812 }
2813
2814 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2815 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2816 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2817 DP(BNX2X_MSG_MCP,
2818 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2819 vifid, addrs);
2820 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2821 addrs);
2822 }
2823
2824 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2825 addr_to_write = SHMEM2_RD(bp,
2826 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2827 stats_type = SHMEM2_RD(bp,
2828 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2829
2830 DP(BNX2X_MSG_MCP,
2831 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2832 addr_to_write);
2833
2834 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2835
2836
2837 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2838 REG_WR(bp, addr_to_write + i*sizeof(u32),
2839 *(((u32 *)(&afex_stats))+i));
2840
2841
2842 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2843 }
2844
2845 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2846 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2847 bp->mf_config[BP_VN(bp)] = mf_config;
2848 DP(BNX2X_MSG_MCP,
2849 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2850 mf_config);
2851
2852
2853 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2854
2855 struct cmng_init_input cmng_input;
2856 struct rate_shaping_vars_per_vn m_rs_vn;
2857 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2858 u32 addr = BAR_XSTRORM_INTMEM +
2859 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2860
2861 bp->mf_config[BP_VN(bp)] = mf_config;
2862
2863 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2864 m_rs_vn.vn_counter.rate =
2865 cmng_input.vnic_max_rate[BP_VN(bp)];
2866 m_rs_vn.vn_counter.quota =
2867 (m_rs_vn.vn_counter.rate *
2868 RS_PERIODIC_TIMEOUT_USEC) / 8;
2869
2870 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2871
2872
2873 vif_id =
2874 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2875 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2876 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2877 vlan_val =
2878 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2879 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2880 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2881 vlan_prio = (mf_config &
2882 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2883 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2884 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2885 vlan_mode =
2886 (MF_CFG_RD(bp,
2887 func_mf_config[func].afex_config) &
2888 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2889 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2890 allowed_prio =
2891 (MF_CFG_RD(bp,
2892 func_mf_config[func].afex_config) &
2893 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2894 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2895
2896
2897 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2898 allowed_prio))
2899 return;
2900
2901 bp->afex_def_vlan_tag = vlan_val;
2902 bp->afex_vlan_mode = vlan_mode;
2903 } else {
2904
2905 bnx2x_link_report(bp);
2906
2907
2908 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2909
2910
2911 bp->afex_def_vlan_tag = -1;
2912 }
2913 }
2914}
2915
2916static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2917{
2918 struct bnx2x_func_switch_update_params *switch_update_params;
2919 struct bnx2x_func_state_params func_params;
2920
2921 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2922 switch_update_params = &func_params.params.switch_update;
2923 func_params.f_obj = &bp->func_obj;
2924 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2925
2926 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2927 int func = BP_ABS_FUNC(bp);
2928 u32 val;
2929
2930
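/* Re-learn the S-tag from shmem */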
2931 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2932 FUNC_MF_CFG_E1HOV_TAG_MASK;
2933 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2934 bp->mf_ov = val;
2935 } else {
2936 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2937 goto fail;
2938 }
2939
2940
2941 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2942 bp->mf_ov);
2943
2944
2945 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2946 &switch_update_params->changes);
2947 switch_update_params->vlan = bp->mf_ov;
2948
2949 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2950 BNX2X_ERR("Failed to configure FW with S-tag change to %02x\n",
2951 bp->mf_ov);
2952 goto fail;
2953 } else {
2954 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2955 bp->mf_ov);
2956 }
2957 } else {
2958 goto fail;
2959 }
2960
2961 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2962 return;
2963fail:
2964 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2965}
2966
2967static void bnx2x_pmf_update(struct bnx2x *bp)
2968{
2969 int port = BP_PORT(bp);
2970 u32 val;
2971
2972 bp->port.pmf = 1;
2973 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2974
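/* We need the barrier to ensure the ordering between the writing to
 * bp->port.pmf here and reading it from bnx2x_periodic_task().
 */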
2979 smp_mb();
2980
2981
2982 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2983
2984 bnx2x_dcbx_pmf_update(bp);
2985
2986
2987 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2988 if (bp->common.int_block == INT_BLOCK_HC) {
2989 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2990 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2991 } else if (!CHIP_IS_E1x(bp)) {
2992 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2993 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2994 }
2995
2996 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2997}
2998
2999
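/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp:		driver handle
 * @command:	request
 * @param:	request's parameter
 *
 * Blocks until there is a reply.
 */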
3008u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3009{
3010 int mb_idx = BP_FW_MB_IDX(bp);
3011 u32 seq;
3012 u32 rc = 0;
3013 u32 cnt = 1;
3014 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3015
3016 mutex_lock(&bp->fw_mb_mutex);
3017 seq = ++bp->fw_seq;
3018 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3019 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3020
3021 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3022 (command | seq), param);
3023
3024 do {
3025
3026 msleep(delay);
3027
3028 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3029
3030
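/* Give the FW up to 5 seconds (500 * 10ms) */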
3031 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3032
3033 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3034 cnt*delay, rc, seq);
3035
3036
3037 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3038 rc &= FW_MSG_CODE_MASK;
3039 else {
3040
3041 BNX2X_ERR("FW failed to respond!\n");
3042 bnx2x_fw_dump(bp);
3043 rc = 0;
3044 }
3045 mutex_unlock(&bp->fw_mb_mutex);
3046
3047 return rc;
3048}
3049
3050static void storm_memset_func_cfg(struct bnx2x *bp,
3051 struct tstorm_eth_function_common_config *tcfg,
3052 u16 abs_fid)
3053{
3054 size_t size = sizeof(struct tstorm_eth_function_common_config);
3055
3056 u32 addr = BAR_TSTRORM_INTMEM +
3057 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3058
3059 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3060}
3061
3062void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3063{
3064 if (CHIP_IS_E1x(bp)) {
3065 struct tstorm_eth_function_common_config tcfg = {0};
3066
3067 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3068 }
3069
3070
3071 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3072 storm_memset_func_en(bp, p->func_id, 1);
3073
3074
3075 if (p->spq_active) {
3076 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3077 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3078 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3079 }
3080}
3081
3082
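/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */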
3091static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3092 struct bnx2x_fastpath *fp,
3093 bool zero_stats)
3094{
3095 unsigned long flags = 0;
3096
3097
3098 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3099
3100
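/* Tx-only connections collect statistics (on the same index as the
 * parent connection). The statistics are zeroed when the parent
 * connection is initialized.
 */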
3105 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3106 if (zero_stats)
3107 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3108
3109 if (bp->flags & TX_SWITCHING)
3110 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3111
3112 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3113 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3114
3115#ifdef BNX2X_STOP_ON_ERROR
3116 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3117#endif
3118
3119 return flags;
3120}
3121
3122static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3123 struct bnx2x_fastpath *fp,
3124 bool leading)
3125{
3126 unsigned long flags = 0;
3127
3128
3129 if (IS_MF_SD(bp))
3130 __set_bit(BNX2X_Q_FLG_OV, &flags);
3131
3132 if (IS_FCOE_FP(fp)) {
3133 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3134
3135 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3136 }
3137
3138 if (fp->mode != TPA_MODE_DISABLED) {
3139 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3140 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3141 if (fp->mode == TPA_MODE_GRO)
3142 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3143 }
3144
3145 if (leading) {
3146 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3147 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3148 }
3149
3150
3151 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3152
3153
3154 if (IS_MF_AFEX(bp))
3155 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3156
3157 return flags | bnx2x_get_common_flags(bp, fp, true);
3158}
3159
3160static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3161 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3162 u8 cos)
3163{
3164 gen_init->stat_id = bnx2x_stats_id(fp);
3165 gen_init->spcl_id = fp->cl_id;
3166
3167
3168 if (IS_FCOE_FP(fp))
3169 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3170 else
3171 gen_init->mtu = bp->dev->mtu;
3172
3173 gen_init->cos = cos;
3174
3175 gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3176}
3177
3178static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3179 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3180 struct bnx2x_rxq_setup_params *rxq_init)
3181{
3182 u8 max_sge = 0;
3183 u16 sge_sz = 0;
3184 u16 tpa_agg_size = 0;
3185
3186 if (fp->mode != TPA_MODE_DISABLED) {
3187 pause->sge_th_lo = SGE_TH_LO(bp);
3188 pause->sge_th_hi = SGE_TH_HI(bp);
3189
3190
3191 WARN_ON(bp->dropless_fc &&
3192 pause->sge_th_hi + FW_PREFETCH_CNT >
3193 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3194
3195 tpa_agg_size = TPA_AGG_SIZE;
3196 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3197 SGE_PAGE_SHIFT;
3198 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3199 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3200 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3201 }
3202
3203
3204 if (!CHIP_IS_E1(bp)) {
3205 pause->bd_th_lo = BD_TH_LO(bp);
3206 pause->bd_th_hi = BD_TH_HI(bp);
3207
3208 pause->rcq_th_lo = RCQ_TH_LO(bp);
3209 pause->rcq_th_hi = RCQ_TH_HI(bp);
3210
3211
3212
3213
3214 WARN_ON(bp->dropless_fc &&
3215 pause->bd_th_hi + FW_PREFETCH_CNT >
3216 bp->rx_ring_size);
3217 WARN_ON(bp->dropless_fc &&
3218 pause->rcq_th_hi + FW_PREFETCH_CNT >
3219 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3220
3221 pause->pri_map = 1;
3222 }
3223
3224
3225 rxq_init->dscr_map = fp->rx_desc_mapping;
3226 rxq_init->sge_map = fp->rx_sge_mapping;
3227 rxq_init->rcq_map = fp->rx_comp_mapping;
3228 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3229
3230
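/* This should be the maximum number of data bytes that may be
 * placed on the BD (not including paddings).
 */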
3233 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3234 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3235
3236 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3237 rxq_init->tpa_agg_sz = tpa_agg_size;
3238 rxq_init->sge_buf_sz = sge_sz;
3239 rxq_init->max_sges_pkt = max_sge;
3240 rxq_init->rss_engine_id = BP_FUNC(bp);
3241 rxq_init->mcast_engine_id = BP_FUNC(bp);
3242
3243
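/* Maximum number of simultaneous TPA aggregations for this queue.
 * For PF Clients it should be the maximum available number.
 * VF driver(s) may want to define it to a smaller value.
 */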
3248 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3249
3250 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3251 rxq_init->fw_sb_id = fp->fw_sb_id;
3252
3253 if (IS_FCOE_FP(fp))
3254 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3255 else
3256 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3257
3258
3259
3260 if (IS_MF_AFEX(bp)) {
3261 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3262 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3263 }
3264}
3265
3266static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3267 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3268 u8 cos)
3269{
3270 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3271 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3272 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3273 txq_init->fw_sb_id = fp->fw_sb_id;
3274
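/* Set the TSS leading client id for TX classification to be the
 * leading RSS client id.
 */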
3279 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3280
3281 if (IS_FCOE_FP(fp)) {
3282 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3283 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3284 }
3285}
3286
3287static void bnx2x_pf_init(struct bnx2x *bp)
3288{
3289 struct bnx2x_func_init_params func_init = {0};
3290 struct event_ring_data eq_data = { {0} };
3291
3292 if (!CHIP_IS_E1x(bp)) {
3293
3294
3295 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3296 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3297 (CHIP_MODE_IS_4_PORT(bp) ?
3298 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3299
3300 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3301 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3302 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3303 (CHIP_MODE_IS_4_PORT(bp) ?
3304 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3305 }
3306
3307 func_init.spq_active = true;
3308 func_init.pf_id = BP_FUNC(bp);
3309 func_init.func_id = BP_FUNC(bp);
3310 func_init.spq_map = bp->spq_mapping;
3311 func_init.spq_prod = bp->spq_prod_idx;
3312
3313 bnx2x_func_init(bp, &func_init);
3314
3315 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3316
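/* Congestion management values depend on the link rate.
 * There is no active link yet, so the initial link rate is set to 10 Gbps;
 * when the link comes up, the congestion management values are
 * re-calculated according to the actual link rate.
 */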
3323 bp->link_vars.line_speed = SPEED_10000;
3324 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3325
3326
3327 if (bp->port.pmf)
3328 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3329
3330
3331 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3332 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3333 eq_data.producer = bp->eq_prod;
3334 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3335 eq_data.sb_id = DEF_SB_ID;
3336 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3337}
3338
3339static void bnx2x_e1h_disable(struct bnx2x *bp)
3340{
3341 int port = BP_PORT(bp);
3342
3343 bnx2x_tx_disable(bp);
3344
3345 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3346}
3347
3348static void bnx2x_e1h_enable(struct bnx2x *bp)
3349{
3350 int port = BP_PORT(bp);
3351
3352 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3353 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3354
3355
3356 netif_tx_wake_all_queues(bp->dev);
3357
3358
3359
3360
3361
3362}
3363
3364#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3365
3366static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3367{
3368 struct eth_stats_info *ether_stat =
3369 &bp->slowpath->drv_info_to_mcp.ether_stat;
3370 struct bnx2x_vlan_mac_obj *mac_obj =
3371 &bp->sp_objs->mac_obj;
3372 int i;
3373
3374 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3375 ETH_STAT_INFO_VERSION_LEN);
3376
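/* Get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
 * mac_local field of the ether_stat struct. The base address is offset
 * by 2 bytes to account for the field being 8 bytes while a mac address
 * is only 6 bytes. Likewise, the stride for get_n_elements is 2 bytes to
 * compensate from the 6 bytes of a mac to the 8 bytes allocated in the
 * ether_stat struct, so the macs land in their proper positions.
 */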
3385 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3386 memset(ether_stat->mac_local + i, 0,
3387 sizeof(ether_stat->mac_local[0]));
3388 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3389 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3390 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3391 ETH_ALEN);
3392 ether_stat->mtu_size = bp->dev->mtu;
3393 if (bp->dev->features & NETIF_F_RXCSUM)
3394 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3395 if (bp->dev->features & NETIF_F_TSO)
3396 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3397 ether_stat->feature_flags |= bp->common.boot_mode;
3398
3399 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3400
3401 ether_stat->txq_size = bp->tx_ring_size;
3402 ether_stat->rxq_size = bp->rx_ring_size;
3403
3404#ifdef CONFIG_BNX2X_SRIOV
3405 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3406#endif
3407}
3408
3409static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3410{
3411 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3412 struct fcoe_stats_info *fcoe_stat =
3413 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3414
3415 if (!CNIC_LOADED(bp))
3416 return;
3417
3418 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3419
3420 fcoe_stat->qos_priority =
3421 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3422
3423
3424 if (!NO_FCOE(bp)) {
3425 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3426 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3427 tstorm_queue_statistics;
3428
3429 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3430 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3431 xstorm_queue_statistics;
3432
3433 struct fcoe_statistics_params *fw_fcoe_stat =
3434 &bp->fw_stats_data->fcoe;
3435
3436 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3437 fcoe_stat->rx_bytes_lo,
3438 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3439
3440 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3441 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3442 fcoe_stat->rx_bytes_lo,
3443 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3444
3445 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3446 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3447 fcoe_stat->rx_bytes_lo,
3448 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3449
3450 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3451 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3452 fcoe_stat->rx_bytes_lo,
3453 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3454
3455 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3456 fcoe_stat->rx_frames_lo,
3457 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3458
3459 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3460 fcoe_stat->rx_frames_lo,
3461 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3462
3463 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3464 fcoe_stat->rx_frames_lo,
3465 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3466
3467 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3468 fcoe_stat->rx_frames_lo,
3469 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3470
3471 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3472 fcoe_stat->tx_bytes_lo,
3473 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3474
3475 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3476 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3477 fcoe_stat->tx_bytes_lo,
3478 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3479
3480 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3481 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3482 fcoe_stat->tx_bytes_lo,
3483 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3484
3485 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3486 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3487 fcoe_stat->tx_bytes_lo,
3488 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3489
3490 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3491 fcoe_stat->tx_frames_lo,
3492 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3493
3494 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3495 fcoe_stat->tx_frames_lo,
3496 fcoe_q_xstorm_stats->ucast_pkts_sent);
3497
3498 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3499 fcoe_stat->tx_frames_lo,
3500 fcoe_q_xstorm_stats->bcast_pkts_sent);
3501
3502 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3503 fcoe_stat->tx_frames_lo,
3504 fcoe_q_xstorm_stats->mcast_pkts_sent);
3505 }
3506
3507
3508 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3509}
3510
3511static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3512{
3513 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3514 struct iscsi_stats_info *iscsi_stat =
3515 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3516
3517 if (!CNIC_LOADED(bp))
3518 return;
3519
3520 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3521 ETH_ALEN);
3522
3523 iscsi_stat->qos_priority =
3524 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3525
3526
3527 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3528}
3529
3530
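/* Called due to an MCP event (on pmf):
 * reread the new bandwidth configuration, configure FW and
 * notify the other functions about the change.
 */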
3535static void bnx2x_config_mf_bw(struct bnx2x *bp)
3536{
3537 if (bp->link_vars.link_up) {
3538 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3539 bnx2x_link_sync_notify(bp);
3540 }
3541 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3542}
3543
3544static void bnx2x_set_mf_bw(struct bnx2x *bp)
3545{
3546 bnx2x_config_mf_bw(bp);
3547 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3548}
3549
3550static void bnx2x_handle_eee_event(struct bnx2x *bp)
3551{
3552 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3553 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3554}
3555
3556#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3557#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3558
3559static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3560{
3561 enum drv_info_opcode op_code;
3562 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3563 bool release = false;
3564 int wait;
3565
3566
3567 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3568 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3569 return;
3570 }
3571
3572 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3573 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3574
3575
3576 mutex_lock(&bp->drv_info_mutex);
3577
3578 memset(&bp->slowpath->drv_info_to_mcp, 0,
3579 sizeof(union drv_info_to_mcp));
3580
3581 switch (op_code) {
3582 case ETH_STATS_OPCODE:
3583 bnx2x_drv_info_ether_stat(bp);
3584 break;
3585 case FCOE_STATS_OPCODE:
3586 bnx2x_drv_info_fcoe_stat(bp);
3587 break;
3588 case ISCSI_STATS_OPCODE:
3589 bnx2x_drv_info_iscsi_stat(bp);
3590 break;
3591 default:
3592
3593 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3594 goto out;
3595 }
3596
3597
3598
3599
3600 SHMEM2_WR(bp, drv_info_host_addr_lo,
3601 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3602 SHMEM2_WR(bp, drv_info_host_addr_hi,
3603 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3604
3605 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3606
3607
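/* Poll the MFW "read done" indication (when supported) so that buffer
 * ownership is kept until management has consumed the drv_info data.
 */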
3611 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3612 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3613 } else if (!bp->drv_info_mng_owner) {
3614 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3615
3616 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3617 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3618
3619
3620 if (indication & bit) {
3621 SHMEM2_WR(bp, mfw_drv_indication,
3622 indication & ~bit);
3623 release = true;
3624 break;
3625 }
3626
3627 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3628 }
3629 }
3630 if (!release) {
3631 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3632 bp->drv_info_mng_owner = true;
3633 }
3634
3635out:
3636 mutex_unlock(&bp->drv_info_mutex);
3637}
3638
3639static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3640{
3641 u8 vals[4];
3642 int i = 0;
3643
3644 if (bnx2x_format) {
3645 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3646 &vals[0], &vals[1], &vals[2], &vals[3]);
3647 if (i > 0)
3648 vals[0] -= '0';
3649 } else {
3650 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3651 &vals[0], &vals[1], &vals[2], &vals[3]);
3652 }
3653
3654 while (i < 4)
3655 vals[i++] = 0;
3656
3657 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3658}
3659
3660void bnx2x_update_mng_version(struct bnx2x *bp)
3661{
3662 u32 iscsiver = DRV_VER_NOT_LOADED;
3663 u32 fcoever = DRV_VER_NOT_LOADED;
3664 u32 ethver = DRV_VER_NOT_LOADED;
3665 int idx = BP_FW_MB_IDX(bp);
3666 u8 *version;
3667
3668 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3669 return;
3670
3671 mutex_lock(&bp->drv_info_mutex);
3672
3673 if (bp->drv_info_mng_owner)
3674 goto out;
3675
3676 if (bp->state != BNX2X_STATE_OPEN)
3677 goto out;
3678
3679
3680 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3681 if (!CNIC_LOADED(bp))
3682 goto out;
3683
3684
3685 memset(&bp->slowpath->drv_info_to_mcp, 0,
3686 sizeof(union drv_info_to_mcp));
3687 bnx2x_drv_info_iscsi_stat(bp);
3688 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3689 iscsiver = bnx2x_update_mng_version_utility(version, false);
3690
3691 memset(&bp->slowpath->drv_info_to_mcp, 0,
3692 sizeof(union drv_info_to_mcp));
3693 bnx2x_drv_info_fcoe_stat(bp);
3694 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3695 fcoever = bnx2x_update_mng_version_utility(version, false);
3696
3697out:
3698 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3699 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3700 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3701
3702 mutex_unlock(&bp->drv_info_mutex);
3703
3704 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3705 ethver, iscsiver, fcoever);
3706}
3707
3708void bnx2x_update_mfw_dump(struct bnx2x *bp)
3709{
3710 u32 drv_ver;
3711 u32 valid_dump;
3712
3713 if (!SHMEM2_HAS(bp, drv_info))
3714 return;
3715
3716
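/* Update driver load time, possibly broken in y2038 */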
3717 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3718
3719 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3720 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3721
3722 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3723
3724
3725 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3726
3727 if (valid_dump & FIRST_DUMP_VALID)
3728 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3729
3730 if (valid_dump & SECOND_DUMP_VALID)
3731 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3732}
3733
3734static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3735{
3736 u32 cmd_ok, cmd_fail;
3737
3738
3739 if (event & DRV_STATUS_DCC_EVENT_MASK &&
3740 event & DRV_STATUS_OEM_EVENT_MASK) {
3741 BNX2X_ERR("Received simultaneous events %08x\n", event);
3742 return;
3743 }
3744
3745 if (event & DRV_STATUS_DCC_EVENT_MASK) {
3746 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3747 cmd_ok = DRV_MSG_CODE_DCC_OK;
3748 } else {
3749 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3750 cmd_ok = DRV_MSG_CODE_OEM_OK;
3751 }
3752
3753 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3754
3755 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3756 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3757
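/* This is the only place besides the function initialization
 * where bp->flags can change, so it is done without any locks.
 */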
3761 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3762 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3763 bp->flags |= MF_FUNC_DIS;
3764
3765 bnx2x_e1h_disable(bp);
3766 } else {
3767 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3768 bp->flags &= ~MF_FUNC_DIS;
3769
3770 bnx2x_e1h_enable(bp);
3771 }
3772 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3773 DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3774 }
3775
3776 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3777 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3778 bnx2x_config_mf_bw(bp);
3779 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3780 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3781 }
3782
3783
3784 if (event)
3785 bnx2x_fw_command(bp, cmd_fail, 0);
3786 else
3787 bnx2x_fw_command(bp, cmd_ok, 0);
3788}
3789
3790
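/* Must be called under the spq lock */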
3791static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3792{
3793 struct eth_spe *next_spe = bp->spq_prod_bd;
3794
3795 if (bp->spq_prod_bd == bp->spq_last_bd) {
3796 bp->spq_prod_bd = bp->spq;
3797 bp->spq_prod_idx = 0;
3798 DP(BNX2X_MSG_SP, "end of spq\n");
3799 } else {
3800 bp->spq_prod_bd++;
3801 bp->spq_prod_idx++;
3802 }
3803 return next_spe;
3804}
3805
3806
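/* Must be called under the spq lock */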
3807static void bnx2x_sp_prod_update(struct bnx2x *bp)
3808{
3809 int func = BP_FUNC(bp);
3810
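/* Make sure that the BD data is updated before writing the producer:
 * BD data is written to the memory, the producer is read from the
 * memory, thus we need a full memory barrier to ensure the ordering.
 */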
3816 mb();
3817
3818 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3819 bp->spq_prod_idx);
3820 mmiowb();
3821}
3822
3823
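/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command type
 */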
3829static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3830{
3831 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3832 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3833 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3834 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3835 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3836 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3837 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3838 return true;
3839 else
3840 return false;
3841}
3842
3843
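/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */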
3857int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3858 u32 data_hi, u32 data_lo, int cmd_type)
3859{
3860 struct eth_spe *spe;
3861 u16 type;
3862 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3863
3864#ifdef BNX2X_STOP_ON_ERROR
3865 if (unlikely(bp->panic)) {
3866 BNX2X_ERR("Can't post SP when there is panic\n");
3867 return -EIO;
3868 }
3869#endif
3870
3871 spin_lock_bh(&bp->spq_lock);
3872
3873 if (common) {
3874 if (!atomic_read(&bp->eq_spq_left)) {
3875 BNX2X_ERR("BUG! EQ ring full!\n");
3876 spin_unlock_bh(&bp->spq_lock);
3877 bnx2x_panic();
3878 return -EBUSY;
3879 }
3880 } else if (!atomic_read(&bp->cq_spq_left)) {
3881 BNX2X_ERR("BUG! SPQ ring full!\n");
3882 spin_unlock_bh(&bp->spq_lock);
3883 bnx2x_panic();
3884 return -EBUSY;
3885 }
3886
3887 spe = bnx2x_sp_get_next(bp);
3888
3889
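/* The CID needs the port number to be encoded in it */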
3890 spe->hdr.conn_and_cmd_data =
3891 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3892 HW_CID(bp, cid));
3893
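/* In some cases, the type may already contain the func-id (mainly in
 * SRIOV related use cases), so add it here only if it's not already set.
 */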
3898 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3899 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3900 SPE_HDR_CONN_TYPE;
3901 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3902 SPE_HDR_FUNCTION_ID);
3903 } else {
3904 type = cmd_type;
3905 }
3906
3907 spe->hdr.type = cpu_to_le16(type);
3908
3909 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3910 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3911
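/* It's ok if the actual decrement is issued towards the memory
 * somewhere between the spin_lock and spin_unlock: no more explicit
 * memory barrier is needed.
 */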
3917 if (common)
3918 atomic_dec(&bp->eq_spq_left);
3919 else
3920 atomic_dec(&bp->cq_spq_left);
3921
3922 DP(BNX2X_MSG_SP,
3923 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3924 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3925 (u32)(U64_LO(bp->spq_mapping) +
3926 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3927 HW_CID(bp, cid), data_hi, data_lo, type,
3928 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3929
3930 bnx2x_sp_prod_update(bp);
3931 spin_unlock_bh(&bp->spq_lock);
3932 return 0;
3933}
3934
3935
3936static int bnx2x_acquire_alr(struct bnx2x *bp)
3937{
3938 u32 j, val;
3939 int rc = 0;
3940
3941 might_sleep();
3942 for (j = 0; j < 1000; j++) {
3943 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3944 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3945 if (val & MCPR_ACCESS_LOCK_LOCK)
3946 break;
3947
3948 usleep_range(5000, 10000);
3949 }
3950 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3951 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3952 rc = -EBUSY;
3953 }
3954
3955 return rc;
3956}
3957
3958
3959static void bnx2x_release_alr(struct bnx2x *bp)
3960{
3961 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3962}
3963
3964#define BNX2X_DEF_SB_ATT_IDX 0x0001
3965#define BNX2X_DEF_SB_IDX 0x0002
3966
3967static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3968{
3969 struct host_sp_status_block *def_sb = bp->def_status_blk;
3970 u16 rc = 0;
3971
3972 barrier();
3973 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3974 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3975 rc |= BNX2X_DEF_SB_ATT_IDX;
3976 }
3977
3978 if (bp->def_idx != def_sb->sp_sb.running_index) {
3979 bp->def_idx = def_sb->sp_sb.running_index;
3980 rc |= BNX2X_DEF_SB_IDX;
3981 }
3982
3983
3984 barrier();
3985 return rc;
3986}
3987
3988
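/*
 * Slow path service functions
 */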
3992static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3993{
3994 int port = BP_PORT(bp);
3995 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3996 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3997 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3998 NIG_REG_MASK_INTERRUPT_PORT0;
3999 u32 aeu_mask;
4000 u32 nig_mask = 0;
4001 u32 reg_addr;
4002
4003 if (bp->attn_state & asserted)
4004 BNX2X_ERR("IGU ERROR\n");
4005
4006 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4007 aeu_mask = REG_RD(bp, aeu_addr);
4008
4009 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4010 aeu_mask, asserted);
4011 aeu_mask &= ~(asserted & 0x3ff);
4012 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4013
4014 REG_WR(bp, aeu_addr, aeu_mask);
4015 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4016
4017 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4018 bp->attn_state |= asserted;
4019 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4020
4021 if (asserted & ATTN_HARD_WIRED_MASK) {
4022 if (asserted & ATTN_NIG_FOR_FUNC) {
4023
4024 bnx2x_acquire_phy_lock(bp);
4025
4026
4027 nig_mask = REG_RD(bp, nig_int_mask_addr);
4028
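/* If nig_mask is not set, there is no need to call the update
 * function.
 */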
4032 if (nig_mask) {
4033 REG_WR(bp, nig_int_mask_addr, 0);
4034
4035 bnx2x_link_attn(bp);
4036 }
4037
4038
4039 }
4040 if (asserted & ATTN_SW_TIMER_4_FUNC)
4041 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4042
4043 if (asserted & GPIO_2_FUNC)
4044 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4045
4046 if (asserted & GPIO_3_FUNC)
4047 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4048
4049 if (asserted & GPIO_4_FUNC)
4050 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4051
4052 if (port == 0) {
4053 if (asserted & ATTN_GENERAL_ATTN_1) {
4054 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4055 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4056 }
4057 if (asserted & ATTN_GENERAL_ATTN_2) {
4058 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4059 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4060 }
4061 if (asserted & ATTN_GENERAL_ATTN_3) {
4062 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4063 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4064 }
4065 } else {
4066 if (asserted & ATTN_GENERAL_ATTN_4) {
4067 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4068 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4069 }
4070 if (asserted & ATTN_GENERAL_ATTN_5) {
4071 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4072 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4073 }
4074 if (asserted & ATTN_GENERAL_ATTN_6) {
4075 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4076 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4077 }
4078 }
4079
4080 }
4081
4082 if (bp->common.int_block == INT_BLOCK_HC)
4083 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4084 COMMAND_REG_ATTN_BITS_SET);
4085 else
4086 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4087
4088 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4089 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4090 REG_WR(bp, reg_addr, asserted);
4091
4092
4093 if (asserted & ATTN_NIG_FOR_FUNC) {
4094
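/* Verify that the IGU ack through BAR was written before restoring
 * the NIG mask. This loop should exit after 2-3 iterations max.
 */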
4097 if (bp->common.int_block != INT_BLOCK_HC) {
4098 u32 cnt = 0, igu_acked;
4099 do {
4100 igu_acked = REG_RD(bp,
4101 IGU_REG_ATTENTION_ACK_BITS);
4102 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4103 (++cnt < MAX_IGU_ATTN_ACK_TO));
4104 if (!igu_acked)
4105 DP(NETIF_MSG_HW,
4106 "Failed to verify IGU ack on time\n");
4107 barrier();
4108 }
4109 REG_WR(bp, nig_int_mask_addr, nig_mask);
4110 bnx2x_release_phy_lock(bp);
4111 }
4112}
4113
4114static void bnx2x_fan_failure(struct bnx2x *bp)
4115{
4116 int port = BP_PORT(bp);
4117 u32 ext_phy_config;
4118
4119 ext_phy_config =
4120 SHMEM_RD(bp,
4121 dev_info.port_hw_config[port].external_phy_config);
4122
4123 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4124 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4125 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4126 ext_phy_config);
4127
4128
4129 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4130 "Please contact OEM Support for assistance\n");
4131
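/* Schedule a device reset (unload): some boards consume enough power
 * when the driver is up to overheat if the fan fails.
 */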
4136 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4137}
4138
static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);

		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);

		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention-0 0x%x\n", val);

		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");

		if (!CHIP_IS_E1x(bp)) {
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x_read_mf_cfg(bp);
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);

			if (val & (DRV_STATUS_DCC_EVENT_MASK |
				   DRV_STATUS_OEM_EVENT_MASK))
				bnx2x_oem_event(bp,
					(val & (DRV_STATUS_DCC_EVENT_MASK |
						DRV_STATUS_OEM_EVENT_MASK)));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			if (val & DRV_STATUS_DRV_INFO_REQ)
				bnx2x_handle_drv_info_req(bp);

			if (val & DRV_STATUS_VF_DISABLED)
				bnx2x_schedule_iov_task(bp,
							BNX2X_IOV_HANDLE_FLR);

			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
			    bp->dcbx_enabled > 0)
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
			if (val & DRV_STATUS_AFEX_EVENT_MASK)
				bnx2x_handle_afex_cmd(bp,
					val & DRV_STATUS_AFEX_EVENT_MASK);
			if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
				bnx2x_handle_eee_event(bp);

			if (val & DRV_STATUS_OEM_UPDATE_SVID)
				bnx2x_handle_update_svid_cmd(bp);

			if (bp->link_vars.periodic_flags &
			    PERIODIC_FLAGS_LINK_EVENT) {

				bnx2x_acquire_phy_lock(bp);
				bp->link_vars.periodic_flags &=
					~PERIODIC_FLAGS_LINK_EVENT;
				bnx2x_release_phy_lock(bp);
				if (IS_MF(bp))
					bnx2x_link_sync_notify(bp);
				bnx2x_link_report(bp);
			}

			bnx2x__link_status_update(bp);
		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			bnx2x_mc_assert(bp);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

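/* Recovery register (MISC_REG_GENERIC_POR_1) layout, per the masks below:
 * bits 0-7   - path 0 PF load mask (one bit per PF)
 * bits 8-15  - path 1 PF load mask
 * bit  16    - path 0 reset-in-progress
 * bit  17    - path 1 reset-in-progress
 * bit  18    - global reset flag
 */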
#define BNX2X_RECOVERY_GLOB_REG		MISC_REG_GENERIC_POR_1

#define BNX2X_PATH0_LOAD_CNT_MASK	0x000000ff
#define BNX2X_PATH0_LOAD_CNT_SHIFT	0
#define BNX2X_PATH1_LOAD_CNT_MASK	0x0000ff00
#define BNX2X_PATH1_LOAD_CNT_SHIFT	8
#define BNX2X_PATH0_RST_IN_PROG_BIT	0x00010000
#define BNX2X_PATH1_RST_IN_PROG_BIT	0x00020000
#define BNX2X_GLOBAL_RESET_BIT		0x00040000

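/*
 * Set the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */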
void bnx2x_set_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

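/*
 * Clear the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */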
static void bnx2x_clear_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

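/*
 * Checks the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */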
static bool bnx2x_reset_is_global(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
}

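/*
 * Clear RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock.
 */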
static void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	val &= ~bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

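/*
 * Set RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock.
 */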
void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	val |= bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

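/*
 * Returns true if there is no reset currently in progress on the
 * given engine.
 */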
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	u32 bit = engine ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;

	return (val & bit) ? false : true;
}

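/*
 * bnx2x_set_pf_load - set PF load mark
 *
 * @bp:		driver handle
 *
 * Marks this PF as loaded in the path's load mask.
 * Should be run under rtnl lock.
 */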
void bnx2x_set_pf_load(struct bnx2x *bp)
{
	u32 val1, val;
	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK;
	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = (val & mask) >> shift;

	val1 |= (1 << bp->pf_num);

	val &= ~mask;

	val |= ((val1 << shift) & mask);

	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

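/*
 * bnx2x_clear_pf_load - clear PF load mark
 *
 * @bp:		driver handle
 *
 * Should be run under rtnl lock.
 * Returns true if other functions are still marked as loaded on this path.
 */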
bool bnx2x_clear_pf_load(struct bnx2x *bp)
{
	u32 val1, val;
	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK;
	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = (val & mask) >> shift;

	val1 &= ~(1 << bp->pf_num);

	val &= ~mask;

	val |= ((val1 << shift) & mask);

	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	return val1 != 0;
}

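/*
 * Return true if any PF is marked as loaded on the given engine.
 */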
static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
{
	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK);
	u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			      BNX2X_PATH0_LOAD_CNT_SHIFT);
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);

	val = (val & mask) >> shift;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
	   engine, val);

	return val != 0;
}

static void _print_parity(struct bnx2x *bp, u32 reg)
{
	pr_cont(" [0x%08x] ", REG_RD(bp, reg));
}

static void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}

static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
					    int *par_num, bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res |= true;

			if (print) {
				switch (cur_bit) {
				case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
					_print_next_block((*par_num)++, "BRB");
					_print_parity(bp,
						      BRB1_REG_BRB1_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "PARSER");
					_print_parity(bp, PRS_REG_PRS_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
					_print_next_block((*par_num)++, "TSDM");
					_print_parity(bp,
						      TSDM_REG_TSDM_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "SEARCHER");
					_print_parity(bp, SRC_REG_SRC_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
					_print_next_block((*par_num)++, "TCM");
					_print_parity(bp, TCM_REG_TCM_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "TSEMI");
					_print_parity(bp,
						      TSEM_REG_TSEM_PRTY_STS_0);
					_print_parity(bp,
						      TSEM_REG_TSEM_PRTY_STS_1);
					break;
				case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
					_print_next_block((*par_num)++, "XPB");
					_print_parity(bp, GRCBASE_XPB +
							  PB_REG_PB_PRTY_STS);
					break;
				}
			}

			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
					    int *par_num, bool *global,
					    bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res |= true;
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "PBF");
					_print_parity(bp, PBF_REG_PBF_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "QM");
					_print_parity(bp, QM_REG_QM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "TM");
					_print_parity(bp, TM_REG_TM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "XSDM");
					_print_parity(bp,
						      XSDM_REG_XSDM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "XCM");
					_print_parity(bp, XCM_REG_XCM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "XSEMI");
					_print_parity(bp,
						      XSEM_REG_XSEM_PRTY_STS_0);
					_print_parity(bp,
						      XSEM_REG_XSEM_PRTY_STS_1);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "DOORBELLQ");
					_print_parity(bp,
						      DORQ_REG_DORQ_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "NIG");
					if (CHIP_IS_E1x(bp)) {
						_print_parity(bp,
							NIG_REG_NIG_PRTY_STS);
					} else {
						_print_parity(bp,
							NIG_REG_NIG_PRTY_STS_0);
						_print_parity(bp,
							NIG_REG_NIG_PRTY_STS_1);
					}
				}
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				if (print)
					_print_next_block((*par_num)++,
							  "VAUX PCI CORE");
				*global = true;
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "DEBUG");
					_print_parity(bp, DBG_REG_DBG_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "USDM");
					_print_parity(bp,
						      USDM_REG_USDM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "UCM");
					_print_parity(bp, UCM_REG_UCM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++,
							  "USEMI");
					_print_parity(bp,
						      USEM_REG_USEM_PRTY_STS_0);
					_print_parity(bp,
						      USEM_REG_USEM_PRTY_STS_1);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "UPB");
					_print_parity(bp, GRCBASE_UPB +
							  PB_REG_PB_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "CSDM");
					_print_parity(bp,
						      CSDM_REG_CSDM_PRTY_STS);
				}
				break;
			case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
				if (print) {
					_print_next_block((*par_num)++, "CCM");
					_print_parity(bp, CCM_REG_CCM_PRTY_STS);
				}
				break;
			}

			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
					    int *par_num, bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res = true;
			if (print) {
				switch (cur_bit) {
				case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "CSEMI");
					_print_parity(bp,
						      CSEM_REG_CSEM_PRTY_STS_0);
					_print_parity(bp,
						      CSEM_REG_CSEM_PRTY_STS_1);
					break;
				case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
					_print_next_block((*par_num)++, "PXP");
					_print_parity(bp, PXP_REG_PXP_PRTY_STS);
					_print_parity(bp,
						      PXP2_REG_PXP2_PRTY_STS_0);
					_print_parity(bp,
						      PXP2_REG_PXP2_PRTY_STS_1);
					break;
				case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "PXPPCICLOCKCLIENT");
					break;
				case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
					_print_next_block((*par_num)++, "CFC");
					_print_parity(bp,
						      CFC_REG_CFC_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
					_print_next_block((*par_num)++, "CDU");
					_print_parity(bp, CDU_REG_CDU_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
					_print_next_block((*par_num)++, "DMAE");
					_print_parity(bp,
						      DMAE_REG_DMAE_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
					_print_next_block((*par_num)++, "IGU");
					if (CHIP_IS_E1x(bp))
						_print_parity(bp,
							HC_REG_HC_PRTY_STS);
					else
						_print_parity(bp,
							IGU_REG_IGU_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
					_print_next_block((*par_num)++, "MISC");
					_print_parity(bp,
						      MISC_REG_MISC_PRTY_STS);
					break;
				}
			}

			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
					    int *par_num, bool *global,
					    bool print)
{
	bool res = false;
	u32 cur_bit;
	int i;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP ROM");
				*global = true;
				res = true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP UMP RX");
				*global = true;
				res = true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				if (print)
					_print_next_block((*par_num)++,
							  "MCP UMP TX");
				*global = true;
				res = true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				(*par_num)++;
				REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
				       1UL << 10);
				break;
			}

			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
					    int *par_num, bool print)
{
	u32 cur_bit;
	bool res;
	int i;

	res = false;

	for (i = 0; sig; i++) {
		cur_bit = (0x1UL << i);
		if (sig & cur_bit) {
			res = true;
			if (print) {
				switch (cur_bit) {
				case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
					_print_next_block((*par_num)++,
							  "PGLUE_B");
					_print_parity(bp,
						PGLUE_B_REG_PGLUE_B_PRTY_STS);
					break;
				case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
					_print_next_block((*par_num)++, "ATC");
					_print_parity(bp,
						      ATC_REG_ATC_PRTY_STS);
					break;
				}
			}

			sig &= ~cur_bit;
		}
	}

	return res;
}

static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
			      u32 *sig)
{
	bool res = false;

	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||
	    (sig[2] & HW_PRTY_ASSERT_SET_2) ||
	    (sig[3] & HW_PRTY_ASSERT_SET_3) ||
	    (sig[4] & HW_PRTY_ASSERT_SET_4)) {
		int par_num = 0;

		DP(NETIF_MSG_HW, "Parity error: HW block parity attention:\n"
				 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
		   sig[0] & HW_PRTY_ASSERT_SET_0,
		   sig[1] & HW_PRTY_ASSERT_SET_1,
		   sig[2] & HW_PRTY_ASSERT_SET_2,
		   sig[3] & HW_PRTY_ASSERT_SET_3,
		   sig[4] & HW_PRTY_ASSERT_SET_4);
		if (print) {
			if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
			     (sig[1] & HW_PRTY_ASSERT_SET_1) ||
			     (sig[2] & HW_PRTY_ASSERT_SET_2) ||
			     (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
			     (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
				netdev_err(bp->dev,
					   "Parity errors detected in blocks: ");
			} else {
				print = false;
			}
		}
		res |= bnx2x_check_blocks_with_parity0(bp,
			sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
		res |= bnx2x_check_blocks_with_parity1(bp,
			sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
		res |= bnx2x_check_blocks_with_parity2(bp,
			sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
		res |= bnx2x_check_blocks_with_parity3(bp,
			sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
		res |= bnx2x_check_blocks_with_parity4(bp,
			sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);

		if (print)
			pr_cont("\n");
	}

	return res;
}

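/**
 * bnx2x_chk_parity_attn - checks for parity attentions.
 *
 * @bp:		driver handle
 * @global:	set to true if there was a global (MCP) parity attention
 * @print:	show parity attentions in syslog
 */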
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
{
	struct attn_route attn = { {0} };
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
		port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
		port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
		port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
		port*4);

	attn.sig[3] &= ((REG_RD(bp,
		!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
		      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
		MISC_AEU_ENABLE_MCP_PRTY_BITS) |
		~MISC_AEU_ENABLE_MCP_PRTY_BITS);

	if (!CHIP_IS_E1x(bp))
		attn.sig[4] = REG_RD(bp,
			MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
			port*4);

	return bnx2x_parity_attn(bp, global, print, attn.sig);
}

static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
			  (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
					AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;
	bool global = false;

	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp, &global, true)) {
#ifndef BNX2X_STOP_ON_ERROR
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		schedule_delayed_work(&bp->sp_rtnl_task, 0);

		bnx2x_int_disable(bp);

#else
		bnx2x_panic();
#endif
		bnx2x_release_alr(bp);
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (!CHIP_IS_E1x(bp))
		attn.sig[4] =
			REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
				attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
				attn_bits_ack);
	u32 attn_state = bp->attn_state;

	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update)
{
	u32 igu_addr = bp->igu_base_addr;
	igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
			     igu_addr);
}

static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb();
}

static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	u8 err = elem->message.error;

	if (!bp->cnic_eth_dev.starting_cid ||
	    (cid < bp->cnic_eth_dev.starting_cid &&
	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(err)) {

		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp, false);
	}
	bnx2x_cnic_cfc_comp(bp, cid, err);
	return 0;
}

static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
{
	struct bnx2x_mcast_ramrod_params rparam;
	int rc;

	memset(&rparam, 0, sizeof(rparam));

	rparam.mcast_obj = &bp->mcast_obj;

	netif_addr_lock_bh(bp->dev);

	bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);

	if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}

	netif_addr_unlock_bh(bp->dev);
}

static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
					    union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;
	u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
	u32 cid = echo & BNX2X_SWCID_MASK;
	struct bnx2x_vlan_mac_obj *vlan_mac_obj;

	__set_bit(RAMROD_CONT, &ramrod_flags);

	switch (echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
		if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
		else
			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;

		break;
	case BNX2X_FILTER_VLAN_PENDING:
		DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
		vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
		break;
	case BNX2X_FILTER_MCAST_PENDING:
		DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");

		bnx2x_handle_mcast_eqe(bp);
		return;
	default:
		BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
		return;
	}

	rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);

	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
}

static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);

static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
{
	netif_addr_lock_bh(bp->dev);

	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

	if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
		bnx2x_set_storm_rx_mode(bp);
	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
				    &bp->sp_state))
		bnx2x_set_iscsi_eth_rx_mode(bp, true);
	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
				    &bp->sp_state))
		bnx2x_set_iscsi_eth_rx_mode(bp, false);

	netif_addr_unlock_bh(bp->dev);
}

static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
				       union event_ring_elem *elem)
{
	if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
		DP(BNX2X_MSG_SP,
		   "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
		   elem->message.data.vif_list_event.func_bit_map);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
			elem->message.data.vif_list_event.func_bit_map);
	} else if (elem->message.data.vif_list_event.echo ==
		   VIF_LIST_RULE_SET) {
		DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
	}
}

static void bnx2x_after_function_update(struct bnx2x *bp)
{
	int q, rc;
	struct bnx2x_fastpath *fp;
	struct bnx2x_queue_state_params queue_params = {NULL};
	struct bnx2x_queue_update_params *q_update_params =
		&queue_params.params.update;

	queue_params.cmd = BNX2X_Q_CMD_UPDATE;

	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
		  &q_update_params->update_flags);
	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
		  &q_update_params->update_flags);
	__set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

	if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
		q_update_params->silent_removal_value = 0;
		q_update_params->silent_removal_mask = 0;
	} else {
		q_update_params->silent_removal_value =
			(bp->afex_def_vlan_tag & VLAN_VID_MASK);
		q_update_params->silent_removal_mask = VLAN_VID_MASK;
	}

	for_each_eth_queue(bp, q) {
		fp = &bp->fp[q];
		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  q);
	}

	if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
		fp = &bp->fp[FCOE_IDX(bp)];
		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  q);
	} else {
		bnx2x_link_report(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
	}
}

static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
	struct bnx2x *bp, u32 cid)
{
	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);

	if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
		return &bnx2x_fcoe_sp_obj(bp, q_obj);
	else
		return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}

static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u8 echo;
	u32 cid;
	u8 opcode;
	int rc, spqe_cnt = 0;
	struct bnx2x_queue_sp_obj *q_obj;
	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
	   hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		rc = bnx2x_iov_eq_sp_event(bp, elem);
		if (!rc) {
			DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
			   rc);
			goto next_spqe;
		}

		opcode = elem->message.opcode;

		switch (opcode) {
		case EVENT_RING_OPCODE_VF_PF_CHANNEL:
			bnx2x_vf_mbx_schedule(bp,
					      &elem->message.data.vf_pf_event);
			continue;

		case EVENT_RING_OPCODE_STAT_QUERY:
			DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
			       "got statistics comp event %d\n",
			       bp->stats_comp++);
			goto next_spqe;

		case EVENT_RING_OPCODE_CFC_DEL:
			cid = SW_CID(elem->message.data.cfc_del_event.cid);

			DP(BNX2X_MSG_SP,
			   "got delete ramrod for MULTI[%d]\n", cid);

			if (CNIC_LOADED(bp) &&
			    !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;

			q_obj = bnx2x_cid_to_q_obj(bp, cid);

			if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
				break;

			goto next_spqe;

		case EVENT_RING_OPCODE_STOP_TRAFFIC:
			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
			if (f_obj->complete_cmd(bp, f_obj,
						BNX2X_F_CMD_TX_STOP))
				break;
			goto next_spqe;

		case EVENT_RING_OPCODE_START_TRAFFIC:
			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
			if (f_obj->complete_cmd(bp, f_obj,
						BNX2X_F_CMD_TX_START))
				break;
			goto next_spqe;

		case EVENT_RING_OPCODE_FUNCTION_UPDATE:
			echo = elem->message.data.function_update_event.echo;
			if (echo == SWITCH_UPDATE) {
				DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
				   "got FUNC_SWITCH_UPDATE ramrod\n");
				if (f_obj->complete_cmd(
					bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
					break;

			} else {
				int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;

				DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
				   "AFEX: ramrod completed FUNCTION_UPDATE\n");
				f_obj->complete_cmd(bp, f_obj,
						    BNX2X_F_CMD_AFEX_UPDATE);

				bnx2x_schedule_sp_rtnl(bp, cmd, 0);
			}

			goto next_spqe;

		case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
			f_obj->complete_cmd(bp, f_obj,
					    BNX2X_F_CMD_AFEX_VIFLISTS);
			bnx2x_after_afex_vif_lists(bp, elem);
			goto next_spqe;
		case EVENT_RING_OPCODE_FUNCTION_START:
			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
			   "got FUNC_START ramrod\n");
			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
				break;

			goto next_spqe;

		case EVENT_RING_OPCODE_FUNCTION_STOP:
			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
			   "got FUNC_STOP ramrod\n");
			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
				break;

			goto next_spqe;

		case EVENT_RING_OPCODE_SET_TIMESYNC:
			DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
			   "got set_timesync ramrod completion\n");
			if (f_obj->complete_cmd(bp, f_obj,
						BNX2X_F_CMD_SET_TIMESYNC))
				break;
			goto next_spqe;
		}

		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
			   SW_CID(elem->message.data.eth_event.echo));
			rss_raw->clear_pending(rss_raw);
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
			bnx2x_handle_classification_eqe(bp, elem);
			break;

		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got mcast ramrod\n");
			bnx2x_handle_mcast_eqe(bp);
			break;

		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
			bnx2x_handle_rx_mode_eqe(bp);
			break;
		default:
			BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
				  elem->message.opcode, bp->state);
		}
next_spqe:
		spqe_cnt++;
	}

	smp_mb__before_atomic();
	atomic_add(spqe_cnt, &bp->eq_spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;

	smp_wmb();

	bnx2x_update_eq_prod(bp, bp->eq_prod);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);

	DP(BNX2X_MSG_SP, "sp task invoked\n");

	smp_rmb();
	if (atomic_read(&bp->interrupt_occurred)) {

		u16 status = bnx2x_update_dsb_idx(bp);

		DP(BNX2X_MSG_SP, "status %x\n", status);
		DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
		atomic_set(&bp->interrupt_occurred, 0);

		if (status & BNX2X_DEF_SB_ATT_IDX) {
			bnx2x_attn_int(bp);
			status &= ~BNX2X_DEF_SB_ATT_IDX;
		}

		if (status & BNX2X_DEF_SB_IDX) {
			struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

			if (FCOE_INIT(bp) &&
			    (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {

				local_bh_disable();
				napi_schedule(&bnx2x_fcoe(bp, napi));
				local_bh_enable();
			}

			bnx2x_eq_int(bp);
			bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
				     le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

			status &= ~BNX2X_DEF_SB_IDX;
		}

		if (unlikely(status))
			DP(BNX2X_MSG_SP,
			   "got an unknown interrupt! (status 0x%x)\n", status);

		bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
			     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
	}

	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
			       &bp->sp_state)) {
		bnx2x_link_report(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
	}
}

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	if (CNIC_LOADED(bp)) {
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}

	bnx2x_schedule_sp_task(bp);

	return IRQ_HANDLED;
}

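/* Write the driver pulse sequence to shared memory so the management FW
 * can tell that the driver is still alive.
 */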
void bnx2x_drv_pulse(struct bnx2x *bp)
{
	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 bp->fw_drv_pulse_wr_seq);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (IS_PF(bp) &&
	    !BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u16 drv_pulse;
		u16 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		bnx2x_drv_pulse(bp);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);

		if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
			BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

	if (IS_VF(bp))
		bnx2x_timer_sriov(bp);

	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

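/*
 * nic init service functions
 */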
static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
	u32 i;
	if (!(len%4) && !(addr%4))
		for (i = 0; i < len; i += 4)
			REG_WR(bp, addr + i, fill);
	else
		for (i = 0; i < len; i++)
			REG_WR8(bp, addr + i, fill);
}

static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
				int fw_sb_id,
				u32 *sb_data_p,
				u32 data_size)
{
	int index;
	for (index = 0; index < data_size; index++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			sizeof(u32)*index,
			*(sb_data_p + index));
}

static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_SYNC_BLOCK_SIZE);
}

static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
				struct hc_sp_status_block_data *sp_sb_data)
{
	int func = BP_FUNC(bp);
	int i;
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32),
			*((u32 *)sp_sb_data + i));
}

static void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.state = SB_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_SYNC_BLOCK_SIZE);
}

static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
					   int igu_sb_id, int igu_seg_id)
{
	hc_sm->igu_sb_id = igu_sb_id;
	hc_sm->igu_seg_id = igu_seg_id;
	hc_sm->timer_value = 0xFF;
	hc_sm->time_to_expire = 0xFFFFFFFF;
}

static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
{
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;

	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}

void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
		   u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_ENABLED;
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_ENABLED;
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);

	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
				       tx_usec);
}

static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset, reg_offset_en5;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
				 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;

		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
				REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (!CHIP_IS_E1x(bp))
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset_en5 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.state = SB_ENABLED;
	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->tx_ticks, bp->rx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;

	atomic_set(&bp->eq_spq_left,
		   min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}

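/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp:			driver handle
 * @cl_id:		client id
 * @rx_mode_flags:	rx mode request
 * @rx_accept_flags:	rx accept flags
 * @tx_accept_flags:	tx accept flags
 * @ramrod_flags:	ramrod flags
 */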
static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			       unsigned long rx_mode_flags,
			       unsigned long rx_accept_flags,
			       unsigned long tx_accept_flags,
			       unsigned long ramrod_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod_param;
	int rc;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	ramrod_param.cid = 0;
	ramrod_param.cl_id = cl_id;
	ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
	ramrod_param.func_id = BP_FUNC(bp);

	ramrod_param.pstate = &bp->sp_state;
	ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;

	ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
	ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

	ramrod_param.ramrod_flags = ramrod_flags;
	ramrod_param.rx_mode_flags = rx_mode_flags;

	ramrod_param.rx_accept_flags = rx_accept_flags;
	ramrod_param.tx_accept_flags = tx_accept_flags;

	rc = bnx2x_config_rx_mode(bp, &ramrod_param);
	if (rc < 0) {
		BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
		return rc;
	}

	return 0;
}

static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
				   unsigned long *rx_accept_flags,
				   unsigned long *tx_accept_flags)
{
	*rx_accept_flags = 0;
	*tx_accept_flags = 0;

	switch (rx_mode) {
	case BNX2X_RX_MODE_NONE:
		break;
	case BNX2X_RX_MODE_NORMAL:
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		if (bp->accept_any_vlan) {
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
		}

		break;
	case BNX2X_RX_MODE_ALLMULTI:
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		if (bp->accept_any_vlan) {
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
		}

		break;
	case BNX2X_RX_MODE_PROMISC:
		__set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		if (IS_MF_SI(bp))
			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
		else
			__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);

		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);

		break;
	default:
		BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
		return -EINVAL;
	}

	return 0;
}

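/* called with netif_addr_lock_bh() */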
6258static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6259{
6260 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6261 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6262 int rc;
6263
6264 if (!NO_FCOE(bp))
6265
6266 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6267
6268 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6269 &tx_accept_flags);
6270 if (rc)
6271 return rc;
6272
6273 __set_bit(RAMROD_RX, &ramrod_flags);
6274 __set_bit(RAMROD_TX, &ramrod_flags);
6275
6276 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6277 rx_accept_flags, tx_accept_flags,
6278 ramrod_flags);
6279}
6280
6281static void bnx2x_init_internal_common(struct bnx2x *bp)
6282{
6283 int i;
6284
	/* Zero this manually as its initialization is
	   currently missing in the initTool */
6287 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6288 REG_WR(bp, BAR_USTRORM_INTMEM +
6289 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6290 if (!CHIP_IS_E1x(bp)) {
6291 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6292 CHIP_INT_MODE_IS_BC(bp) ?
6293 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6294 }
6295}
6296
6297static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6298{
6299 switch (load_code) {
6300 case FW_MSG_CODE_DRV_LOAD_COMMON:
6301 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6302 bnx2x_init_internal_common(bp);

		/* fall through */
6305 case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */

		/* fall through */
6309 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
6312 break;
6313
6314 default:
6315 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6316 break;
6317 }
6318}
6319
6320static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6321{
6322 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6323}
6324
6325static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6326{
6327 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6328}
6329
6330static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6331{
6332 if (CHIP_IS_E1x(fp->bp))
6333 return BP_L_ID(fp->bp) + fp->index;
6334 else
6335 return bnx2x_fp_igu_sb_id(fp);
6336}
6337
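/* Initialize one ethernet fastpath: ids, shortcuts, per-CoS tx data,
 * its status block and the queue state object.
 */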
6338static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6339{
6340 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6341 u8 cos;
6342 unsigned long q_type = 0;
	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };

	fp->rx_queue = fp_idx;
6345 fp->cid = fp_idx;
6346 fp->cl_id = bnx2x_fp_cl_id(fp);
6347 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6348 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
	/* qZone id equals to FW (per path) client id */
6350 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
6351
	/* init shortcut */
6353 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6354
	/* Setup SB indices */
6356 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6357
	/* Configure Queue State object */
6359 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6360 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6361
6362 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6363
	/* init tx data */
6365 for_each_cos_in_tx_queue(fp, cos) {
6366 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6367 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6368 FP_COS_TO_TXQ(fp, cos, bp),
6369 BNX2X_TX_SB_INDEX_BASE + cos, fp);
6370 cids[cos] = fp->txdata_ptr[cos]->cid;
6371 }
6372
	/* nothing more for vf to do here */
6374 if (IS_VF(bp))
6375 return;
6376
6377 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6378 fp->fw_sb_id, fp->igu_sb_id);
6379 bnx2x_update_fpsb_idx(fp);
6380 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6381 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6382 bnx2x_sp_mapping(bp, q_rdata), q_type);
6383
	/**
	 * Configure classification DBs: Always enable Tx switching
	 */
6387 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6388
6389 DP(NETIF_MSG_IFUP,
6390 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6391 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6392 fp->igu_sb_id);
6393}
6394
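/* Chain the tx BD pages into a ring and reset the tx indices and doorbell */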
6395static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6396{
6397 int i;
6398
6399 for (i = 1; i <= NUM_TX_RINGS; i++) {
6400 struct eth_tx_next_bd *tx_next_bd =
6401 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6402
6403 tx_next_bd->addr_hi =
6404 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6405 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6406 tx_next_bd->addr_lo =
6407 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6408 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6409 }
6410
6411 *txdata->tx_cons_sb = cpu_to_le16(0);
6412
6413 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6414 txdata->tx_db.data.zero_fill1 = 0;
6415 txdata->tx_db.data.prod = 0;
6416
6417 txdata->tx_pkt_prod = 0;
6418 txdata->tx_pkt_cons = 0;
6419 txdata->tx_bd_prod = 0;
6420 txdata->tx_bd_cons = 0;
6421 txdata->tx_pkt = 0;
6422}
6423
6424static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6425{
6426 int i;
6427
6428 for_each_tx_queue_cnic(bp, i)
6429 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6430}
6431
6432static void bnx2x_init_tx_rings(struct bnx2x *bp)
6433{
6434 int i;
6435 u8 cos;
6436
6437 for_each_eth_queue(bp, i)
6438 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6439 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6440}
6441
6442static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6443{
6444 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6445 unsigned long q_type = 0;
6446
6447 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6448 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6449 BNX2X_FCOE_ETH_CL_ID_IDX);
6450 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6451 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6452 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6453 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6454 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6455 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6456 fp);
6457
6458 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6459
	/* qZone id equals to FW (per path) client id */
6461 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
6463 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6464 bnx2x_rx_ustorm_prods_offset(fp);
6465
	/* Configure Queue State object */
6467 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6468 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6469
	/* No multi-CoS for FCoE L2 client */
6471 BUG_ON(fp->max_cos != 1);
6472
6473 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6474 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6475 bnx2x_sp_mapping(bp, q_rdata), q_type);
6476
6477 DP(NETIF_MSG_IFUP,
6478 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6479 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6480 fp->igu_sb_id);
6481}
6482
6483void bnx2x_nic_init_cnic(struct bnx2x *bp)
6484{
6485 if (!NO_FCOE(bp))
6486 bnx2x_init_fcoe_fp(bp);
6487
6488 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6489 BNX2X_VF_ID_INVALID, false,
6490 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6491
	/* ensure status block indices were read */
6493 rmb();
6494 bnx2x_init_rx_rings_cnic(bp);
6495 bnx2x_init_tx_rings_cnic(bp);
6496
	/* flush all */
6498 mb();
6499 mmiowb();
6500}
6501
6502void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6503{
6504 int i;
6505
	/* Setup NIC internals and enable interrupts */
6507 for_each_eth_queue(bp, i)
6508 bnx2x_init_eth_fp(bp, i);
6509
	/* ensure status block indices were read */
6511 rmb();
6512 bnx2x_init_rx_rings(bp);
6513 bnx2x_init_tx_rings(bp);
6514
6515 if (IS_PF(bp)) {
		/* Initialize MOD_ABS interrupts */
6517 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6518 bp->common.shmem_base,
6519 bp->common.shmem2_base, BP_PORT(bp));
6520
		/* initialize the default status block and sp ring */
6522 bnx2x_init_def_sb(bp);
6523 bnx2x_update_dsb_idx(bp);
6524 bnx2x_init_sp_ring(bp);
6525 } else {
6526 bnx2x_memset_stats(bp);
6527 }
6528}
6529
6530void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6531{
6532 bnx2x_init_eq_ring(bp);
6533 bnx2x_init_internal(bp, load_code);
6534 bnx2x_pf_init(bp);
6535 bnx2x_stats_init(bp);
6536
	/* flush all before enabling interrupts */
6538 mb();
6539 mmiowb();
6540
6541 bnx2x_int_enable(bp);
6542
	/* Check for SPIO5 */
6544 bnx2x_attn_int_deasserted0(bp,
6545 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6546 AEU_INPUTS_ATTN_BITS_SPIO5);
6547}
6548
/* gzip service functions */
6550static int bnx2x_gunzip_init(struct bnx2x *bp)
6551{
6552 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6553 &bp->gunzip_mapping, GFP_KERNEL);
6554 if (bp->gunzip_buf == NULL)
6555 goto gunzip_nomem1;
6556
6557 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6558 if (bp->strm == NULL)
6559 goto gunzip_nomem2;
6560
6561 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6562 if (bp->strm->workspace == NULL)
6563 goto gunzip_nomem3;
6564
6565 return 0;
6566
6567gunzip_nomem3:
6568 kfree(bp->strm);
6569 bp->strm = NULL;
6570
6571gunzip_nomem2:
6572 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6573 bp->gunzip_mapping);
6574 bp->gunzip_buf = NULL;
6575
6576gunzip_nomem1:
6577 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
6578 return -ENOMEM;
6579}
6580
6581static void bnx2x_gunzip_end(struct bnx2x *bp)
6582{
6583 if (bp->strm) {
6584 vfree(bp->strm->workspace);
6585 kfree(bp->strm);
6586 bp->strm = NULL;
6587 }
6588
6589 if (bp->gunzip_buf) {
6590 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6591 bp->gunzip_mapping);
6592 bp->gunzip_buf = NULL;
6593 }
6594}
6595
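/* Inflate a gzipped firmware chunk into bp->gunzip_buf; returns 0 when the
 * whole stream was decompressed (Z_STREAM_END).
 */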
6596static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6597{
6598 int n, rc;
6599
	/* check gzip header */
6601 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6602 BNX2X_ERR("Bad gzip header\n");
6603 return -EINVAL;
6604 }
6605
6606 n = 10;
6607
6608#define FNAME 0x8
	/* skip optional header */
6610 if (zbuf[3] & FNAME)
6611 while ((zbuf[n++] != 0) && (n < len));
6612
6613 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6614 bp->strm->avail_in = len - n;
6615 bp->strm->next_out = bp->gunzip_buf;
6616 bp->strm->avail_out = FW_BUF_SIZE;
6617
6618 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6619 if (rc != Z_OK)
6620 return rc;
6621
6622 rc = zlib_inflate(bp->strm, Z_FINISH);
6623 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6624 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6625 bp->strm->msg);
6626
6627 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6628 if (bp->gunzip_outlen & 0x3)
6629 netdev_err(bp->dev,
6630 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6631 bp->gunzip_outlen);
6632 bp->gunzip_outlen >>= 2;
6633
6634 zlib_inflateEnd(bp->strm);
6635
6636 if (rc == Z_STREAM_END)
6637 return 0;
6638
6639 return rc;
6640}
6641
/* end of gzip service functions */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
6649static void bnx2x_lb_pckt(struct bnx2x *bp)
6650{
6651 u32 wb_write[3];
6652
	/* Ethernet source and destination addresses */
6654 wb_write[0] = 0x55555555;
6655 wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
6657 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6658
	/* NON-IP protocol */
6660 wb_write[0] = 0x09000000;
6661 wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
6663 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6664}
6665
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
6670static int bnx2x_int_mem_test(struct bnx2x *bp)
6671{
6672 int factor;
6673 int count, i;
6674 u32 val = 0;
6675
6676 if (CHIP_REV_IS_FPGA(bp))
6677 factor = 120;
6678 else if (CHIP_REV_IS_EMUL(bp))
6679 factor = 200;
6680 else
6681 factor = 1;
6682
	/* Disable inputs of parser neighbor blocks */
6684 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6685 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6686 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6687 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6688
	/* Write 0 to parser credits for CFC search request */
6690 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6691
	/* send Ethernet packet */
6693 bnx2x_lb_pckt(bp);
6694
	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
6697 count = 1000 * factor;
6698 while (count) {
6699
6700 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6701 val = *bnx2x_sp(bp, wb_data[0]);
6702 if (val == 0x10)
6703 break;
6704
6705 usleep_range(10000, 20000);
6706 count--;
6707 }
6708 if (val != 0x10) {
6709 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6710 return -1;
6711 }
6712
	/* Wait until PRS register shows 1 packet */
6714 count = 1000 * factor;
6715 while (count) {
6716 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6717 if (val == 1)
6718 break;
6719
6720 usleep_range(10000, 20000);
6721 count--;
6722 }
6723 if (val != 0x1) {
6724 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6725 return -2;
6726 }
6727
	/* Reset and init BRB, PRS */
6729 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6730 msleep(50);
6731 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6732 msleep(50);
6733 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6734 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6735
6736 DP(NETIF_MSG_HW, "part2\n");
6737
	/* Disable inputs of parser neighbor blocks */
6739 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6740 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6741 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6742 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6743
	/* Write 0 to parser credits for CFC search request */
6745 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6746
	/* send 10 Ethernet packets */
6748 for (i = 0; i < 10; i++)
6749 bnx2x_lb_pckt(bp);
6750
	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
6753 count = 1000 * factor;
6754 while (count) {
6755
6756 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6757 val = *bnx2x_sp(bp, wb_data[0]);
6758 if (val == 0xb0)
6759 break;
6760
6761 usleep_range(10000, 20000);
6762 count--;
6763 }
6764 if (val != 0xb0) {
6765 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6766 return -3;
6767 }
6768
	/* Wait until PRS register shows 2 packets */
6770 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6771 if (val != 2)
6772 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6773
	/* Write 1 to parser credits for CFC search request */
6775 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6776
	/* Wait until PRS register shows 3 packets */
6778 msleep(10 * factor);
6779
6780 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6781 if (val != 3)
6782 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6783
	/* clear NIG EOP FIFO */
6785 for (i = 0; i < 11; i++)
6786 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6787 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6788 if (val != 1) {
6789 BNX2X_ERR("clear of NIG failed\n");
6790 return -4;
6791 }
6792
	/* Reset and init BRB, PRS, NIG */
6794 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6795 msleep(50);
6796 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6797 msleep(50);
6798 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6799 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6800 if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
6802 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6803
	/* Enable inputs of parser neighbor blocks */
6805 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6806 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6807 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6808 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6809
6810 DP(NETIF_MSG_HW, "done\n");
6811
6812 return 0;
6813}
6814
6815static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6816{
6817 u32 val;
6818
6819 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6820 if (!CHIP_IS_E1x(bp))
6821 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6822 else
6823 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6824 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6825 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
6832 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6833 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6834 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6835 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6836 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6837 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6838
6839
6840 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6841 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6842 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6843
6844
6845 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6846 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6847 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6848 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6849
6850
6851
6852 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6853 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6854 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6855 if (!CHIP_IS_E1x(bp))
6856 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6857 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6858 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6859
6860 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6861 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6862 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6863
6864
6865 if (!CHIP_IS_E1x(bp))
		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6867 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6868
6869 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6870 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6871
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
6873}
6874
6875static void bnx2x_reset_common(struct bnx2x *bp)
6876{
6877 u32 val = 0x1400;
6878
	/* reset_common */
6880 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6881 0xd3ffff7f);
6882
6883 if (CHIP_IS_E3(bp)) {
6884 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6885 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6886 }
6887
6888 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6889}
6890
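/* Mark DMAE as not ready yet and init the lock serializing DMAE commands */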
6891static void bnx2x_setup_dmae(struct bnx2x *bp)
6892{
6893 bp->dmae_ready = 0;
6894 spin_lock_init(&bp->dmae_lock);
6895}
6896
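/* Program the PXP arbiter with the PCIe max payload size and max read
 * request size (the latter may be forced via the mrrs module parameter).
 */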
6897static void bnx2x_init_pxp(struct bnx2x *bp)
6898{
6899 u16 devctl;
6900 int r_order, w_order;
6901
6902 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6903 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6904 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6905 if (bp->mrrs == -1)
6906 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6907 else {
6908 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6909 r_order = bp->mrrs;
6910 }
6911
6912 bnx2x_init_pxp_arb(bp, r_order, w_order);
6913}
6914
6915static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6916{
6917 int is_required;
6918 u32 val;
6919 int port;
6920
6921 if (BP_NOMCP(bp))
6922 return;
6923
6924 is_required = 0;
6925 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6926 SHARED_HW_CFG_FAN_FAILURE_MASK;
6927
6928 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6929 is_required = 1;
6930
	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
6936 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6937 for (port = PORT_0; port < PORT_MAX; port++) {
6938 is_required |=
6939 bnx2x_fan_failure_det_req(
6940 bp,
6941 bp->common.shmem_base,
6942 bp->common.shmem2_base,
6943 port);
6944 }
6945
6946 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6947
6948 if (is_required == 0)
6949 return;
6950
	/* Fan failure is indicated by SPIO 5 */
6952 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6953
	/* set to active low mode */
6955 val = REG_RD(bp, MISC_REG_SPIO_INT);
6956 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6957 REG_WR(bp, MISC_REG_SPIO_INT, val);
6958
	/* enable interrupt to signal the IGU */
6960 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6961 val |= MISC_SPIO_SPIO5;
6962 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6963}
6964
6965void bnx2x_pf_disable(struct bnx2x *bp)
6966{
6967 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6968 val &= ~IGU_PF_CONF_FUNC_EN;
6969
6970 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6971 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6972 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6973}
6974
6975static void bnx2x__common_init_phy(struct bnx2x *bp)
6976{
6977 u32 shmem_base[2], shmem2_base[2];
	/* Avoid common init in case MFW supports LFA */
6979 if (SHMEM2_RD(bp, size) >
6980 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6981 return;
6982 shmem_base[0] = bp->common.shmem_base;
6983 shmem2_base[0] = bp->common.shmem2_base;
6984 if (!CHIP_IS_E1x(bp)) {
6985 shmem_base[1] =
6986 SHMEM2_RD(bp, other_shmem_base_addr);
6987 shmem2_base[1] =
6988 SHMEM2_RD(bp, other_shmem2_base_addr);
6989 }
6990 bnx2x_acquire_phy_lock(bp);
6991 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6992 bp->common.chip_id);
6993 bnx2x_release_phy_lock(bp);
6994}
6995
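/* Program PXP2 endianness/swap-mode registers; val is 1 on big-endian
 * hosts and 0 otherwise.
 */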
6996static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
6997{
6998 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
6999 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7000 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7001 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7002 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7003
	/* make sure this value is 0 */
7005 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7006
7007 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7008 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7009 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7010 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7011}
7012
7013static void bnx2x_set_endianity(struct bnx2x *bp)
7014{
7015#ifdef __BIG_ENDIAN
7016 bnx2x_config_endianity(bp, 1);
7017#else
7018 bnx2x_config_endianity(bp, 0);
7019#endif
7020}
7021
7022static void bnx2x_reset_endianity(struct bnx2x *bp)
7023{
7024 bnx2x_config_endianity(bp, 0);
7025}
7026
/*
 * Chip-common HW init: executed once per path by the first PF that loads;
 * takes all HW blocks out of reset and initializes the shared resources.
 */
7032static int bnx2x_init_hw_common(struct bnx2x *bp)
7033{
7034 u32 val;
7035
7036 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
7037
	/*
	 * take the RESET lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
7042 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7043
7044 bnx2x_reset_common(bp);
7045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7046
7047 val = 0xfffc;
7048 if (CHIP_IS_E3(bp)) {
7049 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7050 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7051 }
7052 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7053
7054 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7055
7056 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7057
7058 if (!CHIP_IS_E1x(bp)) {
7059 u8 abs_func_id;

		/* 4-port mode or 2-port mode we need to turn off master-enable
		 * for everyone, after that, turn it back on for self. So, we
		 * disregard multi-function or not, and always disable for all
		 * functions on the given path, this means 0,2,4,6 for
		 * path 0 and 1,3,5,7 for path 1
		 */
7068 for (abs_func_id = BP_PATH(bp);
7069 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7070 if (abs_func_id == BP_ABS_FUNC(bp)) {
7071 REG_WR(bp,
7072 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7073 1);
7074 continue;
7075 }
7076
7077 bnx2x_pretend_func(bp, abs_func_id);
7078
7079 bnx2x_pf_disable(bp);
7080 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7081 }
7082 }
7083
7084 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7085 if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
7088 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7089 }
7090
7091 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7092 bnx2x_init_pxp(bp);
7093 bnx2x_set_endianity(bp);
7094 bnx2x_ilt_init_page_size(bp, INITOP_SET);
7095
7096 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7097 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7098
	/* let the HW do it's magic ... */
7100 msleep(100);
	/* finish PXP init */
7102 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7103 if (val != 1) {
7104 BNX2X_ERR("PXP2 CFG failed\n");
7105 return -EBUSY;
7106 }
7107 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7108 if (val != 1) {
7109 BNX2X_ERR("PXP2 RD_INIT failed\n");
7110 return -EBUSY;
7111 }
7112
	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
7118 if (!CHIP_IS_E1x(bp)) {
/* In E2 there is a bug in the timers block that can cause function 6 / 7
 * (i.e. vnic3) to start even if it is marked as "scan-off".
 * This occurs when a different function (func2,3) is being marked
 * as "scan-off". Real-life scenario for example: if a driver is being
 * load-unloaded while func6,7 are down. This will cause the timer to access
 * the ilt, translate to a logical address and send a request to read/write.
 * Since the ilt for the function that is down is not valid, this will cause
 * a translation error which is unrecoverable.
 * The Workaround is intended to make sure that when this happens nothing
 * fatal will occur. The workaround:
 *	1.  First PF driver which loads on a path will:
 *		a.  After taking the chip out of reset, by using pretend,
 *		    it will write "0" to the following registers of
 *		    the other vnics.
 *		    REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 *		    REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF, 0);
 *		    REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF, 0);
 *		    And for itself it will write '1' to
 *		    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
 *		    dmae-operations (writing to pram for example.)
 *		b.  Write zero+valid to the entire ILT.
 *		c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
 *		    VNIC3 (of that port). The range allocated will be the
 *		    entire ILT. This is needed to prevent ILT range error.
 *	2.  Any PF driver load flow:
 *		a.  ILT update with the physical addresses of the allocated
 *		    logical pages.
 *		b.  Wait 20msec. - note that this timeout is needed to make
 *		    sure there are no requests in one of the PXP internal
 *		    queues with "old" ILT addresses.
 *		c.  PF enable in the PGLC.
 *		d.  Clear the was_error of the PF in the PGLC. (could have
 *		    occurred while driver was down)
 *		e.  PF enable in the CFC (WEAK + STRONG)
 *		f.  Timers scan enable
 *	3.  PF driver unload flow:
 *		a.  Clear the Timers scan_en.
 *		b.  Polling for scan_on=0 for that PF.
 *		c.  Clear the PF enable bit in the PXP.
 *		d.  Clear the PF enable in the CFC (WEAK + STRONG)
 *		e.  Write zero+valid to all ILT entries (The valid bit must
 *		    stay set)
 *		f.  If this is VNIC 3 of a port then also init
 *		    first_timers_ilt_entry to zero and last_timers_ilt_entry
 *		    to the last entry in the ILT.
 *
 * Notes:
 * Currently the PF error in the PGLC is non recoverable.
 * All ILT entries are written by zero+valid and not just PF6/7 ILT entries
 * since in the future the ILT entries allocation for PF-s might be dynamic.
 */
7181 struct ilt_client_info ilt_cli;
7182 struct bnx2x_ilt ilt;
7183 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7184 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7185
		/* initialize dummy TM client */
7187 ilt_cli.start = 0;
7188 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7189 ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and his brother are split registers
		 */
7202 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7203 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7204 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7205
7206 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7207 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7208 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7209 }
7210
7211 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7212 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7213
7214 if (!CHIP_IS_E1x(bp)) {
7215 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7216 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7217 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7218
7219 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7220
		/* let the HW do it's magic ... */
7222 do {
7223 msleep(200);
7224 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7225 } while (factor-- && (val != 1));
7226
7227 if (val != 1) {
7228 BNX2X_ERR("ATC_INIT failed\n");
7229 return -EBUSY;
7230 }
7231 }
7232
7233 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7234
7235 bnx2x_iov_init_dmae(bp);
7236
	/* clean the DMAE memory */
7238 bp->dmae_ready = 1;
7239 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7240
7241 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7242
7243 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7244
7245 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7246
7247 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7248
7249 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7250 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7251 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7252 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7253
7254 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7255
	/* QM queues pointers table */
7257 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7258
	/* soft reset pulse */
7260 REG_WR(bp, QM_REG_SOFT_RESET, 1);
7261 REG_WR(bp, QM_REG_SOFT_RESET, 0);
7262
7263 if (CNIC_SUPPORT(bp))
7264 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7265
7266 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7267
7268 if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
7270 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7271
7272 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7273
7274 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7275 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7276
7277 if (!CHIP_IS_E1(bp))
7278 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7279
7280 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7281 if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * received in afex mode
			 */
7285 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7286 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7287 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7288 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7289 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7290 } else {
			/* Bit-map indicating which L2 hdrs may appear
			 * after the basic Ethernet header
			 */
7294 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7295 bp->path_has_ovlan ? 7 : 6);
7296 }
7297 }
7298
7299 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7300 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7301 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7302 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7303
7304 if (!CHIP_IS_E1x(bp)) {
		/* reset VFC memories */
7306 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7307 VFC_MEMORIES_RST_REG_CAM_RST |
7308 VFC_MEMORIES_RST_REG_RAM_RST);
7309 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7310 VFC_MEMORIES_RST_REG_CAM_RST |
7311 VFC_MEMORIES_RST_REG_RAM_RST);
7312
7313 msleep(20);
7314 }
7315
7316 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7317 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7318 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7319 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7320
	/* sync semi rtc */
7322 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7323 0x80000000);
7324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7325 0x80000000);
7326
7327 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7328 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7329 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7330
7331 if (!CHIP_IS_E1x(bp)) {
7332 if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * sent in afex mode
			 */
7336 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7337 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7338 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7339 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7340 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7341 } else {
7342 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7343 bp->path_has_ovlan ? 7 : 6);
7344 }
7345 }
7346
7347 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7348
7349 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7350
7351 if (CNIC_SUPPORT(bp)) {
7352 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7353 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7354 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7355 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7356 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7357 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7358 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7359 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7360 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7361 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7362 }
7363 REG_WR(bp, SRC_REG_SOFT_RST, 0);
7364
7365 if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
7367 dev_alert(&bp->pdev->dev,
7368 "please adjust the size of cdu_context(%ld)\n",
7369 (long)sizeof(union cdu_context));
7370
7371 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7372 val = (4 << 24) + (0 << 12) + 1024;
7373 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7374
7375 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7376 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
7378 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
7381 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7382
7383 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7384
7385 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7386 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7387
7388 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7389 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7390
	/* Reset PCIE errors for debug */
7392 REG_WR(bp, 0x2814, 0xffffffff);
7393 REG_WR(bp, 0x3820, 0xffffffff);
7394
7395 if (!CHIP_IS_E1x(bp)) {
7396 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7397 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7398 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7399 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7400 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7401 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7402 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7403 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7404 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7405 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7406 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7407 }
7408
7409 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7410 if (!CHIP_IS_E1(bp)) {
		/* in E3 this done in per-port section */
7412 if (!CHIP_IS_E3(bp))
7413 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7414 }
7415 if (CHIP_IS_E1H(bp))
		/* not applicable for E2 (and above) */
7417 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7418
7419 if (CHIP_REV_IS_SLOW(bp))
7420 msleep(200);
7421
	/* finish CFC init */
7423 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7424 if (val != 1) {
7425 BNX2X_ERR("CFC LL_INIT failed\n");
7426 return -EBUSY;
7427 }
7428 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7429 if (val != 1) {
7430 BNX2X_ERR("CFC AC_INIT failed\n");
7431 return -EBUSY;
7432 }
7433 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7434 if (val != 1) {
7435 BNX2X_ERR("CFC CAM_INIT failed\n");
7436 return -EBUSY;
7437 }
7438 REG_WR(bp, CFC_REG_DEBUG0, 0);
7439
7440 if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
7443 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7444 val = *bnx2x_sp(bp, wb_data[0]);
7445
		/* do internal memory self test */
7447 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7448 BNX2X_ERR("internal mem self test failed\n");
7449 return -EBUSY;
7450 }
7451 }
7452
7453 bnx2x_setup_fan_failure_detection(bp);
7454
	/* clear PXP2 attentions */
7456 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7457
7458 bnx2x_enable_blocks_attention(bp);
7459 bnx2x_enable_blocks_parity(bp);
7460
7461 if (!BP_NOMCP(bp)) {
7462 if (CHIP_IS_E1x(bp))
7463 bnx2x__common_init_phy(bp);
7464 } else
7465 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7466
7467 if (SHMEM2_HAS(bp, netproc_fw_ver))
7468 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7469
7470 return 0;
7471}
7472
/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */
7478static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7479{
7480 int rc = bnx2x_init_hw_common(bp);
7481
7482 if (rc)
7483 return rc;
7484
	/* In E2 2-PORT mode, same ext phy is used for the two paths */
7486 if (!BP_NOMCP(bp))
7487 bnx2x__common_init_phy(bp);
7488
7489 return 0;
7490}
7491
7492static int bnx2x_init_hw_port(struct bnx2x *bp)
7493{
7494 int port = BP_PORT(bp);
7495 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7496 u32 low, high;
7497 u32 val, reg;
7498
7499 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7500
7501 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7502
7503 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7504 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7505 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7506
	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
7512 if (!CHIP_IS_E1x(bp))
7513 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7514
7515 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7516 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7517 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7518 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7519
7520 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7521 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7522 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7523 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7524
	/* QM cid (connection) count */
7526 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7527
7528 if (CNIC_SUPPORT(bp)) {
7529 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7530 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7531 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7532 }
7533
7534 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7535
7536 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7537
7538 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7539
7540 if (IS_MF(bp))
7541 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7542 else if (bp->dev->mtu > 4096) {
7543 if (bp->flags & ONE_PORT_FLAG)
7544 low = 160;
7545 else {
7546 val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
7548 low = 96 + (val/64) +
7549 ((val % 64) ? 1 : 0);
7550 }
7551 } else
7552 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7553 high = low + 56;
7554 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7555 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7556 }
7557
7558 if (CHIP_MODE_IS_4_PORT(bp))
7559 REG_WR(bp, (BP_PORT(bp) ?
7560 BRB1_REG_MAC_GUARANTIED_1 :
7561 BRB1_REG_MAC_GUARANTIED_0), 40);
7562
7563 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7564 if (CHIP_IS_E3B0(bp)) {
7565 if (IS_MF_AFEX(bp)) {
			/* configure headers for AFEX mode */
7567 REG_WR(bp, BP_PORT(bp) ?
7568 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7569 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7570 REG_WR(bp, BP_PORT(bp) ?
7571 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7572 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7573 REG_WR(bp, BP_PORT(bp) ?
7574 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7575 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7576 } else {
			/* Ovlan exists only if we are in multi-function +
			 * switch-dependent mode, in switch-independent there
			 * is no ovlan headers
			 */
7581 REG_WR(bp, BP_PORT(bp) ?
7582 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7583 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7584 (bp->path_has_ovlan ? 7 : 6));
7585 }
7586 }
7587
7588 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7589 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7590 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7591 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7592
7593 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7594 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7595 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7596 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7597
7598 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7599 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7600
7601 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7602
7603 if (CHIP_IS_E1x(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
7605 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7606
		/* update threshold */
7608 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
7610 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7611
		/* probe changes */
7613 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7614 udelay(50);
7615 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7616 }
7617
7618 if (CNIC_SUPPORT(bp))
7619 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7620
7621 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7622 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7623
7624 if (CHIP_IS_E1(bp)) {
7625 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7626 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7627 }
7628 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7629
7630 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7631
7632 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7633
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
7637 val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
7639 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7640 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7641
	/* SCPAD_PARITY should NOT trigger close the gates */
7643 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7644 REG_WR(bp, reg,
7645 REG_RD(bp, reg) &
7646 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7647
7648 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7649 REG_WR(bp, reg,
7650 REG_RD(bp, reg) &
7651 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7652
7653 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7654
7655 if (!CHIP_IS_E1x(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		 * basic Ethernet header
		 */
7659 if (IS_MF_AFEX(bp))
7660 REG_WR(bp, BP_PORT(bp) ?
7661 NIG_REG_P1_HDRS_AFTER_BASIC :
7662 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7663 else
7664 REG_WR(bp, BP_PORT(bp) ?
7665 NIG_REG_P1_HDRS_AFTER_BASIC :
7666 NIG_REG_P0_HDRS_AFTER_BASIC,
7667 IS_MF_SD(bp) ? 7 : 6);
7668
7669 if (CHIP_IS_E3(bp))
7670 REG_WR(bp, BP_PORT(bp) ?
7671 NIG_REG_LLH1_MF_MODE :
7672 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7673 }
7674 if (!CHIP_IS_E3(bp))
7675 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7676
7677 if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
7679 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7680 (IS_MF_SD(bp) ? 0x1 : 0x2));
7681
7682 if (!CHIP_IS_E1x(bp)) {
7683 val = 0;
7684 switch (bp->mf_mode) {
7685 case MULTI_FUNCTION_SD:
7686 val = 1;
7687 break;
7688 case MULTI_FUNCTION_SI:
7689 case MULTI_FUNCTION_AFEX:
7690 val = 2;
7691 break;
7692 }
7693
7694 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7695 NIG_REG_LLH0_CLS_TYPE), val);
7696 }
7697 {
7698 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7699 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7700 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7701 }
7702 }
7703
	/* If SPIO5 is set to generate interrupts, enable it for this port */
7705 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7706 if (val & MISC_SPIO_SPIO5) {
7707 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7708 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7709 val = REG_RD(bp, reg_addr);
7710 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7711 REG_WR(bp, reg_addr, val);
7712 }
7713
7714 return 0;
7715}
7716
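/* Write a single 64-bit ILT entry through the on-chip address table */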
7717static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7718{
7719 int reg;
7720 u32 wb_write[2];
7721
7722 if (CHIP_IS_E1(bp))
7723 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7724 else
7725 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7726
7727 wb_write[0] = ONCHIP_ADDR1(addr);
7728 wb_write[1] = ONCHIP_ADDR2(addr);
7729 REG_WR_DMAE(bp, reg, wb_write, 2);
7730}
7731
7732void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7733{
7734 u32 data, ctl, cnt = 100;
7735 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7736 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7737 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7738 u32 sb_bit = 1 << (idu_sb_id%32);
7739 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7740 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7741
	/* Not supported in BC mode */
7743 if (CHIP_INT_MODE_IS_BC(bp))
7744 return;
7745
7746 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7747 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7748 IGU_REGULAR_CLEANUP_SET |
7749 IGU_REGULAR_BCLEANUP;
7750
7751 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7752 func_encode << IGU_CTRL_REG_FID_SHIFT |
7753 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7754
7755 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7756 data, igu_addr_data);
7757 REG_WR(bp, igu_addr_data, data);
7758 mmiowb();
7759 barrier();
7760 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7761 ctl, igu_addr_ctl);
7762 REG_WR(bp, igu_addr_ctl, ctl);
7763 mmiowb();
7764 barrier();
7765
	/* wait for clean up to finish */
7767 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7768 msleep(20);
7769
7770 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7771 DP(NETIF_MSG_HW,
7772 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7773 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7774 }
7775}
7776
7777static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7778{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7780}
7781
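/* Zero all ILT entries that belong to the given function */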
7782static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7783{
7784 u32 i, base = FUNC_ILT_BASE(func);
7785 for (i = base; i < base + ILT_PER_FUNC; i++)
7786 bnx2x_ilt_wr(bp, i, 0);
7787}
7788
7789static void bnx2x_init_searcher(struct bnx2x *bp)
7790{
7791 int port = BP_PORT(bp);
7792 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
	/* T1 hash bits value determines the T1 number of entries */
7794 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7795}
7796
7797static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7798{
7799 int rc;
7800 struct bnx2x_func_state_params func_params = {NULL};
7801 struct bnx2x_func_switch_update_params *switch_update_params =
7802 &func_params.params.switch_update;
7803
	/* Prepare parameters for function state transitions */
7805 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7806 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7807
7808 func_params.f_obj = &bp->func_obj;
7809 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7810
	/* Function parameters */
7812 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7813 &switch_update_params->changes);
7814 if (suspend)
7815 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7816 &switch_update_params->changes);
7817
7818 rc = bnx2x_func_state_change(bp, &func_params);
7819
7820 return rc;
7821}
7822
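/* Take the parser out of NIC mode: close rx filters and suspend
 * tx-switching before clearing PRS_REG_NIC_MODE, then re-open everything.
 */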
7823static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7824{
7825 int rc, i, port = BP_PORT(bp);
7826 int vlan_en = 0, mac_en[NUM_MACS];
7827

	/* Close input from network */
7829 if (bp->mf_mode == SINGLE_FUNCTION) {
7830 bnx2x_set_rx_filter(&bp->link_params, 0);
7831 } else {
7832 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7833 NIG_REG_LLH0_FUNC_EN);
7834 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7835 NIG_REG_LLH0_FUNC_EN, 0);
7836 for (i = 0; i < NUM_MACS; i++) {
7837 mac_en[i] = REG_RD(bp, port ?
7838 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7839 4 * i) :
7840 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7841 4 * i));
7842 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7843 4 * i) :
7844 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7845 }
7846 }
7847
	/* Close BMC to host */
7849 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7850 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7851
	/* Suspend Tx switching to the PF. Completion of this ramrod
	 * further guarantees that all the packets of that PF / child
	 * VFs in BRB were processed by the Parser, so it is safe to
	 * change the NIC_MODE register.
	 */
7857 rc = bnx2x_func_switch_update(bp, 1);
7858 if (rc) {
7859 BNX2X_ERR("Can't suspend tx-switching!\n");
7860 return rc;
7861 }
7862
	/* Change NIC_MODE register */
7864 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7865
	/* Open input from network */
7867 if (bp->mf_mode == SINGLE_FUNCTION) {
7868 bnx2x_set_rx_filter(&bp->link_params, 1);
7869 } else {
7870 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7871 NIG_REG_LLH0_FUNC_EN, vlan_en);
7872 for (i = 0; i < NUM_MACS; i++) {
7873 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7874 4 * i) :
7875 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7876 mac_en[i]);
7877 }
7878 }
7879
	/* Enable BMC to host */
7881 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7882 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7883
	/* Resume Tx switching to the PF */
7885 rc = bnx2x_func_switch_update(bp, 0);
7886 if (rc) {
7887 BNX2X_ERR("Can't resume tx-switching!\n");
7888 return rc;
7889 }
7890
7891 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7892 return 0;
7893}
7894
7895int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7896{
7897 int rc;
7898
7899 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7900
7901 if (CONFIGURE_NIC_MODE(bp)) {
		/* Configure searcher as part of function hw init */
7903 bnx2x_init_searcher(bp);
7904
		/* Reset NIC mode */
7906 rc = bnx2x_reset_nic_mode(bp);
7907 if (rc)
7908 BNX2X_ERR("Can't change NIC mode!\n");
7909 return rc;
7910 }
7911
7912 return 0;
7913}
7914
/* previous driver DMAE transaction may have occurred when pre-boot stage
 * ended and boot began, or when kdump kernel was loaded. Either case would
 * invalidate the addresses of the transaction, resulting in was-error bit
 * set in the pci causing all hw-to-host pcie transactions to timeout. If
 * this happened we want to clear the interrupt which detected this from the
 * pglueb and the was-done bit
 */
7922static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7923{
7924 if (!CHIP_IS_E1x(bp))
7925 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7926 1 << BP_ABS_FUNC(bp));
7927}
7928
7929static int bnx2x_init_hw_func(struct bnx2x *bp)
7930{
7931 int port = BP_PORT(bp);
7932 int func = BP_FUNC(bp);
7933 int init_phase = PHASE_PF0 + func;
7934 struct bnx2x_ilt *ilt = BP_ILT(bp);
7935 u16 cdu_ilt_start;
7936 u32 addr, val;
7937 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7938 int i, main_mem_width, rc;
7939
7940 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7941
	/* FLR cleanup */
7943 if (!CHIP_IS_E1x(bp)) {
7944 rc = bnx2x_pf_flr_clnup(bp);
7945 if (rc) {
7946 bnx2x_fw_dump(bp);
7947 return rc;
7948 }
7949 }
7950
	/* set MSI reconfigure capability */
7952 if (bp->common.int_block == INT_BLOCK_HC) {
7953 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7954 val = REG_RD(bp, addr);
7955 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7956 REG_WR(bp, addr, val);
7957 }
7958
7959 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7960 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7961
7962 ilt = BP_ILT(bp);
7963 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7964
7965 if (IS_SRIOV(bp))
7966 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7967 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7968
	/* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
	 * those of the VFs, so start line should be reset
	 */
7972 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7973 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7974 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7975 ilt->lines[cdu_ilt_start + i].page_mapping =
7976 bp->context[i].cxt_mapping;
7977 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7978 }
7979
7980 bnx2x_ilt_init_op(bp, INITOP_SET);
7981
7982 if (!CONFIGURE_NIC_MODE(bp)) {
7983 bnx2x_init_searcher(bp);
7984 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7985 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7986 } else {
		/* Set NIC mode */
7988 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7989 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7990 }
7991
7992 if (!CHIP_IS_E1x(bp)) {
7993 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7994
		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
7998 if (!(bp->flags & USING_MSIX_FLAG))
7999 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
8000
		/* Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
8006 msleep(20);
8007
		/* Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
8012 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
8014 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8015 }
8016
8017 bp->dmae_ready = 1;
8018
8019 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8020
8021 bnx2x_clean_pglue_errors(bp);
8022
8023 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8024 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8025 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8026 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8027 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8028 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8029 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8030 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8031 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8032 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8033 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8034 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8035 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8036
8037 if (!CHIP_IS_E1x(bp))
8038 REG_WR(bp, QM_REG_PF_EN, 1);
8039
8040 if (!CHIP_IS_E1x(bp)) {
8041 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8042 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8043 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8044 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8045 }
8046 bnx2x_init_block(bp, BLOCK_QM, init_phase);
8047
8048 bnx2x_init_block(bp, BLOCK_TM, init_phase);
8049 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8050 REG_WR(bp, DORQ_REG_MODE_ACT, 1);
8051
8052 bnx2x_iov_init_dq(bp);
8053
8054 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8055 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8056 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8057 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8058 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8059 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8060 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8061 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8062 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8063 if (!CHIP_IS_E1x(bp))
8064 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8065
8066 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8067
8068 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8069
8070 if (!CHIP_IS_E1x(bp))
8071 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8072
8073 if (IS_MF(bp)) {
8074 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8075 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8076 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8077 bp->mf_ov);
8078 }
8079 }
8080
8081 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8082
	/* HC init per function */
8084 if (bp->common.int_block == INT_BLOCK_HC) {
8085 if (CHIP_IS_E1H(bp)) {
8086 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8087
8088 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8089 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8090 }
8091 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8092
8093 } else {
8094 int num_segs, sb_idx, prod_offset;
8095
8096 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8097
8098 if (!CHIP_IS_E1x(bp)) {
8099 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8100 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8101 }
8102
8103 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8104
8105 if (!CHIP_IS_E1x(bp)) {
8106 int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last
			 * 20 producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
8128 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8129 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8130 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8131 prod_offset = (bp->igu_base_sb + sb_idx) *
8132 num_segs;
8133
8134 for (i = 0; i < num_segs; i++) {
8135 addr = IGU_REG_PROD_CONS_MEMORY +
8136 (prod_offset + i) * 4;
8137 REG_WR(bp, addr, 0);
8138 }
				/* send consumer update with value 0 */
8140 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8141 USTORM_ID, 0, IGU_INT_NOP, 1);
8142 bnx2x_igu_clear_sb(bp,
8143 bp->igu_base_sb + sb_idx);
8144 }
8145
			/* default-status-blocks */
8147 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8148 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8149
8150 if (CHIP_MODE_IS_4_PORT(bp))
8151 dsb_idx = BP_FUNC(bp);
8152 else
8153 dsb_idx = BP_VN(bp);
8154
8155 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8156 IGU_BC_BASE_DSB_PROD + dsb_idx :
8157 IGU_NORM_BASE_DSB_PROD + dsb_idx);
			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * does not matters what is the current chip mode
			 */
8163 for (i = 0; i < (num_segs * E1HVN_MAX);
8164 i += E1HVN_MAX) {
8165 addr = IGU_REG_PROD_CONS_MEMORY +
8166 (prod_offset + i)*4;
8167 REG_WR(bp, addr, 0);
8168 }
			/* send consumer update with 0 */
8170 if (CHIP_INT_MODE_IS_BC(bp)) {
8171 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8172 USTORM_ID, 0, IGU_INT_NOP, 1);
8173 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8174 CSTORM_ID, 0, IGU_INT_NOP, 1);
8175 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8176 XSTORM_ID, 0, IGU_INT_NOP, 1);
8177 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8178 TSTORM_ID, 0, IGU_INT_NOP, 1);
8179 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8180 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8181 } else {
8182 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8183 USTORM_ID, 0, IGU_INT_NOP, 1);
8184 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8185 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8186 }
8187 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8188
			/* !!! These should become driver const once
			   rf-tool supports split-68 const */
8191 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8192 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8193 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8194 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8195 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8196 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8197 }
8198 }
8199
	/* Reset PCIE errors for debug */
8201 REG_WR(bp, 0x2114, 0xffffffff);
8202 REG_WR(bp, 0x2120, 0xffffffff);
8203
8204 if (CHIP_IS_E1x(bp)) {
8205 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
8206 main_mem_base = HC_REG_MAIN_MEMORY +
8207 BP_PORT(bp) * (main_mem_size * 4);
8208 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8209 main_mem_width = 8;
8210
8211 val = REG_RD(bp, main_mem_prty_clr);
8212 if (val)
8213 DP(NETIF_MSG_HW,
8214 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8215 val);

		/* Clear "false" parity errors in MSI-X table */
8218 for (i = main_mem_base;
8219 i < main_mem_base + main_mem_size * 4;
8220 i += main_mem_width) {
8221 bnx2x_read_dmae(bp, i, main_mem_width / 4);
8222 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8223 i, main_mem_width / 4);
8224 }
		/* Clear HC parity attention */
8226 REG_RD(bp, main_mem_prty_clr);
8227 }
8228
8229#ifdef BNX2X_STOP_ON_ERROR
	/* Enable STORMs SP logging */
8231 REG_WR8(bp, BAR_USTRORM_INTMEM +
8232 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8233 REG_WR8(bp, BAR_TSTRORM_INTMEM +
8234 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8235 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8236 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8237 REG_WR8(bp, BAR_XSTRORM_INTMEM +
8238 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8239#endif
8240
8241 bnx2x_phy_probe(&bp->link_params);
8242
8243 return 0;
8244}
8245
8246void bnx2x_free_mem_cnic(struct bnx2x *bp)
8247{
8248 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8249
8250 if (!CHIP_IS_E1x(bp))
8251 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8252 sizeof(struct host_hc_status_block_e2));
8253 else
8254 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8255 sizeof(struct host_hc_status_block_e1x));
8256
8257 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8258}
8259
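/* Free all PF slow-path memory allocated by bnx2x_alloc_mem() */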
8260void bnx2x_free_mem(struct bnx2x *bp)
8261{
8262 int i;
8263
8264 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8265 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8266
8267 if (IS_VF(bp))
8268 return;
8269
8270 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8271 sizeof(struct host_sp_status_block));
8272
8273 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8274 sizeof(struct bnx2x_slowpath));
8275
8276 for (i = 0; i < L2_ILT_LINES(bp); i++)
8277 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8278 bp->context[i].size);
8279 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8280
8281 BNX2X_FREE(bp->ilt->lines);
8282
8283 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8284
8285 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8286 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8287
8288 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8289
8290 bnx2x_iov_free_mem(bp);
8291}
8292
8293int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8294{
8295 if (!CHIP_IS_E1x(bp)) {
		/* size = the status block + ramrod buffers */
8297 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8298 sizeof(struct host_hc_status_block_e2));
8299 if (!bp->cnic_sb.e2_sb)
8300 goto alloc_mem_err;
8301 } else {
8302 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8303 sizeof(struct host_hc_status_block_e1x));
8304 if (!bp->cnic_sb.e1x_sb)
8305 goto alloc_mem_err;
8306 }
8307
8308 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
		/* allocate searcher T2 table, as it wasn't allocated before */
8310 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8311 if (!bp->t2)
8312 goto alloc_mem_err;
8313 }
8314
	/* write address to which L5 should insert its values */
8316 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8317 &bp->slowpath->drv_info_to_mcp;
8318
8319 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8320 goto alloc_mem_err;
8321
8322 return 0;
8323
8324alloc_mem_err:
8325 bnx2x_free_mem_cnic(bp);
8326 BNX2X_ERR("Can't allocate memory\n");
8327 return -ENOMEM;
8328}
8329
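/* Allocate PF slow-path memory: default status block, slowpath buffer,
 * CDU context, ILT lines, slow-path queue and event queue ring.
 */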
8330int bnx2x_alloc_mem(struct bnx2x *bp)
8331{
8332 int i, allocated, context_size;
8333
8334 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
		/* allocate searcher T2 table */
8336 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8337 if (!bp->t2)
8338 goto alloc_mem_err;
8339 }
8340
8341 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8342 sizeof(struct host_sp_status_block));
8343 if (!bp->def_status_blk)
8344 goto alloc_mem_err;
8345
8346 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8347 sizeof(struct bnx2x_slowpath));
8348 if (!bp->slowpath)
8349 goto alloc_mem_err;
8350
	/* Allocate memory for CDU context:
	 * This memory is allocated separately and not in the generic ILT
	 * functions because CDU differs in few aspects:
	 * 1. There can be multiple entities allocating memory for context -
	 * regular L2, CNIC, and SRIOV driver. Each separately controls
	 * its own ILT lines.
	 * 2. Since CDU page-size is not a constant, but depend on
	 * BNX2X_FIRST_VF_CID, this value is used to determine the amount of
	 * ILT lines.
	 * The size of an ILT line is CDU page-size. So, do not waste memory,
	 * allocate context memory in chunks of ILT lines.
	 */
8364 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8365
8366 for (i = 0, allocated = 0; allocated < context_size; i++) {
8367 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8368 (context_size - allocated));
8369 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8370 bp->context[i].size);
8371 if (!bp->context[i].vcxt)
8372 goto alloc_mem_err;
8373 allocated += bp->context[i].size;
8374 }
8375 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8376 GFP_KERNEL);
8377 if (!bp->ilt->lines)
8378 goto alloc_mem_err;
8379
8380 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8381 goto alloc_mem_err;
8382
8383 if (bnx2x_iov_alloc_mem(bp))
8384 goto alloc_mem_err;
8385
	/* slow path ring */
8387 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8388 if (!bp->spq)
8389 goto alloc_mem_err;
8390
	/* EQ */
8392 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8393 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8394 if (!bp->eq_ring)
8395 goto alloc_mem_err;
8396
8397 return 0;
8398
8399alloc_mem_err:
8400 bnx2x_free_mem(bp);
8401 BNX2X_ERR("Can't allocate memory\n");
8402 return -ENOMEM;
8403}
8404
/*
 * Init service functions
 */
8409int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8410 struct bnx2x_vlan_mac_obj *obj, bool set,
8411 int mac_type, unsigned long *ramrod_flags)
8412{
8413 int rc;
8414 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8415
8416 memset(&ramrod_param, 0, sizeof(ramrod_param));
8417
	/* Fill general parameters */
8419 ramrod_param.vlan_mac_obj = obj;
8420 ramrod_param.ramrod_flags = *ramrod_flags;
8421
	/* Fill a user request section if needed */
8423 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8424 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8425
8426 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8427
		/* Set the command: ADD or DEL */
8429 if (set)
8430 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8431 else
8432 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8433 }
8434
8435 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8436
8437 if (rc == -EEXIST) {
8438 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
		/* do not treat adding same MAC as error */
8440 rc = 0;
8441 } else if (rc < 0)
8442 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8443
8444 return rc;
8445}
8446
8447int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8448 struct bnx2x_vlan_mac_obj *obj, bool set,
8449 unsigned long *ramrod_flags)
8450{
8451 int rc;
8452 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8453
8454 memset(&ramrod_param, 0, sizeof(ramrod_param));
8455
	/* Fill general parameters */
8457 ramrod_param.vlan_mac_obj = obj;
8458 ramrod_param.ramrod_flags = *ramrod_flags;
8459
	/* Fill a user request section if needed */
8461 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8462 ramrod_param.user_req.u.vlan.vlan = vlan;
		/* Set the command: ADD or DEL */
8464 if (set)
8465 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8466 else
8467 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8468 }
8469
8470 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8471
8472 if (rc == -EEXIST) {
		/* do not treat adding same vlan as error */
8474 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8475 rc = 0;
8476 } else if (rc < 0) {
8477 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8478 }
8479
8480 return rc;
8481}
8482
8483int bnx2x_del_all_macs(struct bnx2x *bp,
8484 struct bnx2x_vlan_mac_obj *mac_obj,
8485 int mac_type, bool wait_for_comp)
8486{
8487 int rc;
8488 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8489
	/* Wait for completion of requested */
8491 if (wait_for_comp)
8492 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8493
	/* Set the mac type of addresses we are going to delete */
8495 __set_bit(mac_type, &vlan_mac_flags);
8496
8497 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8498 if (rc < 0)
8499 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8500
8501 return rc;
8502}
8503
8504int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8505{
8506 if (IS_PF(bp)) {
8507 unsigned long ramrod_flags = 0;
8508
8509 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8510 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8511 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8512 &bp->sp_objs->mac_obj, set,
8513 BNX2X_ETH_MAC, &ramrod_flags);
8514 } else {
8515 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8516 bp->fp->index, set);
8517 }
8518}
8519
8520int bnx2x_setup_leading(struct bnx2x *bp)
8521{
8522 if (IS_PF(bp))
8523 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8524 else
8525 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8526}
8527
8528
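/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 */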
8535int bnx2x_set_int_mode(struct bnx2x *bp)
8536{
8537 int rc = 0;
8538
8539 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
		BNX2X_ERR("VF not loaded since interrupt mode is not MSI-X\n");
8541 return -EINVAL;
8542 }
8543
8544 switch (int_mode) {
8545 case BNX2X_INT_MODE_MSIX:
8546
8547 rc = bnx2x_enable_msix(bp);
8548
8549
8550 if (!rc)
8551 return 0;
8552
8553
8554 if (rc && IS_VF(bp))
8555 return rc;
8556
8557
8558 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8559 bp->num_queues,
8560 1 + bp->num_cnic_queues);
8561
8562
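		/* failed to enable multiple MSI-X - fall through to MSI */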
8563 case BNX2X_INT_MODE_MSI:
8564 bnx2x_enable_msi(bp);
8565
8566
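		/* fall through */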
8567 case BNX2X_INT_MODE_INTX:
8568 bp->num_ethernet_queues = 1;
8569 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8570 BNX2X_DEV_INFO("set number of queues to 1\n");
8571 break;
8572 default:
8573 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8574 return -EINVAL;
8575 }
8576 return 0;
8577}
8578
8579
8580static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8581{
8582 if (IS_SRIOV(bp))
8583 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8584 return L2_ILT_LINES(bp);
8585}
8586
8587void bnx2x_ilt_set_info(struct bnx2x *bp)
8588{
8589 struct ilt_client_info *ilt_client;
8590 struct bnx2x_ilt *ilt = BP_ILT(bp);
8591 u16 line = 0;
8592
8593 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8594 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8595
8596
8597 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8598 ilt_client->client_num = ILT_CLIENT_CDU;
8599 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8600 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8601 ilt_client->start = line;
8602 line += bnx2x_cid_ilt_lines(bp);
8603
8604 if (CNIC_SUPPORT(bp))
8605 line += CNIC_ILT_LINES;
8606 ilt_client->end = line - 1;
8607
8608 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8609 ilt_client->start,
8610 ilt_client->end,
8611 ilt_client->page_size,
8612 ilt_client->flags,
8613 ilog2(ilt_client->page_size >> 12));
8614
8615
8616 if (QM_INIT(bp->qm_cid_count)) {
8617 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8618 ilt_client->client_num = ILT_CLIENT_QM;
8619 ilt_client->page_size = QM_ILT_PAGE_SZ;
8620 ilt_client->flags = 0;
8621 ilt_client->start = line;
8622
8623
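		/* 4 bytes for each cid */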
8624 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8625 QM_ILT_PAGE_SZ);
8626
8627 ilt_client->end = line - 1;
8628
8629 DP(NETIF_MSG_IFUP,
8630 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8631 ilt_client->start,
8632 ilt_client->end,
8633 ilt_client->page_size,
8634 ilt_client->flags,
8635 ilog2(ilt_client->page_size >> 12));
8636 }
8637
8638 if (CNIC_SUPPORT(bp)) {
8639
8640 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8641 ilt_client->client_num = ILT_CLIENT_SRC;
8642 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8643 ilt_client->flags = 0;
8644 ilt_client->start = line;
8645 line += SRC_ILT_LINES;
8646 ilt_client->end = line - 1;
8647
8648 DP(NETIF_MSG_IFUP,
8649 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8650 ilt_client->start,
8651 ilt_client->end,
8652 ilt_client->page_size,
8653 ilt_client->flags,
8654 ilog2(ilt_client->page_size >> 12));
8655
8656
8657 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8658 ilt_client->client_num = ILT_CLIENT_TM;
8659 ilt_client->page_size = TM_ILT_PAGE_SZ;
8660 ilt_client->flags = 0;
8661 ilt_client->start = line;
8662 line += TM_ILT_LINES;
8663 ilt_client->end = line - 1;
8664
8665 DP(NETIF_MSG_IFUP,
8666 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8667 ilt_client->start,
8668 ilt_client->end,
8669 ilt_client->page_size,
8670 ilt_client->flags,
8671 ilog2(ilt_client->page_size >> 12));
8672 }
8673
8674 BUG_ON(line > ILT_MAX_LINES);
8675}
8676
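/**
 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 *
 * @bp:			driver handle
 * @fp:			pointer to fastpath
 * @init_params:	pointer to parameters structure
 *
 * parameters configured:
 *	- HC configuration
 *	- Queue's CDU context
 */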
8688static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8689 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8690{
8691 u8 cos;
8692 int cxt_index, cxt_offset;
8693
8694
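	/* FCoE Queue uses Default SB, thus has no HC capabilities */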
8695 if (!IS_FCOE_FP(fp)) {
8696 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8697 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8698
8699
8700
8701
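		/* If HC is supported, enable host coalescing in the
		 * transition to INIT state.
		 */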
8702 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8703 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8704
8705
8706 init_params->rx.hc_rate = bp->rx_ticks ?
8707 (1000000 / bp->rx_ticks) : 0;
8708 init_params->tx.hc_rate = bp->tx_ticks ?
8709 (1000000 / bp->tx_ticks) : 0;
8710
8711
8712 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8713 fp->fw_sb_id;
8714
8715
8716
8717
8718
8719 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8720 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8721 }
8722
8723
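	/* set maximum number of COSs supported by this queue */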
8724 init_params->max_cos = fp->max_cos;
8725
8726 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8727 fp->index, init_params->max_cos);
8728
8729
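	/* set the context pointers queue object */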
8730 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8731 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8732 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8733 ILT_PAGE_CIDS);
8734 init_params->cxts[cos] =
8735 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8736 }
8737}
8738
8739static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8740 struct bnx2x_queue_state_params *q_params,
8741 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8742 int tx_index, bool leading)
8743{
8744 memset(tx_only_params, 0, sizeof(*tx_only_params));
8745
8746
8747 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8748
8749
8750 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8751
8752
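	/* choose the index of the cid to send the slow path on */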
8753 tx_only_params->cid_index = tx_index;
8754
8755
8756 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8757
8758
8759 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8760
8761 DP(NETIF_MSG_IFUP,
8762 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8763 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8764 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8765 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8766
8767
8768 return bnx2x_queue_state_change(bp, q_params);
8769}
8770
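/**
 * bnx2x_setup_queue - setup queue
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	is leading
 *
 * This function performs 2 steps in a Queue state machine
 *	actually: 1) RESET->INIT 2) INIT->SETUP
 */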
8782int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8783 bool leading)
8784{
8785 struct bnx2x_queue_state_params q_params = {NULL};
8786 struct bnx2x_queue_setup_params *setup_params =
8787 &q_params.params.setup;
8788 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8789 &q_params.params.tx_only;
8790 int rc;
8791 u8 tx_index;
8792
8793 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8794
8795
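	/* reset IGU state; skip FCoE L2 queue */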
8796 if (!IS_FCOE_FP(fp))
8797 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8798 IGU_INT_ENABLE, 0);
8799
8800 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8801
8802 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8803
8804
8805 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8806
8807
8808 q_params.cmd = BNX2X_Q_CMD_INIT;
8809
8810
8811 rc = bnx2x_queue_state_change(bp, &q_params);
8812 if (rc) {
8813 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8814 return rc;
8815 }
8816
8817 DP(NETIF_MSG_IFUP, "init complete\n");
8818
8819
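	/* Now move the Queue to the SETUP state */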
8820 memset(setup_params, 0, sizeof(*setup_params));
8821
8822
8823 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8824
8825
8826 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8827 FIRST_TX_COS_INDEX);
8828
8829 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8830 &setup_params->rxq_params);
8831
8832 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8833 FIRST_TX_COS_INDEX);
8834
8835
8836 q_params.cmd = BNX2X_Q_CMD_SETUP;
8837
8838 if (IS_FCOE_FP(fp))
8839 bp->fcoe_init = true;
8840
8841
8842 rc = bnx2x_queue_state_change(bp, &q_params);
8843 if (rc) {
8844 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8845 return rc;
8846 }
8847
8848
8849 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8850 tx_index < fp->max_cos;
8851 tx_index++) {
8852
8853
8854 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8855 tx_only_params, tx_index, leading);
8856 if (rc) {
8857 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8858 fp->index, tx_index);
8859 return rc;
8860 }
8861 }
8862
8863 return rc;
8864}
8865
8866static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8867{
8868 struct bnx2x_fastpath *fp = &bp->fp[index];
8869 struct bnx2x_fp_txdata *txdata;
8870 struct bnx2x_queue_state_params q_params = {NULL};
8871 int rc, tx_index;
8872
8873 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8874
8875 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8876
8877 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8878
8879
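	/* close tx-only connections */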
8880 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8881 tx_index < fp->max_cos;
8882 tx_index++){
8883
8884
8885 txdata = fp->txdata_ptr[tx_index];
8886
8887 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8888 txdata->txq_index);
8889
8890
8891 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8892 memset(&q_params.params.terminate, 0,
8893 sizeof(q_params.params.terminate));
8894 q_params.params.terminate.cid_index = tx_index;
8895
8896 rc = bnx2x_queue_state_change(bp, &q_params);
8897 if (rc)
8898 return rc;
8899
8900
8901 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8902 memset(&q_params.params.cfc_del, 0,
8903 sizeof(q_params.params.cfc_del));
8904 q_params.params.cfc_del.cid_index = tx_index;
8905 rc = bnx2x_queue_state_change(bp, &q_params);
8906 if (rc)
8907 return rc;
8908 }
8909
8910
8911 q_params.cmd = BNX2X_Q_CMD_HALT;
8912 rc = bnx2x_queue_state_change(bp, &q_params);
8913 if (rc)
8914 return rc;
8915
8916
8917 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8918 memset(&q_params.params.terminate, 0,
8919 sizeof(q_params.params.terminate));
8920 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8921 rc = bnx2x_queue_state_change(bp, &q_params);
8922 if (rc)
8923 return rc;
8924
8925 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8926 memset(&q_params.params.cfc_del, 0,
8927 sizeof(q_params.params.cfc_del));
8928 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8929 return bnx2x_queue_state_change(bp, &q_params);
8930}
8931
8932static void bnx2x_reset_func(struct bnx2x *bp)
8933{
8934 int port = BP_PORT(bp);
8935 int func = BP_FUNC(bp);
8936 int i;
8937
8938
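	/* Disable the function in the FW */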
8939 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8940 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8941 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8942 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8943
8944
8945 for_each_eth_queue(bp, i) {
8946 struct bnx2x_fastpath *fp = &bp->fp[i];
8947 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8948 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8949 SB_DISABLED);
8950 }
8951
8952 if (CNIC_LOADED(bp))
8953
8954 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8955 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8956 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8957
8958
8959 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8960 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8961 SB_DISABLED);
8962
8963 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8964 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8965 0);
8966
8967
8968 if (bp->common.int_block == INT_BLOCK_HC) {
8969 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8970 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8971 } else {
8972 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8973 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8974 }
8975
8976 if (CNIC_LOADED(bp)) {
8977
8978 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8979
8980
8981
8982
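		/* Wait for at least 10ms and up to 2 seconds for the timers
		 * scan to complete
		 */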
8983 for (i = 0; i < 200; i++) {
8984 usleep_range(10000, 20000);
8985 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8986 break;
8987 }
8988 }
8989
8990 bnx2x_clear_func_ilt(bp, func);
8991
8992
8993
8994
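	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ILT range for these timers.
	 */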
8995 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8996 struct ilt_client_info ilt_cli;
8997
8998 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8999 ilt_cli.start = 0;
9000 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9001 ilt_cli.client_num = ILT_CLIENT_TM;
9002
9003 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9004 }
9005
9006
9007 if (!CHIP_IS_E1x(bp))
9008 bnx2x_pf_disable(bp);
9009
9010 bp->dmae_ready = 0;
9011}
9012
9013static void bnx2x_reset_port(struct bnx2x *bp)
9014{
9015 int port = BP_PORT(bp);
9016 u32 val;
9017
9018
9019 bnx2x__link_reset(bp);
9020
9021 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9022
9023
9024 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9025
9026 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9027 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9028
9029
9030 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9031
9032 msleep(100);
9033
9034 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9035 if (val)
9036 DP(NETIF_MSG_IFDOWN,
9037 "BRB1 is not empty %d blocks are occupied\n", val);
9038
9039
9040}
9041
9042static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9043{
9044 struct bnx2x_func_state_params func_params = {NULL};
9045
9046
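	/* Prepare parameters for function state transitions */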
9047 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9048
9049 func_params.f_obj = &bp->func_obj;
9050 func_params.cmd = BNX2X_F_CMD_HW_RESET;
9051
9052 func_params.params.hw_init.load_phase = load_code;
9053
9054 return bnx2x_func_state_change(bp, &func_params);
9055}
9056
9057static int bnx2x_func_stop(struct bnx2x *bp)
9058{
9059 struct bnx2x_func_state_params func_params = {NULL};
9060 int rc;
9061
9062
9063 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9064 func_params.f_obj = &bp->func_obj;
9065 func_params.cmd = BNX2X_F_CMD_STOP;
9066
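	/* Try to stop the function the 'good way'. If it fails (in case
	 * of a parity error during bnx2x_chip_cleanup()) and we are
	 * not in a debug mode, perform a state transaction in order to
	 * enable further HW_RESET transaction.
	 */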
9073 rc = bnx2x_func_state_change(bp, &func_params);
9074 if (rc) {
9075#ifdef BNX2X_STOP_ON_ERROR
9076 return rc;
9077#else
9078 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9079 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9080 return bnx2x_func_state_change(bp, &func_params);
9081#endif
9082 }
9083
9084 return 0;
9085}
9086
9087
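/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */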
9095u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9096{
9097 u32 reset_code = 0;
9098 int port = BP_PORT(bp);
9099
9100
9101 if (unload_mode == UNLOAD_NORMAL)
9102 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9103
9104 else if (bp->flags & NO_WOL_FLAG)
9105 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9106
9107 else if (bp->wol) {
9108 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9109 u8 *mac_addr = bp->dev->dev_addr;
9110 struct pci_dev *pdev = bp->pdev;
9111 u32 val;
9112 u16 pmc;
9113
9114
9115
9116
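		/* The mac address is written to entries 1-4 to
		 * preserve entry 0 which is used by the PMF
		 */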
9117 u8 entry = (BP_VN(bp) + 1)*8;
9118
9119 val = (mac_addr[0] << 8) | mac_addr[1];
9120 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9121
9122 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9123 (mac_addr[4] << 8) | mac_addr[5];
9124 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9125
9126
9127 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9128 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9129 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9130
9131 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9132
9133 } else
9134 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9135
9136
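	/* Send the request to the MCP */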
9137 if (!BP_NOMCP(bp))
9138 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9139 else {
9140 int path = BP_PATH(bp);
9141
9142 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
9143 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9144 bnx2x_load_count[path][2]);
9145 bnx2x_load_count[path][0]--;
9146 bnx2x_load_count[path][1 + port]--;
9147 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
9148 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9149 bnx2x_load_count[path][2]);
9150 if (bnx2x_load_count[path][0] == 0)
9151 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9152 else if (bnx2x_load_count[path][1 + port] == 0)
9153 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9154 else
9155 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9156 }
9157
9158 return reset_code;
9159}
9160
9161
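/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */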
9167void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9168{
9169 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9170
9171
9172 if (!BP_NOMCP(bp))
9173 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9174}
9175
9176static int bnx2x_func_wait_started(struct bnx2x *bp)
9177{
9178 int tout = 50;
9179 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9180
9181 if (!bp->port.pmf)
9182 return 0;
9183
9184
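	/* (assumption: No Attention from MCP at this stage)
	 * PMF probably in the middle of TX disable/enable transaction
	 * 1. Sync IRS for default SB
	 * 2. Sync SP queue - this guarantees us that attention handling started
	 * 3. Wait, that TX disable/enable transaction completes
	 *
	 * 1+2 guarantee that if DCBx attention was scheduled it already changed
	 * pending bit of transaction from STARTED-->TX_STOPPED; if we already
	 * received completion for the transaction the state is TX_STOPPED.
	 * State will return to STARTED after completion of
	 * TX_STOPPED-->STARTED transition.
	 */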
9199 if (msix)
9200 synchronize_irq(bp->msix_table[0].vector);
9201 else
9202 synchronize_irq(bp->pdev->irq);
9203
9204 flush_workqueue(bnx2x_wq);
9205 flush_workqueue(bnx2x_iov_wq);
9206
9207 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9208 BNX2X_F_STATE_STARTED && tout--)
9209 msleep(20);
9210
9211 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9212 BNX2X_F_STATE_STARTED) {
9213#ifdef BNX2X_STOP_ON_ERROR
9214 BNX2X_ERR("Wrong function state\n");
9215 return -EBUSY;
9216#else
9217
9218
9219
9220
9221 struct bnx2x_func_state_params func_params = {NULL};
9222
9223 DP(NETIF_MSG_IFDOWN,
9224 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9225
9226 func_params.f_obj = &bp->func_obj;
9227 __set_bit(RAMROD_DRV_CLR_ONLY,
9228 &func_params.ramrod_flags);
9229
9230
9231 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9232 bnx2x_func_state_change(bp, &func_params);
9233
9234
9235 func_params.cmd = BNX2X_F_CMD_TX_START;
9236 return bnx2x_func_state_change(bp, &func_params);
9237#endif
9238 }
9239
9240 return 0;
9241}
9242
9243static void bnx2x_disable_ptp(struct bnx2x *bp)
9244{
9245 int port = BP_PORT(bp);
9246
9247
9248 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9249 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9250
9251
9252 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9253 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9254 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9255 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9256 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9257 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9258 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9259 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9260
9261
9262 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9263 NIG_REG_P0_PTP_EN, 0x0);
9264}
9265
9266
9267static void bnx2x_stop_ptp(struct bnx2x *bp)
9268{
9269
9270
9271
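	/* Cancel the PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */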
9272 cancel_work_sync(&bp->ptp_task);
9273
9274 if (bp->ptp_tx_skb) {
9275 dev_kfree_skb_any(bp->ptp_tx_skb);
9276 bp->ptp_tx_skb = NULL;
9277 }
9278
9279
9280 bnx2x_disable_ptp(bp);
9281
9282 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9283}
9284
9285void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9286{
9287 int port = BP_PORT(bp);
9288 int i, rc = 0;
9289 u8 cos;
9290 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9291 u32 reset_code;
9292
9293
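	/* Wait until tx fastpath tasks complete */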
9294 for_each_tx_queue(bp, i) {
9295 struct bnx2x_fastpath *fp = &bp->fp[i];
9296
9297 for_each_cos_in_tx_queue(fp, cos)
9298 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9299#ifdef BNX2X_STOP_ON_ERROR
9300 if (rc)
9301 return;
9302#endif
9303 }
9304
9305
9306 usleep_range(1000, 2000);
9307
9308
9309 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9310 false);
9311 if (rc < 0)
9312 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9313
9314
9315 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9316 true);
9317 if (rc < 0)
9318 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9319 rc);
9320
9321
9322 if (!CHIP_IS_E1(bp))
9323 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9324
9325
9326
9327
9328
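	/* Set "drop all" (stop Rx).
	 * We need to take a netif_addr_lock() on the bp->dev->addr_list_lock
	 * in order to prevent a race between the completion code and this
	 * code.
	 */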
9329 netif_addr_lock_bh(bp->dev);
9330
9331 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9332 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9333 else
9334 bnx2x_set_storm_rx_mode(bp);
9335
9336
9337 rparam.mcast_obj = &bp->mcast_obj;
9338 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9339 if (rc < 0)
9340 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9341
9342 netif_addr_unlock_bh(bp->dev);
9343
9344 bnx2x_iov_chip_cleanup(bp);
9345
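	/* Send the UNLOAD_REQUEST to the MCP. This will return whether
	 * this function should perform a FUNC, PORT or COMMON HW reset.
	 */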
9351 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9352
9353
9354
9355
9356
9357 rc = bnx2x_func_wait_started(bp);
9358 if (rc) {
9359 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9360#ifdef BNX2X_STOP_ON_ERROR
9361 return;
9362#endif
9363 }
9364
9365
9366
9367
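	/* Close multi and leading connections.
	 * Completions for ramrods are collected in a synchronous way.
	 */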
9368 for_each_eth_queue(bp, i)
9369 if (bnx2x_stop_queue(bp, i))
9370#ifdef BNX2X_STOP_ON_ERROR
9371 return;
9372#else
9373 goto unload_error;
9374#endif
9375
9376 if (CNIC_LOADED(bp)) {
9377 for_each_cnic_queue(bp, i)
9378 if (bnx2x_stop_queue(bp, i))
9379#ifdef BNX2X_STOP_ON_ERROR
9380 return;
9381#else
9382 goto unload_error;
9383#endif
9384 }
9385
9386
9387
9388
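	/* If SP settings didn't get completed so far - something
	 * very wrong has happened.
	 */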
9389 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9390 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9391
9392#ifndef BNX2X_STOP_ON_ERROR
9393unload_error:
9394#endif
9395 rc = bnx2x_func_stop(bp);
9396 if (rc) {
9397 BNX2X_ERR("Function stop failed!\n");
9398#ifdef BNX2X_STOP_ON_ERROR
9399 return;
9400#endif
9401 }
9402
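	/* stop_ptp should be after the Tx queues are drained to prevent
	 * scheduling to the cancelled PTP work queue. It should also be after
	 * the function stop ramrod is sent, since as part of this ramrod FW
	 * accesses PTP registers.
	 */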
9408 if (bp->flags & PTP_SUPPORTED)
9409 bnx2x_stop_ptp(bp);
9410
9411
9412 bnx2x_netif_stop(bp, 1);
9413
9414 bnx2x_del_all_napi(bp);
9415 if (CNIC_LOADED(bp))
9416 bnx2x_del_all_napi_cnic(bp);
9417
9418
9419 bnx2x_free_irq(bp);
9420
9421
9422 rc = bnx2x_reset_hw(bp, reset_code);
9423 if (rc)
9424 BNX2X_ERR("HW_RESET failed\n");
9425
9426
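	/* Report UNLOAD_DONE to MCP */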
9427 bnx2x_send_unload_done(bp, keep_link);
9428}
9429
9430void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9431{
9432 u32 val;
9433
9434 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9435
9436 if (CHIP_IS_E1(bp)) {
9437 int port = BP_PORT(bp);
9438 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9439 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9440
9441 val = REG_RD(bp, addr);
9442 val &= ~(0x300);
9443 REG_WR(bp, addr, val);
9444 } else {
9445 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9446 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9447 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9448 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9449 }
9450}
9451
9452
9453static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9454{
9455 u32 val;
9456
9457
9458 if (!CHIP_IS_E1(bp)) {
9459
9460 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9461
9462 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9463 }
9464
9465
9466 if (CHIP_IS_E1x(bp)) {
9467
9468 val = REG_RD(bp, HC_REG_CONFIG_1);
9469 REG_WR(bp, HC_REG_CONFIG_1,
9470 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9471 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9472
9473 val = REG_RD(bp, HC_REG_CONFIG_0);
9474 REG_WR(bp, HC_REG_CONFIG_0,
9475 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9476 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9477 } else {
9478
9479 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9480
9481 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9482 (!close) ?
9483 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9484 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9485 }
9486
9487 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9488 close ? "closing" : "opening");
9489 mmiowb();
9490}
9491
9492#define SHARED_MF_CLP_MAGIC 0x80000000
9493
9494static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9495{
9496
9497 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9498 *magic_val = val & SHARED_MF_CLP_MAGIC;
9499 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9500}
9501
9502
9503
9504
9505
9506
9507
9508static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9509{
9510
9511 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9512 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9513 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9514}
9515
9516
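/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the 'magic' bit.
 *
 * Takes care of CLP configurations.
 */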
9524static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9525{
9526 u32 shmem;
9527 u32 validity_offset;
9528
9529 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9530
9531
9532 if (!CHIP_IS_E1(bp))
9533 bnx2x_clp_reset_prep(bp, magic_val);
9534
9535
9536 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9537 validity_offset =
9538 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9539
9540
9541 if (shmem > 0)
9542 REG_WR(bp, shmem + validity_offset, 0);
9543}
9544
9545#define MCP_TIMEOUT 5000
9546#define MCP_ONE_TIMEOUT 100
9547
9548
9549
9550
9551
9552
9553static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9554{
9555
9556
9557 if (CHIP_REV_IS_SLOW(bp))
9558 msleep(MCP_ONE_TIMEOUT*10);
9559 else
9560 msleep(MCP_ONE_TIMEOUT);
9561}
9562
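/* initializes bp->common.shmem_base and waits for the validity signature
 * to appear
 */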
9566static int bnx2x_init_shmem(struct bnx2x *bp)
9567{
9568 int cnt = 0;
9569 u32 val = 0;
9570
9571 do {
9572 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9573 if (bp->common.shmem_base) {
9574 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9575 if (val & SHR_MEM_VALIDITY_MB)
9576 return 0;
9577 }
9578
9579 bnx2x_mcp_wait_one(bp);
9580
9581 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9582
9583 BNX2X_ERR("BAD MCP validity signature\n");
9584
9585 return -ENODEV;
9586}
9587
9588static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9589{
9590 int rc = bnx2x_init_shmem(bp);
9591
9592
9593 if (!CHIP_IS_E1(bp))
9594 bnx2x_clp_reset_done(bp, magic_val);
9595
9596 return rc;
9597}
9598
9599static void bnx2x_pxp_prep(struct bnx2x *bp)
9600{
9601 if (!CHIP_IS_E1(bp)) {
9602 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9603 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9604 mmiowb();
9605 }
9606}
9607
9608
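/**
 * bnx2x_process_kill_chip_reset - chip reset flow.
 *
 * @bp:		driver handle
 * @global:	true if globally reset the chip
 */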
9618static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9619{
9620 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9621 u32 global_bits2, stay_reset2;
9622
9623
9624
9625
9626
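	/* Bits that have to be set in reset_mask2 if we want to reset
	 * 'global' (per chip) blocks.
	 */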
9627 global_bits2 =
9628 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9629 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9630
9631
9632
9633
9634
9635
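	/* Don't reset the following blocks.
	 * Important: per port blocks (e.g., EMAC, BMAC, UMAC) can't be
	 *	      reset, as in a 4-port device they might still be owned
	 *	      by the MCP (there is only one leader per path).
	 */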
9636 not_reset_mask1 =
9637 MISC_REGISTERS_RESET_REG_1_RST_HC |
9638 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9639 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9640
9641 not_reset_mask2 =
9642 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9643 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9644 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9645 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9646 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9647 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9648 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9649 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9650 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9651 MISC_REGISTERS_RESET_REG_2_PGLC |
9652 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9653 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9654 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9655 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9656 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9657 MISC_REGISTERS_RESET_REG_2_UMAC1;
9658
9659
9660
9661
9662
9663 stay_reset2 =
9664 MISC_REGISTERS_RESET_REG_2_XMAC |
9665 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9666
9667
9668 reset_mask1 = 0xffffffff;
9669
9670 if (CHIP_IS_E1(bp))
9671 reset_mask2 = 0xffff;
9672 else if (CHIP_IS_E1H(bp))
9673 reset_mask2 = 0x1ffff;
9674 else if (CHIP_IS_E2(bp))
9675 reset_mask2 = 0xfffff;
9676 else
9677 reset_mask2 = 0x3ffffff;
9678
9679
9680 if (!global)
9681 reset_mask2 &= ~global_bits2;
9682
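	/* In case of attention in the QM, we need to reset PXP
	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
	 * because otherwise QM reset would release 'close the gates' shortly
	 * before resetting the PXP, then the PSWRQ would send a write
	 * request to PGLUE. Then when PXP is reset, PGLUE would try to
	 * read the payload data from PSWWR, but PSWWR would not
	 * respond. The write queue in PGLUE would get stuck and dmae commands
	 * would not return. Therefore it's important to reset the second
	 * reset register (containing the
	 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
	 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
	 * bit).
	 */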
9697 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9698 reset_mask2 & (~not_reset_mask2));
9699
9700 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9701 reset_mask1 & (~not_reset_mask1));
9702
9703 barrier();
9704 mmiowb();
9705
9706 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9707 reset_mask2 & (~stay_reset2));
9708
9709 barrier();
9710 mmiowb();
9711
9712 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9713 mmiowb();
9714}
9715
9716
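/**
 * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
 *
 * @bp:	driver handle
 *
 * It should get cleared in no more than 1s. Returns 0 if
 * the pending writes bit gets cleared.
 */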
9725static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9726{
9727 u32 cnt = 1000;
9728 u32 pend_bits = 0;
9729
9730 do {
9731 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9732
9733 if (pend_bits == 0)
9734 break;
9735
9736 usleep_range(1000, 2000);
9737 } while (cnt-- > 0);
9738
	if (pend_bits) {	/* cnt is unsigned, so test the real timeout condition */
9740 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9741 pend_bits);
9742 return -EBUSY;
9743 }
9744
9745 return 0;
9746}
9747
9748static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9749{
9750 int cnt = 1000;
9751 u32 val = 0;
9752 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9753 u32 tags_63_32 = 0;
9754
9755
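	/* Empty the Tetris buffer, wait for 1s */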
9756 do {
9757 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9758 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9759 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9760 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9761 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9762 if (CHIP_IS_E3(bp))
9763 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9764
9765 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9766 ((port_is_idle_0 & 0x1) == 0x1) &&
9767 ((port_is_idle_1 & 0x1) == 0x1) &&
9768 (pgl_exp_rom2 == 0xffffffff) &&
9769 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9770 break;
9771 usleep_range(1000, 2000);
9772 } while (cnt-- > 0);
9773
9774 if (cnt <= 0) {
9775 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9776 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9777 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9778 pgl_exp_rom2);
9779 return -EAGAIN;
9780 }
9781
9782 barrier();
9783
9784
9785 bnx2x_set_234_gates(bp, true);
9786
9787
9788 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9789 return -EAGAIN;
9790
9791
9792
9793
9794 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9795 barrier();
9796
9797
9798 mmiowb();
9799
9800
9801
9802
9803 usleep_range(1000, 2000);
9804
9805
9806
9807 if (global)
9808 bnx2x_reset_mcp_prep(bp, &val);
9809
9810
9811 bnx2x_pxp_prep(bp);
9812 barrier();
9813
9814
9815 bnx2x_process_kill_chip_reset(bp, global);
9816 barrier();
9817
9818
9819 if (!CHIP_IS_E1x(bp))
9820 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9821
9822
9823
9824 if (global && bnx2x_reset_mcp_comp(bp, val))
9825 return -EAGAIN;
9826
9827
9828
9829
9830 bnx2x_set_234_gates(bp, false);
9831
9832
9833
9834
9835 return 0;
9836}
9837
9838static int bnx2x_leader_reset(struct bnx2x *bp)
9839{
9840 int rc = 0;
9841 bool global = bnx2x_reset_is_global(bp);
9842 u32 load_code;
9843
9844
9845
9846
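	/* if not going to reset MCP - load "fake" driver to reset HW while
	 * driver is owner of the HW
	 */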
9847 if (!global && !BP_NOMCP(bp)) {
9848 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9849 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9850 if (!load_code) {
9851 BNX2X_ERR("MCP response failure, aborting\n");
9852 rc = -EAGAIN;
9853 goto exit_leader_reset;
9854 }
9855 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9856 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9857 BNX2X_ERR("MCP unexpected resp, aborting\n");
9858 rc = -EAGAIN;
9859 goto exit_leader_reset2;
9860 }
9861 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9862 if (!load_code) {
9863 BNX2X_ERR("MCP response failure, aborting\n");
9864 rc = -EAGAIN;
9865 goto exit_leader_reset2;
9866 }
9867 }
9868
9869
9870 if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad happened on engine %d! Aii!\n",
			  BP_PATH(bp));
9873 rc = -EAGAIN;
9874 goto exit_leader_reset2;
9875 }
9876
9877
9878
9879
9880
9881 bnx2x_set_reset_done(bp);
9882 if (global)
9883 bnx2x_clear_reset_global(bp);
9884
9885exit_leader_reset2:
9886
9887 if (!global && !BP_NOMCP(bp)) {
9888 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9889 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9890 }
9891exit_leader_reset:
9892 bp->is_leader = 0;
9893 bnx2x_release_leader_lock(bp);
9894 smp_mb();
9895 return rc;
9896}
9897
9898static void bnx2x_recovery_failed(struct bnx2x *bp)
9899{
9900 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9901
9902
9903 netif_device_detach(bp->dev);
9904
9905
9906
9907
9908
9909 bnx2x_set_reset_in_progress(bp);
9910
9911
9912 bnx2x_set_power_state(bp, PCI_D3hot);
9913
9914 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9915
9916 smp_mb();
9917}
9918
9919
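/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_sp_rtnl() ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */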
9924static void bnx2x_parity_recover(struct bnx2x *bp)
9925{
9926 bool global = false;
9927 u32 error_recovered, error_unrecovered;
9928 bool is_parity;
9929
9930 DP(NETIF_MSG_HW, "Handling parity\n");
9931 while (1) {
9932 switch (bp->recovery_state) {
9933 case BNX2X_RECOVERY_INIT:
9934 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9935 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9936 WARN_ON(!is_parity);
9937
9938
9939 if (bnx2x_trylock_leader_lock(bp)) {
9940 bnx2x_set_reset_in_progress(bp);
9941
9942
9943
9944
9945
9946
9947 if (global)
9948 bnx2x_set_reset_global(bp);
9949
9950 bp->is_leader = 1;
9951 }
9952
9953
9954
9955 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9956 return;
9957
9958 bp->recovery_state = BNX2X_RECOVERY_WAIT;
9959
9960
9961
9962
9963
9964 smp_mb();
9965 break;
9966
9967 case BNX2X_RECOVERY_WAIT:
9968 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9969 if (bp->is_leader) {
9970 int other_engine = BP_PATH(bp) ? 0 : 1;
9971 bool other_load_status =
9972 bnx2x_get_load_status(bp, other_engine);
9973 bool load_status =
9974 bnx2x_get_load_status(bp, BP_PATH(bp));
9975 global = bnx2x_reset_is_global(bp);
9976
9977
9978
9979
9980
9981
9982
9983
9984
9985 if (load_status ||
9986 (global && other_load_status)) {
9987
9988
9989
9990 schedule_delayed_work(&bp->sp_rtnl_task,
9991 HZ/10);
9992 return;
9993 } else {
9994
9995
9996
9997
9998
9999 if (bnx2x_leader_reset(bp)) {
10000 bnx2x_recovery_failed(bp);
10001 return;
10002 }
10003
10004
10005
10006
10007
10008
10009 break;
10010 }
10011 } else {
10012 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10013
10014
10015
10016
10017
10018
10019 if (bnx2x_trylock_leader_lock(bp)) {
10020
10021
10022
10023 bp->is_leader = 1;
10024 break;
10025 }
10026
10027 schedule_delayed_work(&bp->sp_rtnl_task,
10028 HZ/10);
10029 return;
10030
10031 } else {
10032
10033
10034
10035
10036 if (bnx2x_reset_is_global(bp)) {
10037 schedule_delayed_work(
10038 &bp->sp_rtnl_task,
10039 HZ/10);
10040 return;
10041 }
10042
10043 error_recovered =
10044 bp->eth_stats.recoverable_error;
10045 error_unrecovered =
10046 bp->eth_stats.unrecoverable_error;
10047 bp->recovery_state =
10048 BNX2X_RECOVERY_NIC_LOADING;
10049 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10050 error_unrecovered++;
10051 netdev_err(bp->dev,
10052 "Recovery failed. Power cycle needed\n");
10053
10054 netif_device_detach(bp->dev);
10055
10056 bnx2x_set_power_state(
10057 bp, PCI_D3hot);
10058 smp_mb();
10059 } else {
10060 bp->recovery_state =
10061 BNX2X_RECOVERY_DONE;
10062 error_recovered++;
10063 smp_mb();
10064 }
10065 bp->eth_stats.recoverable_error =
10066 error_recovered;
10067 bp->eth_stats.unrecoverable_error =
10068 error_unrecovered;
10069
10070 return;
10071 }
10072 }
10073 default:
10074 return;
10075 }
10076 }
10077}
10078
10079#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
10080static int bnx2x_udp_port_update(struct bnx2x *bp)
10081{
10082 struct bnx2x_func_switch_update_params *switch_update_params;
10083 struct bnx2x_func_state_params func_params = {NULL};
10084 struct bnx2x_udp_tunnel *udp_tunnel;
10085 u16 vxlan_port = 0, geneve_port = 0;
10086 int rc;
10087
10088 switch_update_params = &func_params.params.switch_update;
10089
10090
10091 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10092 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10093
10094 func_params.f_obj = &bp->func_obj;
10095 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10096
10097
10098 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10099 &switch_update_params->changes);
10100
10101 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
10102 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10103 geneve_port = udp_tunnel->dst_port;
10104 switch_update_params->geneve_dst_port = geneve_port;
10105 }
10106
10107 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
10108 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10109 vxlan_port = udp_tunnel->dst_port;
10110 switch_update_params->vxlan_dst_port = vxlan_port;
10111 }
10112
10113
10114 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10115 &switch_update_params->changes);
10116
10117 rc = bnx2x_func_state_change(bp, &func_params);
10118 if (rc)
10119 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10120 vxlan_port, geneve_port, rc);
10121 else
10122 DP(BNX2X_MSG_SP,
10123 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10124 vxlan_port, geneve_port);
10125
10126 return rc;
10127}
10128
10129static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
10130 enum bnx2x_udp_port_type type)
10131{
10132 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10133
10134 if (!netif_running(bp->dev) || !IS_PF(bp))
10135 return;
10136
10137 if (udp_port->count && udp_port->dst_port == port) {
10138 udp_port->count++;
10139 return;
10140 }
10141
10142 if (udp_port->count) {
10143 DP(BNX2X_MSG_SP,
10144 "UDP tunnel [%d] - destination port limit reached\n",
10145 type);
10146 return;
10147 }
10148
10149 udp_port->dst_port = port;
10150 udp_port->count = 1;
10151 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10152}
10153
10154static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
10155 enum bnx2x_udp_port_type type)
10156{
10157 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10158
10159 if (!IS_PF(bp))
10160 return;
10161
10162 if (!udp_port->count || udp_port->dst_port != port) {
10163 DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
10164 type);
10165 return;
10166 }
10167
10168
10169 udp_port->count--;
10170 if (udp_port->count)
10171 return;
10172 udp_port->dst_port = 0;
10173
10174 if (netif_running(bp->dev))
10175 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10176 else
10177 DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
10178 type, port);
10179}
10180#endif
10181
10182#ifdef CONFIG_BNX2X_VXLAN
10183static void bnx2x_add_vxlan_port(struct net_device *netdev,
10184 sa_family_t sa_family, __be16 port)
10185{
10186 struct bnx2x *bp = netdev_priv(netdev);
10187 u16 t_port = ntohs(port);
10188
10189 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10190}
10191
10192static void bnx2x_del_vxlan_port(struct net_device *netdev,
10193 sa_family_t sa_family, __be16 port)
10194{
10195 struct bnx2x *bp = netdev_priv(netdev);
10196 u16 t_port = ntohs(port);
10197
10198 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10199}
10200#endif
10201
10202#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
10203static void bnx2x_add_geneve_port(struct net_device *netdev,
10204 sa_family_t sa_family, __be16 port)
10205{
10206 struct bnx2x *bp = netdev_priv(netdev);
10207 u16 t_port = ntohs(port);
10208
10209 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10210}
10211
10212static void bnx2x_del_geneve_port(struct net_device *netdev,
10213 sa_family_t sa_family, __be16 port)
10214{
10215 struct bnx2x *bp = netdev_priv(netdev);
10216 u16 t_port = ntohs(port);
10217
10218 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10219}
10220#endif
10221
10222static int bnx2x_close(struct net_device *dev);
10223
10224
10225
10226
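/* called with rtnl_lock */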
10227static void bnx2x_sp_rtnl_task(struct work_struct *work)
10228{
10229 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10230
10231 rtnl_lock();
10232
10233 if (!netif_running(bp->dev)) {
10234 rtnl_unlock();
10235 return;
10236 }
10237
10238 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10239#ifdef BNX2X_STOP_ON_ERROR
10240 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10241 "you will need to reboot when done\n");
10242 goto sp_rtnl_not_reset;
10243#endif
10244
10245
10246
10247
10248 bp->sp_rtnl_state = 0;
10249 smp_mb();
10250
10251 bnx2x_parity_recover(bp);
10252
10253 rtnl_unlock();
10254 return;
10255 }
10256
10257 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10258#ifdef BNX2X_STOP_ON_ERROR
10259 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10260 "you will need to reboot when done\n");
10261 goto sp_rtnl_not_reset;
10262#endif
10263
10264
10265
10266
10267
10268 bp->sp_rtnl_state = 0;
10269 smp_mb();
10270
10271 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10272 bnx2x_nic_load(bp, LOAD_NORMAL);
10273
10274 rtnl_unlock();
10275 return;
10276 }
10277#ifdef BNX2X_STOP_ON_ERROR
10278sp_rtnl_not_reset:
10279#endif
10280 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10281 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10282 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10283 bnx2x_after_function_update(bp);
10284
10285
10286
10287
10288
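	/* In case of fan failure we unload the driver to prevent
	 * permanent overheating damage.
	 */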
10289 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10290 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10291 netif_device_detach(bp->dev);
10292 bnx2x_close(bp->dev);
10293 rtnl_unlock();
10294 return;
10295 }
10296
10297 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10298 DP(BNX2X_MSG_SP,
10299 "sending set mcast vf pf channel message from rtnl sp-task\n");
10300 bnx2x_vfpf_set_mcast(bp->dev);
10301 }
10302 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10303 &bp->sp_rtnl_state)){
10304 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
10305 bnx2x_tx_disable(bp);
			BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10307 }
10308 }
10309
10310 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10311 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10312 bnx2x_set_rx_mode_inner(bp);
10313 }
10314
10315 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10316 &bp->sp_rtnl_state))
10317 bnx2x_pf_set_vfs_vlan(bp);
10318
10319 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10320 bnx2x_dcbx_stop_hw_tx(bp);
10321 bnx2x_dcbx_resume_hw_tx(bp);
10322 }
10323
10324 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10325 &bp->sp_rtnl_state))
10326 bnx2x_update_mng_version(bp);
10327
10328#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
10329 if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
10330 &bp->sp_rtnl_state)) {
10331 if (bnx2x_udp_port_update(bp)) {
10332
10333 memset(bp->udp_tunnel_ports, 0,
10334 sizeof(struct bnx2x_udp_tunnel) *
10335 BNX2X_UDP_PORT_MAX);
10336 } else {
10337
10338
10339
10340
10341#ifdef CONFIG_BNX2X_VXLAN
10342 if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count)
10343 vxlan_get_rx_port(bp->dev);
10344#endif
10345#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
10346 if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
10347 geneve_get_rx_port(bp->dev);
10348#endif
10349 }
10350 }
10351#endif
10352
10353
10354
10355
10356 rtnl_unlock();
10357
10358
10359 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10360 &bp->sp_rtnl_state)) {
10361 bnx2x_disable_sriov(bp);
10362 bnx2x_enable_sriov(bp);
10363 }
10364}
10365
10366static void bnx2x_period_task(struct work_struct *work)
10367{
10368 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10369
10370 if (!netif_running(bp->dev))
10371 goto period_task_exit;
10372
10373 if (CHIP_REV_IS_SLOW(bp)) {
10374 BNX2X_ERR("period task called on emulation, ignoring\n");
10375 goto period_task_exit;
10376 }
10377
10378 bnx2x_acquire_phy_lock(bp);
10379
10380
10381
10382
10383
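	/* The barrier is needed to ensure the ordering between the writing to
	 * bp->port.pmf in bnx2x_nic_load() or bnx2x_pmf_update() and
	 * the reading here.
	 */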
10384 smp_mb();
10385 if (bp->port.pmf) {
10386 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10387
10388
10389 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10390 }
10391
10392 bnx2x_release_phy_lock(bp);
10393period_task_exit:
10394 return;
10395}
10396
10397
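/*
 * Init service functions
 */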
10401static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10402{
10403 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10404 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10405 return base + (BP_ABS_FUNC(bp)) * stride;
10406}
10407
10408static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10409 u8 port, u32 reset_reg,
10410 struct bnx2x_mac_vals *vals)
10411{
10412 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10413 u32 base_addr;
10414
10415 if (!(mask & reset_reg))
10416 return false;
10417
10418 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10419 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10420 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10421 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10422 REG_WR(bp, vals->umac_addr[port], 0);
10423
10424 return true;
10425}
10426
10427static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10428 struct bnx2x_mac_vals *vals)
10429{
10430 u32 val, base_addr, offset, mask, reset_reg;
10431 bool mac_stopped = false;
10432 u8 port = BP_PORT(bp);
10433
10434
10435 memset(vals, 0, sizeof(*vals));
10436
10437 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10438
10439 if (!CHIP_IS_E3(bp)) {
10440 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10441 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10442 if ((mask & reset_reg) && val) {
10443 u32 wb_data[2];
10444 BNX2X_DEV_INFO("Disable bmac Rx\n");
10445 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10446 : NIG_REG_INGRESS_BMAC0_MEM;
10447 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10448 : BIGMAC_REGISTER_BMAC_CONTROL;
10449
10450
10451
10452
10453
10454
10455
10456 wb_data[0] = REG_RD(bp, base_addr + offset);
10457 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10458 vals->bmac_addr = base_addr + offset;
10459 vals->bmac_val[0] = wb_data[0];
10460 vals->bmac_val[1] = wb_data[1];
10461 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10462 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10463 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10464 }
10465 BNX2X_DEV_INFO("Disable emac Rx\n");
10466 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10467 vals->emac_val = REG_RD(bp, vals->emac_addr);
10468 REG_WR(bp, vals->emac_addr, 0);
10469 mac_stopped = true;
10470 } else {
10471 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10472 BNX2X_DEV_INFO("Disable xmac Rx\n");
10473 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10474 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10475 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10476 val & ~(1 << 1));
10477 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10478 val | (1 << 1));
10479 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10480 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10481 REG_WR(bp, vals->xmac_addr, 0);
10482 mac_stopped = true;
10483 }
10484
10485 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10486 reset_reg, vals);
10487 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10488 reset_reg, vals);
10489 }
10490
10491 if (mac_stopped)
10492 msleep(20);
10493}
10494
10495#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10496#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10497 0x1848 + ((f) << 4))
10498#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10499#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10500#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
10501
10502#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10503#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10504#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10505
10506static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10507{
10508
10509
10510
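	/* UNDI marks its presence in DORQ -
	 * it initializes the CID offset for the normal bell to 0x7
	 */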
10511 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10512 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10513 return false;
10514
10515 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10516 BNX2X_DEV_INFO("UNDI previously loaded\n");
10517 return true;
10518 }
10519
10520 return false;
10521}
10522
10523static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10524{
10525 u16 rcq, bd;
10526 u32 addr, tmp_reg;
10527
10528 if (BP_FUNC(bp) < 2)
10529 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10530 else
10531 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10532
10533 tmp_reg = REG_RD(bp, addr);
10534 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10535 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10536
10537 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10538 REG_WR(bp, addr, tmp_reg);
10539
10540 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10541 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10542}
10543
10544static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10545{
10546 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10547 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10548 if (!rc) {
10549 BNX2X_ERR("MCP response failure, aborting\n");
10550 return -EBUSY;
10551 }
10552
10553 return 0;
10554}
10555
10556static struct bnx2x_prev_path_list *
10557 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10558{
10559 struct bnx2x_prev_path_list *tmp_list;
10560
10561 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10562 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10563 bp->pdev->bus->number == tmp_list->bus &&
10564 BP_PATH(bp) == tmp_list->path)
10565 return tmp_list;
10566
10567 return NULL;
10568}
10569
10570static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10571{
10572 struct bnx2x_prev_path_list *tmp_list;
10573 int rc;
10574
10575 rc = down_interruptible(&bnx2x_prev_sem);
10576 if (rc) {
10577 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10578 return rc;
10579 }
10580
10581 tmp_list = bnx2x_prev_path_get_entry(bp);
10582 if (tmp_list) {
10583 tmp_list->aer = 1;
10584 rc = 0;
10585 } else {
		BNX2X_ERR("path %d: entry does not exist for EEH; flow occurs before initial insmod is over?\n",
			  BP_PATH(bp));
10588 }
10589
10590 up(&bnx2x_prev_sem);
10591
10592 return rc;
10593}
10594
10595static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10596{
10597 struct bnx2x_prev_path_list *tmp_list;
10598 bool rc = false;
10599
10600 if (down_trylock(&bnx2x_prev_sem))
10601 return false;
10602
10603 tmp_list = bnx2x_prev_path_get_entry(bp);
10604 if (tmp_list) {
10605 if (tmp_list->aer) {
10606 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10607 BP_PATH(bp));
10608 } else {
10609 rc = true;
10610 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10611 BP_PATH(bp));
10612 }
10613 }
10614
10615 up(&bnx2x_prev_sem);
10616
10617 return rc;
10618}
10619
10620bool bnx2x_port_after_undi(struct bnx2x *bp)
10621{
10622 struct bnx2x_prev_path_list *entry;
10623 bool val;
10624
10625 down(&bnx2x_prev_sem);
10626
10627 entry = bnx2x_prev_path_get_entry(bp);
10628 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10629
10630 up(&bnx2x_prev_sem);
10631
10632 return val;
10633}
10634
10635static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10636{
10637 struct bnx2x_prev_path_list *tmp_list;
10638 int rc;
10639
10640 rc = down_interruptible(&bnx2x_prev_sem);
10641 if (rc) {
10642 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10643 return rc;
10644 }
10645
10646
10647 tmp_list = bnx2x_prev_path_get_entry(bp);
10648 if (tmp_list) {
10649 if (!tmp_list->aer) {
			BNX2X_ERR("Re-marking the path.\n");
10651 } else {
10652 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10653 BP_PATH(bp));
10654 tmp_list->aer = 0;
10655 }
10656 up(&bnx2x_prev_sem);
10657 return 0;
10658 }
10659 up(&bnx2x_prev_sem);
10660
10661
10662 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10663 if (!tmp_list) {
10664 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10665 return -ENOMEM;
10666 }
10667
10668 tmp_list->bus = bp->pdev->bus->number;
10669 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10670 tmp_list->path = BP_PATH(bp);
10671 tmp_list->aer = 0;
10672 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10673
10674 rc = down_interruptible(&bnx2x_prev_sem);
10675 if (rc) {
10676 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10677 kfree(tmp_list);
10678 } else {
10679 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10680 BP_PATH(bp));
10681 list_add(&tmp_list->list, &bnx2x_prev_list);
10682 up(&bnx2x_prev_sem);
10683 }
10684
10685 return rc;
10686}
10687
10688static int bnx2x_do_flr(struct bnx2x *bp)
10689{
10690 struct pci_dev *dev = bp->pdev;
10691
10692 if (CHIP_IS_E1x(bp)) {
10693 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10694 return -EINVAL;
10695 }
10696
10697
10698 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10699 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10700 bp->common.bc_ver);
10701 return -EINVAL;
10702 }
10703
10704 if (!pci_wait_for_pending_transaction(dev))
10705 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10706
10707 BNX2X_DEV_INFO("Initiating FLR\n");
10708 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10709
10710 return 0;
10711}
10712
10713static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10714{
10715 int rc;
10716
10717 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10718
10719
10720 if (bnx2x_prev_is_path_marked(bp))
10721 return bnx2x_prev_mcp_done(bp);
10722
10723 BNX2X_DEV_INFO("Path is unmarked\n");
10724
10725
10726 if (bnx2x_prev_is_after_undi(bp))
10727 goto out;
10728
10729
10730
10731
10732
10733 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10734
10735 if (!rc) {
10736
10737 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10738 rc = bnx2x_do_flr(bp);
10739 }
10740
10741 if (!rc) {
10742
10743 BNX2X_DEV_INFO("FLR successful\n");
10744 return 0;
10745 }
10746
10747 BNX2X_DEV_INFO("Could not FLR\n");
10748
10749out:
10750
10751 rc = bnx2x_prev_mcp_done(bp);
10752 if (!rc)
10753 rc = BNX2X_PREV_WAIT_NEEDED;
10754
10755 return rc;
10756}
10757
10758static int bnx2x_prev_unload_common(struct bnx2x *bp)
10759{
10760 u32 reset_reg, tmp_reg = 0, rc;
10761 bool prev_undi = false;
10762 struct bnx2x_mac_vals mac_vals;
10763
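	/* It is possible a previous function received 'common' answer,
	 * but hasn't loaded yet, therefore creating a scenario of
	 * multiple functions receiving 'common' on the same path.
	 */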
10768 BNX2X_DEV_INFO("Common unload Flow\n");
10769
10770 memset(&mac_vals, 0, sizeof(mac_vals));
10771
10772 if (bnx2x_prev_is_path_marked(bp))
10773 return bnx2x_prev_mcp_done(bp);
10774
10775 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10776
10777
10778 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10779 u32 timer_count = 1000;
10780
10781
10782 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10783
10784
10785 bnx2x_set_rx_filter(&bp->link_params, 0);
10786 bp->link_params.port ^= 1;
10787 bnx2x_set_rx_filter(&bp->link_params, 0);
10788 bp->link_params.port ^= 1;
10789
10790
10791 if (bnx2x_prev_is_after_undi(bp)) {
10792 prev_undi = true;
10793
10794 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10795
10796 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10797 }
10798 if (!CHIP_IS_E1x(bp))
10799
10800 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10801
10802
10803 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10804 while (timer_count) {
10805 u32 prev_brb = tmp_reg;
10806
10807 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10808 if (!tmp_reg)
10809 break;
10810
10811 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10812
10813
10814 if (prev_brb > tmp_reg)
10815 timer_count = 1000;
10816 else
10817 timer_count--;
10818
10819
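			/* If UNDI resides in memory, manually increment it */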
10820 if (prev_undi)
10821 bnx2x_prev_unload_undi_inc(bp, 1);
10822
10823 udelay(10);
10824 }
10825
10826 if (!timer_count)
10827 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10828 }
10829
10830
10831 bnx2x_reset_common(bp);
10832
10833 if (mac_vals.xmac_addr)
10834 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10835 if (mac_vals.umac_addr[0])
10836 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10837 if (mac_vals.umac_addr[1])
10838 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10839 if (mac_vals.emac_addr)
10840 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10841 if (mac_vals.bmac_addr) {
10842 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10843 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10844 }
10845
10846 rc = bnx2x_prev_mark_path(bp, prev_undi);
10847 if (rc) {
10848 bnx2x_prev_mcp_done(bp);
10849 return rc;
10850 }
10851
10852 return bnx2x_prev_mcp_done(bp);
10853}
10854
10855static int bnx2x_prev_unload(struct bnx2x *bp)
10856{
10857 int time_counter = 10;
10858 u32 rc, fw, hw_lock_reg, hw_lock_val;
10859 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10860
10861
10862
	/* clear hw from errors which may be a result of previous driver */
10864 bnx2x_clean_pglue_errors(bp);
10865
	/* Release previously held locks */
10867 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10868 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10869 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10870
10871 hw_lock_val = REG_RD(bp, hw_lock_reg);
10872 if (hw_lock_val) {
10873 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10874 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10875 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10876 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10877 }
10878
10879 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10880 REG_WR(bp, hw_lock_reg, 0xffffffff);
10881 } else
10882 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10883
10884 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10885 BNX2X_DEV_INFO("Release previously held alr\n");
10886 bnx2x_release_alr(bp);
10887 }
10888
10889 do {
10890 int aer = 0;
10891
10892 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10893 if (!fw) {
10894 BNX2X_ERR("MCP response failure, aborting\n");
10895 rc = -EBUSY;
10896 break;
10897 }
10898
10899 rc = down_interruptible(&bnx2x_prev_sem);
10900 if (rc) {
10901 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10902 rc);
10903 } else {
			/* If Path is marked by EEH, ignore unload status */
10905 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10906 bnx2x_prev_path_get_entry(bp)->aer);
10907 up(&bnx2x_prev_sem);
10908 }
10909
10910 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10911 rc = bnx2x_prev_unload_common(bp);
10912 break;
10913 }
10914
		/* non-common reply from MCP might require looping */
10916 rc = bnx2x_prev_unload_uncommon(bp);
10917 if (rc != BNX2X_PREV_WAIT_NEEDED)
10918 break;
10919
10920 msleep(20);
10921 } while (--time_counter);
10922
10923 if (!time_counter || rc) {
10924 BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
10925 rc = -EPROBE_DEFER;
10926 }
10927
	/* Mark function if its port was used to boot from SAN */
10929 if (bnx2x_port_after_undi(bp))
10930 bp->link_params.feature_config_flags |=
10931 FEATURE_CONFIG_BOOT_FROM_SAN;
10932
10933 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10934
10935 return rc;
10936}
10937
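/* Read configuration common to the whole chip from HW and shmem: chip id,
 * port mode, doorbell and flash sizes, shmem bases, bootcode version and
 * derived feature flags, boot mode and WoL capability.
 */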
10938static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10939{
10940 u32 val, val2, val3, val4, id, boot_mode;
10941 u16 pmc;
10942
	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
10945 val = REG_RD(bp, MISC_REG_CHIP_NUM);
10946 id = ((val & 0xffff) << 16);
10947 val = REG_RD(bp, MISC_REG_CHIP_REV);
10948 id |= ((val & 0xf) << 12);
10949
	/* Metal is read from PCI regs, but we can't access >=0x400 from
	 * the configuration space (so we need to reg_rd)
	 */
10953 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10954 id |= (((val >> 24) & 0xf) << 4);
10955 val = REG_RD(bp, MISC_REG_BOND_ID);
10956 id |= (val & 0xf);
10957 bp->common.chip_id = id;
10958
	/* force 57811 according to MISC register */
10960 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10961 if (CHIP_IS_57810(bp))
10962 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10963 (bp->common.chip_id & 0x0000FFFF);
10964 else if (CHIP_IS_57810_MF(bp))
10965 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10966 (bp->common.chip_id & 0x0000FFFF);
10967 bp->common.chip_id |= 0x1;
10968 }
10969
	/* Set doorbell size */
10971 bp->db_size = (1 << BNX2X_DB_SHIFT);
10972
10973 if (!CHIP_IS_E1x(bp)) {
10974 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10975 if ((val & 1) == 0)
10976 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10977 else
10978 val = (val >> 1) & 1;
10979 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10980 "2_PORT_MODE");
10981 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10982 CHIP_2_PORT_MODE;
10983
10984 if (CHIP_MODE_IS_4_PORT(bp))
10985 bp->pfid = (bp->pf_num >> 1);
10986 else
10987 bp->pfid = (bp->pf_num & 0x6);
10988 } else {
10989 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
10990 bp->pfid = bp->pf_num;
10991 }
10992
	BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
10994
10995 bp->link_params.chip_id = bp->common.chip_id;
10996 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10997
10998 val = (REG_RD(bp, 0x2874) & 0x55);
10999 if ((bp->common.chip_id & 0x1) ||
11000 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
11001 bp->flags |= ONE_PORT_FLAG;
11002 BNX2X_DEV_INFO("single port device\n");
11003 }
11004
11005 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
11006 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
11007 (val & MCPR_NVM_CFG4_FLASH_SIZE));
11008 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
11009 bp->common.flash_size, bp->common.flash_size);
11010
11011 bnx2x_init_shmem(bp);
11012
11013 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11014 MISC_REG_GENERIC_CR_1 :
11015 MISC_REG_GENERIC_CR_0));
11016
11017 bp->link_params.shmem_base = bp->common.shmem_base;
11018 bp->link_params.shmem2_base = bp->common.shmem2_base;
11019 if (SHMEM2_RD(bp, size) >
11020 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11021 bp->link_params.lfa_base =
11022 REG_RD(bp, bp->common.shmem2_base +
11023 (u32)offsetof(struct shmem2_region,
11024 lfa_host_addr[BP_PORT(bp)]));
11025 else
11026 bp->link_params.lfa_base = 0;
11027 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
11028 bp->common.shmem_base, bp->common.shmem2_base);
11029
11030 if (!bp->common.shmem_base) {
11031 BNX2X_DEV_INFO("MCP not active\n");
11032 bp->flags |= NO_MCP_FLAG;
11033 return;
11034 }
11035
11036 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11037 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11038
11039 bp->link_params.hw_led_mode = ((bp->common.hw_config &
11040 SHARED_HW_CFG_LED_MODE_MASK) >>
11041 SHARED_HW_CFG_LED_MODE_SHIFT);
11042
11043 bp->link_params.feature_config_flags = 0;
11044 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11045 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
11046 bp->link_params.feature_config_flags |=
11047 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11048 else
11049 bp->link_params.feature_config_flags &=
11050 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11051
11052 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11053 bp->common.bc_ver = val;
11054 BNX2X_DEV_INFO("bc_ver %X\n", val);
11055 if (val < BNX2X_BC_VER) {
		/* for now only warn;
		 * later we might need to enforce this */
11058 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11059 BNX2X_BC_VER, val);
11060 }
11061 bp->link_params.feature_config_flags |=
11062 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11063 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11064
11065 bp->link_params.feature_config_flags |=
11066 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11067 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11068 bp->link_params.feature_config_flags |=
11069 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11070 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11071 bp->link_params.feature_config_flags |=
11072 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11073 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11074
11075 bp->link_params.feature_config_flags |=
11076 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11077 FEATURE_CONFIG_MT_SUPPORT : 0;
11078
11079 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11080 BC_SUPPORTS_PFC_STATS : 0;
11081
11082 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11083 BC_SUPPORTS_FCOE_FEATURES : 0;
11084
11085 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11086 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11087
11088 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11089 BC_SUPPORTS_RMMOD_CMD : 0;
11090
11091 boot_mode = SHMEM_RD(bp,
11092 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11093 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11094 switch (boot_mode) {
11095 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11096 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11097 break;
11098 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11099 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11100 break;
11101 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11102 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11103 break;
11104 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11105 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11106 break;
11107 }
11108
11109 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11110 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11111
11112 BNX2X_DEV_INFO("%sWoL capable\n",
11113 (bp->flags & NO_WOL_FLAG) ? "not " : "");
11114
11115 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11116 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11117 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11118 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11119
11120 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11121 val, val2, val3, val4);
11122}
11123
11124#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11125#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11126
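/* Scan the IGU CAM for this function's default status block id, first
 * fast-path status block id and the number of status blocks it owns.
 */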
11127static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11128{
11129 int pfid = BP_FUNC(bp);
11130 int igu_sb_id;
11131 u32 val;
11132 u8 fid, igu_sb_cnt = 0;
11133
11134 bp->igu_base_sb = 0xff;
11135 if (CHIP_INT_MODE_IS_BC(bp)) {
11136 int vn = BP_VN(bp);
11137 igu_sb_cnt = bp->igu_sb_cnt;
11138 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11139 FP_SB_MAX_E1x;
11140
11141 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
11142 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11143
11144 return 0;
11145 }
11146
	/* IGU in normal mode - read CAM */
11148 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11149 igu_sb_id++) {
11150 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11151 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11152 continue;
11153 fid = IGU_FID(val);
11154 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11155 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11156 continue;
11157 if (IGU_VEC(val) == 0)
			/* default status block */
11159 bp->igu_dsb_id = igu_sb_id;
11160 else {
11161 if (bp->igu_base_sb == 0xff)
11162 bp->igu_base_sb = igu_sb_id;
11163 igu_sb_cnt++;
11164 }
11165 }
11166 }
11167
11168#ifdef CONFIG_PCI_MSI
	/* Due to new PF resource allocation by MFW T7.4 and above, it's
	 * optional that number of CAM entries will not be equal to the value
	 * advertised in PCI.
	 * Driver should use the minimal value of both as the actual status
	 * block count
	 */
11175 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11176#endif
11177
11178 if (igu_sb_cnt == 0) {
11179 BNX2X_ERR("CAM configuration error\n");
11180 return -EINVAL;
11181 }
11182
11183 return 0;
11184}
11185
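/* Build the per-configuration 'supported' link masks from the probed PHYs
 * and trim them according to the NVRAM speed capability masks.
 */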
11186static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11187{
11188 int cfg_size = 0, idx, port = BP_PORT(bp);
11189
	/* Aggregation of supported attributes of all external phys */
11191 bp->port.supported[0] = 0;
11192 bp->port.supported[1] = 0;
11193 switch (bp->link_params.num_phys) {
11194 case 1:
11195 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11196 cfg_size = 1;
11197 break;
11198 case 2:
11199 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11200 cfg_size = 1;
11201 break;
11202 case 3:
11203 if (bp->link_params.multi_phy_config &
11204 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11205 bp->port.supported[1] =
11206 bp->link_params.phy[EXT_PHY1].supported;
11207 bp->port.supported[0] =
11208 bp->link_params.phy[EXT_PHY2].supported;
11209 } else {
11210 bp->port.supported[0] =
11211 bp->link_params.phy[EXT_PHY1].supported;
11212 bp->port.supported[1] =
11213 bp->link_params.phy[EXT_PHY2].supported;
11214 }
11215 cfg_size = 2;
11216 break;
11217 }
11218
11219 if (!(bp->port.supported[0] || bp->port.supported[1])) {
11220 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11221 SHMEM_RD(bp,
11222 dev_info.port_hw_config[port].external_phy_config),
11223 SHMEM_RD(bp,
11224 dev_info.port_hw_config[port].external_phy_config2));
11225 return;
11226 }
11227
11228 if (CHIP_IS_E3(bp))
11229 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11230 else {
11231 switch (switch_cfg) {
11232 case SWITCH_CFG_1G:
11233 bp->port.phy_addr = REG_RD(
11234 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11235 break;
11236 case SWITCH_CFG_10G:
11237 bp->port.phy_addr = REG_RD(
11238 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11239 break;
11240 default:
11241 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11242 bp->port.link_config[0]);
11243 return;
11244 }
11245 }
11246 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
	/* mask what we support according to speed_cap_mask per configuration */
11248 for (idx = 0; idx < cfg_size; idx++) {
11249 if (!(bp->link_params.speed_cap_mask[idx] &
11250 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11251 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11252
11253 if (!(bp->link_params.speed_cap_mask[idx] &
11254 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11255 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11256
11257 if (!(bp->link_params.speed_cap_mask[idx] &
11258 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11259 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11260
11261 if (!(bp->link_params.speed_cap_mask[idx] &
11262 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11263 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11264
11265 if (!(bp->link_params.speed_cap_mask[idx] &
11266 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11267 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11268 SUPPORTED_1000baseT_Full);
11269
11270 if (!(bp->link_params.speed_cap_mask[idx] &
11271 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11272 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11273
11274 if (!(bp->link_params.speed_cap_mask[idx] &
11275 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11276 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11277
11278 if (!(bp->link_params.speed_cap_mask[idx] &
11279 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11280 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11281 }
11282
11283 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11284 bp->port.supported[1]);
11285}
11286
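/* Translate the NVRAM link_config words into the requested line speed,
 * duplex, flow control and advertised masks per configuration index.
 */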
11287static void bnx2x_link_settings_requested(struct bnx2x *bp)
11288{
11289 u32 link_config, idx, cfg_size = 0;
11290 bp->port.advertising[0] = 0;
11291 bp->port.advertising[1] = 0;
11292 switch (bp->link_params.num_phys) {
11293 case 1:
11294 case 2:
11295 cfg_size = 1;
11296 break;
11297 case 3:
11298 cfg_size = 2;
11299 break;
11300 }
11301 for (idx = 0; idx < cfg_size; idx++) {
11302 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11303 link_config = bp->port.link_config[idx];
11304 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11305 case PORT_FEATURE_LINK_SPEED_AUTO:
11306 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11307 bp->link_params.req_line_speed[idx] =
11308 SPEED_AUTO_NEG;
11309 bp->port.advertising[idx] |=
11310 bp->port.supported[idx];
11311 if (bp->link_params.phy[EXT_PHY1].type ==
11312 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11313 bp->port.advertising[idx] |=
11314 (SUPPORTED_100baseT_Half |
11315 SUPPORTED_100baseT_Full);
11316 } else {
				/* force 10G, no AN */
11318 bp->link_params.req_line_speed[idx] =
11319 SPEED_10000;
11320 bp->port.advertising[idx] |=
11321 (ADVERTISED_10000baseT_Full |
11322 ADVERTISED_FIBRE);
11323 continue;
11324 }
11325 break;
11326
11327 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11328 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11329 bp->link_params.req_line_speed[idx] =
11330 SPEED_10;
11331 bp->port.advertising[idx] |=
11332 (ADVERTISED_10baseT_Full |
11333 ADVERTISED_TP);
11334 } else {
11335 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11336 link_config,
11337 bp->link_params.speed_cap_mask[idx]);
11338 return;
11339 }
11340 break;
11341
11342 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11343 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11344 bp->link_params.req_line_speed[idx] =
11345 SPEED_10;
11346 bp->link_params.req_duplex[idx] =
11347 DUPLEX_HALF;
11348 bp->port.advertising[idx] |=
11349 (ADVERTISED_10baseT_Half |
11350 ADVERTISED_TP);
11351 } else {
11352 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11353 link_config,
11354 bp->link_params.speed_cap_mask[idx]);
11355 return;
11356 }
11357 break;
11358
11359 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11360 if (bp->port.supported[idx] &
11361 SUPPORTED_100baseT_Full) {
11362 bp->link_params.req_line_speed[idx] =
11363 SPEED_100;
11364 bp->port.advertising[idx] |=
11365 (ADVERTISED_100baseT_Full |
11366 ADVERTISED_TP);
11367 } else {
11368 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11369 link_config,
11370 bp->link_params.speed_cap_mask[idx]);
11371 return;
11372 }
11373 break;
11374
11375 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11376 if (bp->port.supported[idx] &
11377 SUPPORTED_100baseT_Half) {
11378 bp->link_params.req_line_speed[idx] =
11379 SPEED_100;
11380 bp->link_params.req_duplex[idx] =
11381 DUPLEX_HALF;
11382 bp->port.advertising[idx] |=
11383 (ADVERTISED_100baseT_Half |
11384 ADVERTISED_TP);
11385 } else {
11386 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11387 link_config,
11388 bp->link_params.speed_cap_mask[idx]);
11389 return;
11390 }
11391 break;
11392
11393 case PORT_FEATURE_LINK_SPEED_1G:
11394 if (bp->port.supported[idx] &
11395 SUPPORTED_1000baseT_Full) {
11396 bp->link_params.req_line_speed[idx] =
11397 SPEED_1000;
11398 bp->port.advertising[idx] |=
11399 (ADVERTISED_1000baseT_Full |
11400 ADVERTISED_TP);
11401 } else if (bp->port.supported[idx] &
11402 SUPPORTED_1000baseKX_Full) {
11403 bp->link_params.req_line_speed[idx] =
11404 SPEED_1000;
11405 bp->port.advertising[idx] |=
11406 ADVERTISED_1000baseKX_Full;
11407 } else {
11408 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11409 link_config,
11410 bp->link_params.speed_cap_mask[idx]);
11411 return;
11412 }
11413 break;
11414
11415 case PORT_FEATURE_LINK_SPEED_2_5G:
11416 if (bp->port.supported[idx] &
11417 SUPPORTED_2500baseX_Full) {
11418 bp->link_params.req_line_speed[idx] =
11419 SPEED_2500;
11420 bp->port.advertising[idx] |=
11421 (ADVERTISED_2500baseX_Full |
11422 ADVERTISED_TP);
11423 } else {
11424 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11425 link_config,
11426 bp->link_params.speed_cap_mask[idx]);
11427 return;
11428 }
11429 break;
11430
11431 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11432 if (bp->port.supported[idx] &
11433 SUPPORTED_10000baseT_Full) {
11434 bp->link_params.req_line_speed[idx] =
11435 SPEED_10000;
11436 bp->port.advertising[idx] |=
11437 (ADVERTISED_10000baseT_Full |
11438 ADVERTISED_FIBRE);
11439 } else if (bp->port.supported[idx] &
11440 SUPPORTED_10000baseKR_Full) {
11441 bp->link_params.req_line_speed[idx] =
11442 SPEED_10000;
11443 bp->port.advertising[idx] |=
11444 (ADVERTISED_10000baseKR_Full |
11445 ADVERTISED_FIBRE);
11446 } else {
11447 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11448 link_config,
11449 bp->link_params.speed_cap_mask[idx]);
11450 return;
11451 }
11452 break;
11453 case PORT_FEATURE_LINK_SPEED_20G:
11454 bp->link_params.req_line_speed[idx] = SPEED_20000;
11455
11456 break;
11457 default:
11458 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11459 link_config);
11460 bp->link_params.req_line_speed[idx] =
11461 SPEED_AUTO_NEG;
11462 bp->port.advertising[idx] =
11463 bp->port.supported[idx];
11464 break;
11465 }
11466
11467 bp->link_params.req_flow_ctrl[idx] = (link_config &
11468 PORT_FEATURE_FLOW_CONTROL_MASK);
11469 if (bp->link_params.req_flow_ctrl[idx] ==
11470 BNX2X_FLOW_CTRL_AUTO) {
11471 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11472 bp->link_params.req_flow_ctrl[idx] =
11473 BNX2X_FLOW_CTRL_NONE;
11474 else
11475 bnx2x_set_requested_fc(bp);
11476 }
11477
11478 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11479 bp->link_params.req_line_speed[idx],
11480 bp->link_params.req_duplex[idx],
11481 bp->link_params.req_flow_ctrl[idx],
11482 bp->port.advertising[idx]);
11483 }
11484}
11485
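/* Compose a network-order MAC address from the hi/lo words kept in shmem */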
11486static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11487{
11488 __be16 mac_hi_be = cpu_to_be16(mac_hi);
11489 __be32 mac_lo_be = cpu_to_be32(mac_lo);
11490 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11491 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11492}
11493
11494static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11495{
11496 int port = BP_PORT(bp);
11497 u32 config;
11498 u32 ext_phy_type, ext_phy_config, eee_mode;
11499
11500 bp->link_params.bp = bp;
11501 bp->link_params.port = port;
11502
11503 bp->link_params.lane_config =
11504 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11505
11506 bp->link_params.speed_cap_mask[0] =
11507 SHMEM_RD(bp,
11508 dev_info.port_hw_config[port].speed_capability_mask) &
11509 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11510 bp->link_params.speed_cap_mask[1] =
11511 SHMEM_RD(bp,
11512 dev_info.port_hw_config[port].speed_capability_mask2) &
11513 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11514 bp->port.link_config[0] =
11515 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11516
11517 bp->port.link_config[1] =
11518 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11519
11520 bp->link_params.multi_phy_config =
11521 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11522
	/* If the device is capable of WoL, set the default state according
	 * to the HW */
11525 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11526 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11527 (config & PORT_FEATURE_WOL_ENABLED));
11528
11529 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11530 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11531 bp->flags |= NO_ISCSI_FLAG;
11532 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11533 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11534 bp->flags |= NO_FCOE_FLAG;
11535
11536 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
11537 bp->link_params.lane_config,
11538 bp->link_params.speed_cap_mask[0],
11539 bp->port.link_config[0]);
11540
11541 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11542 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11543 bnx2x_phy_probe(&bp->link_params);
11544 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11545
11546 bnx2x_link_settings_requested(bp);
11547
	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
11552 ext_phy_config =
11553 SHMEM_RD(bp,
11554 dev_info.port_hw_config[port].external_phy_config);
11555 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11556 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11557 bp->mdio.prtad = bp->port.phy_addr;
11558
11559 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11560 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11561 bp->mdio.prtad =
11562 XGXS_EXT_PHY_ADDR(ext_phy_config);
11563
	/* Configure link feature according to nvram value */
11565 eee_mode = (((SHMEM_RD(bp, dev_info.
11566 port_feature_config[port].eee_power_mode)) &
11567 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11568 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11569 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11570 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11571 EEE_MODE_ENABLE_LPI |
11572 EEE_MODE_OUTPUT_TIME;
11573 } else {
11574 bp->link_params.eee_mode = 0;
11575 }
11576}
11577
11578void bnx2x_get_iscsi_info(struct bnx2x *bp)
11579{
11580 u32 no_flags = NO_ISCSI_FLAG;
11581 int port = BP_PORT(bp);
11582 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11583 drv_lic_key[port].max_iscsi_conn);
11584
11585 if (!CNIC_SUPPORT(bp)) {
11586 bp->flags |= no_flags;
11587 return;
11588 }
11589
	/* Get the number of maximum allowed iSCSI connections */
11591 bp->cnic_eth_dev.max_iscsi_conn =
11592 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11593 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11594
11595 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11596 bp->cnic_eth_dev.max_iscsi_conn);
11597
	/*
	 * If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
11602 if (!bp->cnic_eth_dev.max_iscsi_conn)
11603 bp->flags |= no_flags;
11604}
11605
11606static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11607{
	/* Port info */
11609 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11610 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11611 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11612 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11613
	/* Node info */
11615 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11616 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11617 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11618 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11619}
11620
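/* Count the functions on this path that will present an FCoE personality:
 * scan the per-function MF configuration in MF mode, otherwise check the
 * per-port license keys.
 */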
11621static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11622{
11623 u8 count = 0;
11624
11625 if (IS_MF(bp)) {
11626 u8 fid;
11627
		/* iterate over absolute function ids for this path: */
11629 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11630 if (IS_MF_SD(bp)) {
11631 u32 cfg = MF_CFG_RD(bp,
11632 func_mf_config[fid].config);
11633
11634 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11635 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11636 FUNC_MF_CFG_PROTOCOL_FCOE))
11637 count++;
11638 } else {
11639 u32 cfg = MF_CFG_RD(bp,
11640 func_ext_config[fid].
11641 func_cfg);
11642
11643 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11644 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11645 count++;
11646 }
11647 }
11648 } else {
11649 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11650
11651 for (port = 0; port < port_cnt; port++) {
11652 u32 lic = SHMEM_RD(bp,
11653 drv_lic_key[port].max_fcoe_conn) ^
11654 FW_ENCODE_32BIT_PATTERN;
11655 if (lic)
11656 count++;
11657 }
11658 }
11659
11660 return count;
11661}
11662
11663static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11664{
11665 int port = BP_PORT(bp);
11666 int func = BP_ABS_FUNC(bp);
11667 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11668 drv_lic_key[port].max_fcoe_conn);
11669 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11670
11671 if (!CNIC_SUPPORT(bp)) {
11672 bp->flags |= NO_FCOE_FLAG;
11673 return;
11674 }
11675
	/* Get the number of maximum allowed FCoE connections */
11677 bp->cnic_eth_dev.max_fcoe_conn =
11678 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11679 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11680
	/* Calculate the number of maximum allowed FCoE tasks */
11682 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11683
	/* check if FCoE resources must be shared between different functions */
11685 if (num_fcoe_func)
11686 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11687
	/* Read the WWN: */
11689 if (!IS_MF(bp)) {
		/* Port info */
11691 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11692 SHMEM_RD(bp,
11693 dev_info.port_hw_config[port].
11694 fcoe_wwn_port_name_upper);
11695 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11696 SHMEM_RD(bp,
11697 dev_info.port_hw_config[port].
11698 fcoe_wwn_port_name_lower);
11699
		/* Node info */
11701 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11702 SHMEM_RD(bp,
11703 dev_info.port_hw_config[port].
11704 fcoe_wwn_node_name_upper);
11705 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11706 SHMEM_RD(bp,
11707 dev_info.port_hw_config[port].
11708 fcoe_wwn_node_name_lower);
11709 } else if (!IS_MF_SD(bp)) {
		/* Read the WWN info only if the FCoE feature is enabled for
		 * this function.
		 */
11713 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11714 bnx2x_get_ext_wwn_info(bp, func);
11715 } else {
11716 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11717 bnx2x_get_ext_wwn_info(bp, func);
11718 }
11719
11720 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11721
	/*
	 * If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
11726 if (!bp->cnic_eth_dev.max_fcoe_conn)
11727 bp->flags |= NO_FCOE_FLAG;
11728}
11729
11730static void bnx2x_get_cnic_info(struct bnx2x *bp)
11731{
11732
	/* iSCSI and FCoE may be dynamically disabled, but reading their
	 * info here once lets the driver reduce memory usage when a
	 * feature is disabled for good.
	 */
11737 bnx2x_get_iscsi_info(bp);
11738 bnx2x_get_fcoe_info(bp);
11739}
11740
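/* Read the iSCSI and FCoE (FIP) MAC addresses from the MF configuration or
 * from the port's shmem area, disabling each feature whose MAC is invalid.
 */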
11741static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11742{
11743 u32 val, val2;
11744 int func = BP_ABS_FUNC(bp);
11745 int port = BP_PORT(bp);
11746 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11747 u8 *fip_mac = bp->fip_mac;
11748
11749 if (IS_MF(bp)) {
		/* iSCSI and FCoE NPAR MACs: if there is no iSCSI or
		 * FCoE MAC then the appropriate feature should be disabled.
		 * In non SD mode features configuration comes from struct
		 * func_ext_config.
		 */
11755 if (!IS_MF_SD(bp)) {
11756 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11757 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11758 val2 = MF_CFG_RD(bp, func_ext_config[func].
11759 iscsi_mac_addr_upper);
11760 val = MF_CFG_RD(bp, func_ext_config[func].
11761 iscsi_mac_addr_lower);
11762 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11763 BNX2X_DEV_INFO
11764 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11765 } else {
11766 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11767 }
11768
11769 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11770 val2 = MF_CFG_RD(bp, func_ext_config[func].
11771 fcoe_mac_addr_upper);
11772 val = MF_CFG_RD(bp, func_ext_config[func].
11773 fcoe_mac_addr_lower);
11774 bnx2x_set_mac_buf(fip_mac, val, val2);
11775 BNX2X_DEV_INFO
11776 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11777 } else {
11778 bp->flags |= NO_FCOE_FLAG;
11779 }
11780
11781 bp->mf_ext_config = cfg;
11782
11783 } else {
11784 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
				/* use primary mac as iscsi mac */
11786 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11787
11788 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11789 BNX2X_DEV_INFO
11790 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11791 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
				/* use primary mac as fip mac */
11793 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11794 BNX2X_DEV_INFO("SD FCoE MODE\n");
11795 BNX2X_DEV_INFO
11796 ("Read FIP MAC: %pM\n", fip_mac);
11797 }
11798 }
11799
		/* If this is a storage-only interface, use SAN mac as
		 * primary MAC. Notice that for SD this is already the case,
		 * as the SAN mac was copied from the primary MAC.
		 */
11804 if (IS_MF_FCOE_AFEX(bp))
11805 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11806 } else {
11807 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11808 iscsi_mac_upper);
11809 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11810 iscsi_mac_lower);
11811 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11812
11813 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11814 fcoe_fip_mac_upper);
11815 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11816 fcoe_fip_mac_lower);
11817 bnx2x_set_mac_buf(fip_mac, val, val2);
11818 }
11819
	/* Disable iSCSI OOO if MAC configuration is invalid. */
11821 if (!is_valid_ether_addr(iscsi_mac)) {
11822 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11823 eth_zero_addr(iscsi_mac);
11824 }
11825
	/* Disable FCoE if MAC configuration is invalid. */
11827 if (!is_valid_ether_addr(fip_mac)) {
11828 bp->flags |= NO_FCOE_FLAG;
11829 eth_zero_addr(bp->fip_mac);
11830 }
11831}
11832
11833static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11834{
11835 u32 val, val2;
11836 int func = BP_ABS_FUNC(bp);
11837 int port = BP_PORT(bp);
11838
	/* Zero primary MAC configuration */
11840 eth_zero_addr(bp->dev->dev_addr);
11841
11842 if (BP_NOMCP(bp)) {
11843 BNX2X_ERROR("warning: random MAC workaround active\n");
11844 eth_hw_addr_random(bp->dev);
11845 } else if (IS_MF(bp)) {
11846 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11847 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11848 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11849 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11850 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11851
11852 if (CNIC_SUPPORT(bp))
11853 bnx2x_get_cnic_mac_hwinfo(bp);
11854 } else {
		/* in SF read MACs from port configuration */
11856 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11857 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11858 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11859
11860 if (CNIC_SUPPORT(bp))
11861 bnx2x_get_cnic_mac_hwinfo(bp);
11862 }
11863
11864 if (!BP_NOMCP(bp)) {
		/* Read physical port identifier from shmem */
11866 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11867 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11868 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11869 bp->flags |= HAS_PHYS_PORT_ID;
11870 }
11871
11872 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11873
11874 if (!is_valid_ether_addr(bp->dev->dev_addr))
11875 dev_err(&bp->pdev->dev,
11876 "bad Ethernet MAC address configuration: %pM\n"
11877 "change it manually before bringing up the appropriate network interface\n",
11878 bp->dev->dev_addr);
11879}
11880
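/* Return true if 'pause on exhausted host ring' (dropless flow control) is
 * enabled for this function/port in the NVRAM configuration.
 */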
11881static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11882{
11883 int tmp;
11884 u32 cfg;
11885
11886 if (IS_VF(bp))
11887 return false;
11888
11889 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
		/* Take function: tmp = func */
11891 tmp = BP_ABS_FUNC(bp);
11892 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11893 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11894 } else {
		/* Take port: tmp = port */
11896 tmp = BP_PORT(bp);
11897 cfg = SHMEM_RD(bp,
11898 dev_info.port_hw_config[tmp].generic_features);
11899 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11900 }
11901 return cfg;
11902}
11903
11904static void validate_set_si_mode(struct bnx2x *bp)
11905{
11906 u8 func = BP_ABS_FUNC(bp);
11907 u32 val;
11908
11909 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11910
	/* check for legal mac (upper bytes) */
11912 if (val != 0xffff) {
11913 bp->mf_mode = MULTI_FUNCTION_SI;
11914 bp->mf_config[BP_VN(bp)] =
11915 MF_CFG_RD(bp, func_mf_config[func].config);
11916 } else
11917 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11918}
11919
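/* Gather all HW-derived configuration for a PF: common chip info, IGU
 * layout, multi-function mode, port, MAC and CNIC parameters.
 */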
11920static int bnx2x_get_hwinfo(struct bnx2x *bp)
11921{
11922 int func = BP_ABS_FUNC(bp);
11923 int vn, mfw_vn;
11924 u32 val = 0, val2 = 0;
11925 int rc = 0;
11926
	/* Validate that chip access is feasible */
11928 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11929 dev_err(&bp->pdev->dev,
11930 "Chip read returns all Fs. Preventing probe from continuing\n");
11931 return -EINVAL;
11932 }
11933
11934 bnx2x_get_common_hwinfo(bp);
11935
	/*
	 * initialize IGU parameters
	 */
11939 if (CHIP_IS_E1x(bp)) {
11940 bp->common.int_block = INT_BLOCK_HC;
11941
11942 bp->igu_dsb_id = DEF_SB_IGU_ID;
11943 bp->igu_base_sb = 0;
11944 } else {
11945 bp->common.int_block = INT_BLOCK_IGU;
11946
		/* do not allow device reset during IGU info processing */
11948 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11949
11950 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11951
11952 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11953 int tout = 5000;
11954
11955 BNX2X_DEV_INFO("FORCING Normal Mode\n");
11956
11957 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11958 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11959 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11960
11961 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11962 tout--;
11963 usleep_range(1000, 2000);
11964 }
11965
11966 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11967 dev_err(&bp->pdev->dev,
11968 "FORCING Normal Mode failed!!!\n");
11969 bnx2x_release_hw_lock(bp,
11970 HW_LOCK_RESOURCE_RESET);
11971 return -EPERM;
11972 }
11973 }
11974
11975 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11976 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11977 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11978 } else
11979 BNX2X_DEV_INFO("IGU Normal Mode\n");
11980
11981 rc = bnx2x_get_igu_cam_info(bp);
11982 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11983 if (rc)
11984 return rc;
11985 }
11986

	/* set base FW non-default (fast path) status block id; this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
11992 if (CHIP_IS_E1x(bp))
11993 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11994 else
		/* 57712+: one FW SB is used per IGU SB (Rx and Tx of the
		 * same queue are indicated on the same IGU SB), so FW and
		 * IGU SBs are preferred to be the same value.
		 */
11999 bp->base_fw_ndsb = bp->igu_base_sb;
12000
12001 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
12002 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
12003 bp->igu_sb_cnt, bp->base_fw_ndsb);
12004
12005
	/*
	 * Initialize MF configuration
	 */
12009 bp->mf_ov = 0;
12010 bp->mf_mode = 0;
12011 bp->mf_sub_mode = 0;
12012 vn = BP_VN(bp);
12013 mfw_vn = BP_FW_MB_IDX(bp);
12014
12015 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12016 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
12017 bp->common.shmem2_base, SHMEM2_RD(bp, size),
12018 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
12019
12020 if (SHMEM2_HAS(bp, mf_cfg_addr))
12021 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12022 else
12023 bp->common.mf_cfg_base = bp->common.shmem_base +
12024 offsetof(struct shmem_region, func_mb) +
12025 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
12026
		/*
		 * get mf configuration:
		 * 1. Existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 */
12034 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12035
12036 val = SHMEM_RD(bp,
12037 dev_info.shared_feature_config.config);
12038 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
12039
12040 switch (val) {
12041 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
12042 validate_set_si_mode(bp);
12043 break;
12044 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
12045 if ((!CHIP_IS_E1x(bp)) &&
12046 (MF_CFG_RD(bp, func_mf_config[func].
12047 mac_upper) != 0xffff) &&
12048 (SHMEM2_HAS(bp,
12049 afex_driver_support))) {
12050 bp->mf_mode = MULTI_FUNCTION_AFEX;
12051 bp->mf_config[vn] = MF_CFG_RD(bp,
12052 func_mf_config[func].config);
12053 } else {
12054 BNX2X_DEV_INFO("can not configure afex mode\n");
12055 }
12056 break;
12057 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
12059 val = MF_CFG_RD(bp,
12060 func_mf_config[FUNC_0].e1hov_tag);
12061 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12062
12063 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12064 bp->mf_mode = MULTI_FUNCTION_SD;
12065 bp->mf_config[vn] = MF_CFG_RD(bp,
12066 func_mf_config[func].config);
12067 } else
12068 BNX2X_DEV_INFO("illegal OV for SD\n");
12069 break;
12070 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12071 bp->mf_mode = MULTI_FUNCTION_SD;
12072 bp->mf_sub_mode = SUB_MF_MODE_BD;
12073 bp->mf_config[vn] =
12074 MF_CFG_RD(bp,
12075 func_mf_config[func].config);
12076
12077 if (SHMEM2_HAS(bp, mtu_size)) {
12078 int mtu_idx = BP_FW_MB_IDX(bp);
12079 u16 mtu_size;
12080 u32 mtu;
12081
12082 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12083 mtu_size = (u16)mtu;
12084 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12085 mtu_size, mtu);
12086
					/* if valid: update device mtu */
12088 if (((mtu_size + ETH_HLEN) >=
12089 ETH_MIN_PACKET_SIZE) &&
12090 (mtu_size <=
12091 ETH_MAX_JUMBO_PACKET_SIZE))
12092 bp->dev->mtu = mtu_size;
12093 }
12094 break;
12095 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12096 bp->mf_mode = MULTI_FUNCTION_SD;
12097 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12098 bp->mf_config[vn] =
12099 MF_CFG_RD(bp,
12100 func_mf_config[func].config);
12101 break;
12102 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12103 bp->mf_config[vn] = 0;
12104 break;
12105 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12106 val2 = SHMEM_RD(bp,
12107 dev_info.shared_hw_config.config_3);
12108 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12109 switch (val2) {
12110 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12111 validate_set_si_mode(bp);
12112 bp->mf_sub_mode =
12113 SUB_MF_MODE_NPAR1_DOT_5;
12114 break;
12115 default:
					/* Unknown configuration: reset mf_config */
					bp->mf_config[vn] = 0;
					BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
						       val2);
12120 }
12121 break;
12122 default:
				/* Unknown configuration: reset mf_config */
12124 bp->mf_config[vn] = 0;
12125 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12126 }
12127 }
12128
12129 BNX2X_DEV_INFO("%s function mode\n",
12130 IS_MF(bp) ? "multi" : "single");
12131
12132 switch (bp->mf_mode) {
12133 case MULTI_FUNCTION_SD:
12134 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12135 FUNC_MF_CFG_E1HOV_TAG_MASK;
12136 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12137 bp->mf_ov = val;
12138 bp->path_has_ovlan = true;
12139
12140 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12141 func, bp->mf_ov, bp->mf_ov);
12142 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12143 (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12144 dev_err(&bp->pdev->dev,
12145 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12146 func);
12147 bp->path_has_ovlan = true;
12148 } else {
12149 dev_err(&bp->pdev->dev,
12150 "No valid MF OV for func %d, aborting\n",
12151 func);
12152 return -EPERM;
12153 }
12154 break;
12155 case MULTI_FUNCTION_AFEX:
12156 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12157 break;
12158 case MULTI_FUNCTION_SI:
12159 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12160 func);
12161 break;
12162 default:
12163 if (vn) {
12164 dev_err(&bp->pdev->dev,
12165 "VN %d is in a single function mode, aborting\n",
12166 vn);
12167 return -EPERM;
12168 }
12169 break;
12170 }
12171
		/* A non-MF function may still share the path with a function
		 * using an outer VLAN on the other port of a 4-port device;
		 * check the other port's function so that path_has_ovlan
		 * reflects the whole path.
		 */
12177 if (CHIP_MODE_IS_4_PORT(bp) &&
12178 !bp->path_has_ovlan &&
12179 !IS_MF(bp) &&
12180 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12181 u8 other_port = !BP_PORT(bp);
12182 u8 other_func = BP_PATH(bp) + 2*other_port;
12183 val = MF_CFG_RD(bp,
12184 func_mf_config[other_func].e1hov_tag);
12185 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12186 bp->path_has_ovlan = true;
12187 }
12188 }
12189
	/* adjust igu_sb_cnt to MF for E1H */
12191 if (CHIP_IS_E1H(bp) && IS_MF(bp))
12192 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12193
	/* port info */
12195 bnx2x_get_port_hwinfo(bp);
12196
	/* Get MAC addresses */
12198 bnx2x_get_mac_hwinfo(bp);
12199
12200 bnx2x_get_cnic_info(bp);
12201
12202 return rc;
12203}
12204
12205static void bnx2x_read_fwinfo(struct bnx2x *bp)
12206{
12207 int cnt, i, block_end, rodi;
12208 char vpd_start[BNX2X_VPD_LEN+1];
12209 char str_id_reg[VENDOR_ID_LEN+1];
12210 char str_id_cap[VENDOR_ID_LEN+1];
12211 char *vpd_data;
12212 char *vpd_extended_data = NULL;
12213 u8 len;
12214
12215 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
12216 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12217
12218 if (cnt < BNX2X_VPD_LEN)
12219 goto out_not_found;
12220
	/* VPD RO tag should be first tag after identifier string, hence
	 * we should be able to find it in first BNX2X_VPD_LEN chars
	 */
12224 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
12225 PCI_VPD_LRDT_RO_DATA);
12226 if (i < 0)
12227 goto out_not_found;
12228
12229 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
12230 pci_vpd_lrdt_size(&vpd_start[i]);
12231
12232 i += PCI_VPD_LRDT_TAG_SIZE;
12233
12234 if (block_end > BNX2X_VPD_LEN) {
12235 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
12236 if (vpd_extended_data == NULL)
12237 goto out_not_found;
12238
		/* read rest of vpd image into vpd_extended_data */
12240 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
12241 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
12242 block_end - BNX2X_VPD_LEN,
12243 vpd_extended_data + BNX2X_VPD_LEN);
12244 if (cnt < (block_end - BNX2X_VPD_LEN))
12245 goto out_not_found;
12246 vpd_data = vpd_extended_data;
12247 } else
12248 vpd_data = vpd_start;
12249
12250
12251
12252 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12253 PCI_VPD_RO_KEYWORD_MFR_ID);
12254 if (rodi < 0)
12255 goto out_not_found;
12256
12257 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12258
12259 if (len != VENDOR_ID_LEN)
12260 goto out_not_found;
12261
12262 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12263
	/* vendor specific info */
12265 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12266 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
12267 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
12268 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
12269
12270 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12271 PCI_VPD_RO_KEYWORD_VENDOR0);
12272 if (rodi >= 0) {
12273 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12274
12275 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12276
12277 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
12278 memcpy(bp->fw_ver, &vpd_data[rodi], len);
12279 bp->fw_ver[len] = ' ';
12280 }
12281 }
12282 kfree(vpd_extended_data);
12283 return;
12284 }
12285out_not_found:
12286 kfree(vpd_extended_data);
12287 return;
12288}
12289
12290static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12291{
12292 u32 flags = 0;
12293
12294 if (CHIP_REV_IS_FPGA(bp))
12295 SET_FLAGS(flags, MODE_FPGA);
12296 else if (CHIP_REV_IS_EMUL(bp))
12297 SET_FLAGS(flags, MODE_EMUL);
12298 else
12299 SET_FLAGS(flags, MODE_ASIC);
12300
12301 if (CHIP_MODE_IS_4_PORT(bp))
12302 SET_FLAGS(flags, MODE_PORT4);
12303 else
12304 SET_FLAGS(flags, MODE_PORT2);
12305
12306 if (CHIP_IS_E2(bp))
12307 SET_FLAGS(flags, MODE_E2);
12308 else if (CHIP_IS_E3(bp)) {
12309 SET_FLAGS(flags, MODE_E3);
12310 if (CHIP_REV(bp) == CHIP_REV_Ax)
12311 SET_FLAGS(flags, MODE_E3_A0);
12312 else
12313 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12314 }
12315
12316 if (IS_MF(bp)) {
12317 SET_FLAGS(flags, MODE_MF);
12318 switch (bp->mf_mode) {
12319 case MULTI_FUNCTION_SD:
12320 SET_FLAGS(flags, MODE_MF_SD);
12321 break;
12322 case MULTI_FUNCTION_SI:
12323 SET_FLAGS(flags, MODE_MF_SI);
12324 break;
12325 case MULTI_FUNCTION_AFEX:
12326 SET_FLAGS(flags, MODE_MF_AFEX);
12327 break;
12328 }
12329 } else
12330 SET_FLAGS(flags, MODE_SF);
12331
12332#if defined(__LITTLE_ENDIAN)
12333 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12334#else
12335 SET_FLAGS(flags, MODE_BIG_ENDIAN);
12336#endif
12337 INIT_MODE_FLAGS(bp) = flags;
12338}
12339
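/* One-time init of the bp structure during probe: locks and deferred work,
 * HW info, previous-unload recovery, TPA/flow-control/coalescing defaults,
 * periodic timer and DCBX state.
 */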
12340static int bnx2x_init_bp(struct bnx2x *bp)
12341{
12342 int func;
12343 int rc;
12344
12345 mutex_init(&bp->port.phy_mutex);
12346 mutex_init(&bp->fw_mb_mutex);
12347 mutex_init(&bp->drv_info_mutex);
12348 sema_init(&bp->stats_lock, 1);
12349 bp->drv_info_mng_owner = false;
12350 INIT_LIST_HEAD(&bp->vlan_reg);
12351
12352 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12353 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12354 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12355 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12356 if (IS_PF(bp)) {
12357 rc = bnx2x_get_hwinfo(bp);
12358 if (rc)
12359 return rc;
12360 } else {
12361 eth_zero_addr(bp->dev->dev_addr);
12362 }
12363
12364 bnx2x_set_modes_bitmap(bp);
12365
12366 rc = bnx2x_alloc_mem_bp(bp);
12367 if (rc)
12368 return rc;
12369
12370 bnx2x_read_fwinfo(bp);
12371
12372 func = BP_FUNC(bp);
12373
	/* need to reset chip if undi was active */
12375 if (IS_PF(bp) && !BP_NOMCP(bp)) {
		/* init fw_seq */
12377 bp->fw_seq =
12378 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12379 DRV_MSG_SEQ_NUMBER_MASK;
12380 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12381
12382 rc = bnx2x_prev_unload(bp);
12383 if (rc) {
12384 bnx2x_free_mem_bp(bp);
12385 return rc;
12386 }
12387 }
12388
12389 if (CHIP_REV_IS_FPGA(bp))
12390 dev_err(&bp->pdev->dev, "FPGA detected\n");
12391
12392 if (BP_NOMCP(bp) && (func == 0))
12393 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12394
12395 bp->disable_tpa = disable_tpa;
12396 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
	/* Reduce memory usage in kdump environment by disabling TPA */
12398 bp->disable_tpa |= is_kdump_kernel();
12399
	/* Set TPA flags */
12401 if (bp->disable_tpa) {
12402 bp->dev->hw_features &= ~NETIF_F_LRO;
12403 bp->dev->features &= ~NETIF_F_LRO;
12404 }
12405
12406 if (CHIP_IS_E1(bp))
12407 bp->dropless_fc = 0;
12408 else
12409 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12410
12411 bp->mrrs = mrrs;
12412
12413 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12414 if (IS_VF(bp))
12415 bp->rx_ring_size = MAX_RX_AVAIL;
12416
	/* make sure that the numbers are in the right granularity */
12418 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12419 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12420
12421 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12422
12423 init_timer(&bp->timer);
12424 bp->timer.expires = jiffies + bp->current_interval;
12425 bp->timer.data = (unsigned long) bp;
12426 bp->timer.function = bnx2x_timer;
12427
12428 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12429 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12430 SHMEM2_HAS(bp, dcbx_en) &&
12431 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12432 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12433 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12434 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12435 bnx2x_dcbx_init_params(bp);
12436 } else {
12437 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12438 }
12439
12440 if (CHIP_IS_E1x(bp))
12441 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12442 else
12443 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12444
	/* Set the max number of traffic classes (CoS) supported by the chip */
12446 if (IS_VF(bp))
12447 bp->max_cos = 1;
12448 else if (CHIP_IS_E1x(bp))
12449 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12450 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12451 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12452 else if (CHIP_IS_E3B0(bp))
12453 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12454 else
12455 BNX2X_ERR("unknown chip %x revision %x\n",
12456 CHIP_NUM(bp), CHIP_REV(bp));
12457 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12458
	/* We need at least one default status block for slow-path events,
	 * a second status block for the L2 queue, and a third status block
	 * for CNIC if supported.
	 */
12463 if (IS_VF(bp))
12464 bp->min_msix_vec_cnt = 1;
12465 else if (CNIC_SUPPORT(bp))
12466 bp->min_msix_vec_cnt = 3;
12467 else
12468 bp->min_msix_vec_cnt = 2;
	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
12470
12471 bp->dump_preset_idx = 1;
12472
12473 if (CHIP_IS_E3B0(bp))
12474 bp->flags |= PTP_SUPPORTED;
12475
12476 return rc;
12477}
12478


/****************************************************************************
 * General service functions
 ****************************************************************************/

/* net_device service functions */

/* called with rtnl_lock */
12488static int bnx2x_open(struct net_device *dev)
12489{
12490 struct bnx2x *bp = netdev_priv(dev);
12491 int rc;
12492
12493 bp->stats_init = true;
12494
12495 netif_carrier_off(dev);
12496
12497 bnx2x_set_power_state(bp, PCI_D0);
12498
	/* If parity occurred during the unload, then attentions and/or
	 * RECOVERY_IN_PROGRESS may still be set. In this case we want the
	 * first function loaded on the current engine to complete the
	 * recovery.
	 * Parity recovery is only relevant for the PF driver.
	 */
12505 if (IS_PF(bp)) {
12506 int other_engine = BP_PATH(bp) ? 0 : 1;
12507 bool other_load_status, load_status;
12508 bool global = false;
12509
12510 other_load_status = bnx2x_get_load_status(bp, other_engine);
12511 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12512 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12513 bnx2x_chk_parity_attn(bp, &global, true)) {
12514 do {
				/* If there are attentions and they are in
				 * global blocks, set the GLOBAL_RESET bit
				 * regardless of whether it will be this
				 * function that completes the recovery.
				 */
12520 if (global)
12521 bnx2x_set_reset_global(bp);
12522
				/* Only the first function on the current
				 * engine should try to recover in open. In
				 * case of attentions in global blocks only
				 * the first in the chip should try to recover.
				 */
12528 if ((!load_status &&
12529 (!global || !other_load_status)) &&
12530 bnx2x_trylock_leader_lock(bp) &&
12531 !bnx2x_leader_reset(bp)) {
12532 netdev_info(bp->dev,
12533 "Recovered in open\n");
12534 break;
12535 }
12536
				/* recovery has failed... */
12538 bnx2x_set_power_state(bp, PCI_D3hot);
12539 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12540
12541 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12542 "If you still see this message after a few retries then power cycle is required.\n");
12543
12544 return -EAGAIN;
12545 } while (0);
12546 }
12547 }
12548
12549 bp->recovery_state = BNX2X_RECOVERY_DONE;
12550 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12551 if (rc)
12552 return rc;
12553
12554#ifdef CONFIG_BNX2X_VXLAN
12555 if (IS_PF(bp))
12556 vxlan_get_rx_port(dev);
12557#endif
12558#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
12559 if (IS_PF(bp))
12560 geneve_get_rx_port(dev);
12561#endif
12562
12563 return 0;
12564}
12565
/* called with rtnl_lock */
12567static int bnx2x_close(struct net_device *dev)
12568{
12569 struct bnx2x *bp = netdev_priv(dev);
12570
	/* Unload the driver, release IRQs */
12572 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12573
12574 return 0;
12575}
12576
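/* Build a list of the netdev's multicast addresses for a mcast ramrod;
 * the caller must release it with bnx2x_free_mcast_macs_list().
 */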
12577static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12578 struct bnx2x_mcast_ramrod_params *p)
12579{
12580 int mc_count = netdev_mc_count(bp->dev);
12581 struct bnx2x_mcast_list_elem *mc_mac =
12582 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
12583 struct netdev_hw_addr *ha;
12584
12585 if (!mc_mac)
12586 return -ENOMEM;
12587
12588 INIT_LIST_HEAD(&p->mcast_list);
12589
12590 netdev_for_each_mc_addr(ha, bp->dev) {
12591 mc_mac->mac = bnx2x_mc_addr(ha);
12592 list_add_tail(&mc_mac->link, &p->mcast_list);
12593 mc_mac++;
12594 }
12595
12596 p->mcast_list_len = mc_count;
12597
12598 return 0;
12599}
12600
12601static void bnx2x_free_mcast_macs_list(
12602 struct bnx2x_mcast_ramrod_params *p)
12603{
12604 struct bnx2x_mcast_list_elem *mc_mac =
12605 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
12606 link);
12607
12608 WARN_ON(!mc_mac);
12609 kfree(mc_mac);
12610}
12611
/**
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 *
 * We will use zero (0) as a MAC type for these MACs.
 */
12619static int bnx2x_set_uc_list(struct bnx2x *bp)
12620{
12621 int rc;
12622 struct net_device *dev = bp->dev;
12623 struct netdev_hw_addr *ha;
12624 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12625 unsigned long ramrod_flags = 0;
12626
	/* first, schedule a cleanup of the old configuration */
12628 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12629 if (rc < 0) {
12630 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12631 return rc;
12632 }
12633
12634 netdev_for_each_uc_addr(ha, dev) {
12635 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12636 BNX2X_UC_LIST_MAC, &ramrod_flags);
12637 if (rc == -EEXIST) {
12638 DP(BNX2X_MSG_SP,
12639 "Failed to schedule ADD operations: %d\n", rc);
			/* do not treat adding same MAC as error */
12641 rc = 0;
12642
12643 } else if (rc < 0) {
12644
12645 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12646 rc);
12647 return rc;
12648 }
12649 }
12650
	/* Execute the pending commands */
12652 __set_bit(RAMROD_CONT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12654 BNX2X_UC_LIST_MAC, &ramrod_flags);
12655}
12656
12657static int bnx2x_set_mc_list(struct bnx2x *bp)
12658{
12659 struct net_device *dev = bp->dev;
12660 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12661 int rc = 0;
12662
12663 rparam.mcast_obj = &bp->mcast_obj;
12664
	/* first, clear all configured multicast MACs */
12666 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12667 if (rc < 0) {
12668 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12669 return rc;
12670 }
12671
	/* then, configure a new MACs list */
12673 if (netdev_mc_count(dev)) {
12674 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
12675 if (rc) {
12676 BNX2X_ERR("Failed to create multicast MACs list: %d\n",
12677 rc);
12678 return rc;
12679 }
12680
		/* Now add the new MACs */
12682 rc = bnx2x_config_mcast(bp, &rparam,
12683 BNX2X_MCAST_CMD_ADD);
12684 if (rc < 0)
12685 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12686 rc);
12687
12688 bnx2x_free_mcast_macs_list(&rparam);
12689 }
12690
12691 return rc;
12692}
12693
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12695static void bnx2x_set_rx_mode(struct net_device *dev)
12696{
12697 struct bnx2x *bp = netdev_priv(dev);
12698
12699 if (bp->state != BNX2X_STATE_OPEN) {
12700 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12701 return;
12702 } else {
		/* Schedule an SP task to handle rest of change */
12704 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12705 NETIF_MSG_IFUP);
12706 }
12707}
12708
12709void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12710{
12711 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12712
12713 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12714
12715 netif_addr_lock_bh(bp->dev);
12716
12717 if (bp->dev->flags & IFF_PROMISC) {
12718 rx_mode = BNX2X_RX_MODE_PROMISC;
12719 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12720 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12721 CHIP_IS_E1(bp))) {
12722 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12723 } else {
12724 if (IS_PF(bp)) {
			/* some multicasts */
12726 if (bnx2x_set_mc_list(bp) < 0)
12727 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12728
			/* release bh lock, as bnx2x_set_uc_list might sleep */
12730 netif_addr_unlock_bh(bp->dev);
12731 if (bnx2x_set_uc_list(bp) < 0)
12732 rx_mode = BNX2X_RX_MODE_PROMISC;
12733 netif_addr_lock_bh(bp->dev);
12734 } else {
			/* configuring mcast to a vf involves sleeping (when we
			 * wait for the pf's response).
			 */
12738 bnx2x_schedule_sp_rtnl(bp,
12739 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12740 }
12741 }
12742
12743 bp->rx_mode = rx_mode;
12744
12745 if (IS_MF_ISCSI_ONLY(bp))
12746 bp->rx_mode = BNX2X_RX_MODE_NONE;
12747
	/* Schedule the rx_mode command */
12749 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12750 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12751 netif_addr_unlock_bh(bp->dev);
12752 return;
12753 }
12754
12755 if (IS_PF(bp)) {
12756 bnx2x_set_storm_rx_mode(bp);
12757 netif_addr_unlock_bh(bp->dev);
12758 } else {
		/* VF will need to request the PF to make this change, and so
		 * the VF needs to release the bottom-half lock prior to the
		 * request (as it will likely require sleep on the VF side)
		 */
12763 netif_addr_unlock_bh(bp->dev);
12764 bnx2x_vfpf_storm_rx_mode(bp);
12765 }
12766}
12767
/* called with rtnl_lock */
12769static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12770 int devad, u16 addr)
12771{
12772 struct bnx2x *bp = netdev_priv(netdev);
12773 u16 value;
12774 int rc;
12775
12776 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12777 prtad, devad, addr);
12778
	/* The HW expects different devad if CL22 is used */
12780 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12781
12782 bnx2x_acquire_phy_lock(bp);
12783 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12784 bnx2x_release_phy_lock(bp);
12785 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12786
12787 if (!rc)
12788 rc = value;
12789 return rc;
12790}
12791
/* called with rtnl_lock */
12793static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12794 u16 addr, u16 value)
12795{
12796 struct bnx2x *bp = netdev_priv(netdev);
12797 int rc;
12798
12799 DP(NETIF_MSG_LINK,
12800 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12801 prtad, devad, addr, value);
12802
	/* The HW expects different devad if CL22 is used */
12804 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12805
12806 bnx2x_acquire_phy_lock(bp);
12807 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12808 bnx2x_release_phy_lock(bp);
12809 return rc;
12810}
12811
/* called with rtnl_lock */
12813static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12814{
12815 struct bnx2x *bp = netdev_priv(dev);
12816 struct mii_ioctl_data *mdio = if_mii(ifr);
12817
12818 if (!netif_running(dev))
12819 return -EAGAIN;
12820
12821 switch (cmd) {
12822 case SIOCSHWTSTAMP:
12823 return bnx2x_hwtstamp_ioctl(bp, ifr);
12824 default:
12825 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12826 mdio->phy_id, mdio->reg_num, mdio->val_in);
12827 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12828 }
12829}
12830
12831#ifdef CONFIG_NET_POLL_CONTROLLER
12832static void poll_bnx2x(struct net_device *dev)
12833{
12834 struct bnx2x *bp = netdev_priv(dev);
12835 int i;
12836
12837 for_each_eth_queue(bp, i) {
12838 struct bnx2x_fastpath *fp = &bp->fp[i];
12839 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12840 }
12841}
12842#endif
12843
12844static int bnx2x_validate_addr(struct net_device *dev)
12845{
12846 struct bnx2x *bp = netdev_priv(dev);
12847
	/* query the bulletin board for mac address configured by the PF */
12849 if (IS_VF(bp))
12850 bnx2x_sample_bulletin(bp);
12851
12852 if (!is_valid_ether_addr(dev->dev_addr)) {
12853 BNX2X_ERR("Non-valid Ethernet address\n");
12854 return -EADDRNOTAVAIL;
12855 }
12856 return 0;
12857}
12858
12859static int bnx2x_get_phys_port_id(struct net_device *netdev,
12860 struct netdev_phys_item_id *ppid)
12861{
12862 struct bnx2x *bp = netdev_priv(netdev);
12863
12864 if (!(bp->flags & HAS_PHYS_PORT_ID))
12865 return -EOPNOTSUPP;
12866
12867 ppid->id_len = sizeof(bp->phys_port_id);
12868 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12869
12870 return 0;
12871}
12872
12873static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12874 struct net_device *dev,
12875 netdev_features_t features)
12876{
12877 features = vlan_features_check(skb, features);
12878 return vxlan_features_check(skb, features);
12879}
12880
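/* Configure a single VLAN id in HW: directly via a vlan_mac ramrod on a PF,
 * or through the PF channel on a VF.
 */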
12881static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12882{
12883 int rc;
12884
12885 if (IS_PF(bp)) {
12886 unsigned long ramrod_flags = 0;
12887
12888 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12889 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
12890 add, &ramrod_flags);
12891 } else {
12892 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
12893 }
12894
12895 return rc;
12896}

int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
{
	struct bnx2x_vlan_entry *vlan;
	int rc = 0;

	if (!bp->vlan_cnt) {
		DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
		return 0;
	}

	list_for_each_entry(vlan, &bp->vlan_reg, link) {
		/* Once a failure is seen, stop touching the HW and simply
		 * mark the remaining entries as not configured there.
		 */
		if (rc) {
			vlan->hw = false;
			continue;
		}

		if (!vlan->hw)
			continue;

		DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);

		rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
		if (rc) {
			BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
			vlan->hw = false;
			rc = -EINVAL;
			continue;
		}
	}

	return rc;
}

static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_vlan_entry *vlan;
	bool hw = false;
	int rc = 0;

	if (!netif_running(bp->dev)) {
		DP(NETIF_MSG_IFUP,
		   "Ignoring VLAN configuration - the interface is down\n");
		return -EFAULT;
	}

	DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);

	vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	/* If the HW VLAN-filter credit is exhausted, fall back to accepting
	 * any VLAN; otherwise program this vid as an exact HW filter.
	 */
	bp->vlan_cnt++;
	if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
		DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
		bp->accept_any_vlan = true;
		if (IS_PF(bp))
			bnx2x_set_rx_mode_inner(bp);
		else
			bnx2x_vfpf_storm_rx_mode(bp);
	} else if (bp->vlan_cnt <= bp->vlan_credit) {
		rc = __bnx2x_vlan_configure_vid(bp, vid, true);
		hw = true;
	}

	vlan->vid = vid;
	vlan->hw = hw;

	if (!rc) {
		list_add(&vlan->link, &bp->vlan_reg);
	} else {
		bp->vlan_cnt--;
		kfree(vlan);
	}

	DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);

	return rc;
}
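
/* Informal note on the credit scheme above: bp->vlan_credit is the number
 * of exact VLAN filters available to this function. While vlan_cnt stays
 * within the credit each vid is programmed as a HW filter; the first vid
 * beyond it raises accept_any_vlan instead. E.g. with vlan_credit == 2,
 * vids 10 and 20 get HW filters, and adding vid 30 switches the function
 * to accept-any-VLAN mode.
 */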

static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_vlan_entry *vlan;
	int rc = 0;

	if (!netif_running(bp->dev)) {
		DP(NETIF_MSG_IFUP,
		   "Ignoring VLAN configuration - the interface is down\n");
		return -EFAULT;
	}

	DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);

	if (!bp->vlan_cnt) {
		BNX2X_ERR("Unable to kill VLAN %d\n", vid);
		return -EINVAL;
	}

	list_for_each_entry(vlan, &bp->vlan_reg, link)
		if (vlan->vid == vid)
			break;

	if (vlan->vid != vid) {
		BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
		return -EINVAL;
	}

	if (vlan->hw)
		rc = __bnx2x_vlan_configure_vid(bp, vid, false);

	list_del(&vlan->link);
	kfree(vlan);

	bp->vlan_cnt--;

	if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
		/* A filter slot was freed - try moving the remaining
		 * software-only entries back into HW filters before
		 * dropping accept-any-VLAN mode.
		 */
		list_for_each_entry(vlan, &bp->vlan_reg, link) {
			if (vlan->hw)
				continue;

			rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
			if (rc) {
				BNX2X_ERR("Unable to config VLAN %d\n",
					  vlan->vid);
				continue;
			}
			DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
			   vlan->vid);
			vlan->hw = true;
		}
		DP(NETIF_MSG_IFUP, "Accept all VLAN removed\n");
		bp->accept_any_vlan = false;
		if (IS_PF(bp))
			bnx2x_set_rx_mode_inner(bp);
		else
			bnx2x_vfpf_storm_rx_mode(bp);
	}

	DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);

	return rc;
}

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= bnx2x_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_fix_features	= bnx2x_fix_features,
	.ndo_set_features	= bnx2x_set_features,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
	.ndo_vlan_rx_add_vid	= bnx2x_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnx2x_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
	.ndo_setup_tc		= __bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
	.ndo_set_vf_vlan	= bnx2x_set_vf_vlan,
	.ndo_get_vf_config	= bnx2x_get_vf_config,
#endif
#ifdef NETDEV_FCOE_WWNN
	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
#endif

	.ndo_get_phys_port_id	= bnx2x_get_phys_port_id,
	.ndo_set_vf_link_state	= bnx2x_set_vf_link_state,
	.ndo_features_check	= bnx2x_features_check,
#ifdef CONFIG_BNX2X_VXLAN
	.ndo_add_vxlan_port	= bnx2x_add_vxlan_port,
	.ndo_del_vxlan_port	= bnx2x_del_vxlan_port,
#endif
#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
	.ndo_add_geneve_port	= bnx2x_add_geneve_port,
	.ndo_del_geneve_port	= bnx2x_del_geneve_port,
#endif
};

static int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
	struct device *dev = &bp->pdev->dev;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "System does not support DMA, aborting\n");
		return -EIO;
	}

	return 0;
}
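
/* Note: the 64-bit DMA mask is attempted first; only if the platform
 * rejects it does the driver fall back to 32-bit addressing, which may
 * imply bounce buffering on hosts with memory above 4 GB.
 */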

static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
{
	if (bp->flags & AER_ENABLED) {
		pci_disable_pcie_error_reporting(bp->pdev);
		bp->flags &= ~AER_ENABLED;
	}
}
13105
13106static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13107 struct net_device *dev, unsigned long board_type)
13108{
13109 int rc;
13110 u32 pci_cfg_dword;
13111 bool chip_is_e1x = (board_type == BCM57710 ||
13112 board_type == BCM57711 ||
13113 board_type == BCM57711E);
13114
13115 SET_NETDEV_DEV(dev, &pdev->dev);
13116
13117 bp->dev = dev;
13118 bp->pdev = pdev;
13119
13120 rc = pci_enable_device(pdev);
13121 if (rc) {
13122 dev_err(&bp->pdev->dev,
13123 "Cannot enable PCI device, aborting\n");
13124 goto err_out;
13125 }
13126
13127 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13128 dev_err(&bp->pdev->dev,
13129 "Cannot find PCI device base address, aborting\n");
13130 rc = -ENODEV;
13131 goto err_out_disable;
13132 }
13133
13134 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13135 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13136 rc = -ENODEV;
13137 goto err_out_disable;
13138 }
13139
13140 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13141 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13142 PCICFG_REVESION_ID_ERROR_VAL) {
13143 pr_err("PCI device error, probably due to fan failure, aborting\n");
13144 rc = -ENODEV;
13145 goto err_out_disable;
13146 }
13147
13148 if (atomic_read(&pdev->enable_cnt) == 1) {
13149 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13150 if (rc) {
13151 dev_err(&bp->pdev->dev,
13152 "Cannot obtain PCI resources, aborting\n");
13153 goto err_out_disable;
13154 }
13155
13156 pci_set_master(pdev);
13157 pci_save_state(pdev);
13158 }
13159
13160 if (IS_PF(bp)) {
13161 if (!pdev->pm_cap) {
13162 dev_err(&bp->pdev->dev,
13163 "Cannot find power management capability, aborting\n");
13164 rc = -EIO;
13165 goto err_out_release;
13166 }
13167 }
13168
13169 if (!pci_is_pcie(pdev)) {
13170 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13171 rc = -EIO;
13172 goto err_out_release;
13173 }
13174
13175 rc = bnx2x_set_coherency_mask(bp);
13176 if (rc)
13177 goto err_out_release;
13178
13179 dev->mem_start = pci_resource_start(pdev, 0);
13180 dev->base_addr = dev->mem_start;
13181 dev->mem_end = pci_resource_end(pdev, 0);
13182
13183 dev->irq = pdev->irq;
13184
13185 bp->regview = pci_ioremap_bar(pdev, 0);
13186 if (!bp->regview) {
13187 dev_err(&bp->pdev->dev,
13188 "Cannot map register space, aborting\n");
13189 rc = -ENOMEM;
13190 goto err_out_release;
13191 }

	/* In E1/E1H use the PCI function number given by the kernel.
	 * In E2/E3 read the physical function from the ME register, since
	 * these chips support Physical Device Assignment and the kernel BDF
	 * may be arbitrary (depending on the hypervisor).
	 */
	if (chip_is_e1x) {
		bp->pf_num = PCI_FUNC(pdev->devfn);
	} else {
		/* chip is E2/E3 */
		pci_read_config_dword(bp->pdev,
				      PCICFG_ME_REGISTER, &pci_cfg_dword);
		bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
				  ME_REG_ABS_PF_NUM_SHIFT);
	}
	BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);

	/* Clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	/* Set PCIe reset type to fundamental for EEH recovery */
	pdev->needs_freset = 1;

	/* AER (Advanced Error Reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (!rc)
		bp->flags |= AER_ENABLED;
	else
		BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);

	/* Clean these indirect addresses for all functions, since they may
	 * hold stale values from a previous driver instance (e.g. kdump).
	 */
	if (IS_PF(bp)) {
		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);

		if (chip_is_e1x) {
			REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
		}

		/* Enable internal target-read (in case we are probed after
		 * a PF FLR). Must be done prior to any BAR read access.
		 * Only for 57712 and up.
		 */
		if (!chip_is_e1x)
			REG_WR(bp,
			       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
	}

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(bp, dev);

	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
	if (!chip_is_e1x) {
		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
				    NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
		dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
			NETIF_F_GSO_IPIP |
			NETIF_F_GSO_SIT |
			NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
	}

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	/* A VF with an old hypervisor or an old PF does not support filtering */
	if (IS_PF(bp)) {
		if (chip_is_e1x)
			bp->accept_any_vlan = true;
		else
			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
#ifdef CONFIG_BNX2X_SRIOV
	} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
#endif
	}

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= NETIF_F_HIGHDMA;

	/* Add loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;
13291
13292#ifdef BCM_DCBNL
13293 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13294#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;
13303
13304 return 0;
13305
13306err_out_release:
13307 if (atomic_read(&pdev->enable_cnt) == 1)
13308 pci_release_regions(pdev);
13309
13310err_out_disable:
13311 pci_disable_device(pdev);
13312
13313err_out:
13314 return rc;
13315}
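
/* Note on the error labels above: the unwind mirrors setup order, and
 * pci_release_regions() runs only when this is the sole enable of the
 * device (enable_cnt == 1), matching the guard around pci_request_regions()
 * in the setup path.
 */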
13316
13317static int bnx2x_check_firmware(struct bnx2x *bp)
13318{
13319 const struct firmware *firmware = bp->firmware;
13320 struct bnx2x_fw_file_hdr *fw_hdr;
13321 struct bnx2x_fw_file_section *sections;
13322 u32 offset, len, num_ops;
13323 __be16 *ops_offsets;
13324 int i;
13325 const u8 *fw_ver;
13326
13327 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
13328 BNX2X_ERR("Wrong FW size\n");
13329 return -EINVAL;
13330 }

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the available firmware data.
	 */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			BNX2X_ERR("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (__force __be16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			BNX2X_ERR("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check the FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
13360 fw_ver = firmware->data + offset;
13361 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13362 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13363 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13364 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13365 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13366 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
13367 BCM_5710_FW_MAJOR_VERSION,
13368 BCM_5710_FW_MINOR_VERSION,
13369 BCM_5710_FW_REVISION_VERSION,
13370 BCM_5710_FW_ENGINEERING_VERSION);
13371 return -EINVAL;
13372 }
13373
13374 return 0;
13375}
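
/* Informal summary of the layout validated above: the file header is an
 * array of {offset, len} section descriptors (it is deliberately cast to
 * struct bnx2x_fw_file_section[] for the bounds check), the init_ops_offsets
 * section holds 16-bit big-endian indices into the init_ops array, and the
 * fw_version section carries four bytes: major.minor.revision.engineering.
 */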
13376
13377static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13378{
13379 const __be32 *source = (const __be32 *)_source;
13380 u32 *target = (u32 *)_target;
13381 u32 i;
13382
13383 for (i = 0; i < n/4; i++)
13384 target[i] = be32_to_cpu(source[i]);
13385}

/* Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13392{
13393 const __be32 *source = (const __be32 *)_source;
13394 struct raw_op *target = (struct raw_op *)_target;
13395 u32 i, j, tmp;
13396
13397 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13398 tmp = be32_to_cpu(source[j]);
13399 target[i].op = (tmp >> 24) & 0xff;
13400 target[i].offset = tmp & 0xffffff;
13401 target[i].raw_data = be32_to_cpu(source[j + 1]);
13402 }
13403}
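
/* Informal decode example for the format above: the big-endian word pair
 * 0x1b000040 0x00000001 yields op = 0x1b, offset = 0x000040 and
 * raw_data = 0x00000001 - the opcode lives in the top byte of the first
 * word, the 24-bit offset in its remaining bytes.
 */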

/* IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 */
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13409{
13410 const __be32 *source = (const __be32 *)_source;
13411 struct iro *target = (struct iro *)_target;
13412 u32 i, j, tmp;
13413
13414 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13415 target[i].base = be32_to_cpu(source[j]);
13416 j++;
13417 tmp = be32_to_cpu(source[j]);
13418 target[i].m1 = (tmp >> 16) & 0xffff;
13419 target[i].m2 = tmp & 0xffff;
13420 j++;
13421 tmp = be32_to_cpu(source[j]);
13422 target[i].m3 = (tmp >> 16) & 0xffff;
13423 target[i].size = tmp & 0xffff;
13424 j++;
13425 }
13426}
13427
13428static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13429{
13430 const __be16 *source = (const __be16 *)_source;
13431 u16 *target = (u16 *)_target;
13432 u32 i;
13433
13434 for (i = 0; i < n/2; i++)
13435 target[i] = be16_to_cpu(source[i]);
13436}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) \
		goto lbl; \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
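
/* Informal expansion example (no new API - just the macro above):
 *
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 *
 * becomes roughly:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 */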
13447
13448static int bnx2x_init_firmware(struct bnx2x *bp)
13449{
13450 const char *fw_file_name;
13451 struct bnx2x_fw_file_hdr *fw_hdr;
13452 int rc;
13453
13454 if (bp->firmware)
13455 return 0;
13456
13457 if (CHIP_IS_E1(bp))
13458 fw_file_name = FW_FILE_NAME_E1;
13459 else if (CHIP_IS_E1H(bp))
13460 fw_file_name = FW_FILE_NAME_E1H;
13461 else if (!CHIP_IS_E1x(bp))
13462 fw_file_name = FW_FILE_NAME_E2;
13463 else {
13464 BNX2X_ERR("Unsupported chip revision\n");
13465 return -EINVAL;
13466 }
13467 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13468
13469 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13470 if (rc) {
13471 BNX2X_ERR("Can't load firmware file %s\n",
13472 fw_file_name);
13473 goto request_firmware_exit;
13474 }
13475
13476 rc = bnx2x_check_firmware(bp);
13477 if (rc) {
13478 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13479 goto request_firmware_exit;
13480 }

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */

	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13498 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13499 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13500 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13501 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13502 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13503 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13504 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13505 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13506 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13507 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13508 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13509 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13510 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13511 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13512
13513 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13514
13515 return 0;
13516
13517iro_alloc_err:
13518 kfree(bp->init_ops_offsets);
13519init_offsets_alloc_err:
13520 kfree(bp->init_ops);
13521init_ops_alloc_err:
13522 kfree(bp->init_data);
13523request_firmware_exit:
13524 release_firmware(bp->firmware);
13525 bp->firmware = NULL;
13526
13527 return rc;
13528}
13529
13530static void bnx2x_release_firmware(struct bnx2x *bp)
13531{
13532 kfree(bp->init_ops_offsets);
13533 kfree(bp->init_ops);
13534 kfree(bp->init_data);
13535 release_firmware(bp->firmware);
13536 bp->firmware = NULL;
13537}
13538
13539static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13540 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13541 .init_hw_cmn = bnx2x_init_hw_common,
13542 .init_hw_port = bnx2x_init_hw_port,
13543 .init_hw_func = bnx2x_init_hw_func,
13544
13545 .reset_hw_cmn = bnx2x_reset_common,
13546 .reset_hw_port = bnx2x_reset_port,
13547 .reset_hw_func = bnx2x_reset_func,
13548
13549 .gunzip_init = bnx2x_gunzip_init,
13550 .gunzip_end = bnx2x_gunzip_end,
13551
13552 .init_fw = bnx2x_init_firmware,
13553 .release_fw = bnx2x_release_firmware,
13554};
13555
13556void bnx2x__init_func_obj(struct bnx2x *bp)
13557{
13558
13559 bnx2x_setup_dmae(bp);
13560
13561 bnx2x_init_func_obj(bp, &bp->func_obj,
13562 bnx2x_sp(bp, func_rdata),
13563 bnx2x_sp_mapping(bp, func_rdata),
13564 bnx2x_sp(bp, func_afex_rdata),
13565 bnx2x_sp_mapping(bp, func_afex_rdata),
13566 &bnx2x_func_sp_drv);
13567}
13568
13569
13570static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13571{
13572 int cid_count = BNX2X_L2_MAX_CID(bp);
13573
13574 if (IS_SRIOV(bp))
13575 cid_count += BNX2X_VF_CIDS;
13576
13577 if (CNIC_SUPPORT(bp))
13578 cid_count += CNIC_CID_MAX;
13579
13580 return roundup(cid_count, QM_CID_ROUND);
13581}

/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 * @pdev:	pci device
 * @cnic_cnt:	number of CNIC SBs
 */
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
{
	int index;
	u16 control = 0;

	/* If MSI-X is not supported - return the number of SBs needed to
	 * support one fast path queue: one FP queue + SB for CNIC.
	 */
	if (!pdev->msix_cap) {
		dev_info(&pdev->dev, "no msix capability found\n");
		return 1 + cnic_cnt;
	}
	dev_info(&pdev->dev, "msix capability found\n");

	/* The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: the number of
	 * all SBs without the default SB.
	 * For VFs there is no default SB, so we return (index + 1).
	 */
	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);

	index = control & PCI_MSIX_FLAGS_QSIZE;

	return index;
}
13617
13618static int set_max_cos_est(int chip_id)
13619{
13620 switch (chip_id) {
13621 case BCM57710:
13622 case BCM57711:
13623 case BCM57711E:
13624 return BNX2X_MULTI_TX_COS_E1X;
13625 case BCM57712:
13626 case BCM57712_MF:
13627 return BNX2X_MULTI_TX_COS_E2_E3A0;
13628 case BCM57800:
13629 case BCM57800_MF:
13630 case BCM57810:
13631 case BCM57810_MF:
13632 case BCM57840_4_10:
13633 case BCM57840_2_20:
13634 case BCM57840_O:
13635 case BCM57840_MFO:
13636 case BCM57840_MF:
13637 case BCM57811:
13638 case BCM57811_MF:
13639 return BNX2X_MULTI_TX_COS_E3B0;
13640 case BCM57712_VF:
13641 case BCM57800_VF:
13642 case BCM57810_VF:
13643 case BCM57840_VF:
13644 case BCM57811_VF:
13645 return 1;
13646 default:
13647 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13648 return -ENODEV;
13649 }
13650}
13651
13652static int set_is_vf(int chip_id)
13653{
13654 switch (chip_id) {
13655 case BCM57712_VF:
13656 case BCM57800_VF:
13657 case BCM57810_VF:
13658 case BCM57840_VF:
13659 case BCM57811_VF:
13660 return true;
13661 default:
13662 return false;
13663 }
13664}
13665
13666
13667#define tsgen_ctrl 0x0
13668#define tsgen_freecount 0x10
13669#define tsgen_synctime_t0 0x20
13670#define tsgen_offset_t0 0x28
13671#define tsgen_drift_t0 0x30
13672#define tsgen_synctime_t1 0x58
13673#define tsgen_offset_t1 0x60
13674#define tsgen_drift_t1 0x68
13675
13676
13677static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13678 int best_val, int best_period)
13679{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_set_timesync_params *set_timesync_params =
		&func_params.params.set_timesync;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;

	/* Function parameters */
	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
	set_timesync_params->add_sub_drift_adjust_value =
		drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
	set_timesync_params->drift_adjust_value = best_val;
	set_timesync_params->drift_adjust_period = best_period;

	return bnx2x_func_state_change(bp, &func_params);
13700}
13701
13702static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13703{
13704 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13705 int rc;
13706 int drift_dir = 1;
13707 int val, period, period1, period2, dif, dif1, dif2;
13708 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13709
13710 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13711
13712 if (!netif_running(bp->dev)) {
13713 DP(BNX2X_MSG_PTP,
13714 "PTP adjfreq called while the interface is down\n");
13715 return -EFAULT;
13716 }
13717
13718 if (ppb < 0) {
13719 ppb = -ppb;
13720 drift_dir = 0;
13721 }
13722
13723 if (ppb == 0) {
13724 best_val = 1;
13725 best_period = 0x1FFFFFF;
13726 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13727 best_val = 31;
13728 best_period = 1;
	} else {
		/* val = 0, 8, 16, 24 are skipped, as these values are not
		 * supported by the drift-adjust workaround.
		 */
		for (val = 0; val <= 31; val++) {
			if ((val & 0x7) == 0)
				continue;
			period1 = val * 1000000 / ppb;
			period2 = period1 + 1;
			if (period1 != 0)
				dif1 = ppb - (val * 1000000 / period1);
			else
				dif1 = BNX2X_MAX_PHC_DRIFT;
			if (dif1 < 0)
				dif1 = -dif1;
			dif2 = ppb - (val * 1000000 / period2);
			if (dif2 < 0)
				dif2 = -dif2;
			dif = (dif1 < dif2) ? dif1 : dif2;
			period = (dif1 < dif2) ? period1 : period2;
			if (dif < best_dif) {
				best_dif = dif;
				best_val = val;
				best_period = period;
			}
		}
	}
13756
13757 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13758 best_period);
13759 if (rc) {
13760 BNX2X_ERR("Failed to set drift\n");
13761 return -EFAULT;
13762 }
13763
13764 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13765 best_period);
13766
13767 return 0;
13768}
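
/* Informal worked example for the search above: a request of ppb = 250000
 * is satisfied exactly by val = 1, period = 4, since the achievable rate is
 * val * 1000000 / period = 250000; in general the loop picks the (val,
 * period) pair whose achievable rate is closest to the requested ppb value.
 */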
13769
13770static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13771{
13772 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13773
13774 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13775
13776 timecounter_adjtime(&bp->timecounter, delta);
13777
13778 return 0;
13779}
13780
13781static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13782{
13783 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13784 u64 ns;
13785
13786 ns = timecounter_read(&bp->timecounter);
13787
13788 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13789
13790 *ts = ns_to_timespec64(ns);
13791
13792 return 0;
13793}
13794
13795static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13796 const struct timespec64 *ts)
13797{
13798 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13799 u64 ns;
13800
13801 ns = timespec64_to_ns(ts);
13802
13803 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);

	/* Re-initialize the timecounter at the new absolute time */
	timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13807
13808 return 0;
13809}
13810
13811
13812static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13813 struct ptp_clock_request *rq, int on)
13814{
13815 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13816
13817 BNX2X_ERR("PHC ancillary features are not supported\n");
13818 return -ENOTSUPP;
13819}

static void bnx2x_register_phc(struct bnx2x *bp)
{
	/* Fill the ptp_clock_info struct and register the PTP clock */
	bp->ptp_clock_info.owner = THIS_MODULE;
	snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
	bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT;
	bp->ptp_clock_info.n_alarm = 0;
	bp->ptp_clock_info.n_ext_ts = 0;
	bp->ptp_clock_info.n_per_out = 0;
	bp->ptp_clock_info.pps = 0;
	bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
	bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
	bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
	bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
	bp->ptp_clock_info.enable = bnx2x_ptp_enable;

	bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
	if (IS_ERR(bp->ptp_clock)) {
		bp->ptp_clock = NULL;
		BNX2X_ERR("PTP clock registration failed\n");
	}
}
13843
13844static int bnx2x_init_one(struct pci_dev *pdev,
13845 const struct pci_device_id *ent)
13846{
13847 struct net_device *dev = NULL;
13848 struct bnx2x *bp;
13849 enum pcie_link_width pcie_width;
13850 enum pci_bus_speed pcie_speed;
13851 int rc, max_non_def_sbs;
13852 int rx_count, tx_count, rss_count, doorbell_size;
13853 int max_cos_est;
13854 bool is_vf;
13855 int cnic_cnt;

	/* Management FW 'remembers' living interfaces. Allow it some time
	 * to forget previously living interfaces, allowing a proper
	 * re-acquisition of resources.
	 */
	if (is_kdump_kernel()) {
		ktime_t now = ktime_get_boottime();
		ktime_t fw_ready_time = ktime_set(5, 0);

		if (ktime_before(now, fw_ready_time))
			msleep(ktime_ms_delta(fw_ready_time, now));
	}

	/* An estimated maximum supported CoS number according to the chip
	 * version. We will try to roughly estimate the maximum number of
	 * CoSes this chip may support, so that CoS-related queues are not
	 * exhausted.
	 */
	max_cos_est = set_max_cos_est(ent->driver_data);
	if (max_cos_est < 0)
		return max_cos_est;
	is_vf = set_is_vf(ent->driver_data);
	cnic_cnt = is_vf ? 0 : 1;

	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);

	/* Add another SB for the VF, as it has no default SB */
	max_non_def_sbs += is_vf ? 1 : 0;

	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
	rss_count = max_non_def_sbs - cnic_cnt;

	if (rss_count < 1)
		return -EINVAL;

	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
	rx_count = rss_count + cnic_cnt;

	/* Maximum number of netdev Tx queues:
	 * maximum TSS queues * max_cos + FCoE L2
	 */
	tx_count = rss_count * max_cos_est + cnic_cnt;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
	if (!dev)
		return -ENOMEM;
13905
13906 bp = netdev_priv(dev);
13907
13908 bp->flags = 0;
13909 if (is_vf)
13910 bp->flags |= IS_VF_FLAG;
13911
13912 bp->igu_sb_cnt = max_non_def_sbs;
13913 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
13914 bp->msg_enable = debug;
13915 bp->cnic_support = cnic_cnt;
13916 bp->cnic_probe = bnx2x_cnic_probe;
13917
13918 pci_set_drvdata(pdev, dev);
13919
13920 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
13921 if (rc < 0) {
13922 free_netdev(dev);
13923 return rc;
13924 }
13925
13926 BNX2X_DEV_INFO("This is a %s function\n",
13927 IS_PF(bp) ? "physical" : "virtual");
13928 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
13929 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
13930 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
13931 tx_count, rx_count);
13932
13933 rc = bnx2x_init_bp(bp);
13934 if (rc)
13935 goto init_one_exit;

	/* Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * L2 connections.
	 */
	if (IS_VF(bp)) {
		bp->doorbells = bnx2x_vf_doorbells(bp);
		rc = bnx2x_vf_pci_alloc(bp);
		if (rc)
			goto init_one_exit;
	} else {
		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
		if (doorbell_size > pci_resource_len(pdev, 2)) {
			dev_err(&bp->pdev->dev,
				"Cannot map doorbells, bar size too small, aborting\n");
			rc = -ENOMEM;
			goto init_one_exit;
		}
		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
						doorbell_size);
	}
13957 if (!bp->doorbells) {
13958 dev_err(&bp->pdev->dev,
13959 "Cannot map doorbell space, aborting\n");
13960 rc = -ENOMEM;
13961 goto init_one_exit;
13962 }
13963
13964 if (IS_VF(bp)) {
13965 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13966 if (rc)
13967 goto init_one_exit;
13968 }
13969
13970
13971 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
13972 if (rc)
13973 goto init_one_exit;
13974
13975
13976 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
13977 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
13978
13979
13980 if (CHIP_IS_E1x(bp))
13981 bp->flags |= NO_FCOE_FLAG;
13982
13983
13984 bnx2x_set_num_queues(bp);
13985
13986
13987
13988
13989 rc = bnx2x_set_int_mode(bp);
13990 if (rc) {
13991 dev_err(&pdev->dev, "Cannot set interrupts\n");
13992 goto init_one_exit;
13993 }
13994 BNX2X_DEV_INFO("set interrupts successfully\n");
13995
13996
13997 rc = register_netdev(dev);
13998 if (rc) {
13999 dev_err(&pdev->dev, "Cannot register net device\n");
14000 goto init_one_exit;
14001 }
14002 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
14003
14004 if (!NO_FCOE(bp)) {
14005
14006 rtnl_lock();
14007 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14008 rtnl_unlock();
14009 }
14010 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
14011 pcie_speed == PCI_SPEED_UNKNOWN ||
14012 pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
14013 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
14014 else
14015 BNX2X_DEV_INFO(
14016 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
14017 board_info[ent->driver_data].name,
14018 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
14019 pcie_width,
14020 pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
14021 pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
14022 pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
14023 "Unknown",
14024 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14025
14026 bnx2x_register_phc(bp);
14027
14028 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14029 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14030
14031 return 0;
14032
14033init_one_exit:
14034 bnx2x_disable_pcie_error_reporting(bp);
14035
14036 if (bp->regview)
14037 iounmap(bp->regview);
14038
14039 if (IS_PF(bp) && bp->doorbells)
14040 iounmap(bp->doorbells);
14041
14042 free_netdev(dev);
14043
14044 if (atomic_read(&pdev->enable_cnt) == 1)
14045 pci_release_regions(pdev);
14046
14047 pci_disable_device(pdev);
14048
14049 return rc;
14050}
14051
14052static void __bnx2x_remove(struct pci_dev *pdev,
14053 struct net_device *dev,
14054 struct bnx2x *bp,
14055 bool remove_netdev)
14056{
14057 if (bp->ptp_clock) {
14058 ptp_clock_unregister(bp->ptp_clock);
14059 bp->ptp_clock = NULL;
14060 }
14061
14062
14063 if (!NO_FCOE(bp)) {
14064 rtnl_lock();
14065 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14066 rtnl_unlock();
14067 }
14068
14069#ifdef BCM_DCBNL
14070
14071 bnx2x_dcbnl_update_applist(bp, true);
14072#endif
14073
14074 if (IS_PF(bp) &&
14075 !BP_NOMCP(bp) &&
14076 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14077 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14078
14079
14080 if (remove_netdev) {
14081 unregister_netdev(dev);
14082 } else {
14083 rtnl_lock();
14084 dev_close(dev);
14085 rtnl_unlock();
14086 }
14087
14088 bnx2x_iov_remove_one(bp);
14089
14090
14091 if (IS_PF(bp)) {
14092 bnx2x_set_power_state(bp, PCI_D0);
14093 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14094
14095
14096
14097
14098 bnx2x_reset_endianity(bp);
14099 }
14100
14101
14102 bnx2x_disable_msi(bp);
14103
14104
14105 if (IS_PF(bp))
14106 bnx2x_set_power_state(bp, PCI_D3hot);
14107
14108
14109 cancel_delayed_work_sync(&bp->sp_rtnl_task);
14110
14111
14112 if (IS_VF(bp))
14113 bnx2x_vfpf_release(bp);
14114
14115
14116 if (system_state == SYSTEM_POWER_OFF) {
14117 pci_wake_from_d3(pdev, bp->wol);
14118 pci_set_power_state(pdev, PCI_D3hot);
14119 }
14120
14121 bnx2x_disable_pcie_error_reporting(bp);
14122 if (remove_netdev) {
14123 if (bp->regview)
14124 iounmap(bp->regview);
14125
14126
14127
14128
14129 if (IS_PF(bp)) {
14130 if (bp->doorbells)
14131 iounmap(bp->doorbells);
14132
14133 bnx2x_release_firmware(bp);
14134 } else {
14135 bnx2x_vf_pci_dealloc(bp);
14136 }
14137 bnx2x_free_mem_bp(bp);
14138
14139 free_netdev(dev);
14140
14141 if (atomic_read(&pdev->enable_cnt) == 1)
14142 pci_release_regions(pdev);
14143
14144 pci_disable_device(pdev);
14145 }
14146}
14147
14148static void bnx2x_remove_one(struct pci_dev *pdev)
14149{
14150 struct net_device *dev = pci_get_drvdata(pdev);
14151 struct bnx2x *bp;
14152
14153 if (!dev) {
14154 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14155 return;
14156 }
14157 bp = netdev_priv(dev);
14158
14159 __bnx2x_remove(pdev, dev, bp, true);
14160}
14161
14162static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14163{
14164 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14165
14166 bp->rx_mode = BNX2X_RX_MODE_NONE;
14167
14168 if (CNIC_LOADED(bp))
14169 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14170
14171
14172 bnx2x_tx_disable(bp);
14173
14174 bnx2x_del_all_napi(bp);
14175 if (CNIC_LOADED(bp))
14176 bnx2x_del_all_napi_cnic(bp);
14177 netdev_reset_tc(bp->dev);
14178
14179 del_timer_sync(&bp->timer);
14180 cancel_delayed_work_sync(&bp->sp_task);
14181 cancel_delayed_work_sync(&bp->period_task);
14182
14183 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14184 bp->stats_state = STATS_STATE_DISABLED;
14185 up(&bp->stats_lock);
14186 }
14187
14188 bnx2x_save_statistics(bp);
14189
14190 netif_carrier_off(bp->dev);
14191
14192 return 0;
14193}
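
/* Note: statistics above are only marked disabled if the stats semaphore
 * can be taken within 100 ms; on timeout the state is left untouched so
 * that EEH recovery does not block behind a stuck statistics flow.
 */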

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
14205{
14206 struct net_device *dev = pci_get_drvdata(pdev);
14207 struct bnx2x *bp = netdev_priv(dev);
14208
14209 rtnl_lock();
14210
14211 BNX2X_ERR("IO error detected\n");
14212
14213 netif_device_detach(dev);
14214
14215 if (state == pci_channel_io_perm_failure) {
14216 rtnl_unlock();
14217 return PCI_ERS_RESULT_DISCONNECT;
14218 }
14219
14220 if (netif_running(dev))
14221 bnx2x_eeh_nic_unload(bp);
14222
14223 bnx2x_prev_path_mark_eeh(bp);
14224
14225 pci_disable_device(pdev);
14226
14227 rtnl_unlock();
14228
14229
14230 return PCI_ERS_RESULT_NEED_RESET;
14231}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14240{
14241 struct net_device *dev = pci_get_drvdata(pdev);
14242 struct bnx2x *bp = netdev_priv(dev);
14243 int i;
14244
14245 rtnl_lock();
14246 BNX2X_ERR("IO slot reset initializing...\n");
14247 if (pci_enable_device(pdev)) {
14248 dev_err(&pdev->dev,
14249 "Cannot re-enable PCI device after reset\n");
14250 rtnl_unlock();
14251 return PCI_ERS_RESULT_DISCONNECT;
14252 }
14253
14254 pci_set_master(pdev);
14255 pci_restore_state(pdev);
14256 pci_save_state(pdev);
14257
14258 if (netif_running(dev))
14259 bnx2x_set_power_state(bp, PCI_D0);
14260
14261 if (netif_running(dev)) {
14262 BNX2X_ERR("IO slot reset --> driver unload\n");
14263
14264
14265 bnx2x_init_shmem(bp);
14266
14267 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14268 u32 v;
14269
14270 v = SHMEM2_RD(bp,
14271 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14272 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14273 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14274 }
14275 bnx2x_drain_tx_queues(bp);
14276 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14277 bnx2x_netif_stop(bp, 1);
14278 bnx2x_free_irq(bp);
14279
14280
14281 bnx2x_send_unload_done(bp, true);
14282
14283 bp->sp_state = 0;
14284 bp->port.pmf = 0;
14285
14286 bnx2x_prev_unload(bp);
14287
14288
14289
14290
14291 bnx2x_squeeze_objects(bp);
14292 bnx2x_free_skbs(bp);
14293 for_each_rx_queue(bp, i)
14294 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14295 bnx2x_free_fp_mem(bp);
14296 bnx2x_free_mem(bp);
14297
14298 bp->state = BNX2X_STATE_CLOSED;
14299 }
14300
14301 rtnl_unlock();
14302
14303
14304 if (bp->flags & AER_ENABLED) {
14305 if (pci_cleanup_aer_uncorrect_error_status(pdev))
14306 BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
14307 else
14308 DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
14309 }
14310
14311 return PCI_ERS_RESULT_RECOVERED;
14312}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
14322{
14323 struct net_device *dev = pci_get_drvdata(pdev);
14324 struct bnx2x *bp = netdev_priv(dev);
14325
14326 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14327 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14328 return;
14329 }
14330
14331 rtnl_lock();
14332
14333 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14334 DRV_MSG_SEQ_NUMBER_MASK;
14335
14336 if (netif_running(dev))
14337 bnx2x_nic_load(bp, LOAD_NORMAL);
14338
14339 netif_device_attach(dev);
14340
14341 rtnl_unlock();
14342}
14343
14344static const struct pci_error_handlers bnx2x_err_handler = {
14345 .error_detected = bnx2x_io_error_detected,
14346 .slot_reset = bnx2x_io_slot_reset,
14347 .resume = bnx2x_io_resume,
14348};
14349
14350static void bnx2x_shutdown(struct pci_dev *pdev)
14351{
14352 struct net_device *dev = pci_get_drvdata(pdev);
14353 struct bnx2x *bp;
14354
14355 if (!dev)
14356 return;
14357
14358 bp = netdev_priv(dev);
14359 if (!bp)
14360 return;
14361
14362 rtnl_lock();
14363 netif_device_detach(dev);
14364 rtnl_unlock();

	/* Don't remove the netdevice here - there are scenarios in which a
	 * full removal at shutdown time can hang the kernel; quiescing the
	 * device is sufficient.
	 */
	__bnx2x_remove(pdev, dev, bp, false);
14371}
14372
14373static struct pci_driver bnx2x_pci_driver = {
14374 .name = DRV_MODULE_NAME,
14375 .id_table = bnx2x_pci_tbl,
14376 .probe = bnx2x_init_one,
14377 .remove = bnx2x_remove_one,
14378 .suspend = bnx2x_suspend,
14379 .resume = bnx2x_resume,
14380 .err_handler = &bnx2x_err_handler,
14381#ifdef CONFIG_BNX2X_SRIOV
14382 .sriov_configure = bnx2x_sriov_configure,
14383#endif
14384 .shutdown = bnx2x_shutdown,
14385};
14386
14387static int __init bnx2x_init(void)
14388{
14389 int ret;
14390
14391 pr_info("%s", version);
14392
14393 bnx2x_wq = create_singlethread_workqueue("bnx2x");
14394 if (bnx2x_wq == NULL) {
14395 pr_err("Cannot create workqueue\n");
14396 return -ENOMEM;
14397 }
14398 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14399 if (!bnx2x_iov_wq) {
14400 pr_err("Cannot create iov workqueue\n");
14401 destroy_workqueue(bnx2x_wq);
14402 return -ENOMEM;
14403 }
14404
14405 ret = pci_register_driver(&bnx2x_pci_driver);
14406 if (ret) {
14407 pr_err("Cannot register driver\n");
14408 destroy_workqueue(bnx2x_wq);
14409 destroy_workqueue(bnx2x_iov_wq);
14410 }
14411 return ret;
14412}
14413
14414static void __exit bnx2x_cleanup(void)
14415{
14416 struct list_head *pos, *q;
14417
14418 pci_unregister_driver(&bnx2x_pci_driver);
14419
14420 destroy_workqueue(bnx2x_wq);
14421 destroy_workqueue(bnx2x_iov_wq);

	/* Free globally allocated resources */
	list_for_each_safe(pos, q, &bnx2x_prev_list) {
14425 struct bnx2x_prev_path_list *tmp =
14426 list_entry(pos, struct bnx2x_prev_path_list, list);
14427 list_del(pos);
14428 kfree(tmp);
14429 }
14430}
14431
14432void bnx2x_notify_link_changed(struct bnx2x *bp)
14433{
14434 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14435}
14436
14437module_init(bnx2x_init);
14438module_exit(bnx2x_cleanup);

/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
{
	unsigned long ramrod_flags = 0;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
				 &bp->iscsi_l2_mac_obj, true,
				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
}
14458
14459
14460static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14461{
14462 struct eth_spe *spe;
14463 int cxt_index, cxt_offset;
14464
14465#ifdef BNX2X_STOP_ON_ERROR
14466 if (unlikely(bp->panic))
14467 return;
14468#endif
14469
14470 spin_lock_bh(&bp->spq_lock);
14471 BUG_ON(bp->cnic_spq_pending < count);
14472 bp->cnic_spq_pending -= count;
14473
14474 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14475 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14476 & SPE_HDR_CONN_TYPE) >>
14477 SPE_HDR_CONN_TYPE_SHIFT;
14478 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14479 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14480
14481
14482
14483
14484 if (type == ETH_CONNECTION_TYPE) {
14485 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14486 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14487 ILT_PAGE_CIDS;
14488 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14489 (cxt_index * ILT_PAGE_CIDS);
14490 bnx2x_set_ctx_validation(bp,
14491 &bp->context[cxt_index].
14492 vcxt[cxt_offset].eth,
14493 BNX2X_ISCSI_ETH_CID(bp));
14494 }
14495 }
14496
14497
14498
14499
14500
14501
14502
14503 if (type == ETH_CONNECTION_TYPE) {
14504 if (!atomic_read(&bp->cq_spq_left))
14505 break;
14506 else
14507 atomic_dec(&bp->cq_spq_left);
14508 } else if (type == NONE_CONNECTION_TYPE) {
14509 if (!atomic_read(&bp->eq_spq_left))
14510 break;
14511 else
14512 atomic_dec(&bp->eq_spq_left);
14513 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14514 (type == FCOE_CONNECTION_TYPE)) {
14515 if (bp->cnic_spq_pending >=
14516 bp->cnic_eth_dev.max_kwqe_pending)
14517 break;
14518 else
14519 bp->cnic_spq_pending++;
14520 } else {
14521 BNX2X_ERR("Unknown SPE type: %d\n", type);
14522 bnx2x_panic();
14523 break;
14524 }
14525
14526 spe = bnx2x_sp_get_next(bp);
14527 *spe = *bp->cnic_kwq_cons;
14528
14529 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14530 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14531
14532 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14533 bp->cnic_kwq_cons = bp->cnic_kwq;
14534 else
14535 bp->cnic_kwq_cons++;
14536 }
14537 bnx2x_sp_prod_update(bp);
14538 spin_unlock_bh(&bp->spq_lock);
14539}
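
/* Informal note on the flow above: CNIC KWQEs are staged in the cnic_kwq
 * ring and drained into the slow-path queue only while the relevant credit
 * allows it - cq_spq_left for ETH ramrods, eq_spq_left for NONE-type ones,
 * and max_kwqe_pending for iSCSI/FCoE - after which bnx2x_sp_prod_update()
 * publishes the new producer to the chip.
 */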
14540
14541static int bnx2x_cnic_sp_queue(struct net_device *dev,
14542 struct kwqe_16 *kwqes[], u32 count)
14543{
14544 struct bnx2x *bp = netdev_priv(dev);
14545 int i;
14546
14547#ifdef BNX2X_STOP_ON_ERROR
14548 if (unlikely(bp->panic)) {
14549 BNX2X_ERR("Can't post to SP queue while panic\n");
14550 return -EIO;
14551 }
14552#endif
14553
14554 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14555 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14556 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14557 return -EAGAIN;
14558 }
14559
14560 spin_lock_bh(&bp->spq_lock);
14561
14562 for (i = 0; i < count; i++) {
14563 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14564
14565 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14566 break;
14567
14568 *bp->cnic_kwq_prod = *spe;
14569
14570 bp->cnic_kwq_pending++;
14571
14572 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14573 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14574 spe->data.update_data_addr.hi,
14575 spe->data.update_data_addr.lo,
14576 bp->cnic_kwq_pending);
14577
14578 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14579 bp->cnic_kwq_prod = bp->cnic_kwq;
14580 else
14581 bp->cnic_kwq_prod++;
14582 }
14583
14584 spin_unlock_bh(&bp->spq_lock);
14585
14586 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14587 bnx2x_cnic_sp_post(bp, 0);
14588
14589 return i;
14590}
14591
14592static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14593{
14594 struct cnic_ops *c_ops;
14595 int rc = 0;
14596
14597 mutex_lock(&bp->cnic_mutex);
14598 c_ops = rcu_dereference_protected(bp->cnic_ops,
14599 lockdep_is_held(&bp->cnic_mutex));
14600 if (c_ops)
14601 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14602 mutex_unlock(&bp->cnic_mutex);
14603
14604 return rc;
14605}
14606
14607static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14608{
14609 struct cnic_ops *c_ops;
14610 int rc = 0;
14611
14612 rcu_read_lock();
14613 c_ops = rcu_dereference(bp->cnic_ops);
14614 if (c_ops)
14615 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14616 rcu_read_unlock();
14617
14618 return rc;
14619}
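
/* Note: the two senders above differ only in synchronization -
 * bnx2x_cnic_ctl_send() serializes against (un)registration with
 * cnic_mutex and may sleep, while the _bh variant relies on
 * rcu_read_lock() and is therefore usable from bottom-half context.
 */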
14620
14621
14622
14623
14624int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14625{
14626 struct cnic_ctl_info ctl = {0};
14627
14628 ctl.cmd = cmd;
14629
14630 return bnx2x_cnic_ctl_send(bp, &ctl);
14631}
14632
14633static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14634{
14635 struct cnic_ctl_info ctl = {0};
14636
14637
14638 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14639 ctl.data.comp.cid = cid;
14640 ctl.data.comp.error = err;
14641
14642 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14643 bnx2x_cnic_sp_post(bp, 0);
14644}
14645
14646
14647
14648
14649
14650
14651static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14652{
14653 unsigned long accept_flags = 0, ramrod_flags = 0;
14654 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14655 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14656
14657 if (start) {
14658
14659
14660
14661
14662
14663
14664 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14665 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14666 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14667 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14668
14669
14670 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14671
14672 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14673 } else
14674
14675 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14676
14677 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14678 set_bit(sched_state, &bp->sp_state);
14679 else {
14680 __set_bit(RAMROD_RX, &ramrod_flags);
14681 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14682 ramrod_flags);
14683 }
14684}
14685
14686static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14687{
14688 struct bnx2x *bp = netdev_priv(dev);
14689 int rc = 0;
14690
14691 switch (ctl->cmd) {
14692 case DRV_CTL_CTXTBL_WR_CMD: {
14693 u32 index = ctl->data.io.offset;
14694 dma_addr_t addr = ctl->data.io.dma_addr;
14695
14696 bnx2x_ilt_wr(bp, index, addr);
14697 break;
14698 }
14699
14700 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14701 int count = ctl->data.credit.credit_count;
14702
14703 bnx2x_cnic_sp_post(bp, count);
14704 break;
14705 }
14706
14707
14708 case DRV_CTL_START_L2_CMD: {
14709 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14710 unsigned long sp_bits = 0;
14711
14712
14713 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14714 cp->iscsi_l2_client_id,
14715 cp->iscsi_l2_cid, BP_FUNC(bp),
14716 bnx2x_sp(bp, mac_rdata),
14717 bnx2x_sp_mapping(bp, mac_rdata),
14718 BNX2X_FILTER_MAC_PENDING,
14719 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14720 &bp->macs_pool);
14721
14722
14723 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14724 if (rc)
14725 break;
14726
14727 mmiowb();
14728 barrier();
14729
14730
14731
14732 netif_addr_lock_bh(dev);
14733 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14734 netif_addr_unlock_bh(dev);
14735
14736
14737 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14738 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14739
14740 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14741 BNX2X_ERR("rx_mode completion timed out!\n");
14742
14743 break;
14744 }
14745
14746
14747 case DRV_CTL_STOP_L2_CMD: {
14748 unsigned long sp_bits = 0;
14749
14750
14751 netif_addr_lock_bh(dev);
14752 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14753 netif_addr_unlock_bh(dev);
14754
14755
14756 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14757 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14758
14759 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14760 BNX2X_ERR("rx_mode completion timed out!\n");
14761
14762 mmiowb();
14763 barrier();
14764
14765
14766 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14767 BNX2X_ISCSI_ETH_MAC, true);
14768 break;
14769 }
14770 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14771 int count = ctl->data.credit.credit_count;
14772
14773 smp_mb__before_atomic();
14774 atomic_add(count, &bp->cq_spq_left);
14775 smp_mb__after_atomic();
14776 break;
14777 }
14778 case DRV_CTL_ULP_REGISTER_CMD: {
14779 int ulp_type = ctl->data.register_data.ulp_type;
14780
14781 if (CHIP_IS_E3(bp)) {
14782 int idx = BP_FW_MB_IDX(bp);
14783 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14784 int path = BP_PATH(bp);
14785 int port = BP_PORT(bp);
14786 int i;
14787 u32 scratch_offset;
14788 u32 *host_addr;
14789
14790
14791 if (ulp_type == CNIC_ULP_ISCSI)
14792 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14793 else if (ulp_type == CNIC_ULP_FCOE)
14794 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14795 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14796
14797 if ((ulp_type != CNIC_ULP_FCOE) ||
14798 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14799 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14800 break;
14801
14802
14803 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14804 if (!scratch_offset)
14805 break;
14806 scratch_offset += offsetof(struct glob_ncsi_oem_data,
14807 fcoe_features[path][port]);
14808 host_addr = (u32 *) &(ctl->data.register_data.
14809 fcoe_features);
14810 for (i = 0; i < sizeof(struct fcoe_capabilities);
14811 i += 4)
14812 REG_WR(bp, scratch_offset + i,
14813 *(host_addr + i/4));
14814 }
14815 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14816 break;
14817 }
14818
14819 case DRV_CTL_ULP_UNREGISTER_CMD: {
14820 int ulp_type = ctl->data.ulp_type;
14821
14822 if (CHIP_IS_E3(bp)) {
14823 int idx = BP_FW_MB_IDX(bp);
14824 u32 cap;
14825
14826 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14827 if (ulp_type == CNIC_ULP_ISCSI)
14828 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14829 else if (ulp_type == CNIC_ULP_FCOE)
14830 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14831 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14832 }
14833 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14834 break;
14835 }
14836
14837 default:
14838 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14839 rc = -EINVAL;
14840 }
14841
14842
14843 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14844 switch (ctl->drv_state) {
14845 case DRV_NOP:
14846 break;
14847 case DRV_ACTIVE:
14848 bnx2x_set_os_driver_state(bp,
14849 OS_DRIVER_STATE_ACTIVE);
14850 break;
14851 case DRV_INACTIVE:
14852 bnx2x_set_os_driver_state(bp,
14853 OS_DRIVER_STATE_DISABLED);
14854 break;
14855 case DRV_UNLOADED:
14856 bnx2x_set_os_driver_state(bp,
14857 OS_DRIVER_STATE_NOT_LOADED);
14858 break;
14859 default:
14860 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14861 }
14862 }
14863
14864 return rc;
14865}
14866
14867static int bnx2x_get_fc_npiv(struct net_device *dev,
14868 struct cnic_fc_npiv_tbl *cnic_tbl)
14869{
14870 struct bnx2x *bp = netdev_priv(dev);
14871 struct bdn_fc_npiv_tbl *tbl = NULL;
14872 u32 offset, entries;
14873 int rc = -EINVAL;
14874 int i;
14875
14876 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14877 goto out;
14878
14879 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14880
14881 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14882 if (!tbl) {
14883 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14884 goto out;
14885 }
14886
14887 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14888 if (!offset) {
14889 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14890 goto out;
14891 }
14892 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
14893
14894
14895 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
14896 BNX2X_ERR("Failed to read FC-NPIV table\n");
14897 goto out;
14898 }
14899
14900
14901
14902
14903 entries = tbl->fc_npiv_cfg.num_of_npiv;
14904 entries = (__force u32)be32_to_cpu((__force __be32)entries);
14905 tbl->fc_npiv_cfg.num_of_npiv = entries;
14906
14907 if (!tbl->fc_npiv_cfg.num_of_npiv) {
14908 DP(BNX2X_MSG_MCP,
14909 "No FC-NPIV table [valid, simply not present]\n");
14910 goto out;
14911 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
14912 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
14913 tbl->fc_npiv_cfg.num_of_npiv);
14914 goto out;
14915 } else {
14916 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
14917 tbl->fc_npiv_cfg.num_of_npiv);
14918 }
14919
14920
14921 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
14922 for (i = 0; i < cnic_tbl->count; i++) {
14923 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
14924 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
14925 }
14926
14927 rc = 0;
14928out:
14929 kfree(tbl);
14930 return rc;
14931}
14932
14933void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
14934{
14935 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14936
14937 if (bp->flags & USING_MSIX_FLAG) {
14938 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
14939 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
14940 cp->irq_arr[0].vector = bp->msix_table[1].vector;
14941 } else {
14942 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
14943 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
14944 }
14945 if (!CHIP_IS_E1x(bp))
14946 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
14947 else
14948 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
14949
14950 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
14951 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
14952 cp->irq_arr[1].status_blk = bp->def_status_blk;
14953 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
14954 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
14955
14956 cp->num_irq = 2;
14957}
14958
14959void bnx2x_setup_cnic_info(struct bnx2x *bp)
14960{
14961 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14962
14963 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14964 bnx2x_cid_ilt_lines(bp);
14965 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14966 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14967 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14968
14969 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
14970 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
14971 cp->iscsi_l2_cid);
14972
14973 if (NO_ISCSI_OOO(bp))
14974 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14975}
14976
14977static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
14978 void *data)
14979{
14980 struct bnx2x *bp = netdev_priv(dev);
14981 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14982 int rc;
14983
14984 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
14985
14986 if (ops == NULL) {
14987 BNX2X_ERR("NULL ops received\n");
14988 return -EINVAL;
14989 }
14990
14991 if (!CNIC_SUPPORT(bp)) {
14992 BNX2X_ERR("Can't register CNIC when not supported\n");
14993 return -EOPNOTSUPP;
14994 }
14995
14996 if (!CNIC_LOADED(bp)) {
14997 rc = bnx2x_load_cnic(bp);
14998 if (rc) {
14999 BNX2X_ERR("CNIC-related load failed\n");
15000 return rc;
15001 }
15002 }
15003
15004 bp->cnic_enabled = true;
15005
15006 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
15007 if (!bp->cnic_kwq)
15008 return -ENOMEM;
15009
15010 bp->cnic_kwq_cons = bp->cnic_kwq;
15011 bp->cnic_kwq_prod = bp->cnic_kwq;
15012 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
15013
15014 bp->cnic_spq_pending = 0;
15015 bp->cnic_kwq_pending = 0;
15016
15017 bp->cnic_data = data;
15018
15019 cp->num_irq = 0;
15020 cp->drv_state |= CNIC_DRV_STATE_REGD;
15021 cp->iro_arr = bp->iro_arr;
15022
15023 bnx2x_setup_cnic_irq_info(bp);
15024
15025 rcu_assign_pointer(bp->cnic_ops, ops);
15026
15027
15028 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
15029
15030 return 0;
15031}
15032
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	bp->cnic_enabled = false;
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

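	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate CNIC that it should not try to work
	 * with this device.
	 */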
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_client_id =
		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	BNX2X_DEV_INFO(
		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
		cp->ctx_blk_size,
		cp->ctx_tbl_offset,
		cp->ctx_tbl_len,
		cp->starting_cid);
	return cp;
}

static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	u32 offset = BAR_USTRORM_INTMEM;

	if (IS_VF(bp))
		return bnx2x_vf_ustorm_prods_offset(bp, fp);
	else if (!CHIP_IS_E1x(bp))
		offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

	return offset;
}

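/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */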
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
		return -1;

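	/* get my own pretend register */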
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}

static void bnx2x_ptp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
	int port = BP_PORT(bp);
	u32 val_seq;
	u64 timestamp, ns;
	struct skb_shared_hwtstamps shhwtstamps;

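	/* Read Tx timestamp registers */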
	val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
			 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
	if (val_seq & 0x10000) {
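		/* There is a valid timestamp value */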
		timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
				   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
		timestamp <<= 32;
		timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
				    NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
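		/* Reset timestamp register to allow new timestamp */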
		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
		       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
		ns = timecounter_cyc2time(&bp->timecounter, timestamp);

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
		dev_kfree_skb_any(bp->ptp_tx_skb);
		bp->ptp_tx_skb = NULL;

		DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
	} else {
		DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
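		/* Reschedule to keep checking for a valid timestamp value */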
		schedule_work(&bp->ptp_task);
	}
}

void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
{
	int port = BP_PORT(bp);
	u64 timestamp, ns;

	timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
			   NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
	timestamp <<= 32;
	timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
			    NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);

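	/* Reset timestamp register to allow new timestamp */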
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);

	ns = timecounter_cyc2time(&bp->timecounter, timestamp);

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);

	DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
	   timestamp, ns);
}

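/* Read the PHC */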
static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
{
	struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
	int port = BP_PORT(bp);
	u32 wb_data[2];
	u64 phc_cycles;

	REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
		    NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
	phc_cycles = wb_data[1];
	phc_cycles = (phc_cycles << 32) + wb_data[0];

	DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

static void bnx2x_init_cyclecounter(struct bnx2x *bp)
{
	memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
	bp->cyclecounter.read = bnx2x_cyclecounter_read;
	bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
	bp->cyclecounter.shift = 1;
	bp->cyclecounter.mult = 1;
}

static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_set_timesync_params *set_timesync_params =
		&func_params.params.set_timesync;

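	/* Prepare parameters for function state transitions */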
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;

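	/* Function parameters */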
	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;

	return bnx2x_func_state_change(bp, &func_params);
}

static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
{
	struct bnx2x_queue_state_params q_params;
	int rc, i;

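	/* Prepare a queue-update ramrod that enables PTP packets */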
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
		  &q_params.params.update.update_flags);
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
		  &q_params.params.update.update_flags);

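	/* Send the update ramrod on each ETH queue of the function */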
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

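		/* Set the appropriate Queue object */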
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

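		/* Update the Queue state */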
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to enable PTP packets\n");
			return rc;
		}
	}

	return 0;
}

int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int rc;

	if (!bp->hwtstamp_ioctl_called)
		return 0;

	switch (bp->tx_type) {
	case HWTSTAMP_TX_ON:
		bp->flags |= TX_TIMESTAMPING_EN;
		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
		       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
		       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		BNX2X_ERR("One-step timestamping is not supported\n");
		return -ERANGE;
	}

	switch (bp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
		bp->rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
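		/* Initialize PTP detection for UDP/IPv4 events */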
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
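		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */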
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
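		/* Initialize PTP detection for L2 events */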
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
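		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */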
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
		break;
	}

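	/* Indicate to FW that this queue contains PTP packets */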
	rc = bnx2x_enable_ptp_packets(bp);
	if (rc)
		return rc;

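	/* Configure HW to redirect PTP packets to the host */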
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);

	return 0;
}

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int rc;

	DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
	   config.tx_type, config.rx_filter);

	if (config.flags) {
		BNX2X_ERR("config.flags is reserved for future use\n");
		return -EINVAL;
	}

	bp->hwtstamp_ioctl_called = 1;
	bp->tx_type = config.tx_type;
	bp->rx_filter = config.rx_filter;

	rc = bnx2x_configure_ptp_filters(bp);
	if (rc)
		return rc;

	config.rx_filter = bp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

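/* Configures HW for PTP */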
static int bnx2x_configure_ptp(struct bnx2x *bp)
{
	int rc, port = BP_PORT(bp);
	u32 wb_data[2];

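	/* Reset PTP event detection rules - will be configured in the IOCTL */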
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);

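	/* Disable PTP packets to host - will be configured in the IOCTL */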
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);

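	/* Enable the PTP feature */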
	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
	       NIG_REG_P0_PTP_EN, 0x3F);

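	/* Enable the free-running counter */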
	wb_data[0] = 0;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);

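	/* Reset drift register (offset register is not reset) */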
	rc = bnx2x_send_reset_timesync_ramrod(bp);
	if (rc) {
		BNX2X_ERR("Failed to reset PHC drift register\n");
		return -EFAULT;
	}

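	/* Reset possibly old timestamps */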
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
	       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);

	return 0;
}

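/* Called during load, to initialize PTP-related stuff */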
void bnx2x_init_ptp(struct bnx2x *bp)
{
	int rc;

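	/* Configure PTP in HW */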
	rc = bnx2x_configure_ptp(bp);
	if (rc) {
		BNX2X_ERR("Stopping PTP initialization\n");
		return;
	}

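	/* Init work queue for Tx timestamping */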
	INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);

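	/* Init cyclecounter and timecounter. This is done only in the first
	 * load. If done in every load, PTP application will fail when doing
	 * unload / load (e.g. MTU change) while it is running.
	 */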
	if (!bp->timecounter_init_done) {
		bnx2x_init_cyclecounter(bp);
		timecounter_init(&bp->timecounter, &bp->cyclecounter,
				 ktime_to_ns(ktime_get_real()));
		bp->timecounter_init_done = 1;
	}

	DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
}
