/* bnx2x_main.c: QLogic Everest network driver.
 * Written by: Eliezer Tamir (see MODULE_AUTHOR below).
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/kernel.h>
25#include <linux/device.h>
26#include <linux/timer.h>
27#include <linux/errno.h>
28#include <linux/ioport.h>
29#include <linux/slab.h>
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/aer.h>
33#include <linux/init.h>
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/skbuff.h>
37#include <linux/dma-mapping.h>
38#include <linux/bitops.h>
39#include <linux/irq.h>
40#include <linux/delay.h>
41#include <asm/byteorder.h>
42#include <linux/time.h>
43#include <linux/ethtool.h>
44#include <linux/mii.h>
45#include <linux/if_vlan.h>
46#include <linux/crash_dump.h>
47#include <net/ip.h>
48#include <net/ipv6.h>
49#include <net/tcp.h>
50#include <net/vxlan.h>
51#include <net/checksum.h>
52#include <net/ip6_checksum.h>
53#include <linux/workqueue.h>
54#include <linux/crc32.h>
55#include <linux/crc32c.h>
56#include <linux/prefetch.h>
57#include <linux/zlib.h>
58#include <linux/io.h>
59#include <linux/semaphore.h>
60#include <linux/stringify.h>
61#include <linux/vmalloc.h>
62#include "bnx2x.h"
63#include "bnx2x_init.h"
64#include "bnx2x_init_ops.h"
65#include "bnx2x_cmn.h"
66#include "bnx2x_vfpf.h"
67#include "bnx2x_dcb.h"
68#include "bnx2x_sp.h"
69#include <linux/firmware.h>
70#include "bnx2x_fw_file_hdr.h"
71
72#define FW_FILE_VERSION \
73 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
74 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
75 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
76 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
77#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
78#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
79#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
80
81
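/* Time in jiffies before concluding the transmitter is hung */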
82#define TX_TIMEOUT (5*HZ)
83
84static char version[] =
85 "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
86 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
87
88MODULE_AUTHOR("Eliezer Tamir");
89MODULE_DESCRIPTION("QLogic "
90 "BCM57710/57711/57711E/"
91 "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
92 "57840/57840_MF Driver");
93MODULE_LICENSE("GPL");
94MODULE_VERSION(DRV_MODULE_VERSION);
95MODULE_FIRMWARE(FW_FILE_NAME_E1);
96MODULE_FIRMWARE(FW_FILE_NAME_E1H);
97MODULE_FIRMWARE(FW_FILE_NAME_E2);
98
99int bnx2x_num_queues;
100module_param_named(num_queues, bnx2x_num_queues, int, 0444);
101MODULE_PARM_DESC(num_queues,
102 " Set number of queues (default is as a number of CPUs)");
103
104static int disable_tpa;
105module_param(disable_tpa, int, 0444);
106MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
107
108static int int_mode;
109module_param(int_mode, int, 0444);
110MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
111 "(1 INT#x; 2 MSI)");
112
113static int dropless_fc;
114module_param(dropless_fc, int, 0444);
115MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
116
117static int mrrs = -1;
118module_param(mrrs, int, 0444);
119MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
120
121static int debug;
122module_param(debug, int, 0444);
123MODULE_PARM_DESC(debug, " Default debug msglevel");
124
125static struct workqueue_struct *bnx2x_wq;
126struct workqueue_struct *bnx2x_iov_wq;
127
128struct bnx2x_mac_vals {
129 u32 xmac_addr;
130 u32 xmac_val;
131 u32 emac_addr;
132 u32 emac_val;
133 u32 umac_addr[2];
134 u32 umac_val[2];
135 u32 bmac_addr;
136 u32 bmac_val[2];
137};
138
139enum bnx2x_board_type {
140 BCM57710 = 0,
141 BCM57711,
142 BCM57711E,
143 BCM57712,
144 BCM57712_MF,
145 BCM57712_VF,
146 BCM57800,
147 BCM57800_MF,
148 BCM57800_VF,
149 BCM57810,
150 BCM57810_MF,
151 BCM57810_VF,
152 BCM57840_4_10,
153 BCM57840_2_20,
154 BCM57840_MF,
155 BCM57840_VF,
156 BCM57811,
157 BCM57811_MF,
158 BCM57840_O,
159 BCM57840_MFO,
160 BCM57811_VF
161};
162
163
164static struct {
165 char *name;
166} board_info[] = {
167 [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
168 [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
169 [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
170 [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
171 [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
172 [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
173 [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
174 [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
175 [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
176 [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
177 [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
178 [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
179 [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
180 [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
181 [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
182 [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
183 [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
184 [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
185 [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
186 [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF] = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
188};
189
190#ifndef PCI_DEVICE_ID_NX2_57710
191#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
192#endif
193#ifndef PCI_DEVICE_ID_NX2_57711
194#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
195#endif
196#ifndef PCI_DEVICE_ID_NX2_57711E
197#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
198#endif
199#ifndef PCI_DEVICE_ID_NX2_57712
200#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
201#endif
202#ifndef PCI_DEVICE_ID_NX2_57712_MF
203#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
204#endif
205#ifndef PCI_DEVICE_ID_NX2_57712_VF
206#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
207#endif
208#ifndef PCI_DEVICE_ID_NX2_57800
209#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
210#endif
211#ifndef PCI_DEVICE_ID_NX2_57800_MF
212#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
213#endif
214#ifndef PCI_DEVICE_ID_NX2_57800_VF
215#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
216#endif
217#ifndef PCI_DEVICE_ID_NX2_57810
218#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
219#endif
220#ifndef PCI_DEVICE_ID_NX2_57810_MF
221#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
222#endif
223#ifndef PCI_DEVICE_ID_NX2_57840_O
224#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
225#endif
226#ifndef PCI_DEVICE_ID_NX2_57810_VF
227#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
228#endif
229#ifndef PCI_DEVICE_ID_NX2_57840_4_10
230#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
231#endif
232#ifndef PCI_DEVICE_ID_NX2_57840_2_20
233#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
234#endif
235#ifndef PCI_DEVICE_ID_NX2_57840_MFO
236#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
237#endif
238#ifndef PCI_DEVICE_ID_NX2_57840_MF
239#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
240#endif
241#ifndef PCI_DEVICE_ID_NX2_57840_VF
242#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
243#endif
244#ifndef PCI_DEVICE_ID_NX2_57811
245#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
246#endif
247#ifndef PCI_DEVICE_ID_NX2_57811_MF
248#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
249#endif
250#ifndef PCI_DEVICE_ID_NX2_57811_VF
251#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
252#endif
253
254static const struct pci_device_id bnx2x_pci_tbl[] = {
255 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
256 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
257 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
258 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
259 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
260 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
261 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
262 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
263 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
264 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
265 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
266 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
267 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
268 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
269 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
270 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
271 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
272 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
273 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
274 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
275 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
276 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
277 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
278 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
279 { 0 }
280};
281
282MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
283
284
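/* Resources for recovering a device left behind by a previous driver load
 * (un-clean unload handling).
 */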
285#define BNX2X_PREV_WAIT_NEEDED 1
286static DEFINE_SEMAPHORE(bnx2x_prev_sem);
287static LIST_HEAD(bnx2x_prev_list);
288
289
290static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
291static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
292static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
293
294
295
296
297
298static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
299
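/* Write a 64-bit DMA address into storm internal memory as two 32-bit words */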
300static void __storm_memset_dma_mapping(struct bnx2x *bp,
301 u32 addr, dma_addr_t mapping)
302{
303 REG_WR(bp, addr, U64_LO(mapping));
304 REG_WR(bp, addr + 4, U64_HI(mapping));
305}
306
307static void storm_memset_spq_addr(struct bnx2x *bp,
308 dma_addr_t mapping, u16 abs_fid)
309{
310 u32 addr = XSEM_REG_FAST_MEMORY +
311 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
312
313 __storm_memset_dma_mapping(bp, addr, mapping);
314}
315
316static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
317 u16 pf_id)
318{
319 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
320 pf_id);
321 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
322 pf_id);
323 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
324 pf_id);
325 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
326 pf_id);
327}
328
329static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
330 u8 enable)
331{
332 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
333 enable);
334 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
335 enable);
336 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
337 enable);
338 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
339 enable);
340}
341
342static void storm_memset_eq_data(struct bnx2x *bp,
343 struct event_ring_data *eq_data,
344 u16 pfid)
345{
346 size_t size = sizeof(struct event_ring_data);
347
348 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
349
350 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
351}
352
353static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
354 u16 pfid)
355{
356 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
357 REG_WR16(bp, addr, eq_prod);
358}
359
360
361
362
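/* Indirect register access through PCI config space (GRC window).
 * Used only at init time; locking is provided by the MCP.
 */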
363static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
364{
365 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
366 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
367 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
368 PCICFG_VENDOR_ID_OFFSET);
369}
370
371static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
372{
373 u32 val;
374
375 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
376 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
377 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
378 PCICFG_VENDOR_ID_OFFSET);
379
380 return val;
381}
382
383#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
384#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
385#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
386#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
387#define DMAE_DP_DST_NONE "dst_addr [none]"
388
389static void bnx2x_dp_dmae(struct bnx2x *bp,
390 struct dmae_command *dmae, int msglvl)
391{
392 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
393 int i;
394
395 switch (dmae->opcode & DMAE_COMMAND_DST) {
396 case DMAE_CMD_DST_PCI:
397 if (src_type == DMAE_CMD_SRC_PCI)
398 DP(msglvl, "DMAE: opcode 0x%08x\n"
399 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
400 "comp_addr [%x:%08x], comp_val 0x%08x\n",
401 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
402 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
403 dmae->comp_addr_hi, dmae->comp_addr_lo,
404 dmae->comp_val);
405 else
406 DP(msglvl, "DMAE: opcode 0x%08x\n"
407 "src [%08x], len [%d*4], dst [%x:%08x]\n"
408 "comp_addr [%x:%08x], comp_val 0x%08x\n",
409 dmae->opcode, dmae->src_addr_lo >> 2,
410 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
411 dmae->comp_addr_hi, dmae->comp_addr_lo,
412 dmae->comp_val);
413 break;
414 case DMAE_CMD_DST_GRC:
415 if (src_type == DMAE_CMD_SRC_PCI)
416 DP(msglvl, "DMAE: opcode 0x%08x\n"
417 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
418 "comp_addr [%x:%08x], comp_val 0x%08x\n",
419 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
420 dmae->len, dmae->dst_addr_lo >> 2,
421 dmae->comp_addr_hi, dmae->comp_addr_lo,
422 dmae->comp_val);
423 else
424 DP(msglvl, "DMAE: opcode 0x%08x\n"
425 "src [%08x], len [%d*4], dst [%08x]\n"
426 "comp_addr [%x:%08x], comp_val 0x%08x\n",
427 dmae->opcode, dmae->src_addr_lo >> 2,
428 dmae->len, dmae->dst_addr_lo >> 2,
429 dmae->comp_addr_hi, dmae->comp_addr_lo,
430 dmae->comp_val);
431 break;
432 default:
433 if (src_type == DMAE_CMD_SRC_PCI)
434 DP(msglvl, "DMAE: opcode 0x%08x\n"
435 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
436 "comp_addr [%x:%08x] comp_val 0x%08x\n",
437 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
438 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
439 dmae->comp_val);
440 else
441 DP(msglvl, "DMAE: opcode 0x%08x\n"
442 "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
443 "comp_addr [%x:%08x] comp_val 0x%08x\n",
444 dmae->opcode, dmae->src_addr_lo >> 2,
445 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
446 dmae->comp_val);
447 break;
448 }
449
450 for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
451 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
452 i, *(((u32 *)dmae) + i));
453}
454
455
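/* Copy the command into the DMAE command memory and kick the 'go' register */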
456void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
457{
458 u32 cmd_offset;
459 int i;
460
461 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
462 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
463 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
464 }
465 REG_WR(bp, dmae_reg_go_c[idx], 1);
466}
467
468u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
469{
470 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
471 DMAE_CMD_C_ENABLE);
472}
473
474u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
475{
476 return opcode & ~DMAE_CMD_SRC_RESET;
477}
478
479u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
480 bool with_comp, u8 comp_type)
481{
482 u32 opcode = 0;
483
484 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
485 (dst_type << DMAE_COMMAND_DST_SHIFT));
486
487 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
488
489 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
490 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
491 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
492 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
493
494#ifdef __BIG_ENDIAN
495 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
496#else
497 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
498#endif
499 if (with_comp)
500 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
501 return opcode;
502}
503
504void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
505 struct dmae_command *dmae,
506 u8 src_type, u8 dst_type)
507{
508 memset(dmae, 0, sizeof(struct dmae_command));
509
510
511 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
512 true, DMAE_COMP_PCI);
513
514
515 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
516 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
517 dmae->comp_val = DMAE_COMP_VAL;
518}
519
520
521int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
522 u32 *comp)
523{
524 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
525 int rc = 0;
526
527 bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
528
529
530
531
532
533
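	/* Lock the DMAE channel. Disable BHs to prevent a dead-lock, as this
	 * code is called both from syscall context and from the
	 * ndo_set_rx_mode() flow, which may run in BH context.
	 */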
534 spin_lock_bh(&bp->dmae_lock);
535
536
537 *comp = 0;
538
539
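	/* Post the command on the channel and poll for its completion below */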
540 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
541
542
543 udelay(5);
544 while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
545
546 if (!cnt ||
547 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
548 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
549 BNX2X_ERR("DMAE timeout!\n");
550 rc = DMAE_TIMEOUT;
551 goto unlock;
552 }
553 cnt--;
554 udelay(50);
555 }
556 if (*comp & DMAE_PCI_ERR_FLAG) {
557 BNX2X_ERR("DMAE PCI error!\n");
558 rc = DMAE_PCI_ERROR;
559 }
560
561unlock:
562
563 spin_unlock_bh(&bp->dmae_lock);
564
565 return rc;
566}
567
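/* DMA a buffer from host memory to device GRC space; falls back to indirect
 * or string register writes while DMAE is not yet ready.
 */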
568void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
569 u32 len32)
570{
571 int rc;
572 struct dmae_command dmae;
573
574 if (!bp->dmae_ready) {
575 u32 *data = bnx2x_sp(bp, wb_data[0]);
576
577 if (CHIP_IS_E1(bp))
578 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
579 else
580 bnx2x_init_str_wr(bp, dst_addr, data, len32);
581 return;
582 }
583
584
585 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
586
587
588 dmae.src_addr_lo = U64_LO(dma_addr);
589 dmae.src_addr_hi = U64_HI(dma_addr);
590 dmae.dst_addr_lo = dst_addr >> 2;
591 dmae.dst_addr_hi = 0;
592 dmae.len = len32;
593
594
595 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
596 if (rc) {
597 BNX2X_ERR("DMAE returned failure %d\n", rc);
598#ifdef BNX2X_STOP_ON_ERROR
599 bnx2x_panic();
600#endif
601 }
602}
603
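/* DMA len32 dwords from device GRC space into the slowpath wb_data buffer;
 * falls back to plain register reads while DMAE is not yet ready.
 */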
604void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
605{
606 int rc;
607 struct dmae_command dmae;
608
609 if (!bp->dmae_ready) {
610 u32 *data = bnx2x_sp(bp, wb_data[0]);
611 int i;
612
613 if (CHIP_IS_E1(bp))
614 for (i = 0; i < len32; i++)
615 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
616 else
617 for (i = 0; i < len32; i++)
618 data[i] = REG_RD(bp, src_addr + i*4);
619
620 return;
621 }
622
623
624 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
625
626
627 dmae.src_addr_lo = src_addr >> 2;
628 dmae.src_addr_hi = 0;
629 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
630 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
631 dmae.len = len32;
632
633
634 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
635 if (rc) {
636 BNX2X_ERR("DMAE returned failure %d\n", rc);
637#ifdef BNX2X_STOP_ON_ERROR
638 bnx2x_panic();
639#endif
640 }
641}
642
643static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
644 u32 addr, u32 len)
645{
646 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
647 int offset = 0;
648
649 while (len > dmae_wr_max) {
650 bnx2x_write_dmae(bp, phys_addr + offset,
651 addr + offset, dmae_wr_max);
652 offset += dmae_wr_max * 4;
653 len -= dmae_wr_max;
654 }
655
656 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
657}
658
659enum storms {
660 XSTORM,
661 TSTORM,
662 CSTORM,
663 USTORM,
664 MAX_STORMS
665};
666
667#define STORMS_NUM 4
668#define REGS_IN_ENTRY 4
669
670static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
671 enum storms storm,
672 int entry)
673{
674 switch (storm) {
675 case XSTORM:
676 return XSTORM_ASSERT_LIST_OFFSET(entry);
677 case TSTORM:
678 return TSTORM_ASSERT_LIST_OFFSET(entry);
679 case CSTORM:
680 return CSTORM_ASSERT_LIST_OFFSET(entry);
681 case USTORM:
682 return USTORM_ASSERT_LIST_OFFSET(entry);
683 case MAX_STORMS:
684 default:
685 BNX2X_ERR("unknown storm\n");
686 }
687 return -EINVAL;
688}
689
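/* Scan the per-storm assert lists and print any asserts recorded by the FW.
 * Returns the number of assert entries found.
 */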
690static int bnx2x_mc_assert(struct bnx2x *bp)
691{
692 char last_idx;
693 int i, j, rc = 0;
694 enum storms storm;
695 u32 regs[REGS_IN_ENTRY];
696 u32 bar_storm_intmem[STORMS_NUM] = {
697 BAR_XSTRORM_INTMEM,
698 BAR_TSTRORM_INTMEM,
699 BAR_CSTRORM_INTMEM,
700 BAR_USTRORM_INTMEM
701 };
702 u32 storm_assert_list_index[STORMS_NUM] = {
703 XSTORM_ASSERT_LIST_INDEX_OFFSET,
704 TSTORM_ASSERT_LIST_INDEX_OFFSET,
705 CSTORM_ASSERT_LIST_INDEX_OFFSET,
706 USTORM_ASSERT_LIST_INDEX_OFFSET
707 };
708 char *storms_string[STORMS_NUM] = {
709 "XSTORM",
710 "TSTORM",
711 "CSTORM",
712 "USTORM"
713 };
714
715 for (storm = XSTORM; storm < MAX_STORMS; storm++) {
716 last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
717 storm_assert_list_index[storm]);
718 if (last_idx)
719 BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
720 storms_string[storm], last_idx);
721
722
723 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
724
725 for (j = 0; j < REGS_IN_ENTRY; j++)
726 regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
727 bnx2x_get_assert_list_entry(bp,
728 storm,
729 i) +
730 sizeof(u32) * j);
731
732
733 if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
734 BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
735 storms_string[storm], i, regs[3],
736 regs[2], regs[1], regs[0]);
737 rc++;
738 } else {
739 break;
740 }
741 }
742 }
743
744 BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
745 CHIP_IS_E1(bp) ? "everest1" :
746 CHIP_IS_E1H(bp) ? "everest1h" :
747 CHIP_IS_E2(bp) ? "everest2" : "everest3",
748 BCM_5710_FW_MAJOR_VERSION,
749 BCM_5710_FW_MINOR_VERSION,
750 BCM_5710_FW_REVISION_VERSION);
751
752 return rc;
753}
754
755#define MCPR_TRACE_BUFFER_SIZE (0x800)
756#define SCRATCH_BUFFER_SIZE(bp) \
757 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
758
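/* Dump the MCP (bootcode) trace buffer from scratchpad memory at the given
 * printk level.
 */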
759void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
760{
761 u32 addr, val;
762 u32 mark, offset;
763 __be32 data[9];
764 int word;
765 u32 trace_shmem_base;
766 if (BP_NOMCP(bp)) {
767 BNX2X_ERR("NO MCP - can not dump\n");
768 return;
769 }
770 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
771 (bp->common.bc_ver & 0xff0000) >> 16,
772 (bp->common.bc_ver & 0xff00) >> 8,
773 (bp->common.bc_ver & 0xff));
774
775 if (pci_channel_offline(bp->pdev)) {
776 BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
777 return;
778 }
779
780 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
781 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
782 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
783
784 if (BP_PATH(bp) == 0)
785 trace_shmem_base = bp->common.shmem_base;
786 else
787 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
788
789
790 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
791 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
792 SCRATCH_BUFFER_SIZE(bp)) {
793 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
794 trace_shmem_base);
795 return;
796 }
797
798 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
799
800
801 mark = REG_RD(bp, addr);
802 if (mark != MFW_TRACE_SIGNATURE) {
803 BNX2X_ERR("Trace buffer signature is missing.");
		return;
805 }
806
807
808 addr += 4;
809 mark = REG_RD(bp, addr);
810 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
811 if (mark >= trace_shmem_base || mark < addr + 4) {
812 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
813 return;
814 }
815 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
816
817 printk("%s", lvl);
818
819
820 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
821 for (word = 0; word < 8; word++)
822 data[word] = htonl(REG_RD(bp, offset + 4*word));
823 data[8] = 0x0;
824 pr_cont("%s", (char *)data);
825 }
826
827
828 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
829 for (word = 0; word < 8; word++)
830 data[word] = htonl(REG_RD(bp, offset + 4*word));
831 data[8] = 0x0;
832 pr_cont("%s", (char *)data);
833 }
834 printk("%s" "end of fw dump\n", lvl);
835}
836
837static void bnx2x_fw_dump(struct bnx2x *bp)
838{
839 bnx2x_fw_dump_lvl(bp, KERN_ERR);
840}
841
842static void bnx2x_hc_int_disable(struct bnx2x *bp)
843{
844 int port = BP_PORT(bp);
845 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
846 u32 val = REG_RD(bp, addr);
847
848
849
850
851
852 if (CHIP_IS_E1(bp)) {
853
854
855
856
857 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
858
859 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
860 HC_CONFIG_0_REG_INT_LINE_EN_0 |
861 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
862 } else
863 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
864 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
865 HC_CONFIG_0_REG_INT_LINE_EN_0 |
866 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
867
868 DP(NETIF_MSG_IFDOWN,
869 "write %x to HC %d (addr 0x%x)\n",
870 val, port, addr);
871
872 REG_WR(bp, addr, val);
873 if (REG_RD(bp, addr) != val)
874 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
875}
876
877static void bnx2x_igu_int_disable(struct bnx2x *bp)
878{
879 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
880
881 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
882 IGU_PF_CONF_INT_LINE_EN |
883 IGU_PF_CONF_ATTN_BIT_EN);
884
885 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
886
887 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
888 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
889 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
890}
891
892static void bnx2x_int_disable(struct bnx2x *bp)
893{
894 if (bp->common.int_block == INT_BLOCK_HC)
895 bnx2x_hc_int_disable(bp);
896 else
897 bnx2x_igu_int_disable(bp);
898}
899
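/* Print a crash dump: default and per-queue status blocks, ring state and
 * FW assert/trace information.
 */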
900void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
901{
902 int i;
903 u16 j;
904 struct hc_sp_status_block_data sp_sb_data;
905 int func = BP_FUNC(bp);
906#ifdef BNX2X_STOP_ON_ERROR
907 u16 start = 0, end = 0;
908 u8 cos;
909#endif
910 if (IS_PF(bp) && disable_int)
911 bnx2x_int_disable(bp);
912
913 bp->stats_state = STATS_STATE_DISABLED;
914 bp->eth_stats.unrecoverable_error++;
915 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
916
917 BNX2X_ERR("begin crash dump -----------------\n");
918
919
920
921 if (IS_PF(bp)) {
922 struct host_sp_status_block *def_sb = bp->def_status_blk;
923 int data_size, cstorm_offset;
924
925 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
926 bp->def_idx, bp->def_att_idx, bp->attn_state,
927 bp->spq_prod_idx, bp->stats_counter);
928 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
929 def_sb->atten_status_block.attn_bits,
930 def_sb->atten_status_block.attn_bits_ack,
931 def_sb->atten_status_block.status_block_id,
932 def_sb->atten_status_block.attn_bits_index);
933 BNX2X_ERR(" def (");
934 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
935 pr_cont("0x%x%s",
936 def_sb->sp_sb.index_values[i],
937 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
938
939 data_size = sizeof(struct hc_sp_status_block_data) /
940 sizeof(u32);
941 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
942 for (i = 0; i < data_size; i++)
943 *((u32 *)&sp_sb_data + i) =
944 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
945 i * sizeof(u32));
946
947 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
948 sp_sb_data.igu_sb_id,
949 sp_sb_data.igu_seg_id,
950 sp_sb_data.p_func.pf_id,
951 sp_sb_data.p_func.vnic_id,
952 sp_sb_data.p_func.vf_id,
953 sp_sb_data.p_func.vf_valid,
954 sp_sb_data.state);
955 }
956
957 for_each_eth_queue(bp, i) {
958 struct bnx2x_fastpath *fp = &bp->fp[i];
959 int loop;
960 struct hc_status_block_data_e2 sb_data_e2;
961 struct hc_status_block_data_e1x sb_data_e1x;
962 struct hc_status_block_sm *hc_sm_p =
963 CHIP_IS_E1x(bp) ?
964 sb_data_e1x.common.state_machine :
965 sb_data_e2.common.state_machine;
966 struct hc_index_data *hc_index_p =
967 CHIP_IS_E1x(bp) ?
968 sb_data_e1x.index_data :
969 sb_data_e2.index_data;
970 u8 data_size, cos;
971 u32 *sb_data_p;
972 struct bnx2x_fp_txdata txdata;
973
974 if (!bp->fp)
975 break;
976
977 if (!fp->rx_cons_sb)
978 continue;
979
980
981 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
982 i, fp->rx_bd_prod, fp->rx_bd_cons,
983 fp->rx_comp_prod,
984 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
985 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
986 fp->rx_sge_prod, fp->last_max_sge,
987 le16_to_cpu(fp->fp_hc_idx));
988
989
990 for_each_cos_in_tx_queue(fp, cos)
991 {
992 if (!fp->txdata_ptr[cos])
993 break;
994
995 txdata = *fp->txdata_ptr[cos];
996
997 if (!txdata.tx_cons_sb)
998 continue;
999
1000 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
1001 i, txdata.tx_pkt_prod,
1002 txdata.tx_pkt_cons, txdata.tx_bd_prod,
1003 txdata.tx_bd_cons,
1004 le16_to_cpu(*txdata.tx_cons_sb));
1005 }
1006
1007 loop = CHIP_IS_E1x(bp) ?
1008 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
1009
1010
1011
1012 if (IS_FCOE_FP(fp))
1013 continue;
1014
1015 BNX2X_ERR(" run indexes (");
1016 for (j = 0; j < HC_SB_MAX_SM; j++)
1017 pr_cont("0x%x%s",
1018 fp->sb_running_index[j],
1019 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
1020
1021 BNX2X_ERR(" indexes (");
1022 for (j = 0; j < loop; j++)
1023 pr_cont("0x%x%s",
1024 fp->sb_index_values[j],
1025 (j == loop - 1) ? ")" : " ");
1026
1027
1028 if (IS_VF(bp))
1029 continue;
1030
1031
1032 data_size = CHIP_IS_E1x(bp) ?
1033 sizeof(struct hc_status_block_data_e1x) :
1034 sizeof(struct hc_status_block_data_e2);
1035 data_size /= sizeof(u32);
1036 sb_data_p = CHIP_IS_E1x(bp) ?
1037 (u32 *)&sb_data_e1x :
1038 (u32 *)&sb_data_e2;
1039
1040 for (j = 0; j < data_size; j++)
1041 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1042 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
1043 j * sizeof(u32));
1044
1045 if (!CHIP_IS_E1x(bp)) {
1046 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1047 sb_data_e2.common.p_func.pf_id,
1048 sb_data_e2.common.p_func.vf_id,
1049 sb_data_e2.common.p_func.vf_valid,
1050 sb_data_e2.common.p_func.vnic_id,
1051 sb_data_e2.common.same_igu_sb_1b,
1052 sb_data_e2.common.state);
1053 } else {
1054 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1055 sb_data_e1x.common.p_func.pf_id,
1056 sb_data_e1x.common.p_func.vf_id,
1057 sb_data_e1x.common.p_func.vf_valid,
1058 sb_data_e1x.common.p_func.vnic_id,
1059 sb_data_e1x.common.same_igu_sb_1b,
1060 sb_data_e1x.common.state);
1061 }
1062
1063
1064 for (j = 0; j < HC_SB_MAX_SM; j++) {
1065 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1066 j, hc_sm_p[j].__flags,
1067 hc_sm_p[j].igu_sb_id,
1068 hc_sm_p[j].igu_seg_id,
1069 hc_sm_p[j].time_to_expire,
1070 hc_sm_p[j].timer_value);
1071 }
1072
1073
1074 for (j = 0; j < loop; j++) {
1075 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1076 hc_index_p[j].flags,
1077 hc_index_p[j].timeout);
1078 }
1079 }
1080
1081#ifdef BNX2X_STOP_ON_ERROR
1082 if (IS_PF(bp)) {
1083
1084 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1085 for (i = 0; i < NUM_EQ_DESC; i++) {
1086 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1087
1088 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1089 i, bp->eq_ring[i].message.opcode,
1090 bp->eq_ring[i].message.error);
1091 BNX2X_ERR("data: %x %x %x\n",
1092 data[0], data[1], data[2]);
1093 }
1094 }
1095
1096
1097
1098 for_each_valid_rx_queue(bp, i) {
1099 struct bnx2x_fastpath *fp = &bp->fp[i];
1100
1101 if (!bp->fp)
1102 break;
1103
1104 if (!fp->rx_cons_sb)
1105 continue;
1106
1107 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1108 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1109 for (j = start; j != end; j = RX_BD(j + 1)) {
1110 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1111 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1112
1113 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1114 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
1115 }
1116
1117 start = RX_SGE(fp->rx_sge_prod);
1118 end = RX_SGE(fp->last_max_sge);
1119 for (j = start; j != end; j = RX_SGE(j + 1)) {
1120 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1121 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1122
1123 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1124 i, j, rx_sge[1], rx_sge[0], sw_page->page);
1125 }
1126
1127 start = RCQ_BD(fp->rx_comp_cons - 10);
1128 end = RCQ_BD(fp->rx_comp_cons + 503);
1129 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1130 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1131
1132 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1133 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1134 }
1135 }
1136
1137
1138 for_each_valid_tx_queue(bp, i) {
1139 struct bnx2x_fastpath *fp = &bp->fp[i];
1140
1141 if (!bp->fp)
1142 break;
1143
1144 for_each_cos_in_tx_queue(fp, cos) {
1145 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1146
1147 if (!fp->txdata_ptr[cos])
1148 break;
1149
1150 if (!txdata->tx_cons_sb)
1151 continue;
1152
1153 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1154 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1155 for (j = start; j != end; j = TX_BD(j + 1)) {
1156 struct sw_tx_bd *sw_bd =
1157 &txdata->tx_buf_ring[j];
1158
1159 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
1160 i, cos, j, sw_bd->skb,
1161 sw_bd->first_bd);
1162 }
1163
1164 start = TX_BD(txdata->tx_bd_cons - 10);
1165 end = TX_BD(txdata->tx_bd_cons + 254);
1166 for (j = start; j != end; j = TX_BD(j + 1)) {
1167 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
1168
1169 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
1170 i, cos, j, tx_bd[0], tx_bd[1],
1171 tx_bd[2], tx_bd[3]);
1172 }
1173 }
1174 }
1175#endif
1176 if (IS_PF(bp)) {
1177 bnx2x_fw_dump(bp);
1178 bnx2x_mc_assert(bp);
1179 }
1180 BNX2X_ERR("end crash dump -----------------\n");
1181}
1182
1183
1184
1185
1186
1187
1188
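/* FLR support (E2 and newer) - bnx2x_pf_flr_clnup() is called during nic load
 * as part of the per-function HW initialization.
 */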
1189#define FLR_WAIT_USEC 10000
1190#define FLR_WAIT_INTERVAL 50
1191#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL)
1192
1193struct pbf_pN_buf_regs {
1194 int pN;
1195 u32 init_crd;
1196 u32 crd;
1197 u32 crd_freed;
1198};
1199
1200struct pbf_pN_cmd_regs {
1201 int pN;
1202 u32 lines_occup;
1203 u32 lines_freed;
1204};
1205
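/* Wait for the PBF to free all the credits it took for this port's TX buffer,
 * or until poll_count expires.
 */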
1206static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1207 struct pbf_pN_buf_regs *regs,
1208 u32 poll_count)
1209{
1210 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1211 u32 cur_cnt = poll_count;
1212
1213 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1214 crd = crd_start = REG_RD(bp, regs->crd);
1215 init_crd = REG_RD(bp, regs->init_crd);
1216
1217 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1218 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
1219 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1220
1221 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1222 (init_crd - crd_start))) {
1223 if (cur_cnt--) {
1224 udelay(FLR_WAIT_INTERVAL);
1225 crd = REG_RD(bp, regs->crd);
1226 crd_freed = REG_RD(bp, regs->crd_freed);
1227 } else {
1228 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1229 regs->pN);
1230 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
1231 regs->pN, crd);
1232 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1233 regs->pN, crd_freed);
1234 break;
1235 }
1236 }
1237 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1238 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1239}
1240
1241static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1242 struct pbf_pN_cmd_regs *regs,
1243 u32 poll_count)
1244{
1245 u32 occup, to_free, freed, freed_start;
1246 u32 cur_cnt = poll_count;
1247
1248 occup = to_free = REG_RD(bp, regs->lines_occup);
1249 freed = freed_start = REG_RD(bp, regs->lines_freed);
1250
1251 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
1252 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1253
1254 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1255 if (cur_cnt--) {
1256 udelay(FLR_WAIT_INTERVAL);
1257 occup = REG_RD(bp, regs->lines_occup);
1258 freed = REG_RD(bp, regs->lines_freed);
1259 } else {
1260 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1261 regs->pN);
1262 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
1263 regs->pN, occup);
1264 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1265 regs->pN, freed);
1266 break;
1267 }
1268 }
1269 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1270 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1271}
1272
1273static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1274 u32 expected, u32 poll_count)
1275{
1276 u32 cur_cnt = poll_count;
1277 u32 val;
1278
1279 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1280 udelay(FLR_WAIT_INTERVAL);
1281
1282 return val;
1283}
1284
1285int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1286 char *msg, u32 poll_cnt)
1287{
1288 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1289 if (val != 0) {
1290 BNX2X_ERR("%s usage count=%d\n", msg, val);
1291 return 1;
1292 }
1293 return 0;
1294}
1295
1296
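/* Stretch the FLR poll count on slow (emulation/FPGA) platforms */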
1297u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1298{
1299
1300 if (CHIP_REV_IS_EMUL(bp))
1301 return FLR_POLL_CNT * 2000;
1302
1303 if (CHIP_REV_IS_FPGA(bp))
1304 return FLR_POLL_CNT * 120;
1305
1306 return FLR_POLL_CNT;
1307}
1308
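/* Wait for the PBF command queues and TX buffers of ports 0, 1 and the LB
 * queue to drain.
 */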
1309void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1310{
1311 struct pbf_pN_cmd_regs cmd_regs[] = {
1312 {0, (CHIP_IS_E3B0(bp)) ?
1313 PBF_REG_TQ_OCCUPANCY_Q0 :
1314 PBF_REG_P0_TQ_OCCUPANCY,
1315 (CHIP_IS_E3B0(bp)) ?
1316 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1317 PBF_REG_P0_TQ_LINES_FREED_CNT},
1318 {1, (CHIP_IS_E3B0(bp)) ?
1319 PBF_REG_TQ_OCCUPANCY_Q1 :
1320 PBF_REG_P1_TQ_OCCUPANCY,
1321 (CHIP_IS_E3B0(bp)) ?
1322 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1323 PBF_REG_P1_TQ_LINES_FREED_CNT},
1324 {4, (CHIP_IS_E3B0(bp)) ?
1325 PBF_REG_TQ_OCCUPANCY_LB_Q :
1326 PBF_REG_P4_TQ_OCCUPANCY,
1327 (CHIP_IS_E3B0(bp)) ?
1328 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1329 PBF_REG_P4_TQ_LINES_FREED_CNT}
1330 };
1331
1332 struct pbf_pN_buf_regs buf_regs[] = {
1333 {0, (CHIP_IS_E3B0(bp)) ?
1334 PBF_REG_INIT_CRD_Q0 :
1335 PBF_REG_P0_INIT_CRD ,
1336 (CHIP_IS_E3B0(bp)) ?
1337 PBF_REG_CREDIT_Q0 :
1338 PBF_REG_P0_CREDIT,
1339 (CHIP_IS_E3B0(bp)) ?
1340 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1341 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1342 {1, (CHIP_IS_E3B0(bp)) ?
1343 PBF_REG_INIT_CRD_Q1 :
1344 PBF_REG_P1_INIT_CRD,
1345 (CHIP_IS_E3B0(bp)) ?
1346 PBF_REG_CREDIT_Q1 :
1347 PBF_REG_P1_CREDIT,
1348 (CHIP_IS_E3B0(bp)) ?
1349 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1350 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1351 {4, (CHIP_IS_E3B0(bp)) ?
1352 PBF_REG_INIT_CRD_LB_Q :
1353 PBF_REG_P4_INIT_CRD,
1354 (CHIP_IS_E3B0(bp)) ?
1355 PBF_REG_CREDIT_LB_Q :
1356 PBF_REG_P4_CREDIT,
1357 (CHIP_IS_E3B0(bp)) ?
1358 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1359 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1360 };
1361
1362 int i;
1363
1364
1365 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1366 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1367
1368
1369 for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1370 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1371}
1372
1373#define OP_GEN_PARAM(param) \
1374 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1375
1376#define OP_GEN_TYPE(type) \
1377 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1378
1379#define OP_GEN_AGG_VECT(index) \
1380 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1381
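/**
 * bnx2x_send_final_clnup - send the FW final cleanup ramrod and poll for its
 * completion.
 *
 * @bp:		driver handle
 * @clnup_func:	the function to be cleaned
 * @poll_cnt:	request/poll counter
 */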
1382int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1383{
1384 u32 op_gen_command = 0;
1385 u32 comp_addr = BAR_CSTRORM_INTMEM +
1386 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1387 int ret = 0;
1388
1389 if (REG_RD(bp, comp_addr)) {
1390 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1391 return 1;
1392 }
1393
1394 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1395 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1396 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1397 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1398
1399 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1400 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1401
1402 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1403 BNX2X_ERR("FW final cleanup did not succeed\n");
1404 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1405 (REG_RD(bp, comp_addr)));
1406 bnx2x_panic();
1407 return 1;
1408 }
1409
1410 REG_WR(bp, comp_addr, 0);
1411
1412 return ret;
1413}
1414
1415u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1416{
1417 u16 status;
1418
1419 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1420 return status & PCI_EXP_DEVSTA_TRPND;
1421}
1422
1423
1424
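/* Poll the per-PF HW usage counters (CFC, DORQ, QM, Timers and DMAE) until
 * they drop to zero; returns non-zero on timeout.
 */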
1425static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1426{
1427
1428 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1429 CFC_REG_NUM_LCIDS_INSIDE_PF,
1430 "CFC PF usage counter timed out",
1431 poll_cnt))
1432 return 1;
1433
1434
1435 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1436 DORQ_REG_PF_USAGE_CNT,
1437 "DQ PF usage counter timed out",
1438 poll_cnt))
1439 return 1;
1440
1441
1442 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1443 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1444 "QM PF usage counter timed out",
1445 poll_cnt))
1446 return 1;
1447
1448
1449 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1450 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1451 "Timers VNIC usage counter timed out",
1452 poll_cnt))
1453 return 1;
1454 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1455 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1456 "Timers NUM_SCANS usage counter timed out",
1457 poll_cnt))
1458 return 1;
1459
1460
1461 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1462 dmae_reg_go_c[INIT_DMAE_C(bp)],
1463 "DMAE command register timed out",
1464 poll_cnt))
1465 return 1;
1466
1467 return 0;
1468}
1469
1470static void bnx2x_hw_enable_status(struct bnx2x *bp)
1471{
1472 u32 val;
1473
1474 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1475 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1476
1477 val = REG_RD(bp, PBF_REG_DISABLE_PF);
1478 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1479
1480 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1481 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1482
1483 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1484 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1485
1486 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1487 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1488
1489 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1490 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1491
1492 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1493 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1494
1495 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1496 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1497 val);
1498}
1499
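/* PF FLR cleanup: re-enable target reads, wait for the HW usage counters to
 * drain, send the FW final cleanup ramrod, verify TX HW and PCIe are flushed
 * and finally re-enable the PF as bus master.
 */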
1500static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1501{
1502 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1503
1504 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1505
1506
1507 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1508
1509
1510 DP(BNX2X_MSG_SP, "Polling usage counters\n");
1511 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1512 return -EBUSY;
1513
1514
1515
1516
1517 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1518 return -EBUSY;
1519
1520
1521
1522
1523 bnx2x_tx_hw_flushed(bp, poll_cnt);
1524
1525
1526 msleep(100);
1527
1528
1529 if (bnx2x_is_pcie_pending(bp->pdev))
1530 BNX2X_ERR("PCIE Transactions still pending\n");
1531
1532
1533 bnx2x_hw_enable_status(bp);
1534
1535
1536
1537
1538
1539 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1540
1541 return 0;
1542}
1543
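/* Enable interrupts in the HC block according to the active mode
 * (MSI-X / single MSI-X / MSI / INTx).
 */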
1544static void bnx2x_hc_int_enable(struct bnx2x *bp)
1545{
1546 int port = BP_PORT(bp);
1547 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1548 u32 val = REG_RD(bp, addr);
1549 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1550 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1551 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1552
1553 if (msix) {
1554 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1555 HC_CONFIG_0_REG_INT_LINE_EN_0);
1556 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1557 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1558 if (single_msix)
1559 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1560 } else if (msi) {
1561 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1562 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1563 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1564 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1565 } else {
1566 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1567 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1568 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1569 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1570
1571 if (!CHIP_IS_E1(bp)) {
1572 DP(NETIF_MSG_IFUP,
1573 "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1574
1575 REG_WR(bp, addr, val);
1576
1577 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1578 }
1579 }
1580
1581 if (CHIP_IS_E1(bp))
1582 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1583
1584 DP(NETIF_MSG_IFUP,
1585 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1586 (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1587
1588 REG_WR(bp, addr, val);
1589
1590
1591
1592 barrier();
1593
1594 if (!CHIP_IS_E1(bp)) {
1595
1596 if (IS_MF(bp)) {
1597 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1598 if (bp->port.pmf)
1599
1600 val |= 0x1100;
1601 } else
1602 val = 0xffff;
1603
1604 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1605 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1606 }
1607}
1608
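/* Enable interrupts in the IGU block according to the active mode
 * (MSI-X / single MSI-X / MSI / INTx).
 */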
1609static void bnx2x_igu_int_enable(struct bnx2x *bp)
1610{
1611 u32 val;
1612 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1613 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1614 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1615
1616 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1617
1618 if (msix) {
1619 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1620 IGU_PF_CONF_SINGLE_ISR_EN);
1621 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1622 IGU_PF_CONF_ATTN_BIT_EN);
1623
1624 if (single_msix)
1625 val |= IGU_PF_CONF_SINGLE_ISR_EN;
1626 } else if (msi) {
1627 val &= ~IGU_PF_CONF_INT_LINE_EN;
1628 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1629 IGU_PF_CONF_ATTN_BIT_EN |
1630 IGU_PF_CONF_SINGLE_ISR_EN);
1631 } else {
1632 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1633 val |= (IGU_PF_CONF_INT_LINE_EN |
1634 IGU_PF_CONF_ATTN_BIT_EN |
1635 IGU_PF_CONF_SINGLE_ISR_EN);
1636 }
1637
1638
1639 if ((!msix) || single_msix) {
1640 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1641 bnx2x_ack_int(bp);
1642 }
1643
1644 val |= IGU_PF_CONF_FUNC_EN;
1645
1646 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
1647 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1648
1649 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1650
1651 if (val & IGU_PF_CONF_INT_LINE_EN)
1652 pci_intx(bp->pdev, true);
1653
1654 barrier();
1655
1656
1657 if (IS_MF(bp)) {
1658 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1659 if (bp->port.pmf)
1660
1661 val |= 0x1100;
1662 } else
1663 val = 0xffff;
1664
1665 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1666 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1667}
1668
1669void bnx2x_int_enable(struct bnx2x *bp)
1670{
1671 if (bp->common.int_block == INT_BLOCK_HC)
1672 bnx2x_hc_int_enable(bp);
1673 else
1674 bnx2x_igu_int_enable(bp);
1675}
1676
1677void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1678{
1679 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1680 int i, offset;
1681
1682 if (disable_hw)
1683
1684 bnx2x_int_disable(bp);
1685
1686
1687 if (msix) {
1688 synchronize_irq(bp->msix_table[0].vector);
1689 offset = 1;
1690 if (CNIC_SUPPORT(bp))
1691 offset++;
1692 for_each_eth_queue(bp, i)
1693 synchronize_irq(bp->msix_table[offset++].vector);
1694 } else
1695 synchronize_irq(bp->pdev->irq);
1696
1697
1698 cancel_delayed_work(&bp->sp_task);
1699 cancel_delayed_work(&bp->period_task);
1700 flush_workqueue(bnx2x_wq);
1701}
1702
1703
1704
1705
1706
1707
1708
1709
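/**
 * bnx2x_trylock_hw_lock - try to take a HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 *
 * Returns true if succeeded, false otherwise.
 */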
1710static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1711{
1712 u32 lock_status;
1713 u32 resource_bit = (1 << resource);
1714 int func = BP_FUNC(bp);
1715 u32 hw_lock_control_reg;
1716
1717 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1718 "Trying to take a lock on resource %d\n", resource);
1719
1720
1721 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1722 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1723 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1724 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1725 return false;
1726 }
1727
1728 if (func <= 5)
1729 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1730 else
1731 hw_lock_control_reg =
1732 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1733
1734
1735 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1736 lock_status = REG_RD(bp, hw_lock_control_reg);
1737 if (lock_status & resource_bit)
1738 return true;
1739
1740 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1741 "Failed to get a lock on resource %d\n", resource);
1742 return false;
1743}
1744
1745
1746
1747
1748
1749
1750
1751
1752
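/* Return the recovery leader HW lock resource for the engine (path) this
 * function belongs to.
 */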
1753static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1754{
1755 if (BP_PATH(bp))
1756 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1757 else
1758 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1759}
1760
1761
1762
1763
1764
1765
1766
1767
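/* Try to take the leader lock for the current engine */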
1768static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1769{
1770 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1771}
1772
1773static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1774
1775
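/* Mark that a slow-path interrupt occurred and schedule the sp task
 * (may run from ISR context).
 */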
1776static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1777{
1778
1779
1780
1781
1782 atomic_set(&bp->interrupt_occurred, 1);
1783
1784
1785
1786
1787
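	/* The sp_task must execute only after interrupt_occurred is set,
	 * otherwise we would get out of sync and miss further interrupts.
	 * Hence the barrier.
	 */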
1788 smp_wmb();
1789
1790
1791 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1792}
1793
1794void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1795{
1796 struct bnx2x *bp = fp->bp;
1797 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1798 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1799 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1800 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1801
1802 DP(BNX2X_MSG_SP,
1803 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1804 fp->index, cid, command, bp->state,
1805 rr_cqe->ramrod_cqe.ramrod_type);
1806
1807
1808
1809
1810 if (cid >= BNX2X_FIRST_VF_CID &&
1811 cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1812 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1813
1814 switch (command) {
1815 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1816 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1817 drv_cmd = BNX2X_Q_CMD_UPDATE;
1818 break;
1819
1820 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1821 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1822 drv_cmd = BNX2X_Q_CMD_SETUP;
1823 break;
1824
1825 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1826 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1827 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1828 break;
1829
1830 case (RAMROD_CMD_ID_ETH_HALT):
1831 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1832 drv_cmd = BNX2X_Q_CMD_HALT;
1833 break;
1834
1835 case (RAMROD_CMD_ID_ETH_TERMINATE):
1836 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1837 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1838 break;
1839
1840 case (RAMROD_CMD_ID_ETH_EMPTY):
1841 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1842 drv_cmd = BNX2X_Q_CMD_EMPTY;
1843 break;
1844
1845 case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1846 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1847 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1848 break;
1849
1850 default:
1851 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1852 command, fp->index);
1853 return;
1854 }
1855
1856 if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1857 q_obj->complete_cmd(bp, q_obj, drv_cmd))
1858
1859
1860
1861
1862
1863
1864
1865#ifdef BNX2X_STOP_ON_ERROR
1866 bnx2x_panic();
1867#else
1868 return;
1869#endif
1870
1871 smp_mb__before_atomic();
1872 atomic_inc(&bp->cq_spq_left);
1873
1874 smp_mb__after_atomic();
1875
1876 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1877
1878 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1879 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889 smp_mb__before_atomic();
1890 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1891 wmb();
1892 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1893 smp_mb__after_atomic();
1894
1895
1896 bnx2x_schedule_sp_task(bp);
1897 }
1898
1899 return;
1900}
1901
1902irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1903{
1904 struct bnx2x *bp = netdev_priv(dev_instance);
1905 u16 status = bnx2x_ack_int(bp);
1906 u16 mask;
1907 int i;
1908 u8 cos;
1909
1910
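	/* Return here if the interrupt is shared and it's not for us */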
1911 if (unlikely(status == 0)) {
1912 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1913 return IRQ_NONE;
1914 }
1915 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1916
1917#ifdef BNX2X_STOP_ON_ERROR
1918 if (unlikely(bp->panic))
1919 return IRQ_HANDLED;
1920#endif
1921
1922 for_each_eth_queue(bp, i) {
1923 struct bnx2x_fastpath *fp = &bp->fp[i];
1924
1925 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1926 if (status & mask) {
1927
1928 for_each_cos_in_tx_queue(fp, cos)
1929 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1930 prefetch(&fp->sb_running_index[SM_RX_ID]);
1931 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1932 status &= ~mask;
1933 }
1934 }
1935
1936 if (CNIC_SUPPORT(bp)) {
1937 mask = 0x2;
1938 if (status & (mask | 0x1)) {
1939 struct cnic_ops *c_ops = NULL;
1940
1941 rcu_read_lock();
1942 c_ops = rcu_dereference(bp->cnic_ops);
1943 if (c_ops && (bp->cnic_eth_dev.drv_state &
1944 CNIC_DRV_STATE_HANDLES_IRQ))
1945 c_ops->cnic_handler(bp->cnic_data, NULL);
1946 rcu_read_unlock();
1947
1948 status &= ~mask;
1949 }
1950 }
1951
1952 if (unlikely(status & 0x1)) {
1953
1954
1955
1956
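		/* Schedule the sp task to handle the default status block:
		 * ack attentions and re-enable interrupts.
		 */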
1957 bnx2x_schedule_sp_task(bp);
1958
1959 status &= ~0x1;
1960 if (!status)
1961 return IRQ_HANDLED;
1962 }
1963
1964 if (unlikely(status))
1965 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1966 status);
1967
1968 return IRQ_HANDLED;
1969}
1970
1971
1972
1973
1974
1975
1976
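/* Acquire the given HW lock resource, retrying for several seconds before
 * giving up with -EAGAIN.
 */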
1977int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1978{
1979 u32 lock_status;
1980 u32 resource_bit = (1 << resource);
1981 int func = BP_FUNC(bp);
1982 u32 hw_lock_control_reg;
1983 int cnt;
1984
1985
1986 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1987 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1988 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1989 return -EINVAL;
1990 }
1991
1992 if (func <= 5) {
1993 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1994 } else {
1995 hw_lock_control_reg =
1996 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1997 }
1998
1999
2000 lock_status = REG_RD(bp, hw_lock_control_reg);
2001 if (lock_status & resource_bit) {
2002 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
2003 lock_status, resource_bit);
2004 return -EEXIST;
2005 }
2006
2007
2008 for (cnt = 0; cnt < 1000; cnt++) {
2009
2010 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2011 lock_status = REG_RD(bp, hw_lock_control_reg);
2012 if (lock_status & resource_bit)
2013 return 0;
2014
2015 usleep_range(5000, 10000);
2016 }
2017 BNX2X_ERR("Timeout\n");
2018 return -EAGAIN;
2019}
2020
2021int bnx2x_release_leader_lock(struct bnx2x *bp)
2022{
2023 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2024}
2025
2026int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2027{
2028 u32 lock_status;
2029 u32 resource_bit = (1 << resource);
2030 int func = BP_FUNC(bp);
2031 u32 hw_lock_control_reg;
2032
2033
2034 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2035 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2036 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2037 return -EINVAL;
2038 }
2039
2040 if (func <= 5) {
2041 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2042 } else {
2043 hw_lock_control_reg =
2044 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2045 }
2046
2047
2048 lock_status = REG_RD(bp, hw_lock_control_reg);
2049 if (!(lock_status & resource_bit)) {
2050 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2051 lock_status, resource_bit);
2052 return -EFAULT;
2053 }
2054
2055 REG_WR(bp, hw_lock_control_reg, resource_bit);
2056 return 0;
2057}
2058
2059int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2060{
2061
2062 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2063 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2064 int gpio_shift = gpio_num +
2065 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2066 u32 gpio_mask = (1 << gpio_shift);
2067 u32 gpio_reg;
2068 int value;
2069
2070 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2071 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2072 return -EINVAL;
2073 }
2074
2075
2076 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2077
2078
2079 if ((gpio_reg & gpio_mask) == gpio_mask)
2080 value = 1;
2081 else
2082 value = 0;
2083
2084 return value;
2085}
2086
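/* Drive a single GPIO pin to output low, output high or input/hi-Z,
 * under the GPIO HW lock.
 */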
2087int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2088{
2089
2090 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2091 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2092 int gpio_shift = gpio_num +
2093 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2094 u32 gpio_mask = (1 << gpio_shift);
2095 u32 gpio_reg;
2096
2097 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2098 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2099 return -EINVAL;
2100 }
2101
2102 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 /* read GPIO and mask all bits except the float bits */
2104 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2105
2106 switch (mode) {
2107 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2108 DP(NETIF_MSG_LINK,
2109 "Set GPIO %d (shift %d) -> output low\n",
2110 gpio_num, gpio_shift);
2111
2112 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2113 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2114 break;
2115
2116 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2117 DP(NETIF_MSG_LINK,
2118 "Set GPIO %d (shift %d) -> output high\n",
2119 gpio_num, gpio_shift);
2120
2121 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2122 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2123 break;
2124
2125 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2126 DP(NETIF_MSG_LINK,
2127 "Set GPIO %d (shift %d) -> input\n",
2128 gpio_num, gpio_shift);
2129
2130 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2131 break;
2132
2133 default:
2134 break;
2135 }
2136
2137 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2138 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2139
2140 return 0;
2141}
2142
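/* Apply one output/float mode to a whole mask of GPIO pins in a single
 * register update.  Any port swapping must be handled by the caller.
 */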
2143int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2144{
2145 u32 gpio_reg = 0;
2146 int rc = 0;
2147
2148
2149
2150 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2151
2152 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2153 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2154 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2155 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2156
2157 switch (mode) {
2158 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2159 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2160
2161 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2162 break;
2163
2164 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2165 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2166
2167 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2168 break;
2169
2170 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2171 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2172
2173 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2174 break;
2175
2176 default:
2177 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2178 rc = -EINVAL;
2179 break;
2180 }
2181
2182 if (rc == 0)
2183 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2184
2185 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2186
2187 return rc;
2188}
2189
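/* Set or clear the interrupt state of the given GPIO pin (accounting for
 * port swapping), under the GPIO HW lock.
 */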
2190int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2191{
2192
2193 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2194 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2195 int gpio_shift = gpio_num +
2196 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2197 u32 gpio_mask = (1 << gpio_shift);
2198 u32 gpio_reg;
2199
2200 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2201 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2202 return -EINVAL;
2203 }
2204
2205 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2206
2207 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2208
2209 switch (mode) {
2210 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2211 DP(NETIF_MSG_LINK,
2212 "Clear GPIO INT %d (shift %d) -> output low\n",
2213 gpio_num, gpio_shift);
2214
2215 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2216 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2217 break;
2218
2219 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2220 DP(NETIF_MSG_LINK,
2221 "Set GPIO INT %d (shift %d) -> output high\n",
2222 gpio_num, gpio_shift);
2223
2224 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2225 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2226 break;
2227
2228 default:
2229 break;
2230 }
2231
2232 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2233 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2234
2235 return 0;
2236}
2237
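/* Drive one of the shared SPIO pins (only SPIO4 and SPIO5 are accepted)
 * to output low, output high or input/hi-Z, under the SPIO HW lock.
 */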
2238static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2239{
2240 u32 spio_reg;
2241
 /* validate the requested SPIO */
2243 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2244 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2245 return -EINVAL;
2246 }
2247
2248 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2249
2250 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2251
2252 switch (mode) {
2253 case MISC_SPIO_OUTPUT_LOW:
2254 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2255
2256 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2257 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2258 break;
2259
2260 case MISC_SPIO_OUTPUT_HIGH:
2261 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2262
2263 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2264 spio_reg |= (spio << MISC_SPIO_SET_POS);
2265 break;
2266
2267 case MISC_SPIO_INPUT_HI_Z:
2268 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2269
2270 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2271 break;
2272
2273 default:
2274 break;
2275 }
2276
2277 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2278 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2279
2280 return 0;
2281}
2282
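/* Translate the negotiated IEEE pause settings into the Pause/Asym_Pause
 * bits of the advertised link configuration.
 */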
2283void bnx2x_calc_fc_adv(struct bnx2x *bp)
2284{
2285 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2286
2287 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2288 ADVERTISED_Pause);
2289 switch (bp->link_vars.ieee_fc &
2290 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2291 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2292 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2293 ADVERTISED_Pause);
2294 break;
2295
2296 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2297 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2298 break;
2299
2300 default:
2301 break;
2302 }
2303}
2304
2305static void bnx2x_set_requested_fc(struct bnx2x *bp)
2306{
 /* Set the requested auto-negotiated flow control: for E1x devices with
  * a jumbo MTU only TX pause is requested (RX pause is not recommended
  * there for performance reasons); otherwise request pause in both
  * directions.
  */
2311 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2312 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2313 else
2314 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2315}
2316
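/* Tell the FW, per port, whether pause frames are honoured for dropless
 * flow control; only written when dropless_fc is enabled and the link is up.
 */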
2317static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2318{
2319 u32 pause_enabled = 0;
2320
2321 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2322 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2323 pause_enabled = 1;
2324
2325 REG_WR(bp, BAR_USTRORM_INTMEM +
2326 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2327 pause_enabled);
2328 }
2329
2330 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2331 pause_enabled ? "enabled" : "disabled");
2332}
2333
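/* Initialize the PHY and bring up the link for the given load mode.  For
 * diagnostic and external-loopback loads the loopback mode and requested
 * line speed are overridden before bnx2x_phy_init() and the requested speed
 * is restored afterwards.
 */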
2334int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2335{
2336 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2337 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2338
2339 if (!BP_NOMCP(bp)) {
2340 bnx2x_set_requested_fc(bp);
2341 bnx2x_acquire_phy_lock(bp);
2342
2343 if (load_mode == LOAD_DIAG) {
2344 struct link_params *lp = &bp->link_params;
2345 lp->loopback_mode = LOOPBACK_XGXS;
2346
2347 if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2348 if (lp->speed_cap_mask[cfx_idx] &
2349 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2350 lp->req_line_speed[cfx_idx] =
2351 SPEED_20000;
2352 else if (lp->speed_cap_mask[cfx_idx] &
2353 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2354 lp->req_line_speed[cfx_idx] =
2355 SPEED_10000;
2356 else
2357 lp->req_line_speed[cfx_idx] =
2358 SPEED_1000;
2359 }
2360 }
2361
2362 if (load_mode == LOAD_LOOPBACK_EXT) {
2363 struct link_params *lp = &bp->link_params;
2364 lp->loopback_mode = LOOPBACK_EXT;
2365 }
2366
2367 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2368
2369 bnx2x_release_phy_lock(bp);
2370
2371 bnx2x_init_dropless_fc(bp);
2372
2373 bnx2x_calc_fc_adv(bp);
2374
2375 if (bp->link_vars.link_up) {
2376 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2377 bnx2x_link_report(bp);
2378 }
2379 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2380 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2381 return rc;
2382 }
2383 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2384 return -EINVAL;
2385}
2386
2387void bnx2x_link_set(struct bnx2x *bp)
2388{
2389 if (!BP_NOMCP(bp)) {
2390 bnx2x_acquire_phy_lock(bp);
2391 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2392 bnx2x_release_phy_lock(bp);
2393
2394 bnx2x_init_dropless_fc(bp);
2395
2396 bnx2x_calc_fc_adv(bp);
2397 } else
2398 BNX2X_ERR("Bootcode is missing - can not set link\n");
2399}
2400
2401static void bnx2x__link_reset(struct bnx2x *bp)
2402{
2403 if (!BP_NOMCP(bp)) {
2404 bnx2x_acquire_phy_lock(bp);
2405 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2406 bnx2x_release_phy_lock(bp);
2407 } else
2408 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2409}
2410
2411void bnx2x_force_link_reset(struct bnx2x *bp)
2412{
2413 bnx2x_acquire_phy_lock(bp);
2414 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2415 bnx2x_release_phy_lock(bp);
2416}
2417
2418u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2419{
2420 u8 rc = 0;
2421
2422 if (!BP_NOMCP(bp)) {
2423 bnx2x_acquire_phy_lock(bp);
2424 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2425 is_serdes);
2426 bnx2x_release_phy_lock(bp);
2427 } else
2428 BNX2X_ERR("Bootcode is missing - can not test link\n");
2429
2430 return rc;
2431}
2432
/* Calculate the per-vNIC min rates from the MF configuration and decide
 * whether port fairness should be enabled: fairness is disabled when ETS
 * is enabled or when all configured min rates are zero; a non-hidden vNIC
 * with a zero min rate is given DEF_MIN_RATE instead.
 */
2442static void bnx2x_calc_vn_min(struct bnx2x *bp,
2443 struct cmng_init_input *input)
2444{
2445 int all_zero = 1;
2446 int vn;
2447
2448 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2449 u32 vn_cfg = bp->mf_config[vn];
2450 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2451 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2452
 /* Skip hidden vns */
2454 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2455 vn_min_rate = 0;
 /* If min rate is zero - set it to DEF_MIN_RATE */
2457 else if (!vn_min_rate)
2458 vn_min_rate = DEF_MIN_RATE;
2459 else
2460 all_zero = 0;
2461
2462 input->vnic_min_rate[vn] = vn_min_rate;
2463 }
2464
 /* Fairness is disabled when ETS is enabled or all min rates are zero */
2466 if (BNX2X_IS_ETS_ENABLED(bp)) {
2467 input->flags.cmng_enables &=
2468 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2469 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2470 } else if (all_zero) {
2471 input->flags.cmng_enables &=
2472 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2473 DP(NETIF_MSG_IFUP,
2474 "All MIN values are zeroes fairness will be disabled\n");
2475 } else
2476 input->flags.cmng_enables |=
2477 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2478}
2479
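/* Compute the max rate of a vNIC from its MF configuration: a percentage of
 * the current line speed in percent-BW mode, otherwise an absolute value in
 * 100Mbps units.  Hidden functions get a max rate of 0.
 */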
2480static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2481 struct cmng_init_input *input)
2482{
2483 u16 vn_max_rate;
2484 u32 vn_cfg = bp->mf_config[vn];
2485
2486 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2487 vn_max_rate = 0;
2488 else {
2489 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2490
2491 if (IS_MF_PERCENT_BW(bp)) {
 /* maxCfg is a percentage of the current line speed */
2493 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2494 } else
 /* maxCfg is an absolute value in 100Mbps units */
2496 vn_max_rate = maxCfg * 100;
2497 }
2498
2499 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2500
2501 input->vnic_max_rate[vn] = vn_max_rate;
2502}
2503
2504static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2505{
2506 if (CHIP_REV_IS_SLOW(bp))
2507 return CMNG_FNS_NONE;
2508 if (IS_MF(bp))
2509 return CMNG_FNS_MINMAX;
2510
2511 return CMNG_FNS_NONE;
2512}
2513
2514void bnx2x_read_mf_cfg(struct bnx2x *bp)
2515{
2516 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2517
2518 if (BP_NOMCP(bp))
2519 return;
2520
 /* The absolute function number for a given vn depends on the port mode:
  *
  *   2-port mode: func = 2 * vn + BP_PORT + BP_PATH
  *   4-port mode: func = 4 * vn + 2 * BP_PORT + BP_PATH
  *
  * which is what the expression below computes with n = 1 or 2.
  */
2532 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2533 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2534
2535 if (func >= E1H_FUNC_MAX)
2536 break;
2537
2538 bp->mf_config[vn] =
2539 MF_CFG_RD(bp, func_mf_config[func].config);
2540 }
2541 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2542 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2543 bp->flags |= MF_FUNC_DIS;
2544 } else {
2545 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2546 bp->flags &= ~MF_FUNC_DIS;
2547 }
2548}
2549
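/* Build the congestion management (rate shaping/fairness) configuration:
 * in min-max mode, optionally re-read the MF config, compute the per-vNIC
 * min/max rates and initialize bp->cmng.  Left disabled when the port rate
 * is unknown or the mode is not CMNG_FNS_MINMAX.
 */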
2550static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2551{
2552 struct cmng_init_input input;
2553 memset(&input, 0, sizeof(struct cmng_init_input));
2554
2555 input.port_rate = bp->link_vars.line_speed;
2556
2557 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2558 int vn;
2559
2560
2561 if (read_cfg)
2562 bnx2x_read_mf_cfg(bp);
2563
2564
2565 bnx2x_calc_vn_min(bp, &input);
2566
2567
2568 if (bp->port.pmf)
2569 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2570 bnx2x_calc_vn_max(bp, vn, &input);
2571
2572
2573 input.flags.cmng_enables |=
2574 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2575
2576 bnx2x_init_cmng(&input, &bp->cmng);
2577 return;
2578 }
2579
2580
2581 DP(NETIF_MSG_IFUP,
2582 "rate shaping and fairness are disabled\n");
2583}
2584
2585static void storm_memset_cmng(struct bnx2x *bp,
2586 struct cmng_init *cmng,
2587 u8 port)
2588{
2589 int vn;
2590 size_t size = sizeof(struct cmng_struct_per_port);
2591
2592 u32 addr = BAR_XSTRORM_INTMEM +
2593 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2594
2595 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2596
2597 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2598 int func = func_by_vn(bp, vn);
2599
2600 addr = BAR_XSTRORM_INTMEM +
2601 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2602 size = sizeof(struct rate_shaping_vars_per_vn);
2603 __storm_memset_struct(bp, addr, size,
2604 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2605
2606 addr = BAR_XSTRORM_INTMEM +
2607 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2608 size = sizeof(struct fairness_vars_per_vn);
2609 __storm_memset_struct(bp, addr, size,
2610 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2611 }
2612}
2613
2614
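/* Re-calculate the local congestion management configuration and, when
 * applicable, write it to the XSTORM internal memory.
 */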
2615void bnx2x_set_local_cmng(struct bnx2x *bp)
2616{
2617 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2618
2619 if (cmng_fns != CMNG_FNS_NONE) {
2620 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2621 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2622 } else {
2623
2624 DP(NETIF_MSG_IFUP,
2625 "single function mode without fairness\n");
2626 }
2627}
2628
2629
2630static void bnx2x_link_attn(struct bnx2x *bp)
2631{
 /* Make sure that we are synced with the current statistics */
2633 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2634
2635 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2636
2637 bnx2x_init_dropless_fc(bp);
2638
2639 if (bp->link_vars.link_up) {
2640
2641 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2642 struct host_port_stats *pstats;
2643
2644 pstats = bnx2x_sp(bp, port_stats);
2645
2646 memset(&(pstats->mac_stx[0]), 0,
2647 sizeof(struct mac_stx));
2648 }
2649 if (bp->state == BNX2X_STATE_OPEN)
2650 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2651 }
2652
2653 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2654 bnx2x_set_local_cmng(bp);
2655
2656 __bnx2x_link_report(bp);
2657
2658 if (IS_MF(bp))
2659 bnx2x_link_sync_notify(bp);
2660}
2661
2662void bnx2x__link_status_update(struct bnx2x *bp)
2663{
2664 if (bp->state != BNX2X_STATE_OPEN)
2665 return;
2666
2667
2668 if (IS_PF(bp)) {
2669 bnx2x_dcbx_pmf_update(bp);
2670 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2671 if (bp->link_vars.link_up)
2672 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2673 else
2674 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2675
2676 bnx2x_link_report(bp);
2677
2678 } else {
2679 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2680 SUPPORTED_10baseT_Full |
2681 SUPPORTED_100baseT_Half |
2682 SUPPORTED_100baseT_Full |
2683 SUPPORTED_1000baseT_Full |
2684 SUPPORTED_2500baseX_Full |
2685 SUPPORTED_10000baseT_Full |
2686 SUPPORTED_TP |
2687 SUPPORTED_FIBRE |
2688 SUPPORTED_Autoneg |
2689 SUPPORTED_Pause |
2690 SUPPORTED_Asym_Pause);
2691 bp->port.advertising[0] = bp->port.supported[0];
2692
2693 bp->link_params.bp = bp;
2694 bp->link_params.port = BP_PORT(bp);
2695 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2696 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2697 bp->link_params.req_line_speed[0] = SPEED_10000;
2698 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2699 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2700 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2701 bp->link_vars.line_speed = SPEED_10000;
2702 bp->link_vars.link_status =
2703 (LINK_STATUS_LINK_UP |
2704 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2705 bp->link_vars.link_up = 1;
2706 bp->link_vars.duplex = DUPLEX_FULL;
2707 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2708 __bnx2x_link_report(bp);
2709
2710 bnx2x_sample_bulletin(bp);
2711
2712
2713
2714
2715
2716
2717 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2718 }
2719}
2720
2721static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2722 u16 vlan_val, u8 allowed_prio)
2723{
2724 struct bnx2x_func_state_params func_params = {NULL};
2725 struct bnx2x_func_afex_update_params *f_update_params =
2726 &func_params.params.afex_update;
2727
2728 func_params.f_obj = &bp->func_obj;
2729 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2730
 /* There is no need to wait for the ramrod to complete here, so
  * RAMROD_COMP_WAIT is intentionally left unset.
  */
2735 f_update_params->vif_id = vifid;
2736 f_update_params->afex_default_vlan = vlan_val;
2737 f_update_params->allowed_priorities = allowed_prio;
2738
 /* If the ramrod cannot be posted, ACK the MCP immediately */
2740 if (bnx2x_func_state_change(bp, &func_params) < 0)
2741 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2742
2743 return 0;
2744}
2745
2746static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2747 u16 vif_index, u8 func_bit_map)
2748{
2749 struct bnx2x_func_state_params func_params = {NULL};
2750 struct bnx2x_func_afex_viflists_params *update_params =
2751 &func_params.params.afex_viflists;
2752 int rc;
2753 u32 drv_msg_code;
2754
2755
2756 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2757 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2758 cmd_type);
2759
2760 func_params.f_obj = &bp->func_obj;
2761 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2762
2763
2764 update_params->afex_vif_list_command = cmd_type;
2765 update_params->vif_list_index = vif_index;
2766 update_params->func_bit_map =
2767 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2768 update_params->func_to_clear = 0;
2769 drv_msg_code =
2770 (cmd_type == VIF_LIST_RULE_GET) ?
2771 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2772 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2773
 /* If the ramrod cannot be posted, ACK the MCP immediately using the
  * code matching the request type (LISTGET/LISTSET).
  */
2777 rc = bnx2x_func_state_change(bp, &func_params);
2778 if (rc < 0)
2779 bnx2x_fw_command(bp, drv_msg_code, 0);
2780
2781 return 0;
2782}
2783
2784static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2785{
2786 struct afex_stats afex_stats;
2787 u32 func = BP_ABS_FUNC(bp);
2788 u32 mf_config;
2789 u16 vlan_val;
2790 u32 vlan_prio;
2791 u16 vif_id;
2792 u8 allowed_prio;
2793 u8 vlan_mode;
2794 u32 addr_to_write, vifid, addrs, stats_type, i;
2795
2796 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2797 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2798 DP(BNX2X_MSG_MCP,
2799 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2800 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2801 }
2802
2803 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2804 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2805 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2806 DP(BNX2X_MSG_MCP,
2807 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2808 vifid, addrs);
2809 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2810 addrs);
2811 }
2812
2813 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2814 addr_to_write = SHMEM2_RD(bp,
2815 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2816 stats_type = SHMEM2_RD(bp,
2817 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2818
2819 DP(BNX2X_MSG_MCP,
2820 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2821 addr_to_write);
2822
2823 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2824
2825
2826 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2827 REG_WR(bp, addr_to_write + i*sizeof(u32),
2828 *(((u32 *)(&afex_stats))+i));
2829
2830
2831 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2832 }
2833
2834 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2835 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2836 bp->mf_config[BP_VN(bp)] = mf_config;
2837 DP(BNX2X_MSG_MCP,
2838 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2839 mf_config);
2840
2841
2842 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2843
2844 struct cmng_init_input cmng_input;
2845 struct rate_shaping_vars_per_vn m_rs_vn;
2846 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2847 u32 addr = BAR_XSTRORM_INTMEM +
2848 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2849
2850 bp->mf_config[BP_VN(bp)] = mf_config;
2851
2852 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2853 m_rs_vn.vn_counter.rate =
2854 cmng_input.vnic_max_rate[BP_VN(bp)];
2855 m_rs_vn.vn_counter.quota =
2856 (m_rs_vn.vn_counter.rate *
2857 RS_PERIODIC_TIMEOUT_USEC) / 8;
2858
2859 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2860
2861
2862 vif_id =
2863 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2864 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2865 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2866 vlan_val =
2867 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2868 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2869 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2870 vlan_prio = (mf_config &
2871 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2872 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2873 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2874 vlan_mode =
2875 (MF_CFG_RD(bp,
2876 func_mf_config[func].afex_config) &
2877 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2878 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2879 allowed_prio =
2880 (MF_CFG_RD(bp,
2881 func_mf_config[func].afex_config) &
2882 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2883 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2884
2885
2886 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2887 allowed_prio))
2888 return;
2889
2890 bp->afex_def_vlan_tag = vlan_val;
2891 bp->afex_vlan_mode = vlan_mode;
2892 } else {
2893
2894 bnx2x_link_report(bp);
2895
2896
2897 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2898
2899
2900 bp->afex_def_vlan_tag = -1;
2901 }
2902 }
2903}
2904
2905static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2906{
2907 struct bnx2x_func_switch_update_params *switch_update_params;
2908 struct bnx2x_func_state_params func_params;
2909
2910 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2911 switch_update_params = &func_params.params.switch_update;
2912 func_params.f_obj = &bp->func_obj;
2913 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2914
2915
2916 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2917 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2918
2919 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2920 int func = BP_ABS_FUNC(bp);
2921 u32 val;
2922
2923
2924 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2925 FUNC_MF_CFG_E1HOV_TAG_MASK;
2926 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2927 bp->mf_ov = val;
2928 } else {
2929 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2930 goto fail;
2931 }
2932
2933
2934 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2935 bp->mf_ov);
2936
2937
2938 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2939 &switch_update_params->changes);
2940 switch_update_params->vlan = bp->mf_ov;
2941
2942 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2943 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2944 bp->mf_ov);
2945 goto fail;
2946 } else {
2947 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2948 bp->mf_ov);
2949 }
2950 } else {
2951 goto fail;
2952 }
2953
2954 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2955 return;
2956fail:
2957 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2958}
2959
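/* Called when this function becomes the Port Management Function (PMF):
 * kick the periodic task, update DCBX and re-enable the per-VN attention
 * bits in the HC/IGU edge registers.
 */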
2960static void bnx2x_pmf_update(struct bnx2x *bp)
2961{
2962 int port = BP_PORT(bp);
2963 u32 val;
2964
2965 bp->port.pmf = 1;
2966 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);

 /* Make sure the update of bp->port.pmf is visible to other contexts
  * (e.g. the periodic task queued below) before they run.
  */
2972 smp_mb();
2973
2974
2975 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2976
2977 bnx2x_dcbx_pmf_update(bp);
2978
2979
2980 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2981 if (bp->common.int_block == INT_BLOCK_HC) {
2982 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2983 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2984 } else if (!CHIP_IS_E1x(bp)) {
2985 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2986 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2987 }
2988
2989 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2990}
2991
/**
 * bnx2x_fw_command - send a request to the MCP mailbox and wait for a reply
 *
 * @bp:      driver handle
 * @command: request code
 * @param:   request parameter
 *
 * Returns the FW response code on success, or 0 if the FW did not echo the
 * sequence number back in time.
 */
3001u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3002{
3003 int mb_idx = BP_FW_MB_IDX(bp);
3004 u32 seq;
3005 u32 rc = 0;
3006 u32 cnt = 1;
3007 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3008
3009 mutex_lock(&bp->fw_mb_mutex);
3010 seq = ++bp->fw_seq;
3011 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3012 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3013
3014 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3015 (command | seq), param);
3016
3017 do {
3018
3019 msleep(delay);
3020
3021 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

 /* give the FW up to 500 polling intervals (~5 seconds) to respond */
3024 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3025
3026 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3027 cnt*delay, rc, seq);
3028
3029
3030 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3031 rc &= FW_MSG_CODE_MASK;
3032 else {
3033
3034 BNX2X_ERR("FW failed to respond!\n");
3035 bnx2x_fw_dump(bp);
3036 rc = 0;
3037 }
3038 mutex_unlock(&bp->fw_mb_mutex);
3039
3040 return rc;
3041}
3042
3043static void storm_memset_func_cfg(struct bnx2x *bp,
3044 struct tstorm_eth_function_common_config *tcfg,
3045 u16 abs_fid)
3046{
3047 size_t size = sizeof(struct tstorm_eth_function_common_config);
3048
3049 u32 addr = BAR_TSTRORM_INTMEM +
3050 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3051
3052 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3053}
3054
3055void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3056{
3057 if (CHIP_IS_E1x(bp)) {
3058 struct tstorm_eth_function_common_config tcfg = {0};
3059
3060 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3061 }
3062
3063
3064 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3065 storm_memset_func_en(bp, p->func_id, 1);
3066
3067
3068 if (p->spq_active) {
3069 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3070 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3071 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3072 }
3073}
3074
/*
 * Queue setup helpers: build the flags and init parameters passed to the
 * queue state ramrods.  bnx2x_get_common_flags() returns the flags that are
 * common to all queue types.
 */
3084static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3085 struct bnx2x_fastpath *fp,
3086 bool zero_stats)
3087{
3088 unsigned long flags = 0;
3089
3090
3091 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3092
3093
3094
3095
3096
3097
3098 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3099 if (zero_stats)
3100 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3101
3102 if (bp->flags & TX_SWITCHING)
3103 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3104
3105 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3106 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3107
3108#ifdef BNX2X_STOP_ON_ERROR
3109 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3110#endif
3111
3112 return flags;
3113}
3114
3115static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3116 struct bnx2x_fastpath *fp,
3117 bool leading)
3118{
3119 unsigned long flags = 0;
3120
3121
3122 if (IS_MF_SD(bp))
3123 __set_bit(BNX2X_Q_FLG_OV, &flags);
3124
3125 if (IS_FCOE_FP(fp)) {
3126 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3127
3128 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3129 }
3130
3131 if (fp->mode != TPA_MODE_DISABLED) {
3132 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3133 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3134 if (fp->mode == TPA_MODE_GRO)
3135 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3136 }
3137
3138 if (leading) {
3139 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3140 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3141 }
3142
3143
3144 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3145
3146
3147 if (IS_MF_AFEX(bp))
3148 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3149
3150 return flags | bnx2x_get_common_flags(bp, fp, true);
3151}
3152
3153static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3154 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3155 u8 cos)
3156{
3157 gen_init->stat_id = bnx2x_stats_id(fp);
3158 gen_init->spcl_id = fp->cl_id;
3159
3160
3161 if (IS_FCOE_FP(fp))
3162 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3163 else
3164 gen_init->mtu = bp->dev->mtu;
3165
3166 gen_init->cos = cos;
3167
3168 gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3169}
3170
3171static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3172 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3173 struct bnx2x_rxq_setup_params *rxq_init)
3174{
3175 u8 max_sge = 0;
3176 u16 sge_sz = 0;
3177 u16 tpa_agg_size = 0;
3178
3179 if (fp->mode != TPA_MODE_DISABLED) {
3180 pause->sge_th_lo = SGE_TH_LO(bp);
3181 pause->sge_th_hi = SGE_TH_HI(bp);
3182
3183
3184 WARN_ON(bp->dropless_fc &&
3185 pause->sge_th_hi + FW_PREFETCH_CNT >
3186 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3187
3188 tpa_agg_size = TPA_AGG_SIZE;
3189 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3190 SGE_PAGE_SHIFT;
3191 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3192 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3193 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3194 }
3195
3196
3197 if (!CHIP_IS_E1(bp)) {
3198 pause->bd_th_lo = BD_TH_LO(bp);
3199 pause->bd_th_hi = BD_TH_HI(bp);
3200
3201 pause->rcq_th_lo = RCQ_TH_LO(bp);
3202 pause->rcq_th_hi = RCQ_TH_HI(bp);
3203
3204
3205
3206
3207 WARN_ON(bp->dropless_fc &&
3208 pause->bd_th_hi + FW_PREFETCH_CNT >
3209 bp->rx_ring_size);
3210 WARN_ON(bp->dropless_fc &&
3211 pause->rcq_th_hi + FW_PREFETCH_CNT >
3212 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3213
3214 pause->pri_map = 1;
3215 }
3216
3217
3218 rxq_init->dscr_map = fp->rx_desc_mapping;
3219 rxq_init->sge_map = fp->rx_sge_mapping;
3220 rxq_init->rcq_map = fp->rx_comp_mapping;
3221 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3222
 /* This should be the maximum number of data bytes that may be placed
  * on a BD (i.e. not including the FW alignment paddings).
  */
3226 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3227 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3228
3229 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3230 rxq_init->tpa_agg_sz = tpa_agg_size;
3231 rxq_init->sge_buf_sz = sge_sz;
3232 rxq_init->max_sges_pkt = max_sge;
3233 rxq_init->rss_engine_id = BP_FUNC(bp);
3234 rxq_init->mcast_engine_id = BP_FUNC(bp);
3235
 /* Maximum number of simultaneous TPA aggregations for this queue;
  * for the PF this is the maximum available number.
  */
3241 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3242
3243 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3244 rxq_init->fw_sb_id = fp->fw_sb_id;
3245
3246 if (IS_FCOE_FP(fp))
3247 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3248 else
3249 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3250
3251
3252
3253 if (IS_MF_AFEX(bp)) {
3254 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3255 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3256 }
3257}
3258
3259static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3260 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3261 u8 cos)
3262{
3263 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3264 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3265 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3266 txq_init->fw_sb_id = fp->fw_sb_id;
3267
 /* Set the TSS leading client id for TX classification to be the
  * leading RSS client id.
  */
3272 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3273
3274 if (IS_FCOE_FP(fp)) {
3275 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3276 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3277 }
3278}
3279
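/* PF-side FW initialization: reset the IGU statistics, program the function
 * parameters, set up default congestion management and the event queue data.
 */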
3280static void bnx2x_pf_init(struct bnx2x *bp)
3281{
3282 struct bnx2x_func_init_params func_init = {0};
3283 struct event_ring_data eq_data = { {0} };
3284
3285 if (!CHIP_IS_E1x(bp)) {
3286
3287
3288 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3289 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3290 (CHIP_MODE_IS_4_PORT(bp) ?
3291 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3292
3293 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3294 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3295 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3296 (CHIP_MODE_IS_4_PORT(bp) ?
3297 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3298 }
3299
3300 func_init.spq_active = true;
3301 func_init.pf_id = BP_FUNC(bp);
3302 func_init.func_id = BP_FUNC(bp);
3303 func_init.spq_map = bp->spq_mapping;
3304 func_init.spq_prod = bp->spq_prod_idx;
3305
3306 bnx2x_func_init(bp, &func_init);
3307
3308 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3309
 /* Congestion management values depend on the link rate.  There is no
  * active link yet, so a 10Gbps link is assumed; the values are
  * re-calculated from the actual rate once the link comes up.
  */
3316 bp->link_vars.line_speed = SPEED_10000;
3317 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3318
3319
3320 if (bp->port.pmf)
3321 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3322
3323
3324 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3325 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3326 eq_data.producer = bp->eq_prod;
3327 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3328 eq_data.sb_id = DEF_SB_ID;
3329 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3330}
3331
3332static void bnx2x_e1h_disable(struct bnx2x *bp)
3333{
3334 int port = BP_PORT(bp);
3335
3336 bnx2x_tx_disable(bp);
3337
3338 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3339}
3340
3341static void bnx2x_e1h_enable(struct bnx2x *bp)
3342{
3343 int port = BP_PORT(bp);
3344
3345 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3346 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3347
3348
3349 netif_tx_wake_all_queues(bp->dev);
3350
3351
3352
3353
3354
3355}
3356
3357#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3358
3359static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3360{
3361 struct eth_stats_info *ether_stat =
3362 &bp->slowpath->drv_info_to_mcp.ether_stat;
3363 struct bnx2x_vlan_mac_obj *mac_obj =
3364 &bp->sp_objs->mac_obj;
3365 int i;
3366
3367 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3368 ETH_STAT_INFO_VERSION_LEN);
3369
 /* Collect up to DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED MACs into mac_local.
  * The destination and the stride are offset by MAC_PAD bytes because each
  * mac_local entry is 8 bytes wide while a MAC address is only ETH_ALEN
  * bytes long.
  */
3378 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3379 memset(ether_stat->mac_local + i, 0,
3380 sizeof(ether_stat->mac_local[0]));
3381 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3382 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3383 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3384 ETH_ALEN);
3385 ether_stat->mtu_size = bp->dev->mtu;
3386 if (bp->dev->features & NETIF_F_RXCSUM)
3387 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3388 if (bp->dev->features & NETIF_F_TSO)
3389 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3390 ether_stat->feature_flags |= bp->common.boot_mode;
3391
3392 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3393
3394 ether_stat->txq_size = bp->tx_ring_size;
3395 ether_stat->rxq_size = bp->rx_ring_size;
3396
3397#ifdef CONFIG_BNX2X_SRIOV
3398 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3399#endif
3400}
3401
3402static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3403{
3404 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3405 struct fcoe_stats_info *fcoe_stat =
3406 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3407
3408 if (!CNIC_LOADED(bp))
3409 return;
3410
3411 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3412
3413 fcoe_stat->qos_priority =
3414 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3415
3416
3417 if (!NO_FCOE(bp)) {
3418 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3419 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3420 tstorm_queue_statistics;
3421
3422 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3423 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3424 xstorm_queue_statistics;
3425
3426 struct fcoe_statistics_params *fw_fcoe_stat =
3427 &bp->fw_stats_data->fcoe;
3428
3429 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3430 fcoe_stat->rx_bytes_lo,
3431 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3432
3433 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3434 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3435 fcoe_stat->rx_bytes_lo,
3436 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3437
3438 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3439 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3440 fcoe_stat->rx_bytes_lo,
3441 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3442
3443 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3444 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3445 fcoe_stat->rx_bytes_lo,
3446 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3447
3448 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3449 fcoe_stat->rx_frames_lo,
3450 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3451
3452 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3453 fcoe_stat->rx_frames_lo,
3454 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3455
3456 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3457 fcoe_stat->rx_frames_lo,
3458 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3459
3460 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3461 fcoe_stat->rx_frames_lo,
3462 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3463
3464 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3465 fcoe_stat->tx_bytes_lo,
3466 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3467
3468 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3469 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3470 fcoe_stat->tx_bytes_lo,
3471 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3472
3473 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3474 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3475 fcoe_stat->tx_bytes_lo,
3476 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3477
3478 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3479 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3480 fcoe_stat->tx_bytes_lo,
3481 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3482
3483 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3484 fcoe_stat->tx_frames_lo,
3485 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3486
3487 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3488 fcoe_stat->tx_frames_lo,
3489 fcoe_q_xstorm_stats->ucast_pkts_sent);
3490
3491 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3492 fcoe_stat->tx_frames_lo,
3493 fcoe_q_xstorm_stats->bcast_pkts_sent);
3494
3495 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3496 fcoe_stat->tx_frames_lo,
3497 fcoe_q_xstorm_stats->mcast_pkts_sent);
3498 }
3499
3500
3501 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3502}
3503
3504static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3505{
3506 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3507 struct iscsi_stats_info *iscsi_stat =
3508 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3509
3510 if (!CNIC_LOADED(bp))
3511 return;
3512
3513 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3514 ETH_ALEN);
3515
3516 iscsi_stat->qos_priority =
3517 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3518
3519
3520 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3521}
3522
/* Called on an MCP event (on the PMF): re-read the MF bandwidth
 * configuration, re-initialize congestion management and notify the other
 * functions of the change.
 */
3528static void bnx2x_config_mf_bw(struct bnx2x *bp)
3529{
3530
3531
3532
3533
3534 if (!IS_MF(bp)) {
3535 DP(BNX2X_MSG_MCP,
3536 "Ignoring MF BW config in single function mode\n");
3537 return;
3538 }
3539
3540 if (bp->link_vars.link_up) {
3541 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3542 bnx2x_link_sync_notify(bp);
3543 }
3544 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3545}
3546
3547static void bnx2x_set_mf_bw(struct bnx2x *bp)
3548{
3549 bnx2x_config_mf_bw(bp);
3550 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3551}
3552
3553static void bnx2x_handle_eee_event(struct bnx2x *bp)
3554{
3555 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3556 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3557}
3558
3559#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3560#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3561
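/* Handle a DRV_INFO request from the management FW: fill drv_info_to_mcp
 * with the requested statistics, publish the buffer address through SHMEM2
 * and ACK (or NACK an unsupported version/opcode), then wait for the MFW to
 * release the buffer.
 */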
3562static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3563{
3564 enum drv_info_opcode op_code;
3565 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3566 bool release = false;
3567 int wait;
3568
3569
3570 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3571 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3572 return;
3573 }
3574
3575 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3576 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3577
3578
3579 mutex_lock(&bp->drv_info_mutex);
3580
3581 memset(&bp->slowpath->drv_info_to_mcp, 0,
3582 sizeof(union drv_info_to_mcp));
3583
3584 switch (op_code) {
3585 case ETH_STATS_OPCODE:
3586 bnx2x_drv_info_ether_stat(bp);
3587 break;
3588 case FCOE_STATS_OPCODE:
3589 bnx2x_drv_info_fcoe_stat(bp);
3590 break;
3591 case ISCSI_STATS_OPCODE:
3592 bnx2x_drv_info_iscsi_stat(bp);
3593 break;
3594 default:
3595
3596 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3597 goto out;
3598 }
3599
 /* If the MFW raised a drv_info attention, these SHMEM2 fields are
  * guaranteed to exist.
  */
3603 SHMEM2_WR(bp, drv_info_host_addr_lo,
3604 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3605 SHMEM2_WR(bp, drv_info_host_addr_hi,
3606 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3607
3608 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3609
 /* The MFW signals through mfw_drv_indication when it has finished
  * reading the buffer; wait for that (bounded) before allowing other
  * flows to reuse drv_info_to_mcp.
  */
3614 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3615 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3616 } else if (!bp->drv_info_mng_owner) {
3617 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3618
3619 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3620 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3621
3622
3623 if (indication & bit) {
3624 SHMEM2_WR(bp, mfw_drv_indication,
3625 indication & ~bit);
3626 release = true;
3627 break;
3628 }
3629
3630 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3631 }
3632 }
3633 if (!release) {
3634 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3635 bp->drv_info_mng_owner = true;
3636 }
3637
3638out:
3639 mutex_unlock(&bp->drv_info_mutex);
3640}
3641
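/* Pack a dotted version string into a u32 (one byte per sub-version); the
 * bnx2x_format flag selects the parsing rules used for the driver's own
 * DRV_MODULE_VERSION format.
 */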
3642static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3643{
3644 u8 vals[4];
3645 int i = 0;
3646
3647 if (bnx2x_format) {
3648 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3649 &vals[0], &vals[1], &vals[2], &vals[3]);
3650 if (i > 0)
3651 vals[0] -= '0';
3652 } else {
3653 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3654 &vals[0], &vals[1], &vals[2], &vals[3]);
3655 }
3656
3657 while (i < 4)
3658 vals[i++] = 0;
3659
3660 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3661}
3662
3663void bnx2x_update_mng_version(struct bnx2x *bp)
3664{
3665 u32 iscsiver = DRV_VER_NOT_LOADED;
3666 u32 fcoever = DRV_VER_NOT_LOADED;
3667 u32 ethver = DRV_VER_NOT_LOADED;
3668 int idx = BP_FW_MB_IDX(bp);
3669 u8 *version;
3670
3671 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3672 return;
3673
3674 mutex_lock(&bp->drv_info_mutex);
3675
3676 if (bp->drv_info_mng_owner)
3677 goto out;
3678
3679 if (bp->state != BNX2X_STATE_OPEN)
3680 goto out;
3681
3682
3683 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3684 if (!CNIC_LOADED(bp))
3685 goto out;
3686
3687
3688 memset(&bp->slowpath->drv_info_to_mcp, 0,
3689 sizeof(union drv_info_to_mcp));
3690 bnx2x_drv_info_iscsi_stat(bp);
3691 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3692 iscsiver = bnx2x_update_mng_version_utility(version, false);
3693
3694 memset(&bp->slowpath->drv_info_to_mcp, 0,
3695 sizeof(union drv_info_to_mcp));
3696 bnx2x_drv_info_fcoe_stat(bp);
3697 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3698 fcoever = bnx2x_update_mng_version_utility(version, false);
3699
3700out:
3701 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3702 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3703 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3704
3705 mutex_unlock(&bp->drv_info_mutex);
3706
3707 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3708 ethver, iscsiver, fcoever);
3709}
3710
3711void bnx2x_update_mfw_dump(struct bnx2x *bp)
3712{
3713 u32 drv_ver;
3714 u32 valid_dump;
3715
3716 if (!SHMEM2_HAS(bp, drv_info))
3717 return;
3718
3719
3720 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3721
3722 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3723 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3724
3725 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3726
3727
3728 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3729
3730 if (valid_dump & FIRST_DUMP_VALID)
3731 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3732
3733 if (valid_dump & SECOND_DUMP_VALID)
3734 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3735}
3736
3737static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3738{
3739 u32 cmd_ok, cmd_fail;
3740
3741
3742 if (event & DRV_STATUS_DCC_EVENT_MASK &&
3743 event & DRV_STATUS_OEM_EVENT_MASK) {
3744 BNX2X_ERR("Received simultaneous events %08x\n", event);
3745 return;
3746 }
3747
3748 if (event & DRV_STATUS_DCC_EVENT_MASK) {
3749 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3750 cmd_ok = DRV_MSG_CODE_DCC_OK;
3751 } else {
3752 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3753 cmd_ok = DRV_MSG_CODE_OEM_OK;
3754 }
3755
3756 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3757
3758 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3759 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
 /* The MCP enabled or disabled this PF: update MF_FUNC_DIS and stop or
  * re-start the Tx path accordingly.
  */
3764 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3765 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3766 bp->flags |= MF_FUNC_DIS;
3767
3768 bnx2x_e1h_disable(bp);
3769 } else {
3770 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3771 bp->flags &= ~MF_FUNC_DIS;
3772
3773 bnx2x_e1h_enable(bp);
3774 }
3775 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3776 DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3777 }
3778
3779 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3780 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3781 bnx2x_config_mf_bw(bp);
3782 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3783 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3784 }
3785
3786
3787 if (event)
3788 bnx2x_fw_command(bp, cmd_fail, 0);
3789 else
3790 bnx2x_fw_command(bp, cmd_ok, 0);
3791}
3792
/* must be called while holding bp->spq_lock */
3794static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3795{
3796 struct eth_spe *next_spe = bp->spq_prod_bd;
3797
3798 if (bp->spq_prod_bd == bp->spq_last_bd) {
3799 bp->spq_prod_bd = bp->spq;
3800 bp->spq_prod_idx = 0;
3801 DP(BNX2X_MSG_SP, "end of spq\n");
3802 } else {
3803 bp->spq_prod_bd++;
3804 bp->spq_prod_idx++;
3805 }
3806 return next_spe;
3807}
3808
/* must be called while holding bp->spq_lock */
3810static void bnx2x_sp_prod_update(struct bnx2x *bp)
3811{
3812 int func = BP_FUNC(bp);

 /* Make sure that the BD data is written to memory before the producer:
  * the chip reads the BDs once it sees the new producer value, so a full
  * memory barrier is required here.
  */
3819 mb();
3820
3821 REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3822 bp->spq_prod_idx);
3823}
3824
/**
 * bnx2x_is_contextless_ramrod - check how a command completes
 *
 * Returns true for commands that are not bound to a connection context and
 * therefore complete on the event queue (EQ) rather than on a connection
 * completion queue.
 */
3831static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3832{
3833 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3834 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3835 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3836 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3837 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3838 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3839 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3840 return true;
3841 else
3842 return false;
3843}
3844
/**
 * bnx2x_sp_post - place a single command on the slow-path (SP) ring
 *
 * @bp:       driver handle
 * @command:  command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:      SW CID the command is related to
 * @data_hi:  command private data address (high 32 bits)
 * @data_lo:  command private data address (low 32 bits)
 * @cmd_type: command type (e.g. NONE, ETH)
 *
 * Returns 0 on success, a negative value otherwise.
 */
3859int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3860 u32 data_hi, u32 data_lo, int cmd_type)
3861{
3862 struct eth_spe *spe;
3863 u16 type;
3864 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3865
3866#ifdef BNX2X_STOP_ON_ERROR
3867 if (unlikely(bp->panic)) {
3868 BNX2X_ERR("Can't post SP when there is panic\n");
3869 return -EIO;
3870 }
3871#endif
3872
3873 spin_lock_bh(&bp->spq_lock);
3874
3875 if (common) {
3876 if (!atomic_read(&bp->eq_spq_left)) {
3877 BNX2X_ERR("BUG! EQ ring full!\n");
3878 spin_unlock_bh(&bp->spq_lock);
3879 bnx2x_panic();
3880 return -EBUSY;
3881 }
3882 } else if (!atomic_read(&bp->cq_spq_left)) {
3883 BNX2X_ERR("BUG! SPQ ring full!\n");
3884 spin_unlock_bh(&bp->spq_lock);
3885 bnx2x_panic();
3886 return -EBUSY;
3887 }
3888
3889 spe = bnx2x_sp_get_next(bp);
3890
 /* HW_CID() folds the port/function information into the CID */
3892 spe->hdr.conn_and_cmd_data =
3893 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3894 HW_CID(bp, cid));
3895
 /* The type may already carry the function id (mainly in SRIOV flows);
  * only fill it in here if it is not already set.
  */
3900 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3901 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3902 SPE_HDR_CONN_TYPE;
3903 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3904 SPE_HDR_FUNCTION_ID);
3905 } else {
3906 type = cmd_type;
3907 }
3908
3909 spe->hdr.type = cpu_to_le16(type);
3910
3911 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3912 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3913
 /* Account for the new entry: one less credit on the EQ for contextless
  * commands, on the SPQ otherwise.
  */
3919 if (common)
3920 atomic_dec(&bp->eq_spq_left);
3921 else
3922 atomic_dec(&bp->cq_spq_left);
3923
3924 DP(BNX2X_MSG_SP,
3925 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3926 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3927 (u32)(U64_LO(bp->spq_mapping) +
3928 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3929 HW_CID(bp, cid), data_hi, data_lo, type,
3930 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3931
3932 bnx2x_sp_prod_update(bp);
3933 spin_unlock_bh(&bp->spq_lock);
3934 return 0;
3935}
3936
/* acquire split MCP access lock register */
3938static int bnx2x_acquire_alr(struct bnx2x *bp)
3939{
3940 u32 j, val;
3941 int rc = 0;
3942
3943 might_sleep();
3944 for (j = 0; j < 1000; j++) {
3945 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3946 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3947 if (val & MCPR_ACCESS_LOCK_LOCK)
3948 break;
3949
3950 usleep_range(5000, 10000);
3951 }
3952 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3953 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3954 rc = -EBUSY;
3955 }
3956
3957 return rc;
3958}
3959
/* release split MCP access lock register */
3961static void bnx2x_release_alr(struct bnx2x *bp)
3962{
3963 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3964}
3965
3966#define BNX2X_DEF_SB_ATT_IDX 0x0001
3967#define BNX2X_DEF_SB_IDX 0x0002
3968
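/* Read the default status block indices and return a bitmask
 * (BNX2X_DEF_SB_ATT_IDX / BNX2X_DEF_SB_IDX) of those that changed.
 */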
3969static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3970{
3971 struct host_sp_status_block *def_sb = bp->def_status_blk;
3972 u16 rc = 0;
3973
3974 barrier();
3975 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3976 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3977 rc |= BNX2X_DEF_SB_ATT_IDX;
3978 }
3979
3980 if (bp->def_idx != def_sb->sp_sb.running_index) {
3981 bp->def_idx = def_sb->sp_sb.running_index;
3982 rc |= BNX2X_DEF_SB_IDX;
3983 }
3984
3985
3986 barrier();
3987 return rc;
3988}
3989
3990
3991
3992
3993
3994static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3995{
3996 int port = BP_PORT(bp);
3997 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3998 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3999 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4000 NIG_REG_MASK_INTERRUPT_PORT0;
4001 u32 aeu_mask;
4002 u32 nig_mask = 0;
4003 u32 reg_addr;
4004
4005 if (bp->attn_state & asserted)
4006 BNX2X_ERR("IGU ERROR\n");
4007
4008 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4009 aeu_mask = REG_RD(bp, aeu_addr);
4010
4011 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4012 aeu_mask, asserted);
4013 aeu_mask &= ~(asserted & 0x3ff);
4014 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4015
4016 REG_WR(bp, aeu_addr, aeu_mask);
4017 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4018
4019 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4020 bp->attn_state |= asserted;
4021 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4022
4023 if (asserted & ATTN_HARD_WIRED_MASK) {
4024 if (asserted & ATTN_NIG_FOR_FUNC) {
4025
4026 bnx2x_acquire_phy_lock(bp);
4027
 /* save nig interrupt mask */
4029 nig_mask = REG_RD(bp, nig_int_mask_addr);
4030
 /* If the NIG interrupt was already masked there is nothing to update;
  * otherwise mask it and handle the link attention.
  */
4034 if (nig_mask) {
4035 REG_WR(bp, nig_int_mask_addr, 0);
4036
4037 bnx2x_link_attn(bp);
4038 }
4039
4040
4041 }
4042 if (asserted & ATTN_SW_TIMER_4_FUNC)
4043 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4044
4045 if (asserted & GPIO_2_FUNC)
4046 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4047
4048 if (asserted & GPIO_3_FUNC)
4049 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4050
4051 if (asserted & GPIO_4_FUNC)
4052 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4053
4054 if (port == 0) {
4055 if (asserted & ATTN_GENERAL_ATTN_1) {
4056 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4057 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4058 }
4059 if (asserted & ATTN_GENERAL_ATTN_2) {
4060 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4061 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4062 }
4063 if (asserted & ATTN_GENERAL_ATTN_3) {
4064 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4066 }
4067 } else {
4068 if (asserted & ATTN_GENERAL_ATTN_4) {
4069 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4070 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4071 }
4072 if (asserted & ATTN_GENERAL_ATTN_5) {
4073 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4074 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4075 }
4076 if (asserted & ATTN_GENERAL_ATTN_6) {
4077 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4078 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4079 }
4080 }
4081
4082 }
4083
4084 if (bp->common.int_block == INT_BLOCK_HC)
4085 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4086 COMMAND_REG_ATTN_BITS_SET);
4087 else
4088 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4089
4090 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4091 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4092 REG_WR(bp, reg_addr, asserted);
4093
4094
4095 if (asserted & ATTN_NIG_FOR_FUNC) {
 /* Verify that the IGU ack written through the BAR above has taken
  * effect before restoring the NIG mask.
  */
4099 if (bp->common.int_block != INT_BLOCK_HC) {
4100 u32 cnt = 0, igu_acked;
4101 do {
4102 igu_acked = REG_RD(bp,
4103 IGU_REG_ATTENTION_ACK_BITS);
4104 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4105 (++cnt < MAX_IGU_ATTN_ACK_TO));
4106 if (!igu_acked)
4107 DP(NETIF_MSG_HW,
4108 "Failed to verify IGU ack on time\n");
4109 barrier();
4110 }
4111 REG_WR(bp, nig_int_mask_addr, nig_mask);
4112 bnx2x_release_phy_lock(bp);
4113 }
4114}
4115
4116static void bnx2x_fan_failure(struct bnx2x *bp)
4117{
4118 int port = BP_PORT(bp);
4119 u32 ext_phy_config;
4120
4121 ext_phy_config =
4122 SHMEM_RD(bp,
4123 dev_info.port_hw_config[port].external_phy_config);
4124
4125 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4126 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4127 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4128 ext_phy_config);
4129
4130
4131 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4132 "Please contact OEM Support for assistance\n");
4133
 /* Schedule a device reset (unload) to prevent permanent damage */
4138 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4139}
4140
4141static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4142{
4143 int port = BP_PORT(bp);
4144 int reg_offset;
4145 u32 val;
4146
4147 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4148 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4149
4150 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4151
4152 val = REG_RD(bp, reg_offset);
4153 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4154 REG_WR(bp, reg_offset, val);
4155
4156 BNX2X_ERR("SPIO5 hw attention\n");
4157
4158
4159 bnx2x_hw_reset_phy(&bp->link_params);
4160 bnx2x_fan_failure(bp);
4161 }
4162
4163 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4164 bnx2x_acquire_phy_lock(bp);
4165 bnx2x_handle_module_detect_int(&bp->link_params);
4166 bnx2x_release_phy_lock(bp);
4167 }
4168
4169 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4170
4171 val = REG_RD(bp, reg_offset);
4172 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4173 REG_WR(bp, reg_offset, val);
4174
4175 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4176 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4177 bnx2x_panic();
4178 }
4179}
4180
4181static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4182{
4183 u32 val;
4184
4185 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4186
4187 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4188 BNX2X_ERR("DB hw attention 0x%x\n", val);
4189
4190 if (val & 0x2)
4191 BNX2X_ERR("FATAL error from DORQ\n");
4192 }
4193
4194 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4195
4196 int port = BP_PORT(bp);
4197 int reg_offset;
4198
4199 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4201
4202 val = REG_RD(bp, reg_offset);
4203 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4204 REG_WR(bp, reg_offset, val);
4205
4206 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4207 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4208 bnx2x_panic();
4209 }
4210}
4211
4212static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4213{
4214 u32 val;
4215
4216 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4217
4218 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4219 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4220
4221 if (val & 0x2)
4222 BNX2X_ERR("FATAL error from CFC\n");
4223 }
4224
4225 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4226 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4227 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4228
4229 if (val & 0x18000)
4230 BNX2X_ERR("FATAL error from PXP\n");
4231
4232 if (!CHIP_IS_E1x(bp)) {
4233 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4234 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4235 }
4236 }
4237
4238 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4239
4240 int port = BP_PORT(bp);
4241 int reg_offset;
4242
4243 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4245
4246 val = REG_RD(bp, reg_offset);
4247 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4248 REG_WR(bp, reg_offset, val);
4249
4250 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4251 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4252 bnx2x_panic();
4253 }
4254}
4255
4256static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4257{
4258 u32 val;
4259
4260 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4261
4262 if (attn & BNX2X_PMF_LINK_ASSERT) {
4263 int func = BP_FUNC(bp);
4264
4265 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4266 bnx2x_read_mf_cfg(bp);
4267 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4268 func_mf_config[BP_ABS_FUNC(bp)].config);
4269 val = SHMEM_RD(bp,
4270 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4271
4272 if (val & (DRV_STATUS_DCC_EVENT_MASK |
4273 DRV_STATUS_OEM_EVENT_MASK))
4274 bnx2x_oem_event(bp,
4275 (val & (DRV_STATUS_DCC_EVENT_MASK |
4276 DRV_STATUS_OEM_EVENT_MASK)));
4277
4278 if (val & DRV_STATUS_SET_MF_BW)
4279 bnx2x_set_mf_bw(bp);
4280
4281 if (val & DRV_STATUS_DRV_INFO_REQ)
4282 bnx2x_handle_drv_info_req(bp);
4283
4284 if (val & DRV_STATUS_VF_DISABLED)
4285 bnx2x_schedule_iov_task(bp,
4286 BNX2X_IOV_HANDLE_FLR);
4287
4288 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4289 bnx2x_pmf_update(bp);
4290
4291 if (bp->port.pmf &&
4292 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4293 bp->dcbx_enabled > 0)
4294
4295 bnx2x_dcbx_set_params(bp,
4296 BNX2X_DCBX_STATE_NEG_RECEIVED);
4297 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4298 bnx2x_handle_afex_cmd(bp,
4299 val & DRV_STATUS_AFEX_EVENT_MASK);
4300 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4301 bnx2x_handle_eee_event(bp);
4302
4303 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4304 bnx2x_schedule_sp_rtnl(bp,
4305 BNX2X_SP_RTNL_UPDATE_SVID, 0);
4306
4307 if (bp->link_vars.periodic_flags &
4308 PERIODIC_FLAGS_LINK_EVENT) {
4309
4310 bnx2x_acquire_phy_lock(bp);
4311 bp->link_vars.periodic_flags &=
4312 ~PERIODIC_FLAGS_LINK_EVENT;
4313 bnx2x_release_phy_lock(bp);
4314 if (IS_MF(bp))
4315 bnx2x_link_sync_notify(bp);
4316 bnx2x_link_report(bp);
4317 }
4318
4319
4320
4321 bnx2x__link_status_update(bp);
4322 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4323
4324 BNX2X_ERR("MC assert!\n");
4325 bnx2x_mc_assert(bp);
4326 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4327 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4328 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4329 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4330 bnx2x_panic();
4331
4332 } else if (attn & BNX2X_MCP_ASSERT) {
4333
4334 BNX2X_ERR("MCP assert!\n");
4335 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4336 bnx2x_fw_dump(bp);
4337
4338 } else
4339 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4340 }
4341
4342 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4343 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4344 if (attn & BNX2X_GRC_TIMEOUT) {
4345 val = CHIP_IS_E1(bp) ? 0 :
4346 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4347 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4348 }
4349 if (attn & BNX2X_GRC_RSV) {
4350 val = CHIP_IS_E1(bp) ? 0 :
4351 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4352 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4353 }
4354 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4355 }
4356}
4357
4358/* BNX2X_RECOVERY_GLOB_REG holds the driver recovery state that is
4359 * shared between the two paths (engines) of the chip:
4360 *
4361 *  bits 0-7   - path 0 load mask: one bit per PF that is loaded
4362 *  bits 8-15  - path 1 load mask
4363 *  bit  16    - path 0 RESET_IN_PROGRESS
4364 *  bit  17    - path 1 RESET_IN_PROGRESS
4365 *  bit  18    - GLOBAL_RESET: a chip-wide recovery is in progress
4366 *
4367 * All writers below take HW_LOCK_RESOURCE_RECOVERY_REG so that the
4368 * two paths never race while updating the register.
4369 */
4370
4371
4372#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4373
4374#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4375#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4376#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4377#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4378#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4379#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4380#define BNX2X_GLOBAL_RESET_BIT 0x00040000
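/* A quick illustration of how a recovery register value decodes with the
 * masks above (hypothetical value, for orientation only): 0x00050003 means
 * PF0 and PF1 are loaded on path 0 (bits 0-1), path 0 has a reset in
 * progress (bit 16) and a global reset has been requested (bit 18).
 */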
4381
4382/*
4383 * bnx2x_set_reset_global - mark that a global (chip-wide) recovery is
4384 * in progress by setting the GLOBAL_RESET bit.
4385 */
4386
4387void bnx2x_set_reset_global(struct bnx2x *bp)
4388{
4389 u32 val;
4390 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4391 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4392 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4393 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4394}
4395
4396/*
4397 * bnx2x_clear_reset_global - clear the GLOBAL_RESET bit, i.e. mark
4398 * that no global recovery is in progress any more.
4399 */
4400
4401static void bnx2x_clear_reset_global(struct bnx2x *bp)
4402{
4403 u32 val;
4404 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4405 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4406 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4407 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4408}
4409
4410/*
4411 * bnx2x_reset_is_global - return true if a global recovery is in
4412 * progress (the GLOBAL_RESET bit is set).
4413 */
4414
4415static bool bnx2x_reset_is_global(struct bnx2x *bp)
4416{
4417 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4418
4419 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4420 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4421}
4422
4423/*
4424 * bnx2x_set_reset_done - clear this path's RESET_IN_PROGRESS bit,
4425 * i.e. mark that recovery of the current engine has completed.
4426 */
4427
4428static void bnx2x_set_reset_done(struct bnx2x *bp)
4429{
4430 u32 val;
4431 u32 bit = BP_PATH(bp) ?
4432 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4433 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4434 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4435
4436 /* Clear the bit */
4437 val &= ~bit;
4438 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4439
4440 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4441}
4442
4443/*
4444 * bnx2x_set_reset_in_progress - set this path's RESET_IN_PROGRESS
4445 * bit, i.e. mark that the current engine is being recovered.
4446 */
4447
4448void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4449{
4450 u32 val;
4451 u32 bit = BP_PATH(bp) ?
4452 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4453 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4454 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4455
4456 /* Set the bit */
4457 val |= bit;
4458 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4459 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4460}
4461
4462/*
4463 * bnx2x_reset_is_done - return true if the given engine is not
4464 * currently in recovery (its RESET_IN_PROGRESS bit is clear).
4465 */
4466bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4467{
4468 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4469 u32 bit = engine ?
4470 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4471
4472 /* the engine is done only when its RESET_IN_PROGRESS bit is clear */
4473 return (val & bit) ? false : true;
4474}
4475
4476/*
4477 * bnx2x_set_pf_load - mark this PF as loaded by setting its bit in
4478 * the load mask of the current engine (path).
4479 */
4480
4481void bnx2x_set_pf_load(struct bnx2x *bp)
4482{
4483 u32 val1, val;
4484 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4485 BNX2X_PATH0_LOAD_CNT_MASK;
4486 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4487 BNX2X_PATH0_LOAD_CNT_SHIFT;
4488
4489 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4490 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4491
4492 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4493
4494 /* get the current load mask of this path */
4495 val1 = (val & mask) >> shift;
4496
4497 /* set the bit of this PF */
4498 val1 |= (1 << bp->pf_num);
4499
4500 /* clear the old value */
4501 val &= ~mask;
4502
4503 /* and write back the new one */
4504 val |= ((val1 << shift) & mask);
4505
4506 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4507 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4508}
4509
4510/**
4511 * bnx2x_clear_pf_load - clear this PF's bit in the load mask
4512 *
4513 * @bp:	driver handle
4514 *
4515 * Clears the bit of the current PF in the load mask of its engine and
4516 * returns true if other PFs are still marked as loaded on that engine.
4517 */
4518
4519bool bnx2x_clear_pf_load(struct bnx2x *bp)
4520{
4521 u32 val1, val;
4522 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4523 BNX2X_PATH0_LOAD_CNT_MASK;
4524 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4525 BNX2X_PATH0_LOAD_CNT_SHIFT;
4526
4527 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4528 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4529 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4530
4531 /* get the current load mask of this path */
4532 val1 = (val & mask) >> shift;
4533
4534 /* clear the bit of this PF */
4535 val1 &= ~(1 << bp->pf_num);
4536
4537 /* clear the old value */
4538 val &= ~mask;
4539
4540 /* and write back the new one */
4541 val |= ((val1 << shift) & mask);
4542
4543 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4544 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4545 return val1 != 0;
4546}
4547
4548/*
4549 * bnx2x_get_load_status - return true if any PF is marked as loaded
4550 * on the given engine.
4551 */
4552
4553static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4554{
4555 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4556 BNX2X_PATH0_LOAD_CNT_MASK);
4557 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4558 BNX2X_PATH0_LOAD_CNT_SHIFT);
4559 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4560
4561 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4562
4563 val = (val & mask) >> shift;
4564
4565 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4566 engine, val);
4567
4568 return val != 0;
4569}
4570
4571static void _print_parity(struct bnx2x *bp, u32 reg)
4572{
4573 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4574}
4575
4576static void _print_next_block(int idx, const char *blk)
4577{
4578 pr_cont("%s%s", idx ? ", " : "", blk);
4579}
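/* The two helpers above build the single-line parity report that
 * bnx2x_parity_attn() starts with netdev_err(): _print_next_block()
 * appends a comma-separated block name and _print_parity() appends the
 * block's parity status register value, so the log line ends up looking
 * roughly like "Parity errors detected in blocks: BRB [0x...], PARSER [0x...]".
 */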
4580
4581static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4582 int *par_num, bool print)
4583{
4584 u32 cur_bit;
4585 bool res;
4586 int i;
4587
4588 res = false;
4589
4590 for (i = 0; sig; i++) {
4591 cur_bit = (0x1UL << i);
4592 if (sig & cur_bit) {
4593 res |= true;
4594
4595 if (print) {
4596 switch (cur_bit) {
4597 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4598 _print_next_block((*par_num)++, "BRB");
4599 _print_parity(bp,
4600 BRB1_REG_BRB1_PRTY_STS);
4601 break;
4602 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4603 _print_next_block((*par_num)++,
4604 "PARSER");
4605 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4606 break;
4607 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4608 _print_next_block((*par_num)++, "TSDM");
4609 _print_parity(bp,
4610 TSDM_REG_TSDM_PRTY_STS);
4611 break;
4612 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4613 _print_next_block((*par_num)++,
4614 "SEARCHER");
4615 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4616 break;
4617 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4618 _print_next_block((*par_num)++, "TCM");
4619 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4620 break;
4621 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4622 _print_next_block((*par_num)++,
4623 "TSEMI");
4624 _print_parity(bp,
4625 TSEM_REG_TSEM_PRTY_STS_0);
4626 _print_parity(bp,
4627 TSEM_REG_TSEM_PRTY_STS_1);
4628 break;
4629 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4630 _print_next_block((*par_num)++, "XPB");
4631 _print_parity(bp, GRCBASE_XPB +
4632 PB_REG_PB_PRTY_STS);
4633 break;
4634 }
4635 }
4636
4637
4638 sig &= ~cur_bit;
4639 }
4640 }
4641
4642 return res;
4643}
4644
4645static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4646 int *par_num, bool *global,
4647 bool print)
4648{
4649 u32 cur_bit;
4650 bool res;
4651 int i;
4652
4653 res = false;
4654
4655 for (i = 0; sig; i++) {
4656 cur_bit = (0x1UL << i);
4657 if (sig & cur_bit) {
4658 res |= true;
4659 switch (cur_bit) {
4660 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4661 if (print) {
4662 _print_next_block((*par_num)++, "PBF");
4663 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4664 }
4665 break;
4666 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4667 if (print) {
4668 _print_next_block((*par_num)++, "QM");
4669 _print_parity(bp, QM_REG_QM_PRTY_STS);
4670 }
4671 break;
4672 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4673 if (print) {
4674 _print_next_block((*par_num)++, "TM");
4675 _print_parity(bp, TM_REG_TM_PRTY_STS);
4676 }
4677 break;
4678 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4679 if (print) {
4680 _print_next_block((*par_num)++, "XSDM");
4681 _print_parity(bp,
4682 XSDM_REG_XSDM_PRTY_STS);
4683 }
4684 break;
4685 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4686 if (print) {
4687 _print_next_block((*par_num)++, "XCM");
4688 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4689 }
4690 break;
4691 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4692 if (print) {
4693 _print_next_block((*par_num)++,
4694 "XSEMI");
4695 _print_parity(bp,
4696 XSEM_REG_XSEM_PRTY_STS_0);
4697 _print_parity(bp,
4698 XSEM_REG_XSEM_PRTY_STS_1);
4699 }
4700 break;
4701 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4702 if (print) {
4703 _print_next_block((*par_num)++,
4704 "DOORBELLQ");
4705 _print_parity(bp,
4706 DORQ_REG_DORQ_PRTY_STS);
4707 }
4708 break;
4709 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4710 if (print) {
4711 _print_next_block((*par_num)++, "NIG");
4712 if (CHIP_IS_E1x(bp)) {
4713 _print_parity(bp,
4714 NIG_REG_NIG_PRTY_STS);
4715 } else {
4716 _print_parity(bp,
4717 NIG_REG_NIG_PRTY_STS_0);
4718 _print_parity(bp,
4719 NIG_REG_NIG_PRTY_STS_1);
4720 }
4721 }
4722 break;
4723 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4724 if (print)
4725 _print_next_block((*par_num)++,
4726 "VAUX PCI CORE");
4727 *global = true;
4728 break;
4729 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4730 if (print) {
4731 _print_next_block((*par_num)++,
4732 "DEBUG");
4733 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4734 }
4735 break;
4736 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4737 if (print) {
4738 _print_next_block((*par_num)++, "USDM");
4739 _print_parity(bp,
4740 USDM_REG_USDM_PRTY_STS);
4741 }
4742 break;
4743 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4744 if (print) {
4745 _print_next_block((*par_num)++, "UCM");
4746 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4747 }
4748 break;
4749 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4750 if (print) {
4751 _print_next_block((*par_num)++,
4752 "USEMI");
4753 _print_parity(bp,
4754 USEM_REG_USEM_PRTY_STS_0);
4755 _print_parity(bp,
4756 USEM_REG_USEM_PRTY_STS_1);
4757 }
4758 break;
4759 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4760 if (print) {
4761 _print_next_block((*par_num)++, "UPB");
4762 _print_parity(bp, GRCBASE_UPB +
4763 PB_REG_PB_PRTY_STS);
4764 }
4765 break;
4766 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4767 if (print) {
4768 _print_next_block((*par_num)++, "CSDM");
4769 _print_parity(bp,
4770 CSDM_REG_CSDM_PRTY_STS);
4771 }
4772 break;
4773 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4774 if (print) {
4775 _print_next_block((*par_num)++, "CCM");
4776 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4777 }
4778 break;
4779 }
4780
4781
4782 sig &= ~cur_bit;
4783 }
4784 }
4785
4786 return res;
4787}
4788
4789static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4790 int *par_num, bool print)
4791{
4792 u32 cur_bit;
4793 bool res;
4794 int i;
4795
4796 res = false;
4797
4798 for (i = 0; sig; i++) {
4799 cur_bit = (0x1UL << i);
4800 if (sig & cur_bit) {
4801 res = true;
4802 if (print) {
4803 switch (cur_bit) {
4804 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4805 _print_next_block((*par_num)++,
4806 "CSEMI");
4807 _print_parity(bp,
4808 CSEM_REG_CSEM_PRTY_STS_0);
4809 _print_parity(bp,
4810 CSEM_REG_CSEM_PRTY_STS_1);
4811 break;
4812 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4813 _print_next_block((*par_num)++, "PXP");
4814 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4815 _print_parity(bp,
4816 PXP2_REG_PXP2_PRTY_STS_0);
4817 _print_parity(bp,
4818 PXP2_REG_PXP2_PRTY_STS_1);
4819 break;
4820 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4821 _print_next_block((*par_num)++,
4822 "PXPPCICLOCKCLIENT");
4823 break;
4824 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4825 _print_next_block((*par_num)++, "CFC");
4826 _print_parity(bp,
4827 CFC_REG_CFC_PRTY_STS);
4828 break;
4829 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4830 _print_next_block((*par_num)++, "CDU");
4831 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4832 break;
4833 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4834 _print_next_block((*par_num)++, "DMAE");
4835 _print_parity(bp,
4836 DMAE_REG_DMAE_PRTY_STS);
4837 break;
4838 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4839 _print_next_block((*par_num)++, "IGU");
4840 if (CHIP_IS_E1x(bp))
4841 _print_parity(bp,
4842 HC_REG_HC_PRTY_STS);
4843 else
4844 _print_parity(bp,
4845 IGU_REG_IGU_PRTY_STS);
4846 break;
4847 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4848 _print_next_block((*par_num)++, "MISC");
4849 _print_parity(bp,
4850 MISC_REG_MISC_PRTY_STS);
4851 break;
4852 }
4853 }
4854
4855
4856 sig &= ~cur_bit;
4857 }
4858 }
4859
4860 return res;
4861}
4862
4863static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4864 int *par_num, bool *global,
4865 bool print)
4866{
4867 bool res = false;
4868 u32 cur_bit;
4869 int i;
4870
4871 for (i = 0; sig; i++) {
4872 cur_bit = (0x1UL << i);
4873 if (sig & cur_bit) {
4874 switch (cur_bit) {
4875 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4876 if (print)
4877 _print_next_block((*par_num)++,
4878 "MCP ROM");
4879 *global = true;
4880 res = true;
4881 break;
4882 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4883 if (print)
4884 _print_next_block((*par_num)++,
4885 "MCP UMP RX");
4886 *global = true;
4887 res = true;
4888 break;
4889 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4890 if (print)
4891 _print_next_block((*par_num)++,
4892 "MCP UMP TX");
4893 *global = true;
4894 res = true;
4895 break;
4896 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4897 (*par_num)++;
4898
4899 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4900 1UL << 10);
4901 break;
4902 }
4903
4904
4905 sig &= ~cur_bit;
4906 }
4907 }
4908
4909 return res;
4910}
4911
4912static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4913 int *par_num, bool print)
4914{
4915 u32 cur_bit;
4916 bool res;
4917 int i;
4918
4919 res = false;
4920
4921 for (i = 0; sig; i++) {
4922 cur_bit = (0x1UL << i);
4923 if (sig & cur_bit) {
4924 res = true;
4925 if (print) {
4926 switch (cur_bit) {
4927 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4928 _print_next_block((*par_num)++,
4929 "PGLUE_B");
4930 _print_parity(bp,
4931 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4932 break;
4933 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4934 _print_next_block((*par_num)++, "ATC");
4935 _print_parity(bp,
4936 ATC_REG_ATC_PRTY_STS);
4937 break;
4938 }
4939 }
4940
4941 sig &= ~cur_bit;
4942 }
4943 }
4944
4945 return res;
4946}
4947
4948static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4949 u32 *sig)
4950{
4951 bool res = false;
4952
4953 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4954 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4955 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4956 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4957 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4958 int par_num = 0;
4959
4960 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4961 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4962 sig[0] & HW_PRTY_ASSERT_SET_0,
4963 sig[1] & HW_PRTY_ASSERT_SET_1,
4964 sig[2] & HW_PRTY_ASSERT_SET_2,
4965 sig[3] & HW_PRTY_ASSERT_SET_3,
4966 sig[4] & HW_PRTY_ASSERT_SET_4);
4967 if (print) {
4968 if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4969 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4970 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4971 (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4972 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4973 netdev_err(bp->dev,
4974 "Parity errors detected in blocks: ");
4975 } else {
4976 print = false;
4977 }
4978 }
4979 res |= bnx2x_check_blocks_with_parity0(bp,
4980 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4981 res |= bnx2x_check_blocks_with_parity1(bp,
4982 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4983 res |= bnx2x_check_blocks_with_parity2(bp,
4984 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4985 res |= bnx2x_check_blocks_with_parity3(bp,
4986 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4987 res |= bnx2x_check_blocks_with_parity4(bp,
4988 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4989
4990 if (print)
4991 pr_cont("\n");
4992 }
4993
4994 return res;
4995}
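/* In short: each of the five attention words is masked with its
 * HW_PRTY_ASSERT_SET_* constant and handed to the matching
 * bnx2x_check_blocks_with_parity*() helper; helpers 1 and 3 may also set
 * *global when the offending block (VAUX PCI core, MCP ROM/UMP) requires a
 * global recovery, and the return value says whether any parity was seen.
 */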
4996
4997/**
4998 * bnx2x_chk_parity_attn - checks for parity attentions.
4999 *
5000 * @bp:	driver handle
5001 * @global:	set to true if the detected parity requires a global recovery
5002 * @print:	whether to report the offending blocks in the system log
5003 */
5004bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5005{
5006 struct attn_route attn = { {0} };
5007 int port = BP_PORT(bp);
5008
5009 attn.sig[0] = REG_RD(bp,
5010 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5011 port*4);
5012 attn.sig[1] = REG_RD(bp,
5013 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5014 port*4);
5015 attn.sig[2] = REG_RD(bp,
5016 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5017 port*4);
5018 attn.sig[3] = REG_RD(bp,
5019 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5020 port*4);
5021
5022 /* keep only the MCP parity bits of sig[3] that are currently
5023  * enabled in the AEU; all other bits are left as read */
5024 attn.sig[3] &= ((REG_RD(bp,
5025 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5026 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5027 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5028 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5029
5030 if (!CHIP_IS_E1x(bp))
5031 attn.sig[4] = REG_RD(bp,
5032 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5033 port*4);
5034
5035 return bnx2x_parity_attn(bp, global, print, attn.sig);
5036}
5037
5038static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5039{
5040 u32 val;
5041 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5042
5043 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5044 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5045 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5046 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5047 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5048 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5049 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5050 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5051 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5052 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5053 if (val &
5054 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5055 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5056 if (val &
5057 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5058 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5059 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5060 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5061 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5062 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5063 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5064 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5065 }
5066 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5067 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5068 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5069 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5070 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5071 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5072 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5073 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5074 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5075 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5076 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5077 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5078 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5079 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5080 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5081 }
5082
5083 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5084 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5085 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5086 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5087 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5088 }
5089}
5090
5091static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5092{
5093 struct attn_route attn, *group_mask;
5094 int port = BP_PORT(bp);
5095 int index;
5096 u32 reg_addr;
5097 u32 val;
5098 u32 aeu_mask;
5099 bool global = false;
5100 /* need to take the HW lock because the MCP or the other port
5101  * might also be trying to handle this attention
5102  */
5103 bnx2x_acquire_alr(bp);
5104
5105 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5106#ifndef BNX2X_STOP_ON_ERROR
5107 bp->recovery_state = BNX2X_RECOVERY_INIT;
5108 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5109 /* Disable HW interrupts */
5110 bnx2x_int_disable(bp);
5111
5112 /* on a parity error skip the regular attention handling below;
5113  * just release the lock and return */
5114#else
5115 bnx2x_panic();
5116#endif
5117 bnx2x_release_alr(bp);
5118 return;
5119 }
5120
5121 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5122 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5123 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5124 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5125 if (!CHIP_IS_E1x(bp))
5126 attn.sig[4] =
5127 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5128 else
5129 attn.sig[4] = 0;
5130
5131 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5132 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5133
5134 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5135 if (deasserted & (1 << index)) {
5136 group_mask = &bp->attn_group[index];
5137
5138 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5139 index,
5140 group_mask->sig[0], group_mask->sig[1],
5141 group_mask->sig[2], group_mask->sig[3],
5142 group_mask->sig[4]);
5143
5144 bnx2x_attn_int_deasserted4(bp,
5145 attn.sig[4] & group_mask->sig[4]);
5146 bnx2x_attn_int_deasserted3(bp,
5147 attn.sig[3] & group_mask->sig[3]);
5148 bnx2x_attn_int_deasserted1(bp,
5149 attn.sig[1] & group_mask->sig[1]);
5150 bnx2x_attn_int_deasserted2(bp,
5151 attn.sig[2] & group_mask->sig[2]);
5152 bnx2x_attn_int_deasserted0(bp,
5153 attn.sig[0] & group_mask->sig[0]);
5154 }
5155 }
5156
5157 bnx2x_release_alr(bp);
5158
5159 if (bp->common.int_block == INT_BLOCK_HC)
5160 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5161 COMMAND_REG_ATTN_BITS_CLR);
5162 else
5163 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5164
5165 val = ~deasserted;
5166 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5167 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5168 REG_WR(bp, reg_addr, val);
5169
5170 if (~bp->attn_state & deasserted)
5171 BNX2X_ERR("IGU ERROR\n");
5172
5173 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5174 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5175
5176 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5177 aeu_mask = REG_RD(bp, reg_addr);
5178
5179 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
5180 aeu_mask, deasserted);
5181 aeu_mask |= (deasserted & 0x3ff);
5182 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5183
5184 REG_WR(bp, reg_addr, aeu_mask);
5185 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5186
5187 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5188 bp->attn_state &= ~deasserted;
5189 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5190}
5191
5192static void bnx2x_attn_int(struct bnx2x *bp)
5193{
5194
5195 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5196 attn_bits);
5197 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5198 attn_bits_ack);
5199 u32 attn_state = bp->attn_state;
5200
5201 /* look for bits that changed state since the last invocation */
5202 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
5203 u32 deasserted = ~attn_bits & attn_ack & attn_state;
5204
5205 DP(NETIF_MSG_HW,
5206 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
5207 attn_bits, attn_ack, asserted, deasserted);
5208
5209 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5210 BNX2X_ERR("BAD attention state\n");
5211
5212 /* handle newly asserted bits first, then the deasserted ones */
5213 if (asserted)
5214 bnx2x_attn_int_asserted(bp, asserted);
5215
5216 if (deasserted)
5217 bnx2x_attn_int_deasserted(bp, deasserted);
5218}
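/* The asserted/deasserted derivation above is a per-bit state machine:
 * a bit that reads 1 in attn_bits while both attn_ack and attn_state are
 * still 0 is newly asserted, and a bit that reads 0 in attn_bits while
 * attn_ack and attn_state are still 1 is newly deasserted; anything else
 * is either unchanged or an inconsistent ("BAD") state.
 */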
5219
5220void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5221 u16 index, u8 op, u8 update)
5222{
5223 u32 igu_addr = bp->igu_base_addr;
5224 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5225 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5226 igu_addr);
5227}
5228
5229static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5230{
5231
5232 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5233}
5234
5235static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5236 union event_ring_elem *elem)
5237{
5238 u8 err = elem->message.error;
5239
5240 if (!bp->cnic_eth_dev.starting_cid ||
5241 (cid < bp->cnic_eth_dev.starting_cid &&
5242 cid != bp->cnic_eth_dev.iscsi_l2_cid))
5243 return 1;
5244
5245 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5246
5247 if (unlikely(err)) {
5248
5249 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5250 cid);
5251 bnx2x_panic_dump(bp, false);
5252 }
5253 bnx2x_cnic_cfc_comp(bp, cid, err);
5254 return 0;
5255}
5256
5257static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5258{
5259 struct bnx2x_mcast_ramrod_params rparam;
5260 int rc;
5261
5262 memset(&rparam, 0, sizeof(rparam));
5263
5264 rparam.mcast_obj = &bp->mcast_obj;
5265
5266 netif_addr_lock_bh(bp->dev);
5267
5268
5269 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5270
5271
5272 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5273 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5274 if (rc < 0)
5275 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5276 rc);
5277 }
5278
5279 netif_addr_unlock_bh(bp->dev);
5280}
5281
5282static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5283 union event_ring_elem *elem)
5284{
5285 unsigned long ramrod_flags = 0;
5286 int rc = 0;
5287 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5288 u32 cid = echo & BNX2X_SWCID_MASK;
5289 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5290
5291
5292 __set_bit(RAMROD_CONT, &ramrod_flags);
5293
5294 switch (echo >> BNX2X_SWCID_SHIFT) {
5295 case BNX2X_FILTER_MAC_PENDING:
5296 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5297 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5298 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5299 else
5300 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5301
5302 break;
5303 case BNX2X_FILTER_VLAN_PENDING:
5304 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5305 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5306 break;
5307 case BNX2X_FILTER_MCAST_PENDING:
5308 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5309 /* multicast completions are not handled through a vlan_mac
5310  * object; hand them to the mcast handler and return
5311  */
5312 bnx2x_handle_mcast_eqe(bp);
5313 return;
5314 default:
5315 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5316 return;
5317 }
5318
5319 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5320
5321 if (rc < 0)
5322 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5323 else if (rc > 0)
5324 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5325}
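/* Note on the echo field used above: the upper bits (echo >>
 * BNX2X_SWCID_SHIFT) carry the pending filter type that selects which
 * vlan_mac object to complete, while the lower bits (echo &
 * BNX2X_SWCID_MASK) carry the software connection id of the queue the
 * ramrod was issued for.
 */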
5326
5327static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5328
5329static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5330{
5331 netif_addr_lock_bh(bp->dev);
5332
5333 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5334
5335
5336 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5337 bnx2x_set_storm_rx_mode(bp);
5338 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5339 &bp->sp_state))
5340 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5341 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5342 &bp->sp_state))
5343 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5344
5345 netif_addr_unlock_bh(bp->dev);
5346}
5347
5348static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5349 union event_ring_elem *elem)
5350{
5351 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5352 DP(BNX2X_MSG_SP,
5353 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5354 elem->message.data.vif_list_event.func_bit_map);
5355 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5356 elem->message.data.vif_list_event.func_bit_map);
5357 } else if (elem->message.data.vif_list_event.echo ==
5358 VIF_LIST_RULE_SET) {
5359 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5360 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5361 }
5362}
5363
5364
5365static void bnx2x_after_function_update(struct bnx2x *bp)
5366{
5367 int q, rc;
5368 struct bnx2x_fastpath *fp;
5369 struct bnx2x_queue_state_params queue_params = {NULL};
5370 struct bnx2x_queue_update_params *q_update_params =
5371 &queue_params.params.update;
5372
5373
5374 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5375
5376
5377 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5378 &q_update_params->update_flags);
5379 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5380 &q_update_params->update_flags);
5381 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5382
5383
5384 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5385 q_update_params->silent_removal_value = 0;
5386 q_update_params->silent_removal_mask = 0;
5387 } else {
5388 q_update_params->silent_removal_value =
5389 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5390 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5391 }
5392
5393 for_each_eth_queue(bp, q) {
5394
5395 fp = &bp->fp[q];
5396 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5397
5398
5399 rc = bnx2x_queue_state_change(bp, &queue_params);
5400 if (rc < 0)
5401 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5402 q);
5403 }
5404
5405 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5406 fp = &bp->fp[FCOE_IDX(bp)];
5407 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5408
5409
5410 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5411
5412
5413 smp_mb__before_atomic();
5414 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5415 smp_mb__after_atomic();
5416
5417
5418 rc = bnx2x_queue_state_change(bp, &queue_params);
5419 if (rc < 0)
5420 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5421 q);
5422 } else {
5423
5424 bnx2x_link_report(bp);
5425 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5426 }
5427}
5428
5429static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5430 struct bnx2x *bp, u32 cid)
5431{
5432 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5433
5434 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5435 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5436 else
5437 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5438}
5439
5440static void bnx2x_eq_int(struct bnx2x *bp)
5441{
5442 u16 hw_cons, sw_cons, sw_prod;
5443 union event_ring_elem *elem;
5444 u8 echo;
5445 u32 cid;
5446 u8 opcode;
5447 int rc, spqe_cnt = 0;
5448 struct bnx2x_queue_sp_obj *q_obj;
5449 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5450 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5451
5452 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5453
5454 /* hw_cons may point at the "next page" element at the end of an
5455  * EQ page; step over it so that the walk below only visits real
5456  * event ring elements
5457  */
5458
5459 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5460 hw_cons++;
5461
5462
5463
5464
5465
5466 sw_cons = bp->eq_cons;
5467 sw_prod = bp->eq_prod;
5468
5469 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5470 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5471
5472 for (; sw_cons != hw_cons;
5473 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5474
5475 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5476
5477 rc = bnx2x_iov_eq_sp_event(bp, elem);
5478 if (!rc) {
5479 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5480 rc);
5481 goto next_spqe;
5482 }
5483
5484 opcode = elem->message.opcode;
5485
5486
5487 switch (opcode) {
5488 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5489 bnx2x_vf_mbx_schedule(bp,
5490 &elem->message.data.vf_pf_event);
5491 continue;
5492
5493 case EVENT_RING_OPCODE_STAT_QUERY:
5494 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5495 "got statistics comp event %d\n",
5496 bp->stats_comp++);
5497
5498 goto next_spqe;
5499
5500 case EVENT_RING_OPCODE_CFC_DEL:
5501 /* CFC delete completion: the connection ID tells whether this
5502  * belongs to a CNIC (iSCSI/FCoE) context or to a regular
5503  * ethernet queue
5504  */
5505
5506
5507
5508 cid = SW_CID(elem->message.data.cfc_del_event.cid);
5509
5510 DP(BNX2X_MSG_SP,
5511 "got delete ramrod for MULTI[%d]\n", cid);
5512
5513 if (CNIC_LOADED(bp) &&
5514 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5515 goto next_spqe;
5516
5517 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5518
5519 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5520 break;
5521
5522 goto next_spqe;
5523
5524 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5525 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5526 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5527 if (f_obj->complete_cmd(bp, f_obj,
5528 BNX2X_F_CMD_TX_STOP))
5529 break;
5530 goto next_spqe;
5531
5532 case EVENT_RING_OPCODE_START_TRAFFIC:
5533 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5534 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5535 if (f_obj->complete_cmd(bp, f_obj,
5536 BNX2X_F_CMD_TX_START))
5537 break;
5538 goto next_spqe;
5539
5540 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5541 echo = elem->message.data.function_update_event.echo;
5542 if (echo == SWITCH_UPDATE) {
5543 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5544 "got FUNC_SWITCH_UPDATE ramrod\n");
5545 if (f_obj->complete_cmd(
5546 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5547 break;
5548
5549 } else {
5550 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5551
5552 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5553 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5554 f_obj->complete_cmd(bp, f_obj,
5555 BNX2X_F_CMD_AFEX_UPDATE);
5556
5557 /* the queue updates themselves are deferred to the
5558  * sp_rtnl task, which runs under rtnl_lock
5559  */
5560
5561 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5562 }
5563
5564 goto next_spqe;
5565
5566 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5567 f_obj->complete_cmd(bp, f_obj,
5568 BNX2X_F_CMD_AFEX_VIFLISTS);
5569 bnx2x_after_afex_vif_lists(bp, elem);
5570 goto next_spqe;
5571 case EVENT_RING_OPCODE_FUNCTION_START:
5572 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5573 "got FUNC_START ramrod\n");
5574 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5575 break;
5576
5577 goto next_spqe;
5578
5579 case EVENT_RING_OPCODE_FUNCTION_STOP:
5580 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5581 "got FUNC_STOP ramrod\n");
5582 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5583 break;
5584
5585 goto next_spqe;
5586
5587 case EVENT_RING_OPCODE_SET_TIMESYNC:
5588 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5589 "got set_timesync ramrod completion\n");
5590 if (f_obj->complete_cmd(bp, f_obj,
5591 BNX2X_F_CMD_SET_TIMESYNC))
5592 break;
5593 goto next_spqe;
5594 }
5595
5596 switch (opcode | bp->state) {
5597 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5598 BNX2X_STATE_OPEN):
5599 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5600 BNX2X_STATE_OPENING_WAIT4_PORT):
5601 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5602 BNX2X_STATE_CLOSING_WAIT4_HALT):
5603 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5604 SW_CID(elem->message.data.eth_event.echo));
5605 rss_raw->clear_pending(rss_raw);
5606 break;
5607
5608 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5609 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5610 case (EVENT_RING_OPCODE_SET_MAC |
5611 BNX2X_STATE_CLOSING_WAIT4_HALT):
5612 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5613 BNX2X_STATE_OPEN):
5614 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5615 BNX2X_STATE_DIAG):
5616 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5617 BNX2X_STATE_CLOSING_WAIT4_HALT):
5618 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5619 bnx2x_handle_classification_eqe(bp, elem);
5620 break;
5621
5622 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5623 BNX2X_STATE_OPEN):
5624 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5625 BNX2X_STATE_DIAG):
5626 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5627 BNX2X_STATE_CLOSING_WAIT4_HALT):
5628 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5629 bnx2x_handle_mcast_eqe(bp);
5630 break;
5631
5632 case (EVENT_RING_OPCODE_FILTERS_RULES |
5633 BNX2X_STATE_OPEN):
5634 case (EVENT_RING_OPCODE_FILTERS_RULES |
5635 BNX2X_STATE_DIAG):
5636 case (EVENT_RING_OPCODE_FILTERS_RULES |
5637 BNX2X_STATE_CLOSING_WAIT4_HALT):
5638 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5639 bnx2x_handle_rx_mode_eqe(bp);
5640 break;
5641 default:
5642 /* unknown event - log an error and continue */
5643 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5644 elem->message.opcode, bp->state);
5645 }
5646next_spqe:
5647 spqe_cnt++;
5648 }
5649
5650 smp_mb__before_atomic();
5651 atomic_add(spqe_cnt, &bp->eq_spq_left);
5652
5653 bp->eq_cons = sw_cons;
5654 bp->eq_prod = sw_prod;
5655 /* make sure the eq_cons/eq_prod updates above are issued first */
5656 smp_wmb();
5657
5658 /* then update the EQ producer in FW */
5659 bnx2x_update_eq_prod(bp, bp->eq_prod);
5660}
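/* Overall flow of bnx2x_eq_int(): walk the event ring from the software
 * consumer up to the hardware consumer, dispatch every element by opcode
 * (and, for classification/rx-mode events, by opcode combined with the
 * driver state), return the number of consumed elements to eq_spq_left and
 * finally publish the new producer to the chip.
 */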
5661
5662static void bnx2x_sp_task(struct work_struct *work)
5663{
5664 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5665
5666 DP(BNX2X_MSG_SP, "sp task invoked\n");
5667
5668
5669 smp_rmb();
5670 if (atomic_read(&bp->interrupt_occurred)) {
5671
5672
5673 u16 status = bnx2x_update_dsb_idx(bp);
5674
5675 DP(BNX2X_MSG_SP, "status %x\n", status);
5676 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5677 atomic_set(&bp->interrupt_occurred, 0);
5678
5679 /* HW attentions */
5680 if (status & BNX2X_DEF_SB_ATT_IDX) {
5681 bnx2x_attn_int(bp);
5682 status &= ~BNX2X_DEF_SB_ATT_IDX;
5683 }
5684
5685
5686 if (status & BNX2X_DEF_SB_IDX) {
5687 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5688
5689 if (FCOE_INIT(bp) &&
5690 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5691
5692
5693
5694 local_bh_disable();
5695 napi_schedule(&bnx2x_fcoe(bp, napi));
5696 local_bh_enable();
5697 }
5698
5699 /* handle EQ completions */
5700 bnx2x_eq_int(bp);
5701 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5702 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5703
5704 status &= ~BNX2X_DEF_SB_IDX;
5705 }
5706
5707
5708 if (unlikely(status))
5709 DP(BNX2X_MSG_SP,
5710 "got an unknown interrupt! (status 0x%x)\n", status);
5711
5712
5713 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5714 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5715 }
5716
5717
5718 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5719 &bp->sp_state)) {
5720 bnx2x_link_report(bp);
5721 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5722 }
5723}
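/* bnx2x_sp_task() is the workqueue side of the slow-path interrupt: when
 * interrupt_occurred is set it re-reads the default status block indices,
 * handles attention bits and event-queue completions (kicking the FCoE
 * NAPI context when that ring has work), and only then re-enables the
 * attention line with an IGU ack.
 */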
5724
5725irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5726{
5727 struct net_device *dev = dev_instance;
5728 struct bnx2x *bp = netdev_priv(dev);
5729
5730 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5731 IGU_INT_DISABLE, 0);
5732
5733#ifdef BNX2X_STOP_ON_ERROR
5734 if (unlikely(bp->panic))
5735 return IRQ_HANDLED;
5736#endif
5737
5738 if (CNIC_LOADED(bp)) {
5739 struct cnic_ops *c_ops;
5740
5741 rcu_read_lock();
5742 c_ops = rcu_dereference(bp->cnic_ops);
5743 if (c_ops)
5744 c_ops->cnic_handler(bp->cnic_data, NULL);
5745 rcu_read_unlock();
5746 }
5747
5748 /* schedule the sp task: default status block and attention
5749  * handling is done there
5750  */
5751 bnx2x_schedule_sp_task(bp);
5752
5753 return IRQ_HANDLED;
5754}
5755
5756
5757
5758void bnx2x_drv_pulse(struct bnx2x *bp)
5759{
5760 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5761 bp->fw_drv_pulse_wr_seq);
5762}
5763
5764static void bnx2x_timer(struct timer_list *t)
5765{
5766 struct bnx2x *bp = from_timer(bp, t, timer);
5767
5768 if (!netif_running(bp->dev))
5769 return;
5770
5771 if (IS_PF(bp) &&
5772 !BP_NOMCP(bp)) {
5773 int mb_idx = BP_FW_MB_IDX(bp);
5774 u16 drv_pulse;
5775 u16 mcp_pulse;
5776
5777 ++bp->fw_drv_pulse_wr_seq;
5778 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5779 drv_pulse = bp->fw_drv_pulse_wr_seq;
5780 bnx2x_drv_pulse(bp);
5781
5782 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5783 MCP_PULSE_SEQ_MASK);
5784
5785 /* the MCP pulse is expected to trail the driver pulse closely;
5786  * if it falls more than 5 pulses behind, assume the management
5787  * FW is stuck
5788  */
5789 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5790 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5791 drv_pulse, mcp_pulse);
5792 }
5793
5794 if (bp->state == BNX2X_STATE_OPEN)
5795 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5796
5797
5798 if (IS_VF(bp))
5799 bnx2x_timer_sriov(bp);
5800
5801 mod_timer(&bp->timer, jiffies + bp->current_interval);
5802}
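/* The pulse check above is wraparound-safe: both sequence numbers are kept
 * masked, and the difference is taken modulo the same mask, so a driver
 * pulse that has wrapped past the MCP pulse still yields a small delta as
 * long as the MCP is keeping up.
 */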
5803
5804
5805
5806/* nic init service functions */
5807
5808/* fill a region of device memory with a given 32-bit pattern, using
5809 * dword writes when the address and length allow it and byte writes
5810 * otherwise
5811 */
5812static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5813{
5814 u32 i;
5815 if (!(len%4) && !(addr%4))
5816 for (i = 0; i < len; i += 4)
5817 REG_WR(bp, addr + i, fill);
5818 else
5819 for (i = 0; i < len; i++)
5820 REG_WR8(bp, addr + i, fill);
5821}
5822
5823
5824static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5825 int fw_sb_id,
5826 u32 *sb_data_p,
5827 u32 data_size)
5828{
5829 int index;
5830 for (index = 0; index < data_size; index++)
5831 REG_WR(bp, BAR_CSTRORM_INTMEM +
5832 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5833 sizeof(u32)*index,
5834 *(sb_data_p + index));
5835}
5836
5837static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5838{
5839 u32 *sb_data_p;
5840 u32 data_size = 0;
5841 struct hc_status_block_data_e2 sb_data_e2;
5842 struct hc_status_block_data_e1x sb_data_e1x;
5843
5844
5845 if (!CHIP_IS_E1x(bp)) {
5846 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5847 sb_data_e2.common.state = SB_DISABLED;
5848 sb_data_e2.common.p_func.vf_valid = false;
5849 sb_data_p = (u32 *)&sb_data_e2;
5850 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5851 } else {
5852 memset(&sb_data_e1x, 0,
5853 sizeof(struct hc_status_block_data_e1x));
5854 sb_data_e1x.common.state = SB_DISABLED;
5855 sb_data_e1x.common.p_func.vf_valid = false;
5856 sb_data_p = (u32 *)&sb_data_e1x;
5857 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5858 }
5859 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5860
5861 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5862 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5863 CSTORM_STATUS_BLOCK_SIZE);
5864 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5865 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5866 CSTORM_SYNC_BLOCK_SIZE);
5867}
5868
5869
5870static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5871 struct hc_sp_status_block_data *sp_sb_data)
5872{
5873 int func = BP_FUNC(bp);
5874 int i;
5875 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5876 REG_WR(bp, BAR_CSTRORM_INTMEM +
5877 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5878 i*sizeof(u32),
5879 *((u32 *)sp_sb_data + i));
5880}
5881
5882static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5883{
5884 int func = BP_FUNC(bp);
5885 struct hc_sp_status_block_data sp_sb_data;
5886 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5887
5888 sp_sb_data.state = SB_DISABLED;
5889 sp_sb_data.p_func.vf_valid = false;
5890
5891 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5892
5893 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5894 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5895 CSTORM_SP_STATUS_BLOCK_SIZE);
5896 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5897 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5898 CSTORM_SP_SYNC_BLOCK_SIZE);
5899}
5900
5901static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5902 int igu_sb_id, int igu_seg_id)
5903{
5904 hc_sm->igu_sb_id = igu_sb_id;
5905 hc_sm->igu_seg_id = igu_seg_id;
5906 hc_sm->timer_value = 0xFF;
5907 hc_sm->time_to_expire = 0xFFFFFFFF;
5908}
5909
5910
5911static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5912{
5913
5914
5915 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5916
5917
5918 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5919 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5920 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5921 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5922
5923
5924
5925 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5926 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5927
5928
5929 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5930 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5931 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5932 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5933 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5934 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5935 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5936 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5937}
5938
5939void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5940 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5941{
5942 int igu_seg_id;
5943
5944 struct hc_status_block_data_e2 sb_data_e2;
5945 struct hc_status_block_data_e1x sb_data_e1x;
5946 struct hc_status_block_sm *hc_sm_p;
5947 int data_size;
5948 u32 *sb_data_p;
5949
5950 if (CHIP_INT_MODE_IS_BC(bp))
5951 igu_seg_id = HC_SEG_ACCESS_NORM;
5952 else
5953 igu_seg_id = IGU_SEG_ACCESS_NORM;
5954
5955 bnx2x_zero_fp_sb(bp, fw_sb_id);
5956
5957 if (!CHIP_IS_E1x(bp)) {
5958 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5959 sb_data_e2.common.state = SB_ENABLED;
5960 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5961 sb_data_e2.common.p_func.vf_id = vfid;
5962 sb_data_e2.common.p_func.vf_valid = vf_valid;
5963 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5964 sb_data_e2.common.same_igu_sb_1b = true;
5965 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5966 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5967 hc_sm_p = sb_data_e2.common.state_machine;
5968 sb_data_p = (u32 *)&sb_data_e2;
5969 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5970 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5971 } else {
5972 memset(&sb_data_e1x, 0,
5973 sizeof(struct hc_status_block_data_e1x));
5974 sb_data_e1x.common.state = SB_ENABLED;
5975 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5976 sb_data_e1x.common.p_func.vf_id = 0xff;
5977 sb_data_e1x.common.p_func.vf_valid = false;
5978 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5979 sb_data_e1x.common.same_igu_sb_1b = true;
5980 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5981 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5982 hc_sm_p = sb_data_e1x.common.state_machine;
5983 sb_data_p = (u32 *)&sb_data_e1x;
5984 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5985 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5986 }
5987
5988 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5989 igu_sb_id, igu_seg_id);
5990 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5991 igu_sb_id, igu_seg_id);
5992
5993 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5994
5995 /* write the SB data to the chip */
5996 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5997}
5998
5999static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
6000 u16 tx_usec, u16 rx_usec)
6001{
6002 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6003 false, rx_usec);
6004 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6005 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
6006 tx_usec);
6007 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6008 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6009 tx_usec);
6010 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6011 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6012 tx_usec);
6013}
6014
6015static void bnx2x_init_def_sb(struct bnx2x *bp)
6016{
6017 struct host_sp_status_block *def_sb = bp->def_status_blk;
6018 dma_addr_t mapping = bp->def_status_blk_mapping;
6019 int igu_sp_sb_index;
6020 int igu_seg_id;
6021 int port = BP_PORT(bp);
6022 int func = BP_FUNC(bp);
6023 int reg_offset, reg_offset_en5;
6024 u64 section;
6025 int index;
6026 struct hc_sp_status_block_data sp_sb_data;
6027 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6028
6029 if (CHIP_INT_MODE_IS_BC(bp)) {
6030 igu_sp_sb_index = DEF_SB_IGU_ID;
6031 igu_seg_id = HC_SEG_ACCESS_DEF;
6032 } else {
6033 igu_sp_sb_index = bp->igu_dsb_id;
6034 igu_seg_id = IGU_SEG_ACCESS_DEF;
6035 }
6036
6037 /* attentions section of the default SB */
6038 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6039 atten_status_block);
6040 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6041
6042 bp->attn_state = 0;
6043
6044 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6045 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6046 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6047 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6048 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6049 int sindex;
6050
6051 for (sindex = 0; sindex < 4; sindex++)
6052 bp->attn_group[index].sig[sindex] =
6053 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6054
6055 if (!CHIP_IS_E1x(bp))
6056 /* enable5 is kept in a separate set of registers,
6057  * hence the address stride of 4 here instead of the
6058  * 16 used for the first four groups above
6059  */
6060
6061 bp->attn_group[index].sig[4] = REG_RD(bp,
6062 reg_offset_en5 + 0x4*index);
6063 else
6064 bp->attn_group[index].sig[4] = 0;
6065 }
6066
6067 if (bp->common.int_block == INT_BLOCK_HC) {
6068 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6069 HC_REG_ATTN_MSG0_ADDR_L);
6070
6071 REG_WR(bp, reg_offset, U64_LO(section));
6072 REG_WR(bp, reg_offset + 4, U64_HI(section));
6073 } else if (!CHIP_IS_E1x(bp)) {
6074 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6075 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6076 }
6077
6078 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6079 sp_sb);
6080
6081 bnx2x_zero_sp_sb(bp);
6082
6083
6084 sp_sb_data.state = SB_ENABLED;
6085 sp_sb_data.host_sb_addr.lo = U64_LO(section);
6086 sp_sb_data.host_sb_addr.hi = U64_HI(section);
6087 sp_sb_data.igu_sb_id = igu_sp_sb_index;
6088 sp_sb_data.igu_seg_id = igu_seg_id;
6089 sp_sb_data.p_func.pf_id = func;
6090 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6091 sp_sb_data.p_func.vf_id = 0xff;
6092
6093 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6094
6095 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6096}
6097
6098void bnx2x_update_coalesce(struct bnx2x *bp)
6099{
6100 int i;
6101
6102 for_each_eth_queue(bp, i)
6103 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6104 bp->tx_ticks, bp->rx_ticks);
6105}
6106
6107static void bnx2x_init_sp_ring(struct bnx2x *bp)
6108{
6109 spin_lock_init(&bp->spq_lock);
6110 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6111
6112 bp->spq_prod_idx = 0;
6113 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6114 bp->spq_prod_bd = bp->spq;
6115 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6116}
6117
6118static void bnx2x_init_eq_ring(struct bnx2x *bp)
6119{
6120 int i;
6121 for (i = 1; i <= NUM_EQ_PAGES; i++) {
6122 union event_ring_elem *elem =
6123 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6124
6125 elem->next_page.addr.hi =
6126 cpu_to_le32(U64_HI(bp->eq_mapping +
6127 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6128 elem->next_page.addr.lo =
6129 cpu_to_le32(U64_LO(bp->eq_mapping +
6130 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6131 }
6132 bp->eq_cons = 0;
6133 bp->eq_prod = NUM_EQ_DESC;
6134 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6135
6136 atomic_set(&bp->eq_spq_left,
6137 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6138}
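/* The loop above chains the event-queue pages: the last descriptor of each
 * page is a "next page" pointer to the start of the following page, with
 * the final page wrapping back to page 0 (the i % NUM_EQ_PAGES term).
 * eq_spq_left is then sized so the EQ cannot be overrun by pending
 * slow-path requests.
 */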
6139
6140
6141static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6142 unsigned long rx_mode_flags,
6143 unsigned long rx_accept_flags,
6144 unsigned long tx_accept_flags,
6145 unsigned long ramrod_flags)
6146{
6147 struct bnx2x_rx_mode_ramrod_params ramrod_param;
6148 int rc;
6149
6150 memset(&ramrod_param, 0, sizeof(ramrod_param));
6151
6152
6153 ramrod_param.cid = 0;
6154 ramrod_param.cl_id = cl_id;
6155 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6156 ramrod_param.func_id = BP_FUNC(bp);
6157
6158 ramrod_param.pstate = &bp->sp_state;
6159 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6160
6161 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6162 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6163
6164 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6165
6166 ramrod_param.ramrod_flags = ramrod_flags;
6167 ramrod_param.rx_mode_flags = rx_mode_flags;
6168
6169 ramrod_param.rx_accept_flags = rx_accept_flags;
6170 ramrod_param.tx_accept_flags = tx_accept_flags;
6171
6172 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6173 if (rc < 0) {
6174 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6175 return rc;
6176 }
6177
6178 return 0;
6179}
6180
6181static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6182 unsigned long *rx_accept_flags,
6183 unsigned long *tx_accept_flags)
6184{
6185
6186 *rx_accept_flags = 0;
6187 *tx_accept_flags = 0;
6188
6189 switch (rx_mode) {
6190 case BNX2X_RX_MODE_NONE:
6191 /*
6192  * 'drop all': leave both accept flag masks empty so that
6193  * everything is dropped
6194  */
6195 break;
6196 case BNX2X_RX_MODE_NORMAL:
6197 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6198 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6199 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6200
6201
6202 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6203 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6204 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6205
6206 if (bp->accept_any_vlan) {
6207 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6208 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6209 }
6210
6211 break;
6212 case BNX2X_RX_MODE_ALLMULTI:
6213 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6214 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6215 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6216
6217
6218 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6219 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6220 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6221
6222 if (bp->accept_any_vlan) {
6223 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6224 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6225 }
6226
6227 break;
6228 case BNX2X_RX_MODE_PROMISC:
6229 /* in promiscuous mode also accept unmatched unicast frames,
6230  * on top of everything the other modes accept
6231  */
6232
6233 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6234 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6235 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6236 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6237
6238
6239 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6240 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6241
6242 if (IS_MF_SI(bp))
6243 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6244 else
6245 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6246
6247 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6248 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6249
6250 break;
6251 default:
6252 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6253 return -EINVAL;
6254 }
6255
6256 return 0;
6257}
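/* Summary of the rx_mode to accept-flags mapping above:
 *   NONE      - drop everything (no accept bits set)
 *   NORMAL    - unicast, multicast and broadcast on both rx and tx
 *   ALLMULTI  - like NORMAL, but all multicast is accepted
 *   PROMISC   - additionally accept unmatched unicast on rx; tx accepts
 *               all-unicast in MF-SI mode, plus any VLAN in both directions
 */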
6258
6259
6260static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6261{
6262 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6263 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6264 int rc;
6265
6266 if (!NO_FCOE(bp))
6267 /* Configure rx_mode of the FCoE Queue too */
6268 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6269
6270 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6271 &tx_accept_flags);
6272 if (rc)
6273 return rc;
6274
6275 __set_bit(RAMROD_RX, &ramrod_flags);
6276 __set_bit(RAMROD_TX, &ramrod_flags);
6277
6278 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6279 rx_accept_flags, tx_accept_flags,
6280 ramrod_flags);
6281}
6282
6283static void bnx2x_init_internal_common(struct bnx2x *bp)
6284{
6285 int i;
6286
6287
6288
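	/* Zero the USTORM per-aggregation data area */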
6289 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6290 REG_WR(bp, BAR_USTRORM_INTMEM +
6291 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6292 if (!CHIP_IS_E1x(bp)) {
6293 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6294 CHIP_INT_MODE_IS_BC(bp) ?
6295 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6296 }
6297}
6298
6299static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6300{
6301 switch (load_code) {
6302 case FW_MSG_CODE_DRV_LOAD_COMMON:
6303 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6304 bnx2x_init_internal_common(bp);
6305
6306
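		/* fall through */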
6307 case FW_MSG_CODE_DRV_LOAD_PORT:
6308
6309
6310
6311 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6312
6313
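		/* nothing to do here; per-function internal memory is set up
		 * later in bnx2x_pf_init()
		 */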
6314 break;
6315
6316 default:
6317 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6318 break;
6319 }
6320}
6321
6322static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6323{
6324 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6325}
6326
6327static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6328{
6329 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6330}
6331
6332static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6333{
6334 if (CHIP_IS_E1x(fp->bp))
6335 return BP_L_ID(fp->bp) + fp->index;
6336 else
6337 return bnx2x_fp_igu_sb_id(fp);
6338}
6339
6340static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6341{
6342 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6343 u8 cos;
6344 unsigned long q_type = 0;
6345 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6346 fp->rx_queue = fp_idx;
6347 fp->cid = fp_idx;
6348 fp->cl_id = bnx2x_fp_cl_id(fp);
6349 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6350 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6351
6352 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
6353
6354
6355 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6356
6357
6358 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6359
6360
6361 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6362 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6363
6364 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6365
6366
6367 for_each_cos_in_tx_queue(fp, cos) {
6368 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6369 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6370 FP_COS_TO_TXQ(fp, cos, bp),
6371 BNX2X_TX_SB_INDEX_BASE + cos, fp);
6372 cids[cos] = fp->txdata_ptr[cos]->cid;
6373 }
6374
6375
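	/* nothing more for a VF to do here */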
6376 if (IS_VF(bp))
6377 return;
6378
6379 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6380 fp->fw_sb_id, fp->igu_sb_id);
6381 bnx2x_update_fpsb_idx(fp);
6382 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6383 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6384 bnx2x_sp_mapping(bp, q_rdata), q_type);
6385
6386
6387
6388
6389 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6390
6391 DP(NETIF_MSG_IFUP,
6392 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6393 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6394 fp->igu_sb_id);
6395}
6396
6397static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6398{
6399 int i;
6400
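	/* Chain the last BD of each Tx page to the first BD of the next page */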
6401 for (i = 1; i <= NUM_TX_RINGS; i++) {
6402 struct eth_tx_next_bd *tx_next_bd =
6403 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6404
6405 tx_next_bd->addr_hi =
6406 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6407 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6408 tx_next_bd->addr_lo =
6409 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6410 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6411 }
6412
6413 *txdata->tx_cons_sb = cpu_to_le16(0);
6414
6415 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6416 txdata->tx_db.data.zero_fill1 = 0;
6417 txdata->tx_db.data.prod = 0;
6418
6419 txdata->tx_pkt_prod = 0;
6420 txdata->tx_pkt_cons = 0;
6421 txdata->tx_bd_prod = 0;
6422 txdata->tx_bd_cons = 0;
6423 txdata->tx_pkt = 0;
6424}
6425
6426static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6427{
6428 int i;
6429
6430 for_each_tx_queue_cnic(bp, i)
6431 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6432}
6433
6434static void bnx2x_init_tx_rings(struct bnx2x *bp)
6435{
6436 int i;
6437 u8 cos;
6438
6439 for_each_eth_queue(bp, i)
6440 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6441 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6442}
6443
6444static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6445{
6446 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6447 unsigned long q_type = 0;
6448
6449 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6450 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6451 BNX2X_FCOE_ETH_CL_ID_IDX);
6452 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6453 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6454 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6455 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6456 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6457 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6458 fp);
6459
6460 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6461
6462
6463 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6464
6465 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6466 bnx2x_rx_ustorm_prods_offset(fp);
6467
6468
6469 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6470 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6471
6472
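	/* The FCoE L2 queue uses a single CoS */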
6473 BUG_ON(fp->max_cos != 1);
6474
6475 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6476 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6477 bnx2x_sp_mapping(bp, q_rdata), q_type);
6478
6479 DP(NETIF_MSG_IFUP,
6480 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6481 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6482 fp->igu_sb_id);
6483}
6484
6485void bnx2x_nic_init_cnic(struct bnx2x *bp)
6486{
6487 if (!NO_FCOE(bp))
6488 bnx2x_init_fcoe_fp(bp);
6489
6490 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6491 BNX2X_VF_ID_INVALID, false,
6492 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6493
6494
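	/* Make sure the status block indices are read before the rings are initialized */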
6495 rmb();
6496 bnx2x_init_rx_rings_cnic(bp);
6497 bnx2x_init_tx_rings_cnic(bp);
6498
6499
6500 mb();
6501}
6502
6503void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6504{
6505 int i;
6506
6507
6508 for_each_eth_queue(bp, i)
6509 bnx2x_init_eth_fp(bp, i);
6510
6511
6512 rmb();
6513 bnx2x_init_rx_rings(bp);
6514 bnx2x_init_tx_rings(bp);
6515
6516 if (IS_PF(bp)) {
6517
6518 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6519 bp->common.shmem_base,
6520 bp->common.shmem2_base, BP_PORT(bp));
6521
6522
6523 bnx2x_init_def_sb(bp);
6524 bnx2x_update_dsb_idx(bp);
6525 bnx2x_init_sp_ring(bp);
6526 } else {
6527 bnx2x_memset_stats(bp);
6528 }
6529}
6530
6531void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6532{
6533 bnx2x_init_eq_ring(bp);
6534 bnx2x_init_internal(bp, load_code);
6535 bnx2x_pf_init(bp);
6536 bnx2x_stats_init(bp);
6537
6538
6539 mb();
6540
6541 bnx2x_int_enable(bp);
6542
6543
6544 bnx2x_attn_int_deasserted0(bp,
6545 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6546 AEU_INPUTS_ATTN_BITS_SPIO5);
6547}
6548
6549
6550static int bnx2x_gunzip_init(struct bnx2x *bp)
6551{
6552 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6553 &bp->gunzip_mapping, GFP_KERNEL);
6554 if (bp->gunzip_buf == NULL)
6555 goto gunzip_nomem1;
6556
6557 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6558 if (bp->strm == NULL)
6559 goto gunzip_nomem2;
6560
6561 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6562 if (bp->strm->workspace == NULL)
6563 goto gunzip_nomem3;
6564
6565 return 0;
6566
6567gunzip_nomem3:
6568 kfree(bp->strm);
6569 bp->strm = NULL;
6570
6571gunzip_nomem2:
6572 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6573 bp->gunzip_mapping);
6574 bp->gunzip_buf = NULL;
6575
6576gunzip_nomem1:
6577 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
6578 return -ENOMEM;
6579}
6580
6581static void bnx2x_gunzip_end(struct bnx2x *bp)
6582{
6583 if (bp->strm) {
6584 vfree(bp->strm->workspace);
6585 kfree(bp->strm);
6586 bp->strm = NULL;
6587 }
6588
6589 if (bp->gunzip_buf) {
6590 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6591 bp->gunzip_mapping);
6592 bp->gunzip_buf = NULL;
6593 }
6594}
6595
6596static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6597{
6598 int n, rc;
6599
6600
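	/* Check the gzip magic bytes and compression method */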
6601 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6602 BNX2X_ERR("Bad gzip header\n");
6603 return -EINVAL;
6604 }
6605
6606 n = 10;
6607
6608#define FNAME 0x8
6609
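	/* Skip the fixed gzip header and, if present, the original file name */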
6610 if (zbuf[3] & FNAME)
6611 while ((zbuf[n++] != 0) && (n < len));
6612
6613 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6614 bp->strm->avail_in = len - n;
6615 bp->strm->next_out = bp->gunzip_buf;
6616 bp->strm->avail_out = FW_BUF_SIZE;
6617
6618 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6619 if (rc != Z_OK)
6620 return rc;
6621
6622 rc = zlib_inflate(bp->strm, Z_FINISH);
6623 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6624 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6625 bp->strm->msg);
6626
6627 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6628 if (bp->gunzip_outlen & 0x3)
6629 netdev_err(bp->dev,
6630 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6631 bp->gunzip_outlen);
6632 bp->gunzip_outlen >>= 2;
6633
6634 zlib_inflateEnd(bp->strm);
6635
6636 if (rc == Z_STREAM_END)
6637 return 0;
6638
6639 return rc;
6640}
6641
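/* Send a loopback debug packet through the NIG debug interface */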
6649static void bnx2x_lb_pckt(struct bnx2x *bp)
6650{
6651 u32 wb_write[3];
6652
6653
6654 wb_write[0] = 0x55555555;
6655 wb_write[1] = 0x55555555;
6656 wb_write[2] = 0x20;
6657 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6658
6659
6660 wb_write[0] = 0x09000000;
6661 wb_write[1] = 0x55555555;
6662 wb_write[2] = 0x10;
6663 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6664}
6665
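/* Some internal memories cannot be read directly, so they are tested by
 * sending loopback packets through the chip and checking the NIG and
 * parser statistics.
 */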
6670static int bnx2x_int_mem_test(struct bnx2x *bp)
6671{
6672 int factor;
6673 int count, i;
6674 u32 val = 0;
6675
6676 if (CHIP_REV_IS_FPGA(bp))
6677 factor = 120;
6678 else if (CHIP_REV_IS_EMUL(bp))
6679 factor = 200;
6680 else
6681 factor = 1;
6682
6683
6684 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6685 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6686 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6687 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6688
6689
6690 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6691
6692
6693 bnx2x_lb_pckt(bp);
6694
6695
6696
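	/* Poll the NIG BRB octet counter until it shows the 0x10 bytes of the loopback packet */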
6697 count = 1000 * factor;
6698 while (count) {
6699
6700 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6701 val = *bnx2x_sp(bp, wb_data[0]);
6702 if (val == 0x10)
6703 break;
6704
6705 usleep_range(10000, 20000);
6706 count--;
6707 }
6708 if (val != 0x10) {
6709 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6710 return -1;
6711 }
6712
6713
6714 count = 1000 * factor;
6715 while (count) {
6716 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6717 if (val == 1)
6718 break;
6719
6720 usleep_range(10000, 20000);
6721 count--;
6722 }
6723 if (val != 0x1) {
6724 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6725 return -2;
6726 }
6727
6728
6729 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6730 msleep(50);
6731 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6732 msleep(50);
6733 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6734 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6735
6736 DP(NETIF_MSG_HW, "part2\n");
6737
6738
6739 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6740 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6741 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6742 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6743
6744
6745 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6746
6747
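	/* Send 10 more loopback packets */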
6748 for (i = 0; i < 10; i++)
6749 bnx2x_lb_pckt(bp);
6750
6751
6752
6753 count = 1000 * factor;
6754 while (count) {
6755
6756 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6757 val = *bnx2x_sp(bp, wb_data[0]);
6758 if (val == 0xb0)
6759 break;
6760
6761 usleep_range(10000, 20000);
6762 count--;
6763 }
6764 if (val != 0xb0) {
6765 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6766 return -3;
6767 }
6768
6769
6770 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6771 if (val != 2)
6772 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6773
6774
6775 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6776
6777
6778 msleep(10 * factor);
6779
6780 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6781 if (val != 3)
6782 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6783
6784
6785 for (i = 0; i < 11; i++)
6786 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6787 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6788 if (val != 1) {
6789 BNX2X_ERR("clear of NIG failed\n");
6790 return -4;
6791 }
6792
6793
6794 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6795 msleep(50);
6796 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6797 msleep(50);
6798 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6799 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	if (!CNIC_SUPPORT(bp))
		REG_WR(bp, PRS_REG_NIC_MODE, 1);
6803
6804
6805 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6806 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6807 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6808 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6809
6810 DP(NETIF_MSG_HW, "done\n");
6811
6812 return 0;
6813}
6814
6815static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6816{
6817 u32 val;
6818
6819 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6820 if (!CHIP_IS_E1x(bp))
6821 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6822 else
6823 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6824 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6825 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6826
6827
6828
6829
6830
6831
6832 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6833 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6834 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6835 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6836 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6837 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6838
6839
6840 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6841 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6842 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6843
6844
6845 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6846 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6847 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6848 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6849
6850
6851
6852 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6853 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6854 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6855 if (!CHIP_IS_E1x(bp))
6856 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6857 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6858 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6859
6860 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6861 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6862 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6863
6864
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6868
6869 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6870 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6871
6872 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);
6873}
6874
6875static void bnx2x_reset_common(struct bnx2x *bp)
6876{
6877 u32 val = 0x1400;
6878
6879
6880 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6881 0xd3ffff7f);
6882
6883 if (CHIP_IS_E3(bp)) {
6884 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6885 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6886 }
6887
6888 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6889}
6890
6891static void bnx2x_setup_dmae(struct bnx2x *bp)
6892{
6893 bp->dmae_ready = 0;
6894 spin_lock_init(&bp->dmae_lock);
6895}
6896
6897static void bnx2x_init_pxp(struct bnx2x *bp)
6898{
6899 u16 devctl;
6900 int r_order, w_order;
6901
6902 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6903 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6904 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6905 if (bp->mrrs == -1)
6906 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6907 else {
6908 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6909 r_order = bp->mrrs;
6910 }
6911
6912 bnx2x_init_pxp_arb(bp, r_order, w_order);
6913}
6914
6915static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6916{
6917 int is_required;
6918 u32 val;
6919 int port;
6920
6921 if (BP_NOMCP(bp))
6922 return;
6923
6924 is_required = 0;
6925 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6926 SHARED_HW_CFG_FAN_FAILURE_MASK;
6927
6928 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6929 is_required = 1;
6930
6931
6932
6933
6934
6935
6936 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6937 for (port = PORT_0; port < PORT_MAX; port++) {
6938 is_required |=
6939 bnx2x_fan_failure_det_req(
6940 bp,
6941 bp->common.shmem_base,
6942 bp->common.shmem2_base,
6943 port);
6944 }
6945
6946 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6947
6948 if (is_required == 0)
6949 return;
6950
6951
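	/* Fan failure is indicated by SPIO 5 */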
6952 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6953
6954
6955 val = REG_RD(bp, MISC_REG_SPIO_INT);
6956 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6957 REG_WR(bp, MISC_REG_SPIO_INT, val);
6958
6959
6960 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6961 val |= MISC_SPIO_SPIO5;
6962 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6963}
6964
6965void bnx2x_pf_disable(struct bnx2x *bp)
6966{
6967 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6968 val &= ~IGU_PF_CONF_FUNC_EN;
6969
6970 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6971 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6972 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6973}
6974
6975static void bnx2x__common_init_phy(struct bnx2x *bp)
6976{
6977 u32 shmem_base[2], shmem2_base[2];
6978
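	/* Skip the common PHY init when the MFW supports LFA */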
6979 if (SHMEM2_RD(bp, size) >
6980 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6981 return;
6982 shmem_base[0] = bp->common.shmem_base;
6983 shmem2_base[0] = bp->common.shmem2_base;
6984 if (!CHIP_IS_E1x(bp)) {
6985 shmem_base[1] =
6986 SHMEM2_RD(bp, other_shmem_base_addr);
6987 shmem2_base[1] =
6988 SHMEM2_RD(bp, other_shmem2_base_addr);
6989 }
6990 bnx2x_acquire_phy_lock(bp);
6991 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6992 bp->common.chip_id);
6993 bnx2x_release_phy_lock(bp);
6994}
6995
6996static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
6997{
6998 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
6999 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7000 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7001 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7002 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7003
7004
7005 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7006
7007 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7008 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7009 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7010 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7011}
7012
7013static void bnx2x_set_endianity(struct bnx2x *bp)
7014{
7015#ifdef __BIG_ENDIAN
7016 bnx2x_config_endianity(bp, 1);
7017#else
7018 bnx2x_config_endianity(bp, 0);
7019#endif
7020}
7021
7022static void bnx2x_reset_endianity(struct bnx2x *bp)
7023{
7024 bnx2x_config_endianity(bp, 0);
7025}
7026
7027
7028
7029
7030
7031
7032static int bnx2x_init_hw_common(struct bnx2x *bp)
7033{
7034 u32 val;
7035
7036 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
7037
7038
7039
7040
7041
7042 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7043
7044 bnx2x_reset_common(bp);
7045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7046
7047 val = 0xfffc;
7048 if (CHIP_IS_E3(bp)) {
7049 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7050 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7051 }
7052 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7053
7054 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7055
7056 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7057
7058 if (!CHIP_IS_E1x(bp)) {
7059 u8 abs_func_id;
7060
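		/* Turn off the PCI master enable for every other function on
		 * this path (0,2,4,6 on path 0; 1,3,5,7 on path 1) and keep
		 * it enabled only for ourselves.
		 */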
7068 for (abs_func_id = BP_PATH(bp);
7069 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7070 if (abs_func_id == BP_ABS_FUNC(bp)) {
7071 REG_WR(bp,
7072 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7073 1);
7074 continue;
7075 }
7076
7077 bnx2x_pretend_func(bp, abs_func_id);
7078
7079 bnx2x_pf_disable(bp);
7080 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7081 }
7082 }
7083
7084 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7085 if (CHIP_IS_E1(bp)) {
7086
7087
7088 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7089 }
7090
7091 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7092 bnx2x_init_pxp(bp);
7093 bnx2x_set_endianity(bp);
7094 bnx2x_ilt_init_page_size(bp, INITOP_SET);
7095
7096 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7097 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7098
7099
7100 msleep(100);
7101
7102 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7103 if (val != 1) {
7104 BNX2X_ERR("PXP2 CFG failed\n");
7105 return -EBUSY;
7106 }
7107 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7108 if (val != 1) {
7109 BNX2X_ERR("PXP2 RD_INIT failed\n");
7110 return -EBUSY;
7111 }
7112
7113
7114
7115
7116
7117
7118 if (!CHIP_IS_E1x(bp)) {
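		/* Timers (TM) block workaround, E2 and newer: clear the
		 * entire TM ILT range while pretending to be the last
		 * function on this path, then switch back to the real
		 * function.
		 */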
7181 struct ilt_client_info ilt_cli;
7182 struct bnx2x_ilt ilt;
7183 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7184 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7185
7186
7187 ilt_cli.start = 0;
7188 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7189 ilt_cli.client_num = ILT_CLIENT_TM;
7190
7202 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7203 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7204 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7205
7206 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7207 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7208 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7209 }
7210
7211 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7212 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7213
7214 if (!CHIP_IS_E1x(bp)) {
7215 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7216 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7217 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7218
7219 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7220
7221
7222 do {
7223 msleep(200);
7224 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7225 } while (factor-- && (val != 1));
7226
7227 if (val != 1) {
7228 BNX2X_ERR("ATC_INIT failed\n");
7229 return -EBUSY;
7230 }
7231 }
7232
7233 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7234
7235 bnx2x_iov_init_dmae(bp);
7236
7237
7238 bp->dmae_ready = 1;
7239 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7240
7241 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7242
7243 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7244
7245 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7246
7247 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7248
7249 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7250 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7251 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7252 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7253
7254 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7255
7256
7257 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7258
7259
7260 REG_WR(bp, QM_REG_SOFT_RESET, 1);
7261 REG_WR(bp, QM_REG_SOFT_RESET, 0);
7262
7263 if (CNIC_SUPPORT(bp))
7264 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7265
7266 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7267
	if (!CHIP_REV_IS_SLOW(bp))
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7271
7272 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7273
7274 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7275 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7276
7277 if (!CHIP_IS_E1(bp))
7278 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7279
7280 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7281 if (IS_MF_AFEX(bp)) {
7282
7283
7284
7285 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7286 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7287 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7288 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7289 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7290 } else {
7291
7292
7293
7294 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7295 bp->path_has_ovlan ? 7 : 6);
7296 }
7297 }
7298
7299 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7300 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7301 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7302 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7303
7304 if (!CHIP_IS_E1x(bp)) {
7305
7306 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7307 VFC_MEMORIES_RST_REG_CAM_RST |
7308 VFC_MEMORIES_RST_REG_RAM_RST);
7309 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7310 VFC_MEMORIES_RST_REG_CAM_RST |
7311 VFC_MEMORIES_RST_REG_RAM_RST);
7312
7313 msleep(20);
7314 }
7315
7316 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7317 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7318 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7319 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7320
7321
7322 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7323 0x80000000);
7324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7325 0x80000000);
7326
7327 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7328 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7329 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7330
7331 if (!CHIP_IS_E1x(bp)) {
7332 if (IS_MF_AFEX(bp)) {
7333
7334
7335
7336 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7337 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7338 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7339 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7340 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7341 } else {
7342 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7343 bp->path_has_ovlan ? 7 : 6);
7344 }
7345 }
7346
7347 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7348
7349 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7350
7351 if (CNIC_SUPPORT(bp)) {
7352 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7353 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7354 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7355 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7356 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7357 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7358 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7359 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7360 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7361 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7362 }
7363 REG_WR(bp, SRC_REG_SOFT_RST, 0);
7364
	if (sizeof(union cdu_context) != 1024)
		dev_alert(&bp->pdev->dev,
			  "please adjust the size of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));
7370
7371 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7372 val = (4 << 24) + (0 << 12) + 1024;
7373 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7374
7375 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7376 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7377
7378 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7379
7380
7381 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7382
7383 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7384
7385 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7386 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7387
7388 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7389 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7390
7391
7392 REG_WR(bp, 0x2814, 0xffffffff);
7393 REG_WR(bp, 0x3820, 0xffffffff);
7394
7395 if (!CHIP_IS_E1x(bp)) {
7396 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7397 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7398 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7399 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7400 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7401 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7402 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7403 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7404 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7405 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7406 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7407 }
7408
7409 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7410 if (!CHIP_IS_E1(bp)) {
7411
7412 if (!CHIP_IS_E3(bp))
7413 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7414 }
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7418
7419 if (CHIP_REV_IS_SLOW(bp))
7420 msleep(200);
7421
7422
7423 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7424 if (val != 1) {
7425 BNX2X_ERR("CFC LL_INIT failed\n");
7426 return -EBUSY;
7427 }
7428 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7429 if (val != 1) {
7430 BNX2X_ERR("CFC AC_INIT failed\n");
7431 return -EBUSY;
7432 }
7433 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7434 if (val != 1) {
7435 BNX2X_ERR("CFC CAM_INIT failed\n");
7436 return -EBUSY;
7437 }
7438 REG_WR(bp, CFC_REG_DEBUG0, 0);
7439
7440 if (CHIP_IS_E1(bp)) {
7441
7442
7443 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7444 val = *bnx2x_sp(bp, wb_data[0]);
7445
7446
7447 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7448 BNX2X_ERR("internal mem self test failed\n");
7449 return -EBUSY;
7450 }
7451 }
7452
7453 bnx2x_setup_fan_failure_detection(bp);
7454
7455
7456 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7457
7458 bnx2x_enable_blocks_attention(bp);
7459 bnx2x_enable_blocks_parity(bp);
7460
7461 if (!BP_NOMCP(bp)) {
7462 if (CHIP_IS_E1x(bp))
7463 bnx2x__common_init_phy(bp);
7464 } else
7465 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7466
7467 if (SHMEM2_HAS(bp, netproc_fw_ver))
7468 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7469
7470 return 0;
7471}
7472
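/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */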
7478static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7479{
7480 int rc = bnx2x_init_hw_common(bp);
7481
7482 if (rc)
7483 return rc;
7484
7485
7486 if (!BP_NOMCP(bp))
7487 bnx2x__common_init_phy(bp);
7488
7489 return 0;
7490}
7491
7492static int bnx2x_init_hw_port(struct bnx2x *bp)
7493{
7494 int port = BP_PORT(bp);
7495 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7496 u32 low, high;
7497 u32 val, reg;
7498
7499 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7500
7501 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7502
7503 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7504 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7505 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7506
7507
7508
7509
7510
7511
7512 if (!CHIP_IS_E1x(bp))
7513 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7514
7515 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7516 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7517 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7518 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7519
7520 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7521 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7522 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7523 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7524
7525
7526 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7527
7528 if (CNIC_SUPPORT(bp)) {
7529 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7530 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7531 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7532 }
7533
7534 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7535
7536 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7537
7538 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7539
7540 if (IS_MF(bp))
7541 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7542 else if (bp->dev->mtu > 4096) {
7543 if (bp->flags & ONE_PORT_FLAG)
7544 low = 160;
7545 else {
7546 val = bp->dev->mtu;
7547
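				/* (24*1024 + val*4)/256 */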
7548 low = 96 + (val/64) +
7549 ((val % 64) ? 1 : 0);
7550 }
7551 } else
7552 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7553 high = low + 56;
7554 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7555 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7556 }
7557
7558 if (CHIP_MODE_IS_4_PORT(bp))
7559 REG_WR(bp, (BP_PORT(bp) ?
7560 BRB1_REG_MAC_GUARANTIED_1 :
7561 BRB1_REG_MAC_GUARANTIED_0), 40);
7562
7563 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7564 if (CHIP_IS_E3B0(bp)) {
7565 if (IS_MF_AFEX(bp)) {
7566
7567 REG_WR(bp, BP_PORT(bp) ?
7568 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7569 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7570 REG_WR(bp, BP_PORT(bp) ?
7571 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7572 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7573 REG_WR(bp, BP_PORT(bp) ?
7574 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7575 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7576 } else {
7577
7578
7579
7580
7581 REG_WR(bp, BP_PORT(bp) ?
7582 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7583 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7584 (bp->path_has_ovlan ? 7 : 6));
7585 }
7586 }
7587
7588 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7589 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7590 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7591 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7592
7593 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7594 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7595 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7596 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7597
7598 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7599 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7600
7601 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7602
7603 if (CHIP_IS_E1x(bp)) {
7604
7605 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7606
7607
7608 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7609
7610 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7611
7612
7613 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7614 udelay(50);
7615 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7616 }
7617
7618 if (CNIC_SUPPORT(bp))
7619 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7620
7621 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7622 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7623
7624 if (CHIP_IS_E1(bp)) {
7625 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7626 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7627 }
7628 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7629
7630 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7631
7632 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7633
7634
7635
7636
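	/* AEU attention mask for this function:
	 *  - SF mode: only bits 0-2 are used
	 *  - MF mode: bits 0-2 as in SF, plus bits 4-7 (per-VN group
	 *    attentions); bit 3 stays masked
	 */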
7637 val = IS_MF(bp) ? 0xF7 : 0x7;
7638
7639 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7640 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7641
7642
7643 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7644 REG_WR(bp, reg,
7645 REG_RD(bp, reg) &
7646 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7647
7648 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7649 REG_WR(bp, reg,
7650 REG_RD(bp, reg) &
7651 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7652
7653 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7654
7655 if (!CHIP_IS_E1x(bp)) {
7656
7657
7658
7659 if (IS_MF_AFEX(bp))
7660 REG_WR(bp, BP_PORT(bp) ?
7661 NIG_REG_P1_HDRS_AFTER_BASIC :
7662 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7663 else
7664 REG_WR(bp, BP_PORT(bp) ?
7665 NIG_REG_P1_HDRS_AFTER_BASIC :
7666 NIG_REG_P0_HDRS_AFTER_BASIC,
7667 IS_MF_SD(bp) ? 7 : 6);
7668
7669 if (CHIP_IS_E3(bp))
7670 REG_WR(bp, BP_PORT(bp) ?
7671 NIG_REG_LLH1_MF_MODE :
7672 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7673 }
7674 if (!CHIP_IS_E3(bp))
7675 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7676
7677 if (!CHIP_IS_E1(bp)) {
7678
7679 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7680 (IS_MF_SD(bp) ? 0x1 : 0x2));
7681
7682 if (!CHIP_IS_E1x(bp)) {
7683 val = 0;
7684 switch (bp->mf_mode) {
7685 case MULTI_FUNCTION_SD:
7686 val = 1;
7687 break;
7688 case MULTI_FUNCTION_SI:
7689 case MULTI_FUNCTION_AFEX:
7690 val = 2;
7691 break;
7692 }
7693
7694 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7695 NIG_REG_LLH0_CLS_TYPE), val);
7696 }
7697 {
7698 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7699 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7700 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7701 }
7702 }
7703
7704
7705 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7706 if (val & MISC_SPIO_SPIO5) {
7707 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7708 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7709 val = REG_RD(bp, reg_addr);
7710 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7711 REG_WR(bp, reg_addr, val);
7712 }
7713
7714 if (CHIP_IS_E3B0(bp))
7715 bp->flags |= PTP_SUPPORTED;
7716
7717 return 0;
7718}
7719
7720static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7721{
7722 int reg;
7723 u32 wb_write[2];
7724
7725 if (CHIP_IS_E1(bp))
7726 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7727 else
7728 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7729
7730 wb_write[0] = ONCHIP_ADDR1(addr);
7731 wb_write[1] = ONCHIP_ADDR2(addr);
7732 REG_WR_DMAE(bp, reg, wb_write, 2);
7733}
7734
7735void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7736{
7737 u32 data, ctl, cnt = 100;
7738 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7739 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7740 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7741 u32 sb_bit = 1 << (idu_sb_id%32);
7742 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7743 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7744
7745
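	/* Not supported in backward-compatible IGU mode */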
7746 if (CHIP_INT_MODE_IS_BC(bp))
7747 return;
7748
7749 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7750 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7751 IGU_REGULAR_CLEANUP_SET |
7752 IGU_REGULAR_BCLEANUP;
7753
7754 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7755 func_encode << IGU_CTRL_REG_FID_SHIFT |
7756 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7757
7758 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7759 data, igu_addr_data);
7760 REG_WR(bp, igu_addr_data, data);
7761 barrier();
7762 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7763 ctl, igu_addr_ctl);
7764 REG_WR(bp, igu_addr_ctl, ctl);
7765 barrier();
7766
7767
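	/* wait for the cleanup to finish */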
7768 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7769 msleep(20);
7770
7771 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7772 DP(NETIF_MSG_HW,
7773 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7774 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7775 }
7776}
7777
7778static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7779{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true);
7781}
7782
7783static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7784{
7785 u32 i, base = FUNC_ILT_BASE(func);
7786 for (i = base; i < base + ILT_PER_FUNC; i++)
7787 bnx2x_ilt_wr(bp, i, 0);
7788}
7789
7790static void bnx2x_init_searcher(struct bnx2x *bp)
7791{
7792 int port = BP_PORT(bp);
7793 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7794
7795 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7796}
7797
7798static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7799{
7800 int rc;
7801 struct bnx2x_func_state_params func_params = {NULL};
7802 struct bnx2x_func_switch_update_params *switch_update_params =
7803 &func_params.params.switch_update;
7804
7805
7806 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7807 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7808
7809 func_params.f_obj = &bp->func_obj;
7810 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7811
7812
7813 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7814 &switch_update_params->changes);
7815 if (suspend)
7816 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7817 &switch_update_params->changes);
7818
7819 rc = bnx2x_func_state_change(bp, &func_params);
7820
7821 return rc;
7822}
7823
7824static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7825{
7826 int rc, i, port = BP_PORT(bp);
7827 int vlan_en = 0, mac_en[NUM_MACS];
7828
7829
7830 if (bp->mf_mode == SINGLE_FUNCTION) {
7831 bnx2x_set_rx_filter(&bp->link_params, 0);
7832 } else {
7833 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7834 NIG_REG_LLH0_FUNC_EN);
7835 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7836 NIG_REG_LLH0_FUNC_EN, 0);
7837 for (i = 0; i < NUM_MACS; i++) {
7838 mac_en[i] = REG_RD(bp, port ?
7839 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7840 4 * i) :
7841 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7842 4 * i));
7843 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7844 4 * i) :
7845 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7846 }
7847 }
7848
7849
7850 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7851 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7852
7853
7854
7855
7856
7857
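	/* Suspend tx-switching so no traffic flows through the parser while
	 * NIC mode is being changed.
	 */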
7858 rc = bnx2x_func_switch_update(bp, 1);
7859 if (rc) {
7860 BNX2X_ERR("Can't suspend tx-switching!\n");
7861 return rc;
7862 }
7863
7864
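	/* Disable NIC mode in the parser (PRS) */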
7865 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7866
7867
7868 if (bp->mf_mode == SINGLE_FUNCTION) {
7869 bnx2x_set_rx_filter(&bp->link_params, 1);
7870 } else {
7871 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7872 NIG_REG_LLH0_FUNC_EN, vlan_en);
7873 for (i = 0; i < NUM_MACS; i++) {
7874 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7875 4 * i) :
7876 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7877 mac_en[i]);
7878 }
7879 }
7880
7881
7882 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7883 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7884
7885
7886 rc = bnx2x_func_switch_update(bp, 0);
7887 if (rc) {
7888 BNX2X_ERR("Can't resume tx-switching!\n");
7889 return rc;
7890 }
7891
7892 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7893 return 0;
7894}
7895
7896int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7897{
7898 int rc;
7899
7900 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7901
7902 if (CONFIGURE_NIC_MODE(bp)) {
7903
7904 bnx2x_init_searcher(bp);
7905
7906
7907 rc = bnx2x_reset_nic_mode(bp);
7908 if (rc)
7909 BNX2X_ERR("Can't change NIC mode!\n");
7910 return rc;
7911 }
7912
7913 return 0;
7914}
7915
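/* Clear the PGLUE_B "was error" indication recorded for this function */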
7923static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7924{
7925 if (!CHIP_IS_E1x(bp))
7926 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7927 1 << BP_ABS_FUNC(bp));
7928}
7929
7930static int bnx2x_init_hw_func(struct bnx2x *bp)
7931{
7932 int port = BP_PORT(bp);
7933 int func = BP_FUNC(bp);
7934 int init_phase = PHASE_PF0 + func;
7935 struct bnx2x_ilt *ilt = BP_ILT(bp);
7936 u16 cdu_ilt_start;
7937 u32 addr, val;
7938 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7939 int i, main_mem_width, rc;
7940
7941 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7942
7943
7944 if (!CHIP_IS_E1x(bp)) {
7945 rc = bnx2x_pf_flr_clnup(bp);
7946 if (rc) {
7947 bnx2x_fw_dump(bp);
7948 return rc;
7949 }
7950 }
7951
7952
7953 if (bp->common.int_block == INT_BLOCK_HC) {
7954 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7955 val = REG_RD(bp, addr);
7956 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7957 REG_WR(bp, addr, val);
7958 }
7959
7960 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7961 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7962
7963 ilt = BP_ILT(bp);
7964 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7965
7966 if (IS_SRIOV(bp))
7967 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7968 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7969
7970
7971
7972
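	/* Since BNX2X_FIRST_VF_CID > 0 the PF L2 CIDs precede the VF CIDs,
	 * so reset the CDU start line back to the client start.
	 */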
7973 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7974 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7975 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7976 ilt->lines[cdu_ilt_start + i].page_mapping =
7977 bp->context[i].cxt_mapping;
7978 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7979 }
7980
7981 bnx2x_ilt_init_op(bp, INITOP_SET);
7982
7983 if (!CONFIGURE_NIC_MODE(bp)) {
7984 bnx2x_init_searcher(bp);
7985 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7986 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7987 } else {
7988
7989 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7990 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7991 }
7992
7993 if (!CHIP_IS_E1x(bp)) {
7994 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7995
7996
7997
7998
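		/* Use single-ISR mode in the IGU when the driver is not
		 * using MSI-X.
		 */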
7999 if (!(bp->flags & USING_MSIX_FLAG))
8000 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
8001
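		/* Give the PXP 20msec after the ILT init so no request is
		 * left in flight with a stale ILT address.
		 */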
8007 msleep(20);
8008
8009
8010
8011
8012
8013 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8014
8015 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8016 }
8017
8018 bp->dmae_ready = 1;
8019
8020 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8021
8022 bnx2x_clean_pglue_errors(bp);
8023
8024 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8025 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8026 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8027 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8028 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8029 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8030 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8031 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8032 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8033 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8034 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8035 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8036 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8037
8038 if (!CHIP_IS_E1x(bp))
8039 REG_WR(bp, QM_REG_PF_EN, 1);
8040
8041 if (!CHIP_IS_E1x(bp)) {
8042 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8043 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8044 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8045 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8046 }
8047 bnx2x_init_block(bp, BLOCK_QM, init_phase);
8048
8049 bnx2x_init_block(bp, BLOCK_TM, init_phase);
8050 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8051 REG_WR(bp, DORQ_REG_MODE_ACT, 1);
8052
8053 bnx2x_iov_init_dq(bp);
8054
8055 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8056 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8057 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8058 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8059 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8060 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8061 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8062 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8063 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8064 if (!CHIP_IS_E1x(bp))
8065 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8066
8067 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8068
8069 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8070
8071 if (!CHIP_IS_E1x(bp))
8072 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8073
8074 if (IS_MF(bp)) {
8075 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8076 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8077 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8078 bp->mf_ov);
8079 }
8080 }
8081
8082 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8083
8084
8085 if (bp->common.int_block == INT_BLOCK_HC) {
8086 if (CHIP_IS_E1H(bp)) {
8087 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8088
8089 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8090 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8091 }
8092 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8093
8094 } else {
8095 int num_segs, sb_idx, prod_offset;
8096
8097 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8098
8099 if (!CHIP_IS_E1x(bp)) {
8100 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8101 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8102 }
8103
8104 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8105
8106 if (!CHIP_IS_E1x(bp)) {
8107 int dsb_idx = 0;
8108
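			/* Clear the IGU producer memory for each non-default
			 * status block, using the number of segments that
			 * matches the current IGU mode (BC vs normal).
			 */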
8129 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8130 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8131 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8132 prod_offset = (bp->igu_base_sb + sb_idx) *
8133 num_segs;
8134
8135 for (i = 0; i < num_segs; i++) {
8136 addr = IGU_REG_PROD_CONS_MEMORY +
8137 (prod_offset + i) * 4;
8138 REG_WR(bp, addr, 0);
8139 }
8140
8141 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8142 USTORM_ID, 0, IGU_INT_NOP, 1);
8143 bnx2x_igu_clear_sb(bp,
8144 bp->igu_base_sb + sb_idx);
8145 }
8146
8147
8148 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8149 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8150
8151 if (CHIP_MODE_IS_4_PORT(bp))
8152 dsb_idx = BP_FUNC(bp);
8153 else
8154 dsb_idx = BP_VN(bp);
8155
8156 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8157 IGU_BC_BASE_DSB_PROD + dsb_idx :
8158 IGU_NORM_BASE_DSB_PROD + dsb_idx);
8159
8160
8161
8162
8163
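			/* IGU producers come in chunks of E1HVN_MAX (4),
			 * regardless of the chip mode.
			 */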
8164 for (i = 0; i < (num_segs * E1HVN_MAX);
8165 i += E1HVN_MAX) {
8166 addr = IGU_REG_PROD_CONS_MEMORY +
8167 (prod_offset + i)*4;
8168 REG_WR(bp, addr, 0);
8169 }
8170
8171 if (CHIP_INT_MODE_IS_BC(bp)) {
8172 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8173 USTORM_ID, 0, IGU_INT_NOP, 1);
8174 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8175 CSTORM_ID, 0, IGU_INT_NOP, 1);
8176 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8177 XSTORM_ID, 0, IGU_INT_NOP, 1);
8178 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8179 TSTORM_ID, 0, IGU_INT_NOP, 1);
8180 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8181 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8182 } else {
8183 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8184 USTORM_ID, 0, IGU_INT_NOP, 1);
8185 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8186 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8187 }
8188 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8189
8190
8191
8192 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8193 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8194 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8195 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8196 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8197 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8198 }
8199 }
8200
8201
8202 REG_WR(bp, 0x2114, 0xffffffff);
8203 REG_WR(bp, 0x2120, 0xffffffff);
8204
8205 if (CHIP_IS_E1x(bp)) {
8206 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
8207 main_mem_base = HC_REG_MAIN_MEMORY +
8208 BP_PORT(bp) * (main_mem_size * 4);
8209 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8210 main_mem_width = 8;
8211
8212 val = REG_RD(bp, main_mem_prty_clr);
8213 if (val)
8214 DP(NETIF_MSG_HW,
8215 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8216 val);
8217
8218
8219 for (i = main_mem_base;
8220 i < main_mem_base + main_mem_size * 4;
8221 i += main_mem_width) {
8222 bnx2x_read_dmae(bp, i, main_mem_width / 4);
8223 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8224 i, main_mem_width / 4);
8225 }
8226
8227 REG_RD(bp, main_mem_prty_clr);
8228 }
8229
8230#ifdef BNX2X_STOP_ON_ERROR
8231
8232 REG_WR8(bp, BAR_USTRORM_INTMEM +
8233 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8234 REG_WR8(bp, BAR_TSTRORM_INTMEM +
8235 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8236 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8237 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8238 REG_WR8(bp, BAR_XSTRORM_INTMEM +
8239 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8240#endif
8241
8242 bnx2x_phy_probe(&bp->link_params);
8243
8244 return 0;
8245}
8246
8247void bnx2x_free_mem_cnic(struct bnx2x *bp)
8248{
8249 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8250
8251 if (!CHIP_IS_E1x(bp))
8252 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8253 sizeof(struct host_hc_status_block_e2));
8254 else
8255 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8256 sizeof(struct host_hc_status_block_e1x));
8257
8258 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8259}
8260
8261void bnx2x_free_mem(struct bnx2x *bp)
8262{
8263 int i;
8264
8265 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8266 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8267
8268 if (IS_VF(bp))
8269 return;
8270
8271 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8272 sizeof(struct host_sp_status_block));
8273
8274 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8275 sizeof(struct bnx2x_slowpath));
8276
8277 for (i = 0; i < L2_ILT_LINES(bp); i++)
8278 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8279 bp->context[i].size);
8280 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8281
8282 BNX2X_FREE(bp->ilt->lines);
8283
8284 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8285
8286 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8287 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8288
8289 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8290
8291 bnx2x_iov_free_mem(bp);
8292}
8293
8294int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8295{
8296 if (!CHIP_IS_E1x(bp)) {
8297
8298 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8299 sizeof(struct host_hc_status_block_e2));
8300 if (!bp->cnic_sb.e2_sb)
8301 goto alloc_mem_err;
8302 } else {
8303 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8304 sizeof(struct host_hc_status_block_e1x));
8305 if (!bp->cnic_sb.e1x_sb)
8306 goto alloc_mem_err;
8307 }
8308
8309 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8310
8311 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8312 if (!bp->t2)
8313 goto alloc_mem_err;
8314 }
8315
8316
8317 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8318 &bp->slowpath->drv_info_to_mcp;
8319
8320 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8321 goto alloc_mem_err;
8322
8323 return 0;
8324
8325alloc_mem_err:
8326 bnx2x_free_mem_cnic(bp);
8327 BNX2X_ERR("Can't allocate memory\n");
8328 return -ENOMEM;
8329}
8330
8331int bnx2x_alloc_mem(struct bnx2x *bp)
8332{
8333 int i, allocated, context_size;
8334
8335 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8336
8337 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8338 if (!bp->t2)
8339 goto alloc_mem_err;
8340 }
8341
8342 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8343 sizeof(struct host_sp_status_block));
8344 if (!bp->def_status_blk)
8345 goto alloc_mem_err;
8346
8347 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8348 sizeof(struct bnx2x_slowpath));
8349 if (!bp->slowpath)
8350 goto alloc_mem_err;
8351
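	/* Allocate the CDU context memory in CDU_ILT_PAGE_SZ chunks; the last
	 * chunk may be smaller than a full ILT page.
	 */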
8365 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8366
8367 for (i = 0, allocated = 0; allocated < context_size; i++) {
8368 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8369 (context_size - allocated));
8370 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8371 bp->context[i].size);
8372 if (!bp->context[i].vcxt)
8373 goto alloc_mem_err;
8374 allocated += bp->context[i].size;
8375 }
8376 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8377 GFP_KERNEL);
8378 if (!bp->ilt->lines)
8379 goto alloc_mem_err;
8380
8381 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8382 goto alloc_mem_err;
8383
8384 if (bnx2x_iov_alloc_mem(bp))
8385 goto alloc_mem_err;
8386
8387
8388 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8389 if (!bp->spq)
8390 goto alloc_mem_err;
8391
8392
8393 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8394 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8395 if (!bp->eq_ring)
8396 goto alloc_mem_err;
8397
8398 return 0;
8399
8400alloc_mem_err:
8401 bnx2x_free_mem(bp);
8402 BNX2X_ERR("Can't allocate memory\n");
8403 return -ENOMEM;
8404}
8405
8406
8407
8408
8409
8410int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8411 struct bnx2x_vlan_mac_obj *obj, bool set,
8412 int mac_type, unsigned long *ramrod_flags)
8413{
8414 int rc;
8415 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8416
8417 memset(&ramrod_param, 0, sizeof(ramrod_param));
8418
8419
8420 ramrod_param.vlan_mac_obj = obj;
8421 ramrod_param.ramrod_flags = *ramrod_flags;
8422
8423
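	/* Fill the user request only when this is not a continuation of a
	 * pending command.
	 */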
8424 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8425 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8426
8427 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8428
8429
8430 if (set)
8431 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8432 else
8433 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8434 }
8435
8436 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8437
8438 if (rc == -EEXIST) {
8439 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8440
8441 rc = 0;
8442 } else if (rc < 0)
8443 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8444
8445 return rc;
8446}
8447
8448int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8449 struct bnx2x_vlan_mac_obj *obj, bool set,
8450 unsigned long *ramrod_flags)
8451{
8452 int rc;
8453 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8454
8455 memset(&ramrod_param, 0, sizeof(ramrod_param));
8456
8457
8458 ramrod_param.vlan_mac_obj = obj;
8459 ramrod_param.ramrod_flags = *ramrod_flags;
8460
8461
8462 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8463 ramrod_param.user_req.u.vlan.vlan = vlan;
8464 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
8465
8466 if (set)
8467 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8468 else
8469 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8470 }
8471
8472 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8473
8474 if (rc == -EEXIST) {
8475
8476 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8477 rc = 0;
8478 } else if (rc < 0) {
8479 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8480 }
8481
8482 return rc;
8483}
8484
8485void bnx2x_clear_vlan_info(struct bnx2x *bp)
8486{
8487 struct bnx2x_vlan_entry *vlan;
8488
8489
8490 list_for_each_entry(vlan, &bp->vlan_reg, link)
8491 vlan->hw = false;
8492
8493 bp->vlan_cnt = 0;
8494}
8495
8496static int bnx2x_del_all_vlans(struct bnx2x *bp)
8497{
8498 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8499 unsigned long ramrod_flags = 0, vlan_flags = 0;
8500 int rc;
8501
8502 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8503 __set_bit(BNX2X_VLAN, &vlan_flags);
8504 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
8505 if (rc)
8506 return rc;
8507
8508 bnx2x_clear_vlan_info(bp);
8509
8510 return 0;
8511}
8512
8513int bnx2x_del_all_macs(struct bnx2x *bp,
8514 struct bnx2x_vlan_mac_obj *mac_obj,
8515 int mac_type, bool wait_for_comp)
8516{
8517 int rc;
8518 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8519
8520
8521 if (wait_for_comp)
8522 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8523
8524
8525 __set_bit(mac_type, &vlan_mac_flags);
8526
8527 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8528 if (rc < 0)
8529 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8530
8531 return rc;
8532}
8533
8534int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8535{
8536 if (IS_PF(bp)) {
8537 unsigned long ramrod_flags = 0;
8538
8539 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8540 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8541 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8542 &bp->sp_objs->mac_obj, set,
8543 BNX2X_ETH_MAC, &ramrod_flags);
8544 } else {
8545 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8546 bp->fp->index, set);
8547 }
8548}
8549
8550int bnx2x_setup_leading(struct bnx2x *bp)
8551{
8552 if (IS_PF(bp))
8553 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8554 else
8555 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8556}
8564
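/* Select the interrupt mode requested by the int_mode module parameter:
 * try MSI-X by default and fall back to MSI / INT#x with a single ethernet
 * queue if it cannot be enabled.
 */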
8565int bnx2x_set_int_mode(struct bnx2x *bp)
8566{
8567 int rc = 0;
8568
8569 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8570 BNX2X_ERR("VF not loaded since interrupt mode is not MSI-X\n");
8571 return -EINVAL;
8572 }
8573
8574 switch (int_mode) {
8575 case BNX2X_INT_MODE_MSIX:
8576
8577 rc = bnx2x_enable_msix(bp);
8578
8579
8580 if (!rc)
8581 return 0;
8582
8583
8584 if (rc && IS_VF(bp))
8585 return rc;
8586
8587
8588 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8589 bp->num_queues,
8590 1 + bp->num_cnic_queues);
8591
8592
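/* fall through */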
8593 case BNX2X_INT_MODE_MSI:
8594 bnx2x_enable_msi(bp);
8595
8596
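/* fall through */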
8597 case BNX2X_INT_MODE_INTX:
8598 bp->num_ethernet_queues = 1;
8599 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8600 BNX2X_DEV_INFO("set number of queues to 1\n");
8601 break;
8602 default:
8603 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8604 return -EINVAL;
8605 }
8606 return 0;
8607}
8608
8609
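/* Number of ILT lines needed to map all connection IDs, including the VF
 * CIDs when SR-IOV is enabled.
 */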
8610static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8611{
8612 if (IS_SRIOV(bp))
8613 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8614 return L2_ILT_LINES(bp);
8615}
8616
8617void bnx2x_ilt_set_info(struct bnx2x *bp)
8618{
8619 struct ilt_client_info *ilt_client;
8620 struct bnx2x_ilt *ilt = BP_ILT(bp);
8621 u16 line = 0;
8622
8623 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8624 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8625
8626
8627 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8628 ilt_client->client_num = ILT_CLIENT_CDU;
8629 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8630 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8631 ilt_client->start = line;
8632 line += bnx2x_cid_ilt_lines(bp);
8633
8634 if (CNIC_SUPPORT(bp))
8635 line += CNIC_ILT_LINES;
8636 ilt_client->end = line - 1;
8637
8638 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8639 ilt_client->start,
8640 ilt_client->end,
8641 ilt_client->page_size,
8642 ilt_client->flags,
8643 ilog2(ilt_client->page_size >> 12));
8644
8645
8646 if (QM_INIT(bp->qm_cid_count)) {
8647 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8648 ilt_client->client_num = ILT_CLIENT_QM;
8649 ilt_client->page_size = QM_ILT_PAGE_SZ;
8650 ilt_client->flags = 0;
8651 ilt_client->start = line;
8652
8653
8654 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8655 QM_ILT_PAGE_SZ);
8656
8657 ilt_client->end = line - 1;
8658
8659 DP(NETIF_MSG_IFUP,
8660 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8661 ilt_client->start,
8662 ilt_client->end,
8663 ilt_client->page_size,
8664 ilt_client->flags,
8665 ilog2(ilt_client->page_size >> 12));
8666 }
8667
8668 if (CNIC_SUPPORT(bp)) {
8669
8670 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8671 ilt_client->client_num = ILT_CLIENT_SRC;
8672 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8673 ilt_client->flags = 0;
8674 ilt_client->start = line;
8675 line += SRC_ILT_LINES;
8676 ilt_client->end = line - 1;
8677
8678 DP(NETIF_MSG_IFUP,
8679 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8680 ilt_client->start,
8681 ilt_client->end,
8682 ilt_client->page_size,
8683 ilt_client->flags,
8684 ilog2(ilt_client->page_size >> 12));
8685
8686
8687 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8688 ilt_client->client_num = ILT_CLIENT_TM;
8689 ilt_client->page_size = TM_ILT_PAGE_SZ;
8690 ilt_client->flags = 0;
8691 ilt_client->start = line;
8692 line += TM_ILT_LINES;
8693 ilt_client->end = line - 1;
8694
8695 DP(NETIF_MSG_IFUP,
8696 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8697 ilt_client->start,
8698 ilt_client->end,
8699 ilt_client->page_size,
8700 ilt_client->flags,
8701 ilog2(ilt_client->page_size >> 12));
8702 }
8703
8704 BUG_ON(line > ILT_MAX_LINES);
8705}
8717
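/* bnx2x_pf_q_prep_init - prepare the INIT transition parameters for a queue:
 * host-coalescing flags and rates, status block and CQ indices, and the CDU
 * context pointers for every traffic class (CoS).
 */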
8718static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8719 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8720{
8721 u8 cos;
8722 int cxt_index, cxt_offset;
8723
8724
8725 if (!IS_FCOE_FP(fp)) {
8726 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8727 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8728
8729
8730
8731
8732 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8733 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8734
8735
8736 init_params->rx.hc_rate = bp->rx_ticks ?
8737 (1000000 / bp->rx_ticks) : 0;
8738 init_params->tx.hc_rate = bp->tx_ticks ?
8739 (1000000 / bp->tx_ticks) : 0;
8740
8741
8742 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8743 fp->fw_sb_id;
8744
8745
8746
8747
8748
8749 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8750 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8751 }
8752
8753
8754 init_params->max_cos = fp->max_cos;
8755
8756 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8757 fp->index, init_params->max_cos);
8758
8759
8760 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8761 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8762 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8763 ILT_PAGE_CIDS);
8764 init_params->cxts[cos] =
8765 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8766 }
8767}
8768
8769static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8770 struct bnx2x_queue_state_params *q_params,
8771 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8772 int tx_index, bool leading)
8773{
8774 memset(tx_only_params, 0, sizeof(*tx_only_params));
8775
8776
8777 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8778
8779
8780 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8781
8782
8783 tx_only_params->cid_index = tx_index;
8784
8785
8786 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8787
8788
8789 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8790
8791 DP(NETIF_MSG_IFUP,
8792 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8793 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8794 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8795 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8796
8797
8798 return bnx2x_queue_state_change(bp, q_params);
8799}
8811
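/* bnx2x_setup_queue - bring a queue up by driving its state machine through
 * INIT and SETUP, then issue SETUP_TX_ONLY for any additional CoS tx queues.
 */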
8812int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8813 bool leading)
8814{
8815 struct bnx2x_queue_state_params q_params = {NULL};
8816 struct bnx2x_queue_setup_params *setup_params =
8817 &q_params.params.setup;
8818 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8819 &q_params.params.tx_only;
8820 int rc;
8821 u8 tx_index;
8822
8823 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8824
8825
8826 if (!IS_FCOE_FP(fp))
8827 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8828 IGU_INT_ENABLE, 0);
8829
8830 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8831
8832 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8833
8834
8835 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8836
8837
8838 q_params.cmd = BNX2X_Q_CMD_INIT;
8839
8840
8841 rc = bnx2x_queue_state_change(bp, &q_params);
8842 if (rc) {
8843 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8844 return rc;
8845 }
8846
8847 DP(NETIF_MSG_IFUP, "init complete\n");
8848
8849
8850 memset(setup_params, 0, sizeof(*setup_params));
8851
8852
8853 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8854
8855
8856 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8857 FIRST_TX_COS_INDEX);
8858
8859 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8860 &setup_params->rxq_params);
8861
8862 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8863 FIRST_TX_COS_INDEX);
8864
8865
8866 q_params.cmd = BNX2X_Q_CMD_SETUP;
8867
8868 if (IS_FCOE_FP(fp))
8869 bp->fcoe_init = true;
8870
8871
8872 rc = bnx2x_queue_state_change(bp, &q_params);
8873 if (rc) {
8874 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8875 return rc;
8876 }
8877
8878
8879 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8880 tx_index < fp->max_cos;
8881 tx_index++) {
8882
8883
8884 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8885 tx_only_params, tx_index, leading);
8886 if (rc) {
8887 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8888 fp->index, tx_index);
8889 return rc;
8890 }
8891 }
8892
8893 return rc;
8894}
8895
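/* bnx2x_stop_queue - tear a queue down: terminate and CFC-delete the tx-only
 * connections, then halt, terminate and CFC-delete the primary connection.
 */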
8896static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8897{
8898 struct bnx2x_fastpath *fp = &bp->fp[index];
8899 struct bnx2x_fp_txdata *txdata;
8900 struct bnx2x_queue_state_params q_params = {NULL};
8901 int rc, tx_index;
8902
8903 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8904
8905 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8906
8907 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8908
8909
8910 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8911 tx_index < fp->max_cos;
8912 tx_index++) {
8913
8914
8915 txdata = fp->txdata_ptr[tx_index];
8916
8917 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8918 txdata->txq_index);
8919
8920
8921 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8922 memset(&q_params.params.terminate, 0,
8923 sizeof(q_params.params.terminate));
8924 q_params.params.terminate.cid_index = tx_index;
8925
8926 rc = bnx2x_queue_state_change(bp, &q_params);
8927 if (rc)
8928 return rc;
8929
8930
8931 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8932 memset(&q_params.params.cfc_del, 0,
8933 sizeof(q_params.params.cfc_del));
8934 q_params.params.cfc_del.cid_index = tx_index;
8935 rc = bnx2x_queue_state_change(bp, &q_params);
8936 if (rc)
8937 return rc;
8938 }
8939
8940
8941 q_params.cmd = BNX2X_Q_CMD_HALT;
8942 rc = bnx2x_queue_state_change(bp, &q_params);
8943 if (rc)
8944 return rc;
8945
8946
8947 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8948 memset(&q_params.params.terminate, 0,
8949 sizeof(q_params.params.terminate));
8950 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8951 rc = bnx2x_queue_state_change(bp, &q_params);
8952 if (rc)
8953 return rc;
8954
8955 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8956 memset(&q_params.params.cfc_del, 0,
8957 sizeof(q_params.params.cfc_del));
8958 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8959 return bnx2x_queue_state_change(bp, &q_params);
8960}
8961
8962static void bnx2x_reset_func(struct bnx2x *bp)
8963{
8964 int port = BP_PORT(bp);
8965 int func = BP_FUNC(bp);
8966 int i;
8967
8968
8969 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8970 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8971 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8972 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8973
8974
8975 for_each_eth_queue(bp, i) {
8976 struct bnx2x_fastpath *fp = &bp->fp[i];
8977 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8978 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8979 SB_DISABLED);
8980 }
8981
8982 if (CNIC_LOADED(bp))
8983
8984 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8985 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8986 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8987
8988
8989 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8990 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8991 SB_DISABLED);
8992
8993 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8994 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8995 0);
8996
8997
8998 if (bp->common.int_block == INT_BLOCK_HC) {
8999 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
9000 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
9001 } else {
9002 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
9003 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
9004 }
9005
9006 if (CNIC_LOADED(bp)) {
9007
9008 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9009
9010
9011
9012
9013 for (i = 0; i < 200; i++) {
9014 usleep_range(10000, 20000);
9015 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
9016 break;
9017 }
9018 }
9019
9020 bnx2x_clear_func_ilt(bp, func);
9021
9022
9023
9024
9025 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9026 struct ilt_client_info ilt_cli;
9027
9028 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
9029 ilt_cli.start = 0;
9030 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9031 ilt_cli.client_num = ILT_CLIENT_TM;
9032
9033 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9034 }
9035
9036
9037 if (!CHIP_IS_E1x(bp))
9038 bnx2x_pf_disable(bp);
9039
9040 bp->dmae_ready = 0;
9041}
9042
9043static void bnx2x_reset_port(struct bnx2x *bp)
9044{
9045 int port = BP_PORT(bp);
9046 u32 val;
9047
9048
9049 bnx2x__link_reset(bp);
9050
9051 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9052
9053
9054 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9055
9056 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9057 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9058
9059
9060 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9061
9062 msleep(100);
9063
9064 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9065 if (val)
9066 DP(NETIF_MSG_IFDOWN,
9067 "BRB1 is not empty %d blocks are occupied\n", val);
9068
9069
9070}
9071
9072static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9073{
9074 struct bnx2x_func_state_params func_params = {NULL};
9075
9076
9077 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9078
9079 func_params.f_obj = &bp->func_obj;
9080 func_params.cmd = BNX2X_F_CMD_HW_RESET;
9081
9082 func_params.params.hw_init.load_phase = load_code;
9083
9084 return bnx2x_func_state_change(bp, &func_params);
9085}
9086
9087static int bnx2x_func_stop(struct bnx2x *bp)
9088{
9089 struct bnx2x_func_state_params func_params = {NULL};
9090 int rc;
9091
9092
9093 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9094 func_params.f_obj = &bp->func_obj;
9095 func_params.cmd = BNX2X_F_CMD_STOP;
9096
9097
9098
9099
9100
9101
9102
9103 rc = bnx2x_func_state_change(bp, &func_params);
9104 if (rc) {
9105#ifdef BNX2X_STOP_ON_ERROR
9106 return rc;
9107#else
9108 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9109 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9110 return bnx2x_func_state_change(bp, &func_params);
9111#endif
9112 }
9113
9114 return 0;
9115}
9124
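/* bnx2x_send_unload_req - request an unload mode from the MCP (programming
 * the WoL MAC filter and PME when WoL is enabled), or derive the mode from
 * the driver load counts when no MCP is present. Returns the
 * COMMON/PORT/FUNCTION unload response code.
 */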
9125u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9126{
9127 u32 reset_code = 0;
9128 int port = BP_PORT(bp);
9129
9130
9131 if (unload_mode == UNLOAD_NORMAL)
9132 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9133
9134 else if (bp->flags & NO_WOL_FLAG)
9135 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9136
9137 else if (bp->wol) {
9138 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9139 u8 *mac_addr = bp->dev->dev_addr;
9140 struct pci_dev *pdev = bp->pdev;
9141 u32 val;
9142 u16 pmc;
9143
9144
9145
9146
9147 u8 entry = (BP_VN(bp) + 1)*8;
9148
9149 val = (mac_addr[0] << 8) | mac_addr[1];
9150 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9151
9152 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9153 (mac_addr[4] << 8) | mac_addr[5];
9154 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9155
9156
9157 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9158 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9159 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9160
9161 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9162
9163 } else
9164 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9165
9166
9167 if (!BP_NOMCP(bp))
9168 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9169 else {
9170 int path = BP_PATH(bp);
9171
9172 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
9173 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9174 bnx2x_load_count[path][2]);
9175 bnx2x_load_count[path][0]--;
9176 bnx2x_load_count[path][1 + port]--;
9177 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
9178 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9179 bnx2x_load_count[path][2]);
9180 if (bnx2x_load_count[path][0] == 0)
9181 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9182 else if (bnx2x_load_count[path][1 + port] == 0)
9183 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9184 else
9185 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9186 }
9187
9188 return reset_code;
9189}
9196
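/* bnx2x_send_unload_done - report UNLOAD_DONE to the MCP, optionally asking
 * it to skip the link reset so the link stays up.
 */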
9197void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9198{
9199 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9200
9201
9202 if (!BP_NOMCP(bp))
9203 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9204}
9205
9206static int bnx2x_func_wait_started(struct bnx2x *bp)
9207{
9208 int tout = 50;
9209 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9210
9211 if (!bp->port.pmf)
9212 return 0;
9228
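/* Quiesce the slow-path interrupt and the bnx2x work queues, then poll for
 * up to a second for the function to return to the STARTED state.
 */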
9229 if (msix)
9230 synchronize_irq(bp->msix_table[0].vector);
9231 else
9232 synchronize_irq(bp->pdev->irq);
9233
9234 flush_workqueue(bnx2x_wq);
9235 flush_workqueue(bnx2x_iov_wq);
9236
9237 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9238 BNX2X_F_STATE_STARTED && tout--)
9239 msleep(20);
9240
9241 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9242 BNX2X_F_STATE_STARTED) {
9243#ifdef BNX2X_STOP_ON_ERROR
9244 BNX2X_ERR("Wrong function state\n");
9245 return -EBUSY;
9246#else
9247
9248
9249
9250
9251 struct bnx2x_func_state_params func_params = {NULL};
9252
9253 DP(NETIF_MSG_IFDOWN,
9254 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9255
9256 func_params.f_obj = &bp->func_obj;
9257 __set_bit(RAMROD_DRV_CLR_ONLY,
9258 &func_params.ramrod_flags);
9259
9260
9261 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9262 bnx2x_func_state_change(bp, &func_params);
9263
9264
9265 func_params.cmd = BNX2X_F_CMD_TX_START;
9266 return bnx2x_func_state_change(bp, &func_params);
9267#endif
9268 }
9269
9270 return 0;
9271}
9272
9273static void bnx2x_disable_ptp(struct bnx2x *bp)
9274{
9275 int port = BP_PORT(bp);
9276
9277
9278 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9279 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9280
9281
9282 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9283 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9284 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9285 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9286 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9287 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9288 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9289 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9290
9291
9292 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9293 NIG_REG_P0_PTP_EN, 0x0);
9294}
9295
9296
9297static void bnx2x_stop_ptp(struct bnx2x *bp)
9298{
9299
9300
9301
9302 cancel_work_sync(&bp->ptp_task);
9303
9304 if (bp->ptp_tx_skb) {
9305 dev_kfree_skb_any(bp->ptp_tx_skb);
9306 bp->ptp_tx_skb = NULL;
9307 }
9308
9309
9310 bnx2x_disable_ptp(bp);
9311
9312 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9313}
9314
9315void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9316{
9317 int port = BP_PORT(bp);
9318 int i, rc = 0;
9319 u8 cos;
9320 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9321 u32 reset_code;
9322
9323
9324 for_each_tx_queue(bp, i) {
9325 struct bnx2x_fastpath *fp = &bp->fp[i];
9326
9327 for_each_cos_in_tx_queue(fp, cos)
9328 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9329#ifdef BNX2X_STOP_ON_ERROR
9330 if (rc)
9331 return;
9332#endif
9333 }
9334
9335
9336 usleep_range(1000, 2000);
9337
9338
9339 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9340 false);
9341 if (rc < 0)
9342 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9343
9344
9345 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9346 true);
9347 if (rc < 0)
9348 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9349 rc);
9350
9351
9352
9353
9354
9355 if (!CHIP_IS_E1x(bp)) {
9356
9357 rc = bnx2x_del_all_vlans(bp);
9358 if (rc < 0)
9359 BNX2X_ERR("Failed to delete all VLANs\n");
9360 }
9361
9362
9363 if (!CHIP_IS_E1(bp))
9364 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9365
9366
9367
9368
9369
9370 netif_addr_lock_bh(bp->dev);
9371
9372 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9373 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9374 else if (bp->slowpath)
9375 bnx2x_set_storm_rx_mode(bp);
9376
9377
9378 rparam.mcast_obj = &bp->mcast_obj;
9379 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9380 if (rc < 0)
9381 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9382
9383 netif_addr_unlock_bh(bp->dev);
9384
9385 bnx2x_iov_chip_cleanup(bp);
9386
9387
9388
9389
9390
9391
9392 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9393
9394
9395
9396
9397
9398 rc = bnx2x_func_wait_started(bp);
9399 if (rc) {
9400 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9401#ifdef BNX2X_STOP_ON_ERROR
9402 return;
9403#endif
9404 }
9405
9406
9407
9408
9409 for_each_eth_queue(bp, i)
9410 if (bnx2x_stop_queue(bp, i))
9411#ifdef BNX2X_STOP_ON_ERROR
9412 return;
9413#else
9414 goto unload_error;
9415#endif
9416
9417 if (CNIC_LOADED(bp)) {
9418 for_each_cnic_queue(bp, i)
9419 if (bnx2x_stop_queue(bp, i))
9420#ifdef BNX2X_STOP_ON_ERROR
9421 return;
9422#else
9423 goto unload_error;
9424#endif
9425 }
9426
9427
9428
9429
9430 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9431 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9432
9433#ifndef BNX2X_STOP_ON_ERROR
9434unload_error:
9435#endif
9436 rc = bnx2x_func_stop(bp);
9437 if (rc) {
9438 BNX2X_ERR("Function stop failed!\n");
9439#ifdef BNX2X_STOP_ON_ERROR
9440 return;
9441#endif
9442 }
9443
9444
9445
9446
9447
9448
9449 if (bp->flags & PTP_SUPPORTED) {
9450 bnx2x_stop_ptp(bp);
9451 if (bp->ptp_clock) {
9452 ptp_clock_unregister(bp->ptp_clock);
9453 bp->ptp_clock = NULL;
9454 }
9455 }
9456
9457
9458 bnx2x_netif_stop(bp, 1);
9459
9460 bnx2x_del_all_napi(bp);
9461 if (CNIC_LOADED(bp))
9462 bnx2x_del_all_napi_cnic(bp);
9463
9464
9465 bnx2x_free_irq(bp);
9466
9467
9468
9469
9470
9471
9472 if (!pci_channel_offline(bp->pdev)) {
9473 rc = bnx2x_reset_hw(bp, reset_code);
9474 if (rc)
9475 BNX2X_ERR("HW_RESET failed\n");
9476 }
9477
9478
9479 bnx2x_send_unload_done(bp, keep_link);
9480}
9481
9482void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9483{
9484 u32 val;
9485
9486 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9487
9488 if (CHIP_IS_E1(bp)) {
9489 int port = BP_PORT(bp);
9490 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9491 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9492
9493 val = REG_RD(bp, addr);
9494 val &= ~(0x300);
9495 REG_WR(bp, addr, val);
9496 } else {
9497 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9498 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9499 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9500 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9501 }
9502}
9503
9504
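/* Close or re-open "gates" #2, #3 and #4: host doorbells, internal PXP
 * writes and HC/IGU interrupt generation.
 */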
9505static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9506{
9507 u32 val;
9508
9509
9510 if (!CHIP_IS_E1(bp)) {
9511
9512 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9513
9514 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9515 }
9516
9517
9518 if (CHIP_IS_E1x(bp)) {
9519
9520 val = REG_RD(bp, HC_REG_CONFIG_1);
9521 REG_WR(bp, HC_REG_CONFIG_1,
9522 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9523 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9524
9525 val = REG_RD(bp, HC_REG_CONFIG_0);
9526 REG_WR(bp, HC_REG_CONFIG_0,
9527 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9528 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9529 } else {
9530
9531 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9532
9533 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9534 (!close) ?
9535 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9536 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9537 }
9538
9539 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9540 close ? "closing" : "opening");
9541}
9542
9543#define SHARED_MF_CLP_MAGIC 0x80000000
9544
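/* Set the CLP magic bit in the shared MF configuration, saving its previous
 * value in *magic_val.
 */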
9545static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9546{
9547
9548 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9549 *magic_val = val & SHARED_MF_CLP_MAGIC;
9550 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9551}
9558
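/* Restore the CLP magic bit to the value saved by bnx2x_clp_reset_prep(). */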
9559static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9560{
9561
9562 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9563 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9564 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9565}
9574
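/* Prepare for an MCP reset: save the CLP magic value and clear the shmem
 * validity map for this port.
 */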
9575static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9576{
9577 u32 shmem;
9578 u32 validity_offset;
9579
9580 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9581
9582
9583 if (!CHIP_IS_E1(bp))
9584 bnx2x_clp_reset_prep(bp, magic_val);
9585
9586
9587 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9588 validity_offset =
9589 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9590
9591
9592 if (shmem > 0)
9593 REG_WR(bp, shmem + validity_offset, 0);
9594}
9595
9596#define MCP_TIMEOUT 5000
9597#define MCP_ONE_TIMEOUT 100
9598
9599
9600
9601
9602
9603
9604static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9605{
9606
9607
9608 if (CHIP_REV_IS_SLOW(bp))
9609 msleep(MCP_ONE_TIMEOUT*10);
9610 else
9611 msleep(MCP_ONE_TIMEOUT);
9612}
9613
9614
9615
9616
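/* Poll for a valid MCP/shmem validity signature. Sets NO_MCP_FLAG and fails
 * if the shared memory address reads back as all ones.
 */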
9617static int bnx2x_init_shmem(struct bnx2x *bp)
9618{
9619 int cnt = 0;
9620 u32 val = 0;
9621
9622 do {
9623 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9624
9625
9626
9627
9628 if (bp->common.shmem_base == 0xFFFFFFFF) {
9629 bp->flags |= NO_MCP_FLAG;
9630 return -ENODEV;
9631 }
9632
9633 if (bp->common.shmem_base) {
9634 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9635 if (val & SHR_MEM_VALIDITY_MB)
9636 return 0;
9637 }
9638
9639 bnx2x_mcp_wait_one(bp);
9640
9641 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9642
9643 BNX2X_ERR("BAD MCP validity signature\n");
9644
9645 return -ENODEV;
9646}
9647
9648static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9649{
9650 int rc = bnx2x_init_shmem(bp);
9651
9652
9653 if (!CHIP_IS_E1(bp))
9654 bnx2x_clp_reset_done(bp, magic_val);
9655
9656 return rc;
9657}
9658
9659static void bnx2x_pxp_prep(struct bnx2x *bp)
9660{
9661 if (!CHIP_IS_E1(bp)) {
9662 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9663 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9664 }
9665}
9676
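/* Assert the chip reset bits for a "process kill", skipping the blocks whose
 * state must survive (PXP, HC, MCP-related, GRC and the MAC blocks); the
 * common CPU/core bits are included only on a global reset.
 */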
9677static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9678{
9679 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9680 u32 global_bits2, stay_reset2;
9681
9682
9683
9684
9685
9686 global_bits2 =
9687 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9688 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9689
9690
9691
9692
9693
9694
9695 not_reset_mask1 =
9696 MISC_REGISTERS_RESET_REG_1_RST_HC |
9697 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9698 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9699
9700 not_reset_mask2 =
9701 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9702 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9703 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9704 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9705 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9706 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9707 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9708 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9709 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9710 MISC_REGISTERS_RESET_REG_2_PGLC |
9711 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9712 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9713 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9714 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9715 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9716 MISC_REGISTERS_RESET_REG_2_UMAC1;
9717
9718
9719
9720
9721
9722 stay_reset2 =
9723 MISC_REGISTERS_RESET_REG_2_XMAC |
9724 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9725
9726
9727 reset_mask1 = 0xffffffff;
9728
9729 if (CHIP_IS_E1(bp))
9730 reset_mask2 = 0xffff;
9731 else if (CHIP_IS_E1H(bp))
9732 reset_mask2 = 0x1ffff;
9733 else if (CHIP_IS_E2(bp))
9734 reset_mask2 = 0xfffff;
9735 else
9736 reset_mask2 = 0x3ffffff;
9737
9738
9739 if (!global)
9740 reset_mask2 &= ~global_bits2;
9741
9742
9743
9744
9745
9746
9747
9748
9749
9750
9751
9752
9753
9754
9755
9756 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9757 reset_mask2 & (~not_reset_mask2));
9758
9759 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9760 reset_mask1 & (~not_reset_mask1));
9761
9762 barrier();
9763
9764 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9765 reset_mask2 & (~stay_reset2));
9766
9767 barrier();
9768
9769 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9770}
9780
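/* Wait (roughly a second) for the IGU to report no pending requests;
 * returns -EBUSY on timeout.
 */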
9781static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9782{
9783 u32 cnt = 1000;
9784 u32 pend_bits = 0;
9785
9786 do {
9787 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9788
9789 if (pend_bits == 0)
9790 break;
9791
9792 usleep_range(1000, 2000);
9793 } while (cnt-- > 0);
9794
9795 if (cnt <= 0) {
9796 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9797 pend_bits);
9798 return -EBUSY;
9799 }
9800
9801 return 0;
9802}
9803
9804static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9805{
9806 int cnt = 1000;
9807 u32 val = 0;
9808 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9809 u32 tags_63_32 = 0;
9810
9811
9812 do {
9813 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9814 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9815 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9816 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9817 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9818 if (CHIP_IS_E3(bp))
9819 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9820
9821 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9822 ((port_is_idle_0 & 0x1) == 0x1) &&
9823 ((port_is_idle_1 & 0x1) == 0x1) &&
9824 (pgl_exp_rom2 == 0xffffffff) &&
9825 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9826 break;
9827 usleep_range(1000, 2000);
9828 } while (cnt-- > 0);
9829
9830 if (cnt <= 0) {
9831 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9832 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9833 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9834 pgl_exp_rom2);
9835 return -EAGAIN;
9836 }
9837
9838 barrier();
9839
9840
9841 bnx2x_set_234_gates(bp, true);
9842
9843
9844 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9845 return -EAGAIN;
9846
9847
9848
9849
9850 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9851 barrier();
9852
9853
9854
9855
9856 usleep_range(1000, 2000);
9857
9858
9859
9860 if (global)
9861 bnx2x_reset_mcp_prep(bp, &val);
9862
9863
9864 bnx2x_pxp_prep(bp);
9865 barrier();
9866
9867
9868 bnx2x_process_kill_chip_reset(bp, global);
9869 barrier();
9870
9871
9872 if (!CHIP_IS_E1x(bp))
9873 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9874
9875
9876
9877 if (global && bnx2x_reset_mcp_comp(bp, val))
9878 return -EAGAIN;
9879
9880
9881
9882
9883 bnx2x_set_234_gates(bp, false);
9884
9885
9886
9887
9888 return 0;
9889}
9890
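/* bnx2x_leader_reset - executed by the recovery leader: coordinate with the
 * MCP (LOAD_REQ/LOAD_DONE) when it is available, run the process-kill chip
 * reset and clear the reset-in-progress/global flags on success.
 */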
9891static int bnx2x_leader_reset(struct bnx2x *bp)
9892{
9893 int rc = 0;
9894 bool global = bnx2x_reset_is_global(bp);
9895 u32 load_code;
9896
9897
9898
9899
9900 if (!global && !BP_NOMCP(bp)) {
9901 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9902 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9903 if (!load_code) {
9904 BNX2X_ERR("MCP response failure, aborting\n");
9905 rc = -EAGAIN;
9906 goto exit_leader_reset;
9907 }
9908 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9909 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9910 BNX2X_ERR("MCP unexpected resp, aborting\n");
9911 rc = -EAGAIN;
9912 goto exit_leader_reset2;
9913 }
9914 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9915 if (!load_code) {
9916 BNX2X_ERR("MCP response failure, aborting\n");
9917 rc = -EAGAIN;
9918 goto exit_leader_reset2;
9919 }
9920 }
9921
9922
9923 if (bnx2x_process_kill(bp, global)) {
9924 BNX2X_ERR("Something bad has happened on engine %d! Aii!\n",
9925 BP_PATH(bp));
9926 rc = -EAGAIN;
9927 goto exit_leader_reset2;
9928 }
9929
9930
9931
9932
9933
9934 bnx2x_set_reset_done(bp);
9935 if (global)
9936 bnx2x_clear_reset_global(bp);
9937
9938exit_leader_reset2:
9939
9940 if (!global && !BP_NOMCP(bp)) {
9941 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9942 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9943 }
9944exit_leader_reset:
9945 bp->is_leader = 0;
9946 bnx2x_release_leader_lock(bp);
9947 smp_mb();
9948 return rc;
9949}
9950
9951static void bnx2x_recovery_failed(struct bnx2x *bp)
9952{
9953 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9954
9955
9956 netif_device_detach(bp->dev);
9957
9958
9959
9960
9961
9962 bnx2x_set_reset_in_progress(bp);
9963
9964
9965 bnx2x_set_power_state(bp, PCI_D3hot);
9966
9967 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9968
9969 smp_mb();
9970}
9976
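/* Drive the parity-error recovery state machine. Called from the sp_rtnl
 * task under rtnl_lock; the leader performs the chip reset while the other
 * functions wait and then reload themselves.
 */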
9977static void bnx2x_parity_recover(struct bnx2x *bp)
9978{
9979 bool global = false;
9980 u32 error_recovered, error_unrecovered;
9981 bool is_parity;
9982
9983 DP(NETIF_MSG_HW, "Handling parity\n");
9984 while (1) {
9985 switch (bp->recovery_state) {
9986 case BNX2X_RECOVERY_INIT:
9987 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9988 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9989 WARN_ON(!is_parity);
9990
9991
9992 if (bnx2x_trylock_leader_lock(bp)) {
9993 bnx2x_set_reset_in_progress(bp);
9994
9995
9996
9997
9998
9999
10000 if (global)
10001 bnx2x_set_reset_global(bp);
10002
10003 bp->is_leader = 1;
10004 }
10005
10006
10007
10008 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
10009 return;
10010
10011 bp->recovery_state = BNX2X_RECOVERY_WAIT;
10012
10013
10014
10015
10016
10017 smp_mb();
10018 break;
10019
10020 case BNX2X_RECOVERY_WAIT:
10021 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
10022 if (bp->is_leader) {
10023 int other_engine = BP_PATH(bp) ? 0 : 1;
10024 bool other_load_status =
10025 bnx2x_get_load_status(bp, other_engine);
10026 bool load_status =
10027 bnx2x_get_load_status(bp, BP_PATH(bp));
10028 global = bnx2x_reset_is_global(bp);
10029
10030
10031
10032
10033
10034
10035
10036
10037
10038 if (load_status ||
10039 (global && other_load_status)) {
10040
10041
10042
10043 schedule_delayed_work(&bp->sp_rtnl_task,
10044 HZ/10);
10045 return;
10046 } else {
10047
10048
10049
10050
10051
10052 if (bnx2x_leader_reset(bp)) {
10053 bnx2x_recovery_failed(bp);
10054 return;
10055 }
10056
10057
10058
10059
10060
10061
10062 break;
10063 }
10064 } else {
10065 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10066
10067
10068
10069
10070
10071
10072 if (bnx2x_trylock_leader_lock(bp)) {
10073
10074
10075
10076 bp->is_leader = 1;
10077 break;
10078 }
10079
10080 schedule_delayed_work(&bp->sp_rtnl_task,
10081 HZ/10);
10082 return;
10083
10084 } else {
10085
10086
10087
10088
10089 if (bnx2x_reset_is_global(bp)) {
10090 schedule_delayed_work(
10091 &bp->sp_rtnl_task,
10092 HZ/10);
10093 return;
10094 }
10095
10096 error_recovered =
10097 bp->eth_stats.recoverable_error;
10098 error_unrecovered =
10099 bp->eth_stats.unrecoverable_error;
10100 bp->recovery_state =
10101 BNX2X_RECOVERY_NIC_LOADING;
10102 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10103 error_unrecovered++;
10104 netdev_err(bp->dev,
10105 "Recovery failed. Power cycle needed\n");
10106
10107 netif_device_detach(bp->dev);
10108
10109 bnx2x_set_power_state(
10110 bp, PCI_D3hot);
10111 smp_mb();
10112 } else {
10113 bp->recovery_state =
10114 BNX2X_RECOVERY_DONE;
10115 error_recovered++;
10116 smp_mb();
10117 }
10118 bp->eth_stats.recoverable_error =
10119 error_recovered;
10120 bp->eth_stats.unrecoverable_error =
10121 error_unrecovered;
10122
10123 return;
10124 }
10125 }
10126 default:
10127 return;
10128 }
10129 }
10130}
10131
10132static int bnx2x_udp_port_update(struct bnx2x *bp)
10133{
10134 struct bnx2x_func_switch_update_params *switch_update_params;
10135 struct bnx2x_func_state_params func_params = {NULL};
10136 struct bnx2x_udp_tunnel *udp_tunnel;
10137 u16 vxlan_port = 0, geneve_port = 0;
10138 int rc;
10139
10140 switch_update_params = &func_params.params.switch_update;
10141
10142
10143 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10144 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10145
10146 func_params.f_obj = &bp->func_obj;
10147 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10148
10149
10150 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10151 &switch_update_params->changes);
10152
10153 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
10154 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10155 geneve_port = udp_tunnel->dst_port;
10156 switch_update_params->geneve_dst_port = geneve_port;
10157 }
10158
10159 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
10160 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10161 vxlan_port = udp_tunnel->dst_port;
10162 switch_update_params->vxlan_dst_port = vxlan_port;
10163 }
10164
10165
10166 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10167 &switch_update_params->changes);
10168
10169 rc = bnx2x_func_state_change(bp, &func_params);
10170 if (rc)
10171 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10172 vxlan_port, geneve_port, rc);
10173 else
10174 DP(BNX2X_MSG_SP,
10175 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10176 vxlan_port, geneve_port);
10177
10178 return rc;
10179}
10180
10181static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
10182 enum bnx2x_udp_port_type type)
10183{
10184 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10185
10186 if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp))
10187 return;
10188
10189 if (udp_port->count && udp_port->dst_port == port) {
10190 udp_port->count++;
10191 return;
10192 }
10193
10194 if (udp_port->count) {
10195 DP(BNX2X_MSG_SP,
10196 "UDP tunnel [%d] - destination port limit reached\n",
10197 type);
10198 return;
10199 }
10200
10201 udp_port->dst_port = port;
10202 udp_port->count = 1;
10203 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10204}
10205
10206static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
10207 enum bnx2x_udp_port_type type)
10208{
10209 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10210
10211 if (!IS_PF(bp) || CHIP_IS_E1x(bp))
10212 return;
10213
10214 if (!udp_port->count || udp_port->dst_port != port) {
10215 DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
10216 type);
10217 return;
10218 }
10219
10220
10221 udp_port->count--;
10222 if (udp_port->count)
10223 return;
10224 udp_port->dst_port = 0;
10225
10226 if (netif_running(bp->dev))
10227 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10228 else
10229 DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
10230 type, port);
10231}
10232
10233static void bnx2x_udp_tunnel_add(struct net_device *netdev,
10234 struct udp_tunnel_info *ti)
10235{
10236 struct bnx2x *bp = netdev_priv(netdev);
10237 u16 t_port = ntohs(ti->port);
10238
10239 switch (ti->type) {
10240 case UDP_TUNNEL_TYPE_VXLAN:
10241 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10242 break;
10243 case UDP_TUNNEL_TYPE_GENEVE:
10244 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10245 break;
10246 default:
10247 break;
10248 }
10249}
10250
10251static void bnx2x_udp_tunnel_del(struct net_device *netdev,
10252 struct udp_tunnel_info *ti)
10253{
10254 struct bnx2x *bp = netdev_priv(netdev);
10255 u16 t_port = ntohs(ti->port);
10256
10257 switch (ti->type) {
10258 case UDP_TUNNEL_TYPE_VXLAN:
10259 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10260 break;
10261 case UDP_TUNNEL_TYPE_GENEVE:
10262 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10263 break;
10264 default:
10265 break;
10266 }
10267}
10268
10269static int bnx2x_close(struct net_device *dev);
10270
10271
10272
10273
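/* Slow-path events that need rtnl_lock are deferred to this work item:
 * parity recovery, Tx-timeout reload, TC/DCBX updates, fan failure, VF-PF
 * messages, UDP tunnel port updates and SR-IOV re-enable.
 */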
10274static void bnx2x_sp_rtnl_task(struct work_struct *work)
10275{
10276 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10277
10278 rtnl_lock();
10279
10280 if (!netif_running(bp->dev)) {
10281 rtnl_unlock();
10282 return;
10283 }
10284
10285 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10286#ifdef BNX2X_STOP_ON_ERROR
10287 BNX2X_ERR("recovery flow called but STOP_ON_ERROR is defined, so the reset is skipped to allow a debug dump;\n"
10288 "you will need to reboot when done\n");
10289 goto sp_rtnl_not_reset;
10290#endif
10291
10292
10293
10294
10295 bp->sp_rtnl_state = 0;
10296 smp_mb();
10297
10298 bnx2x_parity_recover(bp);
10299
10300 rtnl_unlock();
10301 return;
10302 }
10303
10304 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10305#ifdef BNX2X_STOP_ON_ERROR
10306 BNX2X_ERR("recovery flow called but STOP_ON_ERROR is defined, so the reset is skipped to allow a debug dump;\n"
10307 "you will need to reboot when done\n");
10308 goto sp_rtnl_not_reset;
10309#endif
10310
10311
10312
10313
10314
10315 bp->sp_rtnl_state = 0;
10316 smp_mb();
10317
10318
10319 bp->link_vars.link_up = 0;
10320 bp->force_link_down = true;
10321 netif_carrier_off(bp->dev);
10322 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10323
10324 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10325
10326
10327
10328
10329 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
10330 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10331 if (bnx2x_nic_load(bp, LOAD_NORMAL))
10332 BNX2X_ERR("Failed to open the NIC again!\n");
10333 }
10334 rtnl_unlock();
10335 return;
10336 }
10337#ifdef BNX2X_STOP_ON_ERROR
10338sp_rtnl_not_reset:
10339#endif
10340 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10341 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10342 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10343 bnx2x_after_function_update(bp);
10344
10345
10346
10347
10348
10349 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10350 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10351 netif_device_detach(bp->dev);
10352 bnx2x_close(bp->dev);
10353 rtnl_unlock();
10354 return;
10355 }
10356
10357 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10358 DP(BNX2X_MSG_SP,
10359 "sending set mcast vf pf channel message from rtnl sp-task\n");
10360 bnx2x_vfpf_set_mcast(bp->dev);
10361 }
10362 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10363 &bp->sp_rtnl_state)) {
10364 if (netif_carrier_ok(bp->dev)) {
10365 bnx2x_tx_disable(bp);
10366 BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10367 }
10368 }
10369
10370 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10371 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10372 bnx2x_set_rx_mode_inner(bp);
10373 }
10374
10375 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10376 &bp->sp_rtnl_state))
10377 bnx2x_pf_set_vfs_vlan(bp);
10378
10379 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10380 bnx2x_dcbx_stop_hw_tx(bp);
10381 bnx2x_dcbx_resume_hw_tx(bp);
10382 }
10383
10384 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10385 &bp->sp_rtnl_state))
10386 bnx2x_update_mng_version(bp);
10387
10388 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10389 bnx2x_handle_update_svid_cmd(bp);
10390
10391 if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
10392 &bp->sp_rtnl_state)) {
10393 if (bnx2x_udp_port_update(bp)) {
10394
10395 memset(bp->udp_tunnel_ports, 0,
10396 sizeof(struct bnx2x_udp_tunnel) *
10397 BNX2X_UDP_PORT_MAX);
10398 } else {
10399
10400
10401
10402
10403 if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
10404 !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
10405 udp_tunnel_get_rx_info(bp->dev);
10406 }
10407 }
10408
10409
10410
10411
10412 rtnl_unlock();
10413
10414
10415 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10416 &bp->sp_rtnl_state)) {
10417 bnx2x_disable_sriov(bp);
10418 bnx2x_enable_sriov(bp);
10419 }
10420}
10421
10422static void bnx2x_period_task(struct work_struct *work)
10423{
10424 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10425
10426 if (!netif_running(bp->dev))
10427 goto period_task_exit;
10428
10429 if (CHIP_REV_IS_SLOW(bp)) {
10430 BNX2X_ERR("period task called on emulation, ignoring\n");
10431 goto period_task_exit;
10432 }
10433
10434 bnx2x_acquire_phy_lock(bp);
10435
10436
10437
10438
10439
10440 smp_mb();
10441 if (bp->port.pmf) {
10442 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10443
10444
10445 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10446 }
10447
10448 bnx2x_release_phy_lock(bp);
10449period_task_exit:
10450 return;
10451}
10452
10453
10454
10455
10456
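/* Return this function's PGL "pretend" register address
 * (base + absolute-function-number * stride).
 */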
10457static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10458{
10459 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10460 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10461 return base + (BP_ABS_FUNC(bp)) * stride;
10462}
10463
10464static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10465 u8 port, u32 reset_reg,
10466 struct bnx2x_mac_vals *vals)
10467{
10468 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10469 u32 base_addr;
10470
10471 if (!(mask & reset_reg))
10472 return false;
10473
10474 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10475 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10476 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10477 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10478 REG_WR(bp, vals->umac_addr[port], 0);
10479
10480 return true;
10481}
10482
10483static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10484 struct bnx2x_mac_vals *vals)
10485{
10486 u32 val, base_addr, offset, mask, reset_reg;
10487 bool mac_stopped = false;
10488 u8 port = BP_PORT(bp);
10489
10490
10491 memset(vals, 0, sizeof(*vals));
10492
10493 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10494
10495 if (!CHIP_IS_E3(bp)) {
10496 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10497 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10498 if ((mask & reset_reg) && val) {
10499 u32 wb_data[2];
10500 BNX2X_DEV_INFO("Disable bmac Rx\n");
10501 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10502 : NIG_REG_INGRESS_BMAC0_MEM;
10503 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10504 : BIGMAC_REGISTER_BMAC_CONTROL;
10505
10506
10507
10508
10509
10510
10511
10512 wb_data[0] = REG_RD(bp, base_addr + offset);
10513 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10514 vals->bmac_addr = base_addr + offset;
10515 vals->bmac_val[0] = wb_data[0];
10516 vals->bmac_val[1] = wb_data[1];
10517 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10518 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10519 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10520 }
10521 BNX2X_DEV_INFO("Disable emac Rx\n");
10522 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10523 vals->emac_val = REG_RD(bp, vals->emac_addr);
10524 REG_WR(bp, vals->emac_addr, 0);
10525 mac_stopped = true;
10526 } else {
10527 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10528 BNX2X_DEV_INFO("Disable xmac Rx\n");
10529 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10530 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10531 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10532 val & ~(1 << 1));
10533 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10534 val | (1 << 1));
10535 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10536 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10537 REG_WR(bp, vals->xmac_addr, 0);
10538 mac_stopped = true;
10539 }
10540
10541 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10542 reset_reg, vals);
10543 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10544 reset_reg, vals);
10545 }
10546
10547 if (mac_stopped)
10548 msleep(20);
10549}
10550
10551#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10552#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10553 0x1848 + ((f) << 4))
10554#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10555#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10556#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
10557
10558#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10559#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10560#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10561
10562static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10563{
10564
10565
10566
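/* UNDI marks its presence in DORQ: if the DORQ block is held in reset there
 * is nothing left behind; otherwise a NORM CID offset of 7 indicates a
 * previous UNDI (boot) driver was loaded.
 */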
10567 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10568 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10569 return false;
10570
10571 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10572 BNX2X_DEV_INFO("UNDI previously loaded\n");
10573 return true;
10574 }
10575
10576 return false;
10577}
10578
10579static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10580{
10581 u16 rcq, bd;
10582 u32 addr, tmp_reg;
10583
10584 if (BP_FUNC(bp) < 2)
10585 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10586 else
10587 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10588
10589 tmp_reg = REG_RD(bp, addr);
10590 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10591 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10592
10593 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10594 REG_WR(bp, addr, tmp_reg);
10595
10596 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10597 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10598}
10599
10600static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10601{
10602 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10603 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10604 if (!rc) {
10605 BNX2X_ERR("MCP response failure, aborting\n");
10606 return -EBUSY;
10607 }
10608
10609 return 0;
10610}
10611
10612static struct bnx2x_prev_path_list *
10613 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10614{
10615 struct bnx2x_prev_path_list *tmp_list;
10616
10617 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10618 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10619 bp->pdev->bus->number == tmp_list->bus &&
10620 BP_PATH(bp) == tmp_list->path)
10621 return tmp_list;
10622
10623 return NULL;
10624}
10625
10626static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10627{
10628 struct bnx2x_prev_path_list *tmp_list;
10629 int rc;
10630
10631 rc = down_interruptible(&bnx2x_prev_sem);
10632 if (rc) {
10633 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
10634 return rc;
10635 }
10636
10637 tmp_list = bnx2x_prev_path_get_entry(bp);
10638 if (tmp_list) {
10639 tmp_list->aer = 1;
10640 rc = 0;
10641 } else {
10642 BNX2X_ERR("path %d: entry does not exist for EEH; flow occurs before initial insmod is over?\n",
10643 BP_PATH(bp));
10644 }
10645
10646 up(&bnx2x_prev_sem);
10647
10648 return rc;
10649}
10650
10651static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10652{
10653 struct bnx2x_prev_path_list *tmp_list;
10654 bool rc = false;
10655
10656 if (down_trylock(&bnx2x_prev_sem))
10657 return false;
10658
10659 tmp_list = bnx2x_prev_path_get_entry(bp);
10660 if (tmp_list) {
10661 if (tmp_list->aer) {
10662 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10663 BP_PATH(bp));
10664 } else {
10665 rc = true;
10666 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10667 BP_PATH(bp));
10668 }
10669 }
10670
10671 up(&bnx2x_prev_sem);
10672
10673 return rc;
10674}
10675
10676bool bnx2x_port_after_undi(struct bnx2x *bp)
10677{
10678 struct bnx2x_prev_path_list *entry;
10679 bool val;
10680
10681 down(&bnx2x_prev_sem);
10682
10683 entry = bnx2x_prev_path_get_entry(bp);
10684 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10685
10686 up(&bnx2x_prev_sem);
10687
10688 return val;
10689}
10690
10691static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10692{
10693 struct bnx2x_prev_path_list *tmp_list;
10694 int rc;
10695
10696 rc = down_interruptible(&bnx2x_prev_sem);
10697 if (rc) {
10698 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
10699 return rc;
10700 }
10701
10702
10703 tmp_list = bnx2x_prev_path_get_entry(bp);
10704 if (tmp_list) {
10705 if (!tmp_list->aer) {
10706 BNX2X_ERR("Re-Marking the path.\n");
10707 } else {
10708 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10709 BP_PATH(bp));
10710 tmp_list->aer = 0;
10711 }
10712 up(&bnx2x_prev_sem);
10713 return 0;
10714 }
10715 up(&bnx2x_prev_sem);
10716
10717
10718 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10719 if (!tmp_list) {
10720 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10721 return -ENOMEM;
10722 }
10723
10724 tmp_list->bus = bp->pdev->bus->number;
10725 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10726 tmp_list->path = BP_PATH(bp);
10727 tmp_list->aer = 0;
10728 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10729
10730 rc = down_interruptible(&bnx2x_prev_sem);
10731 if (rc) {
10732 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
10733 kfree(tmp_list);
10734 } else {
10735 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10736 BP_PATH(bp));
10737 list_add(&tmp_list->list, &bnx2x_prev_list);
10738 up(&bnx2x_prev_sem);
10739 }
10740
10741 return rc;
10742}
10743
10744static int bnx2x_do_flr(struct bnx2x *bp)
10745{
10746 struct pci_dev *dev = bp->pdev;
10747
10748 if (CHIP_IS_E1x(bp)) {
10749 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10750 return -EINVAL;
10751 }
10752
10753
10754 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10755 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10756 bp->common.bc_ver);
10757 return -EINVAL;
10758 }
10759
10760 if (!pci_wait_for_pending_transaction(dev))
10761 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10762
10763 BNX2X_DEV_INFO("Initiating FLR\n");
10764 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10765
10766 return 0;
10767}
10768
10769static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10770{
10771 int rc;
10772
10773 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10774
10775
10776 if (bnx2x_prev_is_path_marked(bp))
10777 return bnx2x_prev_mcp_done(bp);
10778
10779 BNX2X_DEV_INFO("Path is unmarked\n");
10780
10781
10782 if (bnx2x_prev_is_after_undi(bp))
10783 goto out;
10784
10785
10786
10787
10788
10789 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10790
10791 if (!rc) {
10792
10793 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10794 rc = bnx2x_do_flr(bp);
10795 }
10796
10797 if (!rc) {
10798
10799 BNX2X_DEV_INFO("FLR successful\n");
10800 return 0;
10801 }
10802
10803 BNX2X_DEV_INFO("Could not FLR\n");
10804
10805out:
10806
10807 rc = bnx2x_prev_mcp_done(bp);
10808 if (!rc)
10809 rc = BNX2X_PREV_WAIT_NEEDED;
10810
10811 return rc;
10812}
10813
10814static int bnx2x_prev_unload_common(struct bnx2x *bp)
10815{
10816 u32 reset_reg, tmp_reg = 0, rc;
10817 bool prev_undi = false;
10818 struct bnx2x_mac_vals mac_vals;
10819
10820
10821
10822
10823
10824 BNX2X_DEV_INFO("Common unload Flow\n");
10825
10826 memset(&mac_vals, 0, sizeof(mac_vals));
10827
10828 if (bnx2x_prev_is_path_marked(bp))
10829 return bnx2x_prev_mcp_done(bp);
10830
10831 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10832
10833
10834 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10835 u32 timer_count = 1000;
10836
10837
10838 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10839
10840
10841 bnx2x_set_rx_filter(&bp->link_params, 0);
10842 bp->link_params.port ^= 1;
10843 bnx2x_set_rx_filter(&bp->link_params, 0);
10844 bp->link_params.port ^= 1;
10845
10846
10847 if (bnx2x_prev_is_after_undi(bp)) {
10848 prev_undi = true;
10849
10850 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10851
10852 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10853 }
10854 if (!CHIP_IS_E1x(bp))
10855
10856 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10857
10858
10859 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10860 while (timer_count) {
10861 u32 prev_brb = tmp_reg;
10862
10863 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10864 if (!tmp_reg)
10865 break;
10866
10867 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10868
10869
10870 if (prev_brb > tmp_reg)
10871 timer_count = 1000;
10872 else
10873 timer_count--;
10874
10875
10876 if (prev_undi)
10877 bnx2x_prev_unload_undi_inc(bp, 1);
10878
10879 udelay(10);
10880 }
10881
10882 if (!timer_count)
10883 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10884 }
10885
10886
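/* Pipeline is empty: reset the common blocks, then restore the saved MAC values */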
10887 bnx2x_reset_common(bp);
10888
10889 if (mac_vals.xmac_addr)
10890 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10891 if (mac_vals.umac_addr[0])
10892 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10893 if (mac_vals.umac_addr[1])
10894 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10895 if (mac_vals.emac_addr)
10896 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10897 if (mac_vals.bmac_addr) {
10898 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10899 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10900 }
10901
10902 rc = bnx2x_prev_mark_path(bp, prev_undi);
10903 if (rc) {
10904 bnx2x_prev_mcp_done(bp);
10905 return rc;
10906 }
10907
10908 return bnx2x_prev_mcp_done(bp);
10909}
10910
10911static int bnx2x_prev_unload(struct bnx2x *bp)
10912{
10913 int time_counter = 10;
10914 u32 rc, fw, hw_lock_reg, hw_lock_val;
10915 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10916
10917
10918
10919
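/* Clear PGLUE_B errors that an interrupted previous flow may have left behind */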
10920 bnx2x_clean_pglue_errors(bp);
10921
10922
10923 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10924 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10925 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10926
10927 hw_lock_val = REG_RD(bp, hw_lock_reg);
10928 if (hw_lock_val) {
10929 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10930 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10931 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10932 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10933 }
10934
10935 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10936 REG_WR(bp, hw_lock_reg, 0xffffffff);
10937 } else
10938 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10939
10940 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10941 BNX2X_DEV_INFO("Release previously held alr\n");
10942 bnx2x_release_alr(bp);
10943 }
10944
10945 do {
10946 int aer = 0;
10947
10948 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10949 if (!fw) {
10950 BNX2X_ERR("MCP response failure, aborting\n");
10951 rc = -EBUSY;
10952 break;
10953 }
10954
10955 rc = down_interruptible(&bnx2x_prev_sem);
10956 if (rc) {
10957 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10958 rc);
10959 } else {
10960
10961 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10962 bnx2x_prev_path_get_entry(bp)->aer);
10963 up(&bnx2x_prev_sem);
10964 }
10965
10966 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10967 rc = bnx2x_prev_unload_common(bp);
10968 break;
10969 }
10970
10971
10972 rc = bnx2x_prev_unload_uncommon(bp);
10973 if (rc != BNX2X_PREV_WAIT_NEEDED)
10974 break;
10975
10976 msleep(20);
10977 } while (--time_counter);
10978
10979 if (!time_counter || rc) {
10980 BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
10981 rc = -EPROBE_DEFER;
10982 }
10983
10984
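/* If this port was used to boot from SAN, record it in the link feature flags */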
10985 if (bnx2x_port_after_undi(bp))
10986 bp->link_params.feature_config_flags |=
10987 FEATURE_CONFIG_BOOT_FROM_SAN;
10988
10989 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10990
10991 return rc;
10992}
10993
10994static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10995{
10996 u32 val, val2, val3, val4, id, boot_mode;
10997 u16 pmc;
10998
10999
11000
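/* Compose the chip id from the chip number, revision, metal and bond id registers */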
11001 val = REG_RD(bp, MISC_REG_CHIP_NUM);
11002 id = ((val & 0xffff) << 16);
11003 val = REG_RD(bp, MISC_REG_CHIP_REV);
11004 id |= ((val & 0xf) << 12);
11005
11006
11007
11008
11009 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
11010 id |= (((val >> 24) & 0xf) << 4);
11011 val = REG_RD(bp, MISC_REG_BOND_ID);
11012 id |= (val & 0xf);
11013 bp->common.chip_id = id;
11014
11015
11016 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
11017 if (CHIP_IS_57810(bp))
11018 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
11019 (bp->common.chip_id & 0x0000FFFF);
11020 else if (CHIP_IS_57810_MF(bp))
11021 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
11022 (bp->common.chip_id & 0x0000FFFF);
11023 bp->common.chip_id |= 0x1;
11024 }
11025
11026
11027 bp->db_size = (1 << BNX2X_DB_SHIFT);
11028
11029 if (!CHIP_IS_E1x(bp)) {
11030 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
11031 if ((val & 1) == 0)
11032 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
11033 else
11034 val = (val >> 1) & 1;
11035 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
11036 "2_PORT_MODE");
11037 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
11038 CHIP_2_PORT_MODE;
11039
11040 if (CHIP_MODE_IS_4_PORT(bp))
11041 bp->pfid = (bp->pf_num >> 1);
11042 else
11043 bp->pfid = (bp->pf_num & 0x6);
11044 } else {
11045 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
11046 bp->pfid = bp->pf_num;
11047 }
11048
BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
11050
11051 bp->link_params.chip_id = bp->common.chip_id;
11052 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
11053
11054 val = (REG_RD(bp, 0x2874) & 0x55);
11055 if ((bp->common.chip_id & 0x1) ||
11056 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
11057 bp->flags |= ONE_PORT_FLAG;
11058 BNX2X_DEV_INFO("single port device\n");
11059 }
11060
11061 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
11062 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
11063 (val & MCPR_NVM_CFG4_FLASH_SIZE));
11064 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
11065 bp->common.flash_size, bp->common.flash_size);
11066
11067 bnx2x_init_shmem(bp);
11068
11069 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11070 MISC_REG_GENERIC_CR_1 :
11071 MISC_REG_GENERIC_CR_0));
11072
11073 bp->link_params.shmem_base = bp->common.shmem_base;
11074 bp->link_params.shmem2_base = bp->common.shmem2_base;
11075 if (SHMEM2_RD(bp, size) >
11076 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11077 bp->link_params.lfa_base =
11078 REG_RD(bp, bp->common.shmem2_base +
11079 (u32)offsetof(struct shmem2_region,
11080 lfa_host_addr[BP_PORT(bp)]));
11081 else
11082 bp->link_params.lfa_base = 0;
11083 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
11084 bp->common.shmem_base, bp->common.shmem2_base);
11085
11086 if (!bp->common.shmem_base) {
11087 BNX2X_DEV_INFO("MCP not active\n");
11088 bp->flags |= NO_MCP_FLAG;
11089 return;
11090 }
11091
11092 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11093 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11094
11095 bp->link_params.hw_led_mode = ((bp->common.hw_config &
11096 SHARED_HW_CFG_LED_MODE_MASK) >>
11097 SHARED_HW_CFG_LED_MODE_SHIFT);
11098
11099 bp->link_params.feature_config_flags = 0;
11100 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11101 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
11102 bp->link_params.feature_config_flags |=
11103 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11104 else
11105 bp->link_params.feature_config_flags &=
11106 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11107
11108 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11109 bp->common.bc_ver = val;
11110 BNX2X_DEV_INFO("bc_ver %X\n", val);
11111 if (val < BNX2X_BC_VER) {
11112
11113
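/* Bootcode is older than required: warn but continue loading */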
11114 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11115 BNX2X_BC_VER, val);
11116 }
11117 bp->link_params.feature_config_flags |=
11118 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11119 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11120
11121 bp->link_params.feature_config_flags |=
11122 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11123 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11124 bp->link_params.feature_config_flags |=
11125 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11126 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11127 bp->link_params.feature_config_flags |=
11128 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11129 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11130
11131 bp->link_params.feature_config_flags |=
11132 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11133 FEATURE_CONFIG_MT_SUPPORT : 0;
11134
11135 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11136 BC_SUPPORTS_PFC_STATS : 0;
11137
11138 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11139 BC_SUPPORTS_FCOE_FEATURES : 0;
11140
11141 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11142 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11143
11144 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11145 BC_SUPPORTS_RMMOD_CMD : 0;
11146
11147 boot_mode = SHMEM_RD(bp,
11148 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11149 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11150 switch (boot_mode) {
11151 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11152 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11153 break;
11154 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11155 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11156 break;
11157 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11158 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11159 break;
11160 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11161 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11162 break;
11163 }
11164
11165 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11166 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11167
11168 BNX2X_DEV_INFO("%sWoL capable\n",
11169 (bp->flags & NO_WOL_FLAG) ? "not " : "");
11170
11171 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11172 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11173 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11174 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11175
11176 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11177 val, val2, val3, val4);
11178}
11179
11180#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11181#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11182
11183static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11184{
11185 int pfid = BP_FUNC(bp);
11186 int igu_sb_id;
11187 u32 val;
11188 u8 fid, igu_sb_cnt = 0;
11189
11190 bp->igu_base_sb = 0xff;
11191 if (CHIP_INT_MODE_IS_BC(bp)) {
11192 int vn = BP_VN(bp);
11193 igu_sb_cnt = bp->igu_sb_cnt;
11194 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11195 FP_SB_MAX_E1x;
11196
11197 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
11198 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11199
11200 return 0;
11201 }
11202
11203
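/* IGU in normal mode: scan the CAM for status blocks owned by this function */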
11204 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11205 igu_sb_id++) {
11206 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11207 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11208 continue;
11209 fid = IGU_FID(val);
11210 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11211 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11212 continue;
11213 if (IGU_VEC(val) == 0)
11214
11215 bp->igu_dsb_id = igu_sb_id;
11216 else {
11217 if (bp->igu_base_sb == 0xff)
11218 bp->igu_base_sb = igu_sb_id;
11219 igu_sb_cnt++;
11220 }
11221 }
11222 }
11223
11224#ifdef CONFIG_PCI_MSI
11225
11226
11227
11228
11229
11230
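/* Never use more status blocks than were actually found in the IGU CAM */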
11231 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11232#endif
11233
11234 if (igu_sb_cnt == 0) {
11235 BNX2X_ERR("CAM configuration error\n");
11236 return -EINVAL;
11237 }
11238
11239 return 0;
11240}
11241
11242static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11243{
11244 int cfg_size = 0, idx, port = BP_PORT(bp);
11245
11246
11247 bp->port.supported[0] = 0;
11248 bp->port.supported[1] = 0;
11249 switch (bp->link_params.num_phys) {
11250 case 1:
11251 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11252 cfg_size = 1;
11253 break;
11254 case 2:
11255 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11256 cfg_size = 1;
11257 break;
11258 case 3:
11259 if (bp->link_params.multi_phy_config &
11260 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11261 bp->port.supported[1] =
11262 bp->link_params.phy[EXT_PHY1].supported;
11263 bp->port.supported[0] =
11264 bp->link_params.phy[EXT_PHY2].supported;
11265 } else {
11266 bp->port.supported[0] =
11267 bp->link_params.phy[EXT_PHY1].supported;
11268 bp->port.supported[1] =
11269 bp->link_params.phy[EXT_PHY2].supported;
11270 }
11271 cfg_size = 2;
11272 break;
11273 }
11274
11275 if (!(bp->port.supported[0] || bp->port.supported[1])) {
11276 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11277 SHMEM_RD(bp,
11278 dev_info.port_hw_config[port].external_phy_config),
11279 SHMEM_RD(bp,
11280 dev_info.port_hw_config[port].external_phy_config2));
11281 return;
11282 }
11283
11284 if (CHIP_IS_E3(bp))
11285 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11286 else {
11287 switch (switch_cfg) {
11288 case SWITCH_CFG_1G:
11289 bp->port.phy_addr = REG_RD(
11290 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11291 break;
11292 case SWITCH_CFG_10G:
11293 bp->port.phy_addr = REG_RD(
11294 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11295 break;
11296 default:
11297 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11298 bp->port.link_config[0]);
11299 return;
11300 }
11301 }
11302 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11303
11304 for (idx = 0; idx < cfg_size; idx++) {
11305 if (!(bp->link_params.speed_cap_mask[idx] &
11306 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11307 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11308
11309 if (!(bp->link_params.speed_cap_mask[idx] &
11310 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11311 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11312
11313 if (!(bp->link_params.speed_cap_mask[idx] &
11314 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11315 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11316
11317 if (!(bp->link_params.speed_cap_mask[idx] &
11318 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11319 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11320
11321 if (!(bp->link_params.speed_cap_mask[idx] &
11322 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11323 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11324 SUPPORTED_1000baseT_Full);
11325
11326 if (!(bp->link_params.speed_cap_mask[idx] &
11327 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11328 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11329
11330 if (!(bp->link_params.speed_cap_mask[idx] &
11331 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11332 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11333
11334 if (!(bp->link_params.speed_cap_mask[idx] &
11335 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11336 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11337 }
11338
11339 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11340 bp->port.supported[1]);
11341}
11342
11343static void bnx2x_link_settings_requested(struct bnx2x *bp)
11344{
11345 u32 link_config, idx, cfg_size = 0;
11346 bp->port.advertising[0] = 0;
11347 bp->port.advertising[1] = 0;
11348 switch (bp->link_params.num_phys) {
11349 case 1:
11350 case 2:
11351 cfg_size = 1;
11352 break;
11353 case 3:
11354 cfg_size = 2;
11355 break;
11356 }
11357 for (idx = 0; idx < cfg_size; idx++) {
11358 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11359 link_config = bp->port.link_config[idx];
11360 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11361 case PORT_FEATURE_LINK_SPEED_AUTO:
11362 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11363 bp->link_params.req_line_speed[idx] =
11364 SPEED_AUTO_NEG;
11365 bp->port.advertising[idx] |=
11366 bp->port.supported[idx];
11367 if (bp->link_params.phy[EXT_PHY1].type ==
11368 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11369 bp->port.advertising[idx] |=
11370 (SUPPORTED_100baseT_Half |
11371 SUPPORTED_100baseT_Full);
11372 } else {
11373
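/* Autoneg is not supported: force 10G */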
11374 bp->link_params.req_line_speed[idx] =
11375 SPEED_10000;
11376 bp->port.advertising[idx] |=
11377 (ADVERTISED_10000baseT_Full |
11378 ADVERTISED_FIBRE);
11379 continue;
11380 }
11381 break;
11382
11383 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11384 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11385 bp->link_params.req_line_speed[idx] =
11386 SPEED_10;
11387 bp->port.advertising[idx] |=
11388 (ADVERTISED_10baseT_Full |
11389 ADVERTISED_TP);
11390 } else {
11391 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11392 link_config,
11393 bp->link_params.speed_cap_mask[idx]);
11394 return;
11395 }
11396 break;
11397
11398 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11399 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11400 bp->link_params.req_line_speed[idx] =
11401 SPEED_10;
11402 bp->link_params.req_duplex[idx] =
11403 DUPLEX_HALF;
11404 bp->port.advertising[idx] |=
11405 (ADVERTISED_10baseT_Half |
11406 ADVERTISED_TP);
11407 } else {
11408 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11409 link_config,
11410 bp->link_params.speed_cap_mask[idx]);
11411 return;
11412 }
11413 break;
11414
11415 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11416 if (bp->port.supported[idx] &
11417 SUPPORTED_100baseT_Full) {
11418 bp->link_params.req_line_speed[idx] =
11419 SPEED_100;
11420 bp->port.advertising[idx] |=
11421 (ADVERTISED_100baseT_Full |
11422 ADVERTISED_TP);
11423 } else {
11424 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11425 link_config,
11426 bp->link_params.speed_cap_mask[idx]);
11427 return;
11428 }
11429 break;
11430
11431 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11432 if (bp->port.supported[idx] &
11433 SUPPORTED_100baseT_Half) {
11434 bp->link_params.req_line_speed[idx] =
11435 SPEED_100;
11436 bp->link_params.req_duplex[idx] =
11437 DUPLEX_HALF;
11438 bp->port.advertising[idx] |=
11439 (ADVERTISED_100baseT_Half |
11440 ADVERTISED_TP);
11441 } else {
11442 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11443 link_config,
11444 bp->link_params.speed_cap_mask[idx]);
11445 return;
11446 }
11447 break;
11448
11449 case PORT_FEATURE_LINK_SPEED_1G:
11450 if (bp->port.supported[idx] &
11451 SUPPORTED_1000baseT_Full) {
11452 bp->link_params.req_line_speed[idx] =
11453 SPEED_1000;
11454 bp->port.advertising[idx] |=
11455 (ADVERTISED_1000baseT_Full |
11456 ADVERTISED_TP);
11457 } else if (bp->port.supported[idx] &
11458 SUPPORTED_1000baseKX_Full) {
11459 bp->link_params.req_line_speed[idx] =
11460 SPEED_1000;
11461 bp->port.advertising[idx] |=
11462 ADVERTISED_1000baseKX_Full;
11463 } else {
11464 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11465 link_config,
11466 bp->link_params.speed_cap_mask[idx]);
11467 return;
11468 }
11469 break;
11470
11471 case PORT_FEATURE_LINK_SPEED_2_5G:
11472 if (bp->port.supported[idx] &
11473 SUPPORTED_2500baseX_Full) {
11474 bp->link_params.req_line_speed[idx] =
11475 SPEED_2500;
11476 bp->port.advertising[idx] |=
11477 (ADVERTISED_2500baseX_Full |
11478 ADVERTISED_TP);
11479 } else {
11480 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11481 link_config,
11482 bp->link_params.speed_cap_mask[idx]);
11483 return;
11484 }
11485 break;
11486
11487 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11488 if (bp->port.supported[idx] &
11489 SUPPORTED_10000baseT_Full) {
11490 bp->link_params.req_line_speed[idx] =
11491 SPEED_10000;
11492 bp->port.advertising[idx] |=
11493 (ADVERTISED_10000baseT_Full |
11494 ADVERTISED_FIBRE);
11495 } else if (bp->port.supported[idx] &
11496 SUPPORTED_10000baseKR_Full) {
11497 bp->link_params.req_line_speed[idx] =
11498 SPEED_10000;
11499 bp->port.advertising[idx] |=
11500 (ADVERTISED_10000baseKR_Full |
11501 ADVERTISED_FIBRE);
11502 } else {
11503 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11504 link_config,
11505 bp->link_params.speed_cap_mask[idx]);
11506 return;
11507 }
11508 break;
11509 case PORT_FEATURE_LINK_SPEED_20G:
11510 bp->link_params.req_line_speed[idx] = SPEED_20000;
11511
11512 break;
11513 default:
11514 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11515 link_config);
11516 bp->link_params.req_line_speed[idx] =
11517 SPEED_AUTO_NEG;
11518 bp->port.advertising[idx] =
11519 bp->port.supported[idx];
11520 break;
11521 }
11522
11523 bp->link_params.req_flow_ctrl[idx] = (link_config &
11524 PORT_FEATURE_FLOW_CONTROL_MASK);
11525 if (bp->link_params.req_flow_ctrl[idx] ==
11526 BNX2X_FLOW_CTRL_AUTO) {
11527 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11528 bp->link_params.req_flow_ctrl[idx] =
11529 BNX2X_FLOW_CTRL_NONE;
11530 else
11531 bnx2x_set_requested_fc(bp);
11532 }
11533
11534 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11535 bp->link_params.req_line_speed[idx],
11536 bp->link_params.req_duplex[idx],
11537 bp->link_params.req_flow_ctrl[idx],
11538 bp->port.advertising[idx]);
11539 }
11540}
11541
11542static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11543{
11544 __be16 mac_hi_be = cpu_to_be16(mac_hi);
11545 __be32 mac_lo_be = cpu_to_be32(mac_lo);
11546 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11547 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11548}
11549
11550static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11551{
11552 int port = BP_PORT(bp);
11553 u32 config;
11554 u32 ext_phy_type, ext_phy_config, eee_mode;
11555
11556 bp->link_params.bp = bp;
11557 bp->link_params.port = port;
11558
11559 bp->link_params.lane_config =
11560 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11561
11562 bp->link_params.speed_cap_mask[0] =
11563 SHMEM_RD(bp,
11564 dev_info.port_hw_config[port].speed_capability_mask) &
11565 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11566 bp->link_params.speed_cap_mask[1] =
11567 SHMEM_RD(bp,
11568 dev_info.port_hw_config[port].speed_capability_mask2) &
11569 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11570 bp->port.link_config[0] =
11571 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11572
11573 bp->port.link_config[1] =
11574 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11575
11576 bp->link_params.multi_phy_config =
11577 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11578
11579
11580
11581 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11582 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11583 (config & PORT_FEATURE_WOL_ENABLED));
11584
11585 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11586 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11587 bp->flags |= NO_ISCSI_FLAG;
11588 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11589 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11590 bp->flags |= NO_FCOE_FLAG;
11591
11592 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
11593 bp->link_params.lane_config,
11594 bp->link_params.speed_cap_mask[0],
11595 bp->port.link_config[0]);
11596
11597 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11598 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11599 bnx2x_phy_probe(&bp->link_params);
11600 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11601
11602 bnx2x_link_settings_requested(bp);
11603
11604
11605
11606
11607
11608 ext_phy_config =
11609 SHMEM_RD(bp,
11610 dev_info.port_hw_config[port].external_phy_config);
11611 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11612 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11613 bp->mdio.prtad = bp->port.phy_addr;
11614
11615 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11616 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11617 bp->mdio.prtad =
11618 XGXS_EXT_PHY_ADDR(ext_phy_config);
11619
11620
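/* Configure EEE according to the NVRAM eee_power_mode setting */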
11621 eee_mode = (((SHMEM_RD(bp, dev_info.
11622 port_feature_config[port].eee_power_mode)) &
11623 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11624 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11625 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11626 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11627 EEE_MODE_ENABLE_LPI |
11628 EEE_MODE_OUTPUT_TIME;
11629 } else {
11630 bp->link_params.eee_mode = 0;
11631 }
11632}
11633
11634void bnx2x_get_iscsi_info(struct bnx2x *bp)
11635{
11636 u32 no_flags = NO_ISCSI_FLAG;
11637 int port = BP_PORT(bp);
11638 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11639 drv_lic_key[port].max_iscsi_conn);
11640
11641 if (!CNIC_SUPPORT(bp)) {
11642 bp->flags |= no_flags;
11643 return;
11644 }
11645
11646
11647 bp->cnic_eth_dev.max_iscsi_conn =
11648 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11649 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11650
11651 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11652 bp->cnic_eth_dev.max_iscsi_conn);
11653
11654
11655
11656
11657
11658 if (!bp->cnic_eth_dev.max_iscsi_conn)
11659 bp->flags |= no_flags;
11660}
11661
11662static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11663{
11664
11665 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11666 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11667 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11668 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11669
11670
11671 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11672 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11673 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11674 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11675}
11676
11677static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11678{
11679 u8 count = 0;
11680
11681 if (IS_MF(bp)) {
11682 u8 fid;
11683
11684
11685 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11686 if (IS_MF_SD(bp)) {
11687 u32 cfg = MF_CFG_RD(bp,
11688 func_mf_config[fid].config);
11689
11690 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11691 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11692 FUNC_MF_CFG_PROTOCOL_FCOE))
11693 count++;
11694 } else {
11695 u32 cfg = MF_CFG_RD(bp,
11696 func_ext_config[fid].
11697 func_cfg);
11698
11699 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11700 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11701 count++;
11702 }
11703 }
11704 } else {
11705 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11706
11707 for (port = 0; port < port_cnt; port++) {
11708 u32 lic = SHMEM_RD(bp,
11709 drv_lic_key[port].max_fcoe_conn) ^
11710 FW_ENCODE_32BIT_PATTERN;
11711 if (lic)
11712 count++;
11713 }
11714 }
11715
11716 return count;
11717}
11718
11719static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11720{
11721 int port = BP_PORT(bp);
11722 int func = BP_ABS_FUNC(bp);
11723 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11724 drv_lic_key[port].max_fcoe_conn);
11725 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11726
11727 if (!CNIC_SUPPORT(bp)) {
11728 bp->flags |= NO_FCOE_FLAG;
11729 return;
11730 }
11731
11732
11733 bp->cnic_eth_dev.max_fcoe_conn =
11734 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11735 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11736
11737
11738 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11739
11740
11741 if (num_fcoe_func)
11742 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11743
11744
11745 if (!IS_MF(bp)) {
11746
11747 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11748 SHMEM_RD(bp,
11749 dev_info.port_hw_config[port].
11750 fcoe_wwn_port_name_upper);
11751 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11752 SHMEM_RD(bp,
11753 dev_info.port_hw_config[port].
11754 fcoe_wwn_port_name_lower);
11755
11756
11757 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11758 SHMEM_RD(bp,
11759 dev_info.port_hw_config[port].
11760 fcoe_wwn_node_name_upper);
11761 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11762 SHMEM_RD(bp,
11763 dev_info.port_hw_config[port].
11764 fcoe_wwn_node_name_lower);
11765 } else if (!IS_MF_SD(bp)) {
11766
11767
11768
11769 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11770 bnx2x_get_ext_wwn_info(bp, func);
11771 } else {
11772 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11773 bnx2x_get_ext_wwn_info(bp, func);
11774 }
11775
11776 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11777
11778
11779
11780
11781
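/* A zero connection limit means FCoE is not available on this function */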
11782 if (!bp->cnic_eth_dev.max_fcoe_conn) {
11783 bp->flags |= NO_FCOE_FLAG;
11784 eth_zero_addr(bp->fip_mac);
11785 }
11786}
11787
11788static void bnx2x_get_cnic_info(struct bnx2x *bp)
11789{
11790
11791
11792
11793
11794
11795 bnx2x_get_iscsi_info(bp);
11796 bnx2x_get_fcoe_info(bp);
11797}
11798
11799static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11800{
11801 u32 val, val2;
11802 int func = BP_ABS_FUNC(bp);
11803 int port = BP_PORT(bp);
11804 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11805 u8 *fip_mac = bp->fip_mac;
11806
11807 if (IS_MF(bp)) {
11808
11809
11810
11811
11812
11813 if (!IS_MF_SD(bp)) {
11814 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11815 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11816 val2 = MF_CFG_RD(bp, func_ext_config[func].
11817 iscsi_mac_addr_upper);
11818 val = MF_CFG_RD(bp, func_ext_config[func].
11819 iscsi_mac_addr_lower);
11820 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11821 BNX2X_DEV_INFO
11822 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11823 } else {
11824 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11825 }
11826
11827 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11828 val2 = MF_CFG_RD(bp, func_ext_config[func].
11829 fcoe_mac_addr_upper);
11830 val = MF_CFG_RD(bp, func_ext_config[func].
11831 fcoe_mac_addr_lower);
11832 bnx2x_set_mac_buf(fip_mac, val, val2);
11833 BNX2X_DEV_INFO
11834 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11835 } else {
11836 bp->flags |= NO_FCOE_FLAG;
11837 }
11838
11839 bp->mf_ext_config = cfg;
11840
11841 } else {
11842 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11843
11844 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11845
11846 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11847 BNX2X_DEV_INFO
11848 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11849 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11850
11851 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11852 BNX2X_DEV_INFO("SD FCoE MODE\n");
11853 BNX2X_DEV_INFO
11854 ("Read FIP MAC: %pM\n", fip_mac);
11855 }
11856 }
11857
11858
11859
11860
11861
11862 if (IS_MF_FCOE_AFEX(bp))
11863 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11864 } else {
11865 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11866 iscsi_mac_upper);
11867 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11868 iscsi_mac_lower);
11869 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11870
11871 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11872 fcoe_fip_mac_upper);
11873 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11874 fcoe_fip_mac_lower);
11875 bnx2x_set_mac_buf(fip_mac, val, val2);
11876 }
11877
11878
11879 if (!is_valid_ether_addr(iscsi_mac)) {
11880 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11881 eth_zero_addr(iscsi_mac);
11882 }
11883
11884
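/* Disable FCoE if the configured FIP MAC is invalid */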
11885 if (!is_valid_ether_addr(fip_mac)) {
11886 bp->flags |= NO_FCOE_FLAG;
11887 eth_zero_addr(bp->fip_mac);
11888 }
11889}
11890
11891static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11892{
11893 u32 val, val2;
11894 int func = BP_ABS_FUNC(bp);
11895 int port = BP_PORT(bp);
11896
11897
11898 eth_zero_addr(bp->dev->dev_addr);
11899
11900 if (BP_NOMCP(bp)) {
11901 BNX2X_ERROR("warning: random MAC workaround active\n");
11902 eth_hw_addr_random(bp->dev);
11903 } else if (IS_MF(bp)) {
11904 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11905 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11906 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11907 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11908 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11909
11910 if (CNIC_SUPPORT(bp))
11911 bnx2x_get_cnic_mac_hwinfo(bp);
11912 } else {
11913
11914 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11915 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11916 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11917
11918 if (CNIC_SUPPORT(bp))
11919 bnx2x_get_cnic_mac_hwinfo(bp);
11920 }
11921
11922 if (!BP_NOMCP(bp)) {
11923
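/* Use the port MAC read from shmem as the physical port identifier */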
11924 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11925 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11926 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11927 bp->flags |= HAS_PHYS_PORT_ID;
11928 }
11929
11930 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11931
11932 if (!is_valid_ether_addr(bp->dev->dev_addr))
11933 dev_err(&bp->pdev->dev,
11934 "bad Ethernet MAC address configuration: %pM\n"
11935 "change it manually before bringing up the appropriate network interface\n",
11936 bp->dev->dev_addr);
11937}
11938
11939static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11940{
11941 int tmp;
11942 u32 cfg;
11943
11944 if (IS_VF(bp))
11945 return false;
11946
11947 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11948
11949 tmp = BP_ABS_FUNC(bp);
11950 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11951 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11952 } else {
11953
11954 tmp = BP_PORT(bp);
11955 cfg = SHMEM_RD(bp,
11956 dev_info.port_hw_config[tmp].generic_features);
11957 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11958 }
11959 return cfg;
11960}
11961
11962static void validate_set_si_mode(struct bnx2x *bp)
11963{
11964 u8 func = BP_ABS_FUNC(bp);
11965 u32 val;
11966
11967 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11968
11969
11970 if (val != 0xffff) {
11971 bp->mf_mode = MULTI_FUNCTION_SI;
11972 bp->mf_config[BP_VN(bp)] =
11973 MF_CFG_RD(bp, func_mf_config[func].config);
11974 } else
11975 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11976}
11977
11978static int bnx2x_get_hwinfo(struct bnx2x *bp)
11979{
11980 int func = BP_ABS_FUNC(bp);
11981 int vn;
11982 u32 val = 0, val2 = 0;
11983 int rc = 0;
11984
11985
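/* Make sure the chip registers can be accessed at all */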
11986 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11987 dev_err(&bp->pdev->dev,
11988 "Chip read returns all Fs. Preventing probe from continuing\n");
11989 return -EINVAL;
11990 }
11991
11992 bnx2x_get_common_hwinfo(bp);
11993
11994
11995
11996
11997 if (CHIP_IS_E1x(bp)) {
11998 bp->common.int_block = INT_BLOCK_HC;
11999
12000 bp->igu_dsb_id = DEF_SB_IGU_ID;
12001 bp->igu_base_sb = 0;
12002 } else {
12003 bp->common.int_block = INT_BLOCK_IGU;
12004
12005
12006 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
12007
12008 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
12009
12010 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
12011 int tout = 5000;
12012
12013 BNX2X_DEV_INFO("FORCING Normal Mode\n");
12014
12015 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
12016 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
12017 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
12018
12019 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
12020 tout--;
12021 usleep_range(1000, 2000);
12022 }
12023
12024 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
12025 dev_err(&bp->pdev->dev,
12026 "FORCING Normal Mode failed!!!\n");
12027 bnx2x_release_hw_lock(bp,
12028 HW_LOCK_RESOURCE_RESET);
12029 return -EPERM;
12030 }
12031 }
12032
12033 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
12034 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
12035 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
12036 } else
12037 BNX2X_DEV_INFO("IGU Normal Mode\n");
12038
12039 rc = bnx2x_get_igu_cam_info(bp);
12040 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
12041 if (rc)
12042 return rc;
12043 }
12044
12045
12046
12047
12048
12049
12050 if (CHIP_IS_E1x(bp))
12051 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
12052 else
12053
12054
12055
12056
12057 bp->base_fw_ndsb = bp->igu_base_sb;
12058
12059 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
12060 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
12061 bp->igu_sb_cnt, bp->base_fw_ndsb);
12062
12063
12064
12065
12066 bp->mf_ov = 0;
12067 bp->mf_mode = 0;
12068 bp->mf_sub_mode = 0;
12069 vn = BP_VN(bp);
12070
12071 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12072 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
12073 bp->common.shmem2_base, SHMEM2_RD(bp, size),
12074 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
12075
12076 if (SHMEM2_HAS(bp, mf_cfg_addr))
12077 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12078 else
12079 bp->common.mf_cfg_base = bp->common.shmem_base +
12080 offsetof(struct shmem_region, func_mb) +
12081 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
12082
12083
12084
12085
12086
12087
12088
12089
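/* Read the MF configuration only when a valid mf_cfg address is present */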
12090 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12091
12092 val = SHMEM_RD(bp,
12093 dev_info.shared_feature_config.config);
12094 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
12095
12096 switch (val) {
12097 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
12098 validate_set_si_mode(bp);
12099 break;
12100 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
12101 if ((!CHIP_IS_E1x(bp)) &&
12102 (MF_CFG_RD(bp, func_mf_config[func].
12103 mac_upper) != 0xffff) &&
12104 (SHMEM2_HAS(bp,
12105 afex_driver_support))) {
12106 bp->mf_mode = MULTI_FUNCTION_AFEX;
12107 bp->mf_config[vn] = MF_CFG_RD(bp,
12108 func_mf_config[func].config);
12109 } else {
12110 BNX2X_DEV_INFO("can not configure afex mode\n");
12111 }
12112 break;
12113 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
12114
12115 val = MF_CFG_RD(bp,
12116 func_mf_config[FUNC_0].e1hov_tag);
12117 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12118
12119 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12120 bp->mf_mode = MULTI_FUNCTION_SD;
12121 bp->mf_config[vn] = MF_CFG_RD(bp,
12122 func_mf_config[func].config);
12123 } else
12124 BNX2X_DEV_INFO("illegal OV for SD\n");
12125 break;
12126 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12127 bp->mf_mode = MULTI_FUNCTION_SD;
12128 bp->mf_sub_mode = SUB_MF_MODE_BD;
12129 bp->mf_config[vn] =
12130 MF_CFG_RD(bp,
12131 func_mf_config[func].config);
12132
12133 if (SHMEM2_HAS(bp, mtu_size)) {
12134 int mtu_idx = BP_FW_MB_IDX(bp);
12135 u16 mtu_size;
12136 u32 mtu;
12137
12138 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12139 mtu_size = (u16)mtu;
12140 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12141 mtu_size, mtu);
12142
12143
12144 if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
12145 (mtu_size <=
12146 ETH_MAX_JUMBO_PACKET_SIZE))
12147 bp->dev->mtu = mtu_size;
12148 }
12149 break;
12150 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12151 bp->mf_mode = MULTI_FUNCTION_SD;
12152 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12153 bp->mf_config[vn] =
12154 MF_CFG_RD(bp,
12155 func_mf_config[func].config);
12156 break;
12157 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12158 bp->mf_config[vn] = 0;
12159 break;
12160 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12161 val2 = SHMEM_RD(bp,
12162 dev_info.shared_hw_config.config_3);
12163 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12164 switch (val2) {
12165 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12166 validate_set_si_mode(bp);
12167 bp->mf_sub_mode =
12168 SUB_MF_MODE_NPAR1_DOT_5;
12169 break;
12170 default:
12171
12172 bp->mf_config[vn] = 0;
12173 BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
12174 val);
12175 }
12176 break;
12177 default:
12178
12179 bp->mf_config[vn] = 0;
12180 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12181 }
12182 }
12183
12184 BNX2X_DEV_INFO("%s function mode\n",
12185 IS_MF(bp) ? "multi" : "single");
12186
12187 switch (bp->mf_mode) {
12188 case MULTI_FUNCTION_SD:
12189 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12190 FUNC_MF_CFG_E1HOV_TAG_MASK;
12191 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12192 bp->mf_ov = val;
12193 bp->path_has_ovlan = true;
12194
12195 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12196 func, bp->mf_ov, bp->mf_ov);
12197 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12198 (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12199 dev_err(&bp->pdev->dev,
12200 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12201 func);
12202 bp->path_has_ovlan = true;
12203 } else {
12204 dev_err(&bp->pdev->dev,
12205 "No valid MF OV for func %d, aborting\n",
12206 func);
12207 return -EPERM;
12208 }
12209 break;
12210 case MULTI_FUNCTION_AFEX:
12211 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12212 break;
12213 case MULTI_FUNCTION_SI:
12214 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12215 func);
12216 break;
12217 default:
12218 if (vn) {
12219 dev_err(&bp->pdev->dev,
12220 "VN %d is in a single function mode, aborting\n",
12221 vn);
12222 return -EPERM;
12223 }
12224 break;
12225 }
12226
12227
12228
12229
12230
12231
12232 if (CHIP_MODE_IS_4_PORT(bp) &&
12233 !bp->path_has_ovlan &&
12234 !IS_MF(bp) &&
12235 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12236 u8 other_port = !BP_PORT(bp);
12237 u8 other_func = BP_PATH(bp) + 2*other_port;
12238 val = MF_CFG_RD(bp,
12239 func_mf_config[other_func].e1hov_tag);
12240 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12241 bp->path_has_ovlan = true;
12242 }
12243 }
12244
12245
12246 if (CHIP_IS_E1H(bp) && IS_MF(bp))
12247 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12248
12249
12250 bnx2x_get_port_hwinfo(bp);
12251
12252
12253 bnx2x_get_mac_hwinfo(bp);
12254
12255 bnx2x_get_cnic_info(bp);
12256
12257 return rc;
12258}
12259
12260static void bnx2x_read_fwinfo(struct bnx2x *bp)
12261{
12262 int cnt, i, block_end, rodi;
12263 char vpd_start[BNX2X_VPD_LEN+1];
12264 char str_id_reg[VENDOR_ID_LEN+1];
12265 char str_id_cap[VENDOR_ID_LEN+1];
12266 char *vpd_data;
12267 char *vpd_extended_data = NULL;
12268 u8 len;
12269
12270 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
12271 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12272
12273 if (cnt < BNX2X_VPD_LEN)
12274 goto out_not_found;
12275
12276
12277
12278
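/* Locate the read-only VPD data block */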
12279 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
12280 PCI_VPD_LRDT_RO_DATA);
12281 if (i < 0)
12282 goto out_not_found;
12283
12284 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
12285 pci_vpd_lrdt_size(&vpd_start[i]);
12286
12287 i += PCI_VPD_LRDT_TAG_SIZE;
12288
12289 if (block_end > BNX2X_VPD_LEN) {
12290 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
12291 if (vpd_extended_data == NULL)
12292 goto out_not_found;
12293
12294
12295 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
12296 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
12297 block_end - BNX2X_VPD_LEN,
12298 vpd_extended_data + BNX2X_VPD_LEN);
12299 if (cnt < (block_end - BNX2X_VPD_LEN))
12300 goto out_not_found;
12301 vpd_data = vpd_extended_data;
12302 } else
12303 vpd_data = vpd_start;
12304
12305
12306
12307 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12308 PCI_VPD_RO_KEYWORD_MFR_ID);
12309 if (rodi < 0)
12310 goto out_not_found;
12311
12312 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12313
12314 if (len != VENDOR_ID_LEN)
12315 goto out_not_found;
12316
12317 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12318
12319
12320 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12321 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
12322 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
12323 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
12324
12325 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12326 PCI_VPD_RO_KEYWORD_VENDOR0);
12327 if (rodi >= 0) {
12328 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12329
12330 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12331
12332 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
12333 memcpy(bp->fw_ver, &vpd_data[rodi], len);
12334 bp->fw_ver[len] = ' ';
12335 }
12336 }
12337 kfree(vpd_extended_data);
12338 return;
12339 }
12340out_not_found:
12341 kfree(vpd_extended_data);
12342 return;
12343}
12344
12345static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12346{
12347 u32 flags = 0;
12348
12349 if (CHIP_REV_IS_FPGA(bp))
12350 SET_FLAGS(flags, MODE_FPGA);
12351 else if (CHIP_REV_IS_EMUL(bp))
12352 SET_FLAGS(flags, MODE_EMUL);
12353 else
12354 SET_FLAGS(flags, MODE_ASIC);
12355
12356 if (CHIP_MODE_IS_4_PORT(bp))
12357 SET_FLAGS(flags, MODE_PORT4);
12358 else
12359 SET_FLAGS(flags, MODE_PORT2);
12360
12361 if (CHIP_IS_E2(bp))
12362 SET_FLAGS(flags, MODE_E2);
12363 else if (CHIP_IS_E3(bp)) {
12364 SET_FLAGS(flags, MODE_E3);
12365 if (CHIP_REV(bp) == CHIP_REV_Ax)
12366 SET_FLAGS(flags, MODE_E3_A0);
12367 else
12368 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12369 }
12370
12371 if (IS_MF(bp)) {
12372 SET_FLAGS(flags, MODE_MF);
12373 switch (bp->mf_mode) {
12374 case MULTI_FUNCTION_SD:
12375 SET_FLAGS(flags, MODE_MF_SD);
12376 break;
12377 case MULTI_FUNCTION_SI:
12378 SET_FLAGS(flags, MODE_MF_SI);
12379 break;
12380 case MULTI_FUNCTION_AFEX:
12381 SET_FLAGS(flags, MODE_MF_AFEX);
12382 break;
12383 }
12384 } else
12385 SET_FLAGS(flags, MODE_SF);
12386
12387#if defined(__LITTLE_ENDIAN)
12388 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12389#else
12390 SET_FLAGS(flags, MODE_BIG_ENDIAN);
12391#endif
12392 INIT_MODE_FLAGS(bp) = flags;
12393}
12394
12395static int bnx2x_init_bp(struct bnx2x *bp)
12396{
12397 int func;
12398 int rc;
12399
12400 mutex_init(&bp->port.phy_mutex);
12401 mutex_init(&bp->fw_mb_mutex);
12402 mutex_init(&bp->drv_info_mutex);
12403 sema_init(&bp->stats_lock, 1);
12404 bp->drv_info_mng_owner = false;
12405 INIT_LIST_HEAD(&bp->vlan_reg);
12406
12407 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12408 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12409 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12410 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12411 if (IS_PF(bp)) {
12412 rc = bnx2x_get_hwinfo(bp);
12413 if (rc)
12414 return rc;
12415 } else {
12416 eth_zero_addr(bp->dev->dev_addr);
12417 }
12418
12419 bnx2x_set_modes_bitmap(bp);
12420
12421 rc = bnx2x_alloc_mem_bp(bp);
12422 if (rc)
12423 return rc;
12424
12425 bnx2x_read_fwinfo(bp);
12426
12427 func = BP_FUNC(bp);
12428
12429
12430 if (IS_PF(bp) && !BP_NOMCP(bp)) {
12431
12432 bp->fw_seq =
12433 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12434 DRV_MSG_SEQ_NUMBER_MASK;
12435 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12436
12437 rc = bnx2x_prev_unload(bp);
12438 if (rc) {
12439 bnx2x_free_mem_bp(bp);
12440 return rc;
12441 }
12442 }
12443
12444 if (CHIP_REV_IS_FPGA(bp))
12445 dev_err(&bp->pdev->dev, "FPGA detected\n");
12446
12447 if (BP_NOMCP(bp) && (func == 0))
12448 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12449
12450 bp->disable_tpa = disable_tpa;
12451 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
12452
12453 bp->disable_tpa |= is_kdump_kernel();
12454
12455
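/* With TPA disabled, clear LRO and hardware GRO from the device features */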
12456 if (bp->disable_tpa) {
12457 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12458 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12459 }
12460
12461 if (CHIP_IS_E1(bp))
12462 bp->dropless_fc = 0;
12463 else
12464 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12465
12466 bp->mrrs = mrrs;
12467
12468 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12469 if (IS_VF(bp))
12470 bp->rx_ring_size = MAX_RX_AVAIL;
12471
12472
12473 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12474 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12475
12476 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12477
12478 timer_setup(&bp->timer, bnx2x_timer, 0);
12479 bp->timer.expires = jiffies + bp->current_interval;
12480
12481 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12482 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12483 SHMEM2_HAS(bp, dcbx_en) &&
12484 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12485 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12486 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12487 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12488 bnx2x_dcbx_init_params(bp);
12489 } else {
12490 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12491 }
12492
12493 if (CHIP_IS_E1x(bp))
12494 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12495 else
12496 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12497
12498
12499 if (IS_VF(bp))
12500 bp->max_cos = 1;
12501 else if (CHIP_IS_E1x(bp))
12502 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12503 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12504 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12505 else if (CHIP_IS_E3B0(bp))
12506 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12507 else
12508 BNX2X_ERR("unknown chip %x revision %x\n",
12509 CHIP_NUM(bp), CHIP_REV(bp));
12510 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12511
12512
12513
12514
12515
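/* One vector is needed for slow-path, one for the L2 queue, and one
 * more for CNIC when it is supported; a VF only needs a single vector.
 */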
12516 if (IS_VF(bp))
12517 bp->min_msix_vec_cnt = 1;
12518 else if (CNIC_SUPPORT(bp))
12519 bp->min_msix_vec_cnt = 3;
12520 else
12521 bp->min_msix_vec_cnt = 2;
BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
12523
12524 bp->dump_preset_idx = 1;
12525
12526 return rc;
12527}
12528
12529
12530
12531
12532
12533
12534
12535
12536
12537
12538static int bnx2x_open(struct net_device *dev)
12539{
12540 struct bnx2x *bp = netdev_priv(dev);
12541 int rc;
12542
12543 bp->stats_init = true;
12544
12545 netif_carrier_off(dev);
12546
12547 bnx2x_set_power_state(bp, PCI_D0);
12548
12554
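/* If a parity error was left over from the previous unload, a recovery
 * flow may be needed before the NIC can be loaded again.
 */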
12555 if (IS_PF(bp)) {
12556 int other_engine = BP_PATH(bp) ? 0 : 1;
12557 bool other_load_status, load_status;
12558 bool global = false;
12559
12560 other_load_status = bnx2x_get_load_status(bp, other_engine);
12561 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12562 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12563 bnx2x_chk_parity_attn(bp, &global, true)) {
12564 do {
12565
12566
12567
12568
12569
12570 if (global)
12571 bnx2x_set_reset_global(bp);
12572
12573
12574
12575
12576
12577
12578 if ((!load_status &&
12579 (!global || !other_load_status)) &&
12580 bnx2x_trylock_leader_lock(bp) &&
12581 !bnx2x_leader_reset(bp)) {
12582 netdev_info(bp->dev,
12583 "Recovered in open\n");
12584 break;
12585 }
12586
12587
12588 bnx2x_set_power_state(bp, PCI_D3hot);
12589 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12590
12591 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12592 "If you still see this message after a few retries then power cycle is required.\n");
12593
12594 return -EAGAIN;
12595 } while (0);
12596 }
12597 }
12598
12599 bp->recovery_state = BNX2X_RECOVERY_DONE;
12600 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12601 if (rc)
12602 return rc;
12603
12604 if (IS_PF(bp))
12605 udp_tunnel_get_rx_info(dev);
12606
12607 return 0;
12608}
12609
12610
12611static int bnx2x_close(struct net_device *dev)
12612{
12613 struct bnx2x *bp = netdev_priv(dev);
12614
12615
12616 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12617
12618 return 0;
12619}
12620
12621struct bnx2x_mcast_list_elem_group
12622{
12623 struct list_head mcast_group_link;
12624 struct bnx2x_mcast_list_elem mcast_elems[];
12625};
12626
12627#define MCAST_ELEMS_PER_PG \
12628 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12629 sizeof(struct bnx2x_mcast_list_elem))
12630
12631static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
12632{
12633 struct bnx2x_mcast_list_elem_group *current_mcast_group;
12634
12635 while (!list_empty(mcast_group_list)) {
12636 current_mcast_group = list_first_entry(mcast_group_list,
12637 struct bnx2x_mcast_list_elem_group,
12638 mcast_group_link);
list_del(&current_mcast_group->mcast_group_link);
12640 free_page((unsigned long)current_mcast_group);
12641 }
12642}
12643
12644static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12645 struct bnx2x_mcast_ramrod_params *p,
12646 struct list_head *mcast_group_list)
12647{
12648 struct bnx2x_mcast_list_elem *mc_mac;
12649 struct netdev_hw_addr *ha;
12650 struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
12651 int mc_count = netdev_mc_count(bp->dev);
12652 int offset = 0;
12653
12654 INIT_LIST_HEAD(&p->mcast_list);
12655 netdev_for_each_mc_addr(ha, bp->dev) {
12656 if (!offset) {
12657 current_mcast_group =
12658 (struct bnx2x_mcast_list_elem_group *)
12659 __get_free_page(GFP_ATOMIC);
12660 if (!current_mcast_group) {
12661 bnx2x_free_mcast_macs_list(mcast_group_list);
12662 BNX2X_ERR("Failed to allocate mc MAC list\n");
12663 return -ENOMEM;
12664 }
list_add(&current_mcast_group->mcast_group_link,
12666 mcast_group_list);
12667 }
mc_mac = &current_mcast_group->mcast_elems[offset];
12669 mc_mac->mac = bnx2x_mc_addr(ha);
12670 list_add_tail(&mc_mac->link, &p->mcast_list);
12671 offset++;
12672 if (offset == MCAST_ELEMS_PER_PG)
12673 offset = 0;
12674 }
12675 p->mcast_list_len = mc_count;
12676 return 0;
12677}
12678
12679
12680
12681
12682
12683
12684
12685
12686static int bnx2x_set_uc_list(struct bnx2x *bp)
12687{
12688 int rc;
12689 struct net_device *dev = bp->dev;
12690 struct netdev_hw_addr *ha;
12691 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12692 unsigned long ramrod_flags = 0;
12693
12694
12695 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12696 if (rc < 0) {
12697 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12698 return rc;
12699 }
12700
12701 netdev_for_each_uc_addr(ha, dev) {
12702 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12703 BNX2X_UC_LIST_MAC, &ramrod_flags);
12704 if (rc == -EEXIST) {
12705 DP(BNX2X_MSG_SP,
12706 "Failed to schedule ADD operations: %d\n", rc);
12707
12708 rc = 0;
12709
12710 } else if (rc < 0) {
12711
12712 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12713 rc);
12714 return rc;
12715 }
12716 }
12717
12718
12719 __set_bit(RAMROD_CONT, &ramrod_flags);
return bnx2x_set_mac_one(bp, NULL, mac_obj, false,
12721 BNX2X_UC_LIST_MAC, &ramrod_flags);
12722}
12723
12724static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
12725{
12726 LIST_HEAD(mcast_group_list);
12727 struct net_device *dev = bp->dev;
12728 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12729 int rc = 0;
12730
12731 rparam.mcast_obj = &bp->mcast_obj;
12732
12733
12734 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12735 if (rc < 0) {
12736 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12737 return rc;
12738 }
12739
12740
12741 if (netdev_mc_count(dev)) {
12742 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12743 if (rc)
12744 return rc;
12745
12746
12747 rc = bnx2x_config_mcast(bp, &rparam,
12748 BNX2X_MCAST_CMD_ADD);
12749 if (rc < 0)
12750 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12751 rc);
12752
12753 bnx2x_free_mcast_macs_list(&mcast_group_list);
12754 }
12755
12756 return rc;
12757}
12758
12759static int bnx2x_set_mc_list(struct bnx2x *bp)
12760{
12761 LIST_HEAD(mcast_group_list);
12762 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12763 struct net_device *dev = bp->dev;
12764 int rc = 0;
12765
12766
12767 if (CHIP_IS_E1x(bp))
12768 return bnx2x_set_mc_list_e1x(bp);
12769
12770 rparam.mcast_obj = &bp->mcast_obj;
12771
12772 if (netdev_mc_count(dev)) {
12773 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12774 if (rc)
12775 return rc;
12776
12777
12778 rc = bnx2x_config_mcast(bp, &rparam,
12779 BNX2X_MCAST_CMD_SET);
12780 if (rc < 0)
12781 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12782 rc);
12783
12784 bnx2x_free_mcast_macs_list(&mcast_group_list);
12785 } else {
12786
12787 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12788 if (rc < 0)
12789 BNX2X_ERR("Failed to clear multicast configuration %d\n",
12790 rc);
12791 }
12792
12793 return rc;
12794}
12795
12796
12797static void bnx2x_set_rx_mode(struct net_device *dev)
12798{
12799 struct bnx2x *bp = netdev_priv(dev);
12800
12801 if (bp->state != BNX2X_STATE_OPEN) {
12802 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12803 return;
12804 } else {
12805
12806 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12807 NETIF_MSG_IFUP);
12808 }
12809}
12810
12811void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12812{
12813 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12814
12815 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12816
12817 netif_addr_lock_bh(bp->dev);
12818
12819 if (bp->dev->flags & IFF_PROMISC) {
12820 rx_mode = BNX2X_RX_MODE_PROMISC;
12821 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12822 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12823 CHIP_IS_E1(bp))) {
12824 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12825 } else {
12826 if (IS_PF(bp)) {
12827
12828 if (bnx2x_set_mc_list(bp) < 0)
12829 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12830
12831
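/* Drop the address lock while configuring unicast MACs, as bnx2x_set_uc_list() may sleep */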
12832 netif_addr_unlock_bh(bp->dev);
12833 if (bnx2x_set_uc_list(bp) < 0)
12834 rx_mode = BNX2X_RX_MODE_PROMISC;
12835 netif_addr_lock_bh(bp->dev);
12836 } else {
12837
12838
12839
12840 bnx2x_schedule_sp_rtnl(bp,
12841 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12842 }
12843 }
12844
12845 bp->rx_mode = rx_mode;
12846
12847 if (IS_MF_ISCSI_ONLY(bp))
12848 bp->rx_mode = BNX2X_RX_MODE_NONE;
12849
12850
12851 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12852 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12853 netif_addr_unlock_bh(bp->dev);
12854 return;
12855 }
12856
12857 if (IS_PF(bp)) {
12858 bnx2x_set_storm_rx_mode(bp);
12859 netif_addr_unlock_bh(bp->dev);
12860 } else {
12861
12862
12863
12864
12865 netif_addr_unlock_bh(bp->dev);
12866 bnx2x_vfpf_storm_rx_mode(bp);
12867 }
12868}
12869
12870
12871static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12872 int devad, u16 addr)
12873{
12874 struct bnx2x *bp = netdev_priv(netdev);
12875 u16 value;
12876 int rc;
12877
12878 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12879 prtad, devad, addr);
12880
12881
12882 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12883
12884 bnx2x_acquire_phy_lock(bp);
12885 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12886 bnx2x_release_phy_lock(bp);
12887 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12888
12889 if (!rc)
12890 rc = value;
12891 return rc;
12892}
12893
12894
12895static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12896 u16 addr, u16 value)
12897{
12898 struct bnx2x *bp = netdev_priv(netdev);
12899 int rc;
12900
12901 DP(NETIF_MSG_LINK,
12902 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12903 prtad, devad, addr, value);
12904
12905
12906 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12907
12908 bnx2x_acquire_phy_lock(bp);
12909 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12910 bnx2x_release_phy_lock(bp);
12911 return rc;
12912}
12913
12914
12915static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12916{
12917 struct bnx2x *bp = netdev_priv(dev);
12918 struct mii_ioctl_data *mdio = if_mii(ifr);
12919
12920 if (!netif_running(dev))
12921 return -EAGAIN;
12922
12923 switch (cmd) {
12924 case SIOCSHWTSTAMP:
12925 return bnx2x_hwtstamp_ioctl(bp, ifr);
12926 default:
12927 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12928 mdio->phy_id, mdio->reg_num, mdio->val_in);
12929 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12930 }
12931}
12932
12933static int bnx2x_validate_addr(struct net_device *dev)
12934{
12935 struct bnx2x *bp = netdev_priv(dev);
12936
12937	/* query the bulletin board for the MAC address configured by the PF */
12938 if (IS_VF(bp))
12939 bnx2x_sample_bulletin(bp);
12940
12941 if (!is_valid_ether_addr(dev->dev_addr)) {
12942		BNX2X_ERR("Invalid Ethernet address\n");
12943 return -EADDRNOTAVAIL;
12944 }
12945 return 0;
12946}
12947
12948static int bnx2x_get_phys_port_id(struct net_device *netdev,
12949 struct netdev_phys_item_id *ppid)
12950{
12951 struct bnx2x *bp = netdev_priv(netdev);
12952
12953 if (!(bp->flags & HAS_PHYS_PORT_ID))
12954 return -EOPNOTSUPP;
12955
12956 ppid->id_len = sizeof(bp->phys_port_id);
12957 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12958
12959 return 0;
12960}
12961
12962static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12963 struct net_device *dev,
12964 netdev_features_t features)
12965{
12966	/* A skb with gso_size + header length > 9700 will cause a
12967	 * firmware panic, so drop GSO support for such packets.
12968	 *
12969	 * Eventually the upper layer should not pass these packets down.
12970	 *
12971	 * For speed, if the gso_size is <= 9000, assume there will
12972	 * not be 700 bytes of headers and pass it through.  Only run
12973	 * the full MAC-length validation on skbs with gso_size > 9000.
12974	 *
12975	 * (The 9700-byte limit includes the MAC header, which is why
12976	 *  skb_gso_validate_mac_len() is used rather than a network-
12977	 *  level length check.)
12978	 */
12979 if (unlikely(skb_is_gso(skb) &&
12980 (skb_shinfo(skb)->gso_size > 9000) &&
12981 !skb_gso_validate_mac_len(skb, 9700)))
12982 features &= ~NETIF_F_GSO_MASK;
12983
12984 features = vlan_features_check(skb, features);
12985 return vxlan_features_check(skb, features);
12986}
12987
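/* Add or remove a single VLAN ID in HW: directly for a PF, or via the
 * VF-PF channel for a VF.
 */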
12988static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12989{
12990 int rc;
12991
12992 if (IS_PF(bp)) {
12993 unsigned long ramrod_flags = 0;
12994
12995 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12996 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
12997 add, &ramrod_flags);
12998 } else {
12999 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
13000 }
13001
13002 return rc;
13003}
13004
13005static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
13006{
13007 struct bnx2x_vlan_entry *vlan;
13008 int rc = 0;
13009
13010	/* Configure all the entries that have not been configured in HW yet */
13011 list_for_each_entry(vlan, &bp->vlan_reg, link) {
13012 if (vlan->hw)
13013 continue;
13014
13015 if (bp->vlan_cnt >= bp->vlan_credit)
13016 return -ENOBUFS;
13017
13018 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
13019 if (rc) {
13020 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
13021 return rc;
13022 }
13023
13024 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
13025 vlan->hw = true;
13026 bp->vlan_cnt++;
13027 }
13028
13029 return 0;
13030}
13031
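/* Apply the registered VLAN list to HW and raise or clear the
 * accept-any-VLAN fallback depending on whether all entries could be
 * configured.
 */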
13032static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
13033{
13034 bool need_accept_any_vlan;
13035
13036 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
13037
13038 if (bp->accept_any_vlan != need_accept_any_vlan) {
13039 bp->accept_any_vlan = need_accept_any_vlan;
13040 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
13041 bp->accept_any_vlan ? "raised" : "cleared");
13042 if (set_rx_mode) {
13043 if (IS_PF(bp))
13044 bnx2x_set_rx_mode_inner(bp);
13045 else
13046 bnx2x_vfpf_storm_rx_mode(bp);
13047 }
13048 }
13049}
13050
13051int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
13052{
13053
13054 bnx2x_vlan_configure(bp, false);
13055
13056 return 0;
13057}
13058
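/* ndo_vlan_rx_add_vid: record the VLAN in the driver's list and, if the
 * interface is running, try to configure it in HW.
 */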
13059static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
13060{
13061 struct bnx2x *bp = netdev_priv(dev);
13062 struct bnx2x_vlan_entry *vlan;
13063
13064 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
13065
13066 vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
13067 if (!vlan)
13068 return -ENOMEM;
13069
13070 vlan->vid = vid;
13071 vlan->hw = false;
13072 list_add_tail(&vlan->link, &bp->vlan_reg);
13073
13074 if (netif_running(dev))
13075 bnx2x_vlan_configure(bp, true);
13076
13077 return 0;
13078}
13079
13080static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
13081{
13082 struct bnx2x *bp = netdev_priv(dev);
13083 struct bnx2x_vlan_entry *vlan;
13084 bool found = false;
13085 int rc = 0;
13086
13087 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
13088
13089 list_for_each_entry(vlan, &bp->vlan_reg, link)
13090 if (vlan->vid == vid) {
13091 found = true;
13092 break;
13093 }
13094
13095 if (!found) {
13096 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
13097 return -EINVAL;
13098 }
13099
13100 if (netif_running(dev) && vlan->hw) {
13101 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
13102 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
13103 bp->vlan_cnt--;
13104 }
13105
13106 list_del(&vlan->link);
13107 kfree(vlan);
13108
13109 if (netif_running(dev))
13110 bnx2x_vlan_configure(bp, true);
13111
13112 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
13113
13114 return rc;
13115}
13116
13117static const struct net_device_ops bnx2x_netdev_ops = {
13118 .ndo_open = bnx2x_open,
13119 .ndo_stop = bnx2x_close,
13120 .ndo_start_xmit = bnx2x_start_xmit,
13121 .ndo_select_queue = bnx2x_select_queue,
13122 .ndo_set_rx_mode = bnx2x_set_rx_mode,
13123 .ndo_set_mac_address = bnx2x_change_mac_addr,
13124 .ndo_validate_addr = bnx2x_validate_addr,
13125 .ndo_do_ioctl = bnx2x_ioctl,
13126 .ndo_change_mtu = bnx2x_change_mtu,
13127 .ndo_fix_features = bnx2x_fix_features,
13128 .ndo_set_features = bnx2x_set_features,
13129 .ndo_tx_timeout = bnx2x_tx_timeout,
13130 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
13131 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
13132 .ndo_setup_tc = __bnx2x_setup_tc,
13133#ifdef CONFIG_BNX2X_SRIOV
13134 .ndo_set_vf_mac = bnx2x_set_vf_mac,
13135 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
13136 .ndo_get_vf_config = bnx2x_get_vf_config,
13137 .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk,
13138#endif
13139#ifdef NETDEV_FCOE_WWNN
13140 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
13141#endif
13142
13143 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
13144 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
13145 .ndo_features_check = bnx2x_features_check,
13146 .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add,
13147 .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del,
13148};
13149
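/* Prefer a 64-bit DMA mask and fall back to 32-bit if the system cannot
 * support it.
 */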
13150static int bnx2x_set_coherency_mask(struct bnx2x *bp)
13151{
13152 struct device *dev = &bp->pdev->dev;
13153
13154 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
13155 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
13156 dev_err(dev, "System does not support DMA, aborting\n");
13157 return -EIO;
13158 }
13159
13160 return 0;
13161}
13162
13163static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13164{
13165 if (bp->flags & AER_ENABLED) {
13166 pci_disable_pcie_error_reporting(bp->pdev);
13167 bp->flags &= ~AER_ENABLED;
13168 }
13169}
13170
13171static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13172 struct net_device *dev, unsigned long board_type)
13173{
13174 int rc;
13175 u32 pci_cfg_dword;
13176 bool chip_is_e1x = (board_type == BCM57710 ||
13177 board_type == BCM57711 ||
13178 board_type == BCM57711E);
13179
13180 SET_NETDEV_DEV(dev, &pdev->dev);
13181
13182 bp->dev = dev;
13183 bp->pdev = pdev;
13184
13185 rc = pci_enable_device(pdev);
13186 if (rc) {
13187 dev_err(&bp->pdev->dev,
13188 "Cannot enable PCI device, aborting\n");
13189 goto err_out;
13190 }
13191
13192 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13193 dev_err(&bp->pdev->dev,
13194 "Cannot find PCI device base address, aborting\n");
13195 rc = -ENODEV;
13196 goto err_out_disable;
13197 }
13198
13199 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13200 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13201 rc = -ENODEV;
13202 goto err_out_disable;
13203 }
13204
13205 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13206 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13207 PCICFG_REVESION_ID_ERROR_VAL) {
13208 pr_err("PCI device error, probably due to fan failure, aborting\n");
13209 rc = -ENODEV;
13210 goto err_out_disable;
13211 }
13212
13213 if (atomic_read(&pdev->enable_cnt) == 1) {
13214 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13215 if (rc) {
13216 dev_err(&bp->pdev->dev,
13217 "Cannot obtain PCI resources, aborting\n");
13218 goto err_out_disable;
13219 }
13220
13221 pci_set_master(pdev);
13222 pci_save_state(pdev);
13223 }
13224
13225 if (IS_PF(bp)) {
13226 if (!pdev->pm_cap) {
13227 dev_err(&bp->pdev->dev,
13228 "Cannot find power management capability, aborting\n");
13229 rc = -EIO;
13230 goto err_out_release;
13231 }
13232 }
13233
13234 if (!pci_is_pcie(pdev)) {
13235 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13236 rc = -EIO;
13237 goto err_out_release;
13238 }
13239
13240 rc = bnx2x_set_coherency_mask(bp);
13241 if (rc)
13242 goto err_out_release;
13243
13244 dev->mem_start = pci_resource_start(pdev, 0);
13245 dev->base_addr = dev->mem_start;
13246 dev->mem_end = pci_resource_end(pdev, 0);
13247
13248 dev->irq = pdev->irq;
13249
13250 bp->regview = pci_ioremap_bar(pdev, 0);
13251 if (!bp->regview) {
13252 dev_err(&bp->pdev->dev,
13253 "Cannot map register space, aborting\n");
13254 rc = -ENOMEM;
13255 goto err_out_release;
13256 }
13257
13258	/* In E1/E1H use the PCI device function as given by the kernel.
13259	 * In E2/E3 read the physical function from the ME register, since
13260	 * these chips support Physical Device Assignment where the kernel
13261	 * BDF may be arbitrary (depending on the hypervisor).
13262	 */
13263 if (chip_is_e1x) {
13264 bp->pf_num = PCI_FUNC(pdev->devfn);
13265 } else {
13266
13267 pci_read_config_dword(bp->pdev,
13268 PCICFG_ME_REGISTER, &pci_cfg_dword);
13269 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13270 ME_REG_ABS_PF_NUM_SHIFT);
13271 }
13272 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
13273
13274
13275 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13276 PCICFG_VENDOR_ID_OFFSET);
13277
13278	/* Set PCIe reset type to fundamental for EEH recovery */
13279 pdev->needs_freset = 1;
13280
13281	/* AER (Advanced Error Reporting) configuration */
13282 rc = pci_enable_pcie_error_reporting(pdev);
13283 if (!rc)
13284 bp->flags |= AER_ENABLED;
13285 else
13286		BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);
13287
13288	/*
13289	 * Clean the following indirect addresses for all functions since they
13290	 * are not used by the driver.
13291	 */
13292 if (IS_PF(bp)) {
13293 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13294 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13295 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13296 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13297
13298 if (chip_is_e1x) {
13299 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13300 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13301 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13302 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13303 }
13304
13305		/* Enable internal target-read (in case we are probed after a
13306		 * PF FLR).  Must be done prior to any BAR read access.
13307		 * Only for 57712 and up.
13308		 */
13309 if (!chip_is_e1x)
13310 REG_WR(bp,
13311 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13312 }
13313
13314 dev->watchdog_timeo = TX_TIMEOUT;
13315
13316 dev->netdev_ops = &bnx2x_netdev_ops;
13317 bnx2x_set_ethtool_ops(bp, dev);
13318
13319 dev->priv_flags |= IFF_UNICAST_FLT;
13320
13321 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13322 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13323 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
13324 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
13325 if (!chip_is_e1x) {
13326 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13327 NETIF_F_GSO_IPXIP4 |
13328 NETIF_F_GSO_UDP_TUNNEL |
13329 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13330 NETIF_F_GSO_PARTIAL;
13331
13332 dev->hw_enc_features =
13333 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13334 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13335 NETIF_F_GSO_IPXIP4 |
13336 NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13337 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
13338 NETIF_F_GSO_PARTIAL;
13339
13340 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
13341 NETIF_F_GSO_UDP_TUNNEL_CSUM;
13342 }
13343
13344 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13345 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13346
13347 if (IS_PF(bp)) {
13348 if (chip_is_e1x)
13349 bp->accept_any_vlan = true;
13350 else
13351 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13352 }
13353
13354
13355
13356
13357 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13358 dev->features |= NETIF_F_HIGHDMA;
13359 if (dev->features & NETIF_F_LRO)
13360 dev->features &= ~NETIF_F_GRO_HW;
13361
13362
13363 dev->hw_features |= NETIF_F_LOOPBACK;
13364
13365#ifdef BCM_DCBNL
13366 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13367#endif
13368
13369
13370 dev->min_mtu = ETH_MIN_PACKET_SIZE;
13371 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
13372
13373
13374 bp->mdio.prtad = MDIO_PRTAD_NONE;
13375 bp->mdio.mmds = 0;
13376 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13377 bp->mdio.dev = dev;
13378 bp->mdio.mdio_read = bnx2x_mdio_read;
13379 bp->mdio.mdio_write = bnx2x_mdio_write;
13380
13381 return 0;
13382
13383err_out_release:
13384 if (atomic_read(&pdev->enable_cnt) == 1)
13385 pci_release_regions(pdev);
13386
13387err_out_disable:
13388 pci_disable_device(pdev);
13389
13390err_out:
13391 return rc;
13392}
13393
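/* Validate the firmware file: overall size, per-section bounds, init_ops
 * offsets and the firmware version.
 */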
13394static int bnx2x_check_firmware(struct bnx2x *bp)
13395{
13396 const struct firmware *firmware = bp->firmware;
13397 struct bnx2x_fw_file_hdr *fw_hdr;
13398 struct bnx2x_fw_file_section *sections;
13399 u32 offset, len, num_ops;
13400 __be16 *ops_offsets;
13401 int i;
13402 const u8 *fw_ver;
13403
13404 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
13405 BNX2X_ERR("Wrong FW size\n");
13406 return -EINVAL;
13407 }
13408
13409 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13410 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13411
13412	/* Make sure none of the offsets and sizes make us read beyond
13413	 * the end of the firmware data */
13414 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13415 offset = be32_to_cpu(sections[i].offset);
13416 len = be32_to_cpu(sections[i].len);
13417 if (offset + len > firmware->size) {
13418 BNX2X_ERR("Section %d length is out of bounds\n", i);
13419 return -EINVAL;
13420 }
13421 }
13422
13423
13424 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13425 ops_offsets = (__force __be16 *)(firmware->data + offset);
13426 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13427
13428 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13429 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13430 BNX2X_ERR("Section offset %d is out of bounds\n", i);
13431 return -EINVAL;
13432 }
13433 }
13434
13435
13436 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13437 fw_ver = firmware->data + offset;
13438 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13439 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13440 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13441 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13442 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13443 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
13444 BCM_5710_FW_MAJOR_VERSION,
13445 BCM_5710_FW_MINOR_VERSION,
13446 BCM_5710_FW_REVISION_VERSION,
13447 BCM_5710_FW_ENGINEERING_VERSION);
13448 return -EINVAL;
13449 }
13450
13451 return 0;
13452}
13453
13454static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13455{
13456 const __be32 *source = (const __be32 *)_source;
13457 u32 *target = (u32 *)_target;
13458 u32 i;
13459
13460 for (i = 0; i < n/4; i++)
13461 target[i] = be32_to_cpu(source[i]);
13462}
13463
13464/*
13465 * Ops array is stored in the following format:
13466 * {op(8bit), offset(24bit, big endian), raw_data(32bit, big endian)}
13467 */
13468static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13469{
13470 const __be32 *source = (const __be32 *)_source;
13471 struct raw_op *target = (struct raw_op *)_target;
13472 u32 i, j, tmp;
13473
13474 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13475 tmp = be32_to_cpu(source[j]);
13476 target[i].op = (tmp >> 24) & 0xff;
13477 target[i].offset = tmp & 0xffffff;
13478 target[i].raw_data = be32_to_cpu(source[j + 1]);
13479 }
13480}
13481
13482/* IRO array is stored in the following format:
13483 * {base(32bit), m1(16bit) m2(16bit), m3(16bit) size(16bit)}
13484 */
13485static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13486{
13487 const __be32 *source = (const __be32 *)_source;
13488 struct iro *target = (struct iro *)_target;
13489 u32 i, j, tmp;
13490
13491 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13492 target[i].base = be32_to_cpu(source[j]);
13493 j++;
13494 tmp = be32_to_cpu(source[j]);
13495 target[i].m1 = (tmp >> 16) & 0xffff;
13496 target[i].m2 = tmp & 0xffff;
13497 j++;
13498 tmp = be32_to_cpu(source[j]);
13499 target[i].m3 = (tmp >> 16) & 0xffff;
13500 target[i].size = tmp & 0xffff;
13501 j++;
13502 }
13503}
13504
13505static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13506{
13507 const __be16 *source = (const __be16 *)_source;
13508 u16 *target = (u16 *)_target;
13509 u32 i;
13510
13511 for (i = 0; i < n/2; i++)
13512 target[i] = be16_to_cpu(source[i]);
13513}
13514
13515#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13516do { \
13517 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13518 bp->arr = kmalloc(len, GFP_KERNEL); \
13519 if (!bp->arr) \
13520 goto lbl; \
13521 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13522 (u8 *)bp->arr, len); \
13523} while (0)
13524
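/* Request the chip-specific firmware file and unpack its init data,
 * opcode array, offsets and IRO array into driver-owned buffers.
 */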
13525static int bnx2x_init_firmware(struct bnx2x *bp)
13526{
13527 const char *fw_file_name;
13528 struct bnx2x_fw_file_hdr *fw_hdr;
13529 int rc;
13530
13531 if (bp->firmware)
13532 return 0;
13533
13534 if (CHIP_IS_E1(bp))
13535 fw_file_name = FW_FILE_NAME_E1;
13536 else if (CHIP_IS_E1H(bp))
13537 fw_file_name = FW_FILE_NAME_E1H;
13538 else if (!CHIP_IS_E1x(bp))
13539 fw_file_name = FW_FILE_NAME_E2;
13540 else {
13541 BNX2X_ERR("Unsupported chip revision\n");
13542 return -EINVAL;
13543 }
13544 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13545
13546 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13547 if (rc) {
13548 BNX2X_ERR("Can't load firmware file %s\n",
13549 fw_file_name);
13550 goto request_firmware_exit;
13551 }
13552
13553 rc = bnx2x_check_firmware(bp);
13554 if (rc) {
13555 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13556 goto request_firmware_exit;
13557 }
13558
13559 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13560
13561
13562
13563 rc = -ENOMEM;
13564 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13565
13566
13567 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13568
13569
13570 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13571 be16_to_cpu_n);
13572
13573
13574 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13575 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13576 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13577 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13578 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13579 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13580 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13581 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13582 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13583 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13584 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13585 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13586 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13587 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13588 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13589 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13590
13591 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13592
13593 return 0;
13594
13595iro_alloc_err:
13596 kfree(bp->init_ops_offsets);
13597init_offsets_alloc_err:
13598 kfree(bp->init_ops);
13599init_ops_alloc_err:
13600 kfree(bp->init_data);
13601request_firmware_exit:
13602 release_firmware(bp->firmware);
13603 bp->firmware = NULL;
13604
13605 return rc;
13606}
13607
13608static void bnx2x_release_firmware(struct bnx2x *bp)
13609{
13610 kfree(bp->init_ops_offsets);
13611 kfree(bp->init_ops);
13612 kfree(bp->init_data);
13613 release_firmware(bp->firmware);
13614 bp->firmware = NULL;
13615}
13616
13617static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13618 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13619 .init_hw_cmn = bnx2x_init_hw_common,
13620 .init_hw_port = bnx2x_init_hw_port,
13621 .init_hw_func = bnx2x_init_hw_func,
13622
13623 .reset_hw_cmn = bnx2x_reset_common,
13624 .reset_hw_port = bnx2x_reset_port,
13625 .reset_hw_func = bnx2x_reset_func,
13626
13627 .gunzip_init = bnx2x_gunzip_init,
13628 .gunzip_end = bnx2x_gunzip_end,
13629
13630 .init_fw = bnx2x_init_firmware,
13631 .release_fw = bnx2x_release_firmware,
13632};
13633
13634void bnx2x__init_func_obj(struct bnx2x *bp)
13635{
13636
13637 bnx2x_setup_dmae(bp);
13638
13639 bnx2x_init_func_obj(bp, &bp->func_obj,
13640 bnx2x_sp(bp, func_rdata),
13641 bnx2x_sp_mapping(bp, func_rdata),
13642 bnx2x_sp(bp, func_afex_rdata),
13643 bnx2x_sp_mapping(bp, func_afex_rdata),
13644 &bnx2x_func_sp_drv);
13645}
13646
13647
13648static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13649{
13650 int cid_count = BNX2X_L2_MAX_CID(bp);
13651
13652 if (IS_SRIOV(bp))
13653 cid_count += BNX2X_VF_CIDS;
13654
13655 if (CNIC_SUPPORT(bp))
13656 cid_count += CNIC_CID_MAX;
13657
13658 return roundup(cid_count, QM_CID_ROUND);
13659}
13660
13661/**
13662 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
13663 * @pdev: pci device
13664 * @cnic_cnt: number of status blocks reserved for CNIC
13665 *
13666 */
13667static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13668{
13669 int index;
13670 u16 control = 0;
13671
13672	/*
13673	 * If MSI-X is not supported - return the number of SBs needed to
13674	 * support one fastpath queue: one FP queue + SB(s) for CNIC
13675	 */
13676 if (!pdev->msix_cap) {
13677 dev_info(&pdev->dev, "no msix capability found\n");
13678 return 1 + cnic_cnt;
13679 }
13680 dev_info(&pdev->dev, "msix capability found\n");
13681
13682	/*
13683	 * The value in the PCI configuration space is the index of the last
13684	 * entry, namely one less than the actual size of the MSI-X table,
13685	 * which is exactly what we want to return from this function: the
13686	 * number of status blocks that can be used in addition to the
13687	 * default one.
13688	 */
13689 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13690
13691 index = control & PCI_MSIX_FLAGS_QSIZE;
13692
13693 return index;
13694}
13695
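/* Rough per-chip estimate of the maximum number of CoS (traffic classes)
 * supported, used when sizing the Tx queue allocation.
 */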
13696static int set_max_cos_est(int chip_id)
13697{
13698 switch (chip_id) {
13699 case BCM57710:
13700 case BCM57711:
13701 case BCM57711E:
13702 return BNX2X_MULTI_TX_COS_E1X;
13703 case BCM57712:
13704 case BCM57712_MF:
13705 return BNX2X_MULTI_TX_COS_E2_E3A0;
13706 case BCM57800:
13707 case BCM57800_MF:
13708 case BCM57810:
13709 case BCM57810_MF:
13710 case BCM57840_4_10:
13711 case BCM57840_2_20:
13712 case BCM57840_O:
13713 case BCM57840_MFO:
13714 case BCM57840_MF:
13715 case BCM57811:
13716 case BCM57811_MF:
13717 return BNX2X_MULTI_TX_COS_E3B0;
13718 case BCM57712_VF:
13719 case BCM57800_VF:
13720 case BCM57810_VF:
13721 case BCM57840_VF:
13722 case BCM57811_VF:
13723 return 1;
13724 default:
13725 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13726 return -ENODEV;
13727 }
13728}
13729
13730static int set_is_vf(int chip_id)
13731{
13732 switch (chip_id) {
13733 case BCM57712_VF:
13734 case BCM57800_VF:
13735 case BCM57810_VF:
13736 case BCM57840_VF:
13737 case BCM57811_VF:
13738 return true;
13739 default:
13740 return false;
13741 }
13742}
13743
13744
13745#define tsgen_ctrl 0x0
13746#define tsgen_freecount 0x10
13747#define tsgen_synctime_t0 0x20
13748#define tsgen_offset_t0 0x28
13749#define tsgen_drift_t0 0x30
13750#define tsgen_synctime_t1 0x58
13751#define tsgen_offset_t1 0x60
13752#define tsgen_drift_t1 0x68
13753
13754
13755static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13756 int best_val, int best_period)
13757{
13758 struct bnx2x_func_state_params func_params = {NULL};
13759 struct bnx2x_func_set_timesync_params *set_timesync_params =
13760 &func_params.params.set_timesync;
13761
13762
13763 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13764 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13765
13766 func_params.f_obj = &bp->func_obj;
13767 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13768
13769
13770 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13771 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13772 set_timesync_params->add_sub_drift_adjust_value =
13773 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13774 set_timesync_params->drift_adjust_value = best_val;
13775 set_timesync_params->drift_adjust_period = best_period;
13776
13777 return bnx2x_func_state_change(bp, &func_params);
13778}
13779
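/* Adjust the PHC frequency by @ppb parts per billion by programming a
 * drift (value, period) pair through a SET_TIMESYNC ramrod.
 */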
13780static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13781{
13782 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13783 int rc;
13784 int drift_dir = 1;
13785 int val, period, period1, period2, dif, dif1, dif2;
13786 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13787
13788 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13789
13790 if (!netif_running(bp->dev)) {
13791 DP(BNX2X_MSG_PTP,
13792 "PTP adjfreq called while the interface is down\n");
13793 return -ENETDOWN;
13794 }
13795
13796 if (ppb < 0) {
13797 ppb = -ppb;
13798 drift_dir = 0;
13799 }
13800
13801 if (ppb == 0) {
13802 best_val = 1;
13803 best_period = 0x1FFFFFF;
13804 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13805 best_val = 31;
13806 best_period = 1;
13807 } else {
13808		/* Search for the (val, period) pair that best approximates the
13809		 * requested drift; values of val that are multiples of 8 are skipped.
13810		 */
13811 for (val = 0; val <= 31; val++) {
13812 if ((val & 0x7) == 0)
13813 continue;
13814 period1 = val * 1000000 / ppb;
13815 period2 = period1 + 1;
13816 if (period1 != 0)
13817 dif1 = ppb - (val * 1000000 / period1);
13818 else
13819 dif1 = BNX2X_MAX_PHC_DRIFT;
13820 if (dif1 < 0)
13821 dif1 = -dif1;
13822 dif2 = ppb - (val * 1000000 / period2);
13823 if (dif2 < 0)
13824 dif2 = -dif2;
13825 dif = (dif1 < dif2) ? dif1 : dif2;
13826 period = (dif1 < dif2) ? period1 : period2;
13827 if (dif < best_dif) {
13828 best_dif = dif;
13829 best_val = val;
13830 best_period = period;
13831 }
13832 }
13833 }
13834
13835 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13836 best_period);
13837 if (rc) {
13838 BNX2X_ERR("Failed to set drift\n");
13839 return -EFAULT;
13840 }
13841
13842 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13843 best_period);
13844
13845 return 0;
13846}
13847
13848static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13849{
13850 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13851
13852 if (!netif_running(bp->dev)) {
13853 DP(BNX2X_MSG_PTP,
13854 "PTP adjtime called while the interface is down\n");
13855 return -ENETDOWN;
13856 }
13857
13858 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13859
13860 timecounter_adjtime(&bp->timecounter, delta);
13861
13862 return 0;
13863}
13864
13865static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13866{
13867 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13868 u64 ns;
13869
13870 if (!netif_running(bp->dev)) {
13871 DP(BNX2X_MSG_PTP,
13872 "PTP gettime called while the interface is down\n");
13873 return -ENETDOWN;
13874 }
13875
13876 ns = timecounter_read(&bp->timecounter);
13877
13878 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13879
13880 *ts = ns_to_timespec64(ns);
13881
13882 return 0;
13883}
13884
13885static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13886 const struct timespec64 *ts)
13887{
13888 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13889 u64 ns;
13890
13891 if (!netif_running(bp->dev)) {
13892 DP(BNX2X_MSG_PTP,
13893 "PTP settime called while the interface is down\n");
13894 return -ENETDOWN;
13895 }
13896
13897 ns = timespec64_to_ns(ts);
13898
13899 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13900
13901
13902 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13903
13904 return 0;
13905}
13906
13907
13908static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13909 struct ptp_clock_request *rq, int on)
13910{
13911 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13912
13913 BNX2X_ERR("PHC ancillary features are not supported\n");
13914 return -ENOTSUPP;
13915}
13916
13917void bnx2x_register_phc(struct bnx2x *bp)
13918{
13919
13920 bp->ptp_clock_info.owner = THIS_MODULE;
13921 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13922 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT;
13923 bp->ptp_clock_info.n_alarm = 0;
13924 bp->ptp_clock_info.n_ext_ts = 0;
13925 bp->ptp_clock_info.n_per_out = 0;
13926 bp->ptp_clock_info.pps = 0;
13927 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13928 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13929 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13930 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13931 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13932
13933 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13934 if (IS_ERR(bp->ptp_clock)) {
13935 bp->ptp_clock = NULL;
13936 BNX2X_ERR("PTP clock registration failed\n");
13937 }
13938}
13939
13940static int bnx2x_init_one(struct pci_dev *pdev,
13941 const struct pci_device_id *ent)
13942{
13943 struct net_device *dev = NULL;
13944 struct bnx2x *bp;
13945 int rc, max_non_def_sbs;
13946 int rx_count, tx_count, rss_count, doorbell_size;
13947 int max_cos_est;
13948 bool is_vf;
13949 int cnic_cnt;
13950
13951	/* When running in a kdump kernel, delay the probe until the system
13952	 * has been up for at least five seconds.
13953	 */
13954 if (is_kdump_kernel()) {
13955 ktime_t now = ktime_get_boottime();
13956 ktime_t fw_ready_time = ktime_set(5, 0);
13957
13958 if (ktime_before(now, fw_ready_time))
13959 msleep(ktime_ms_delta(fw_ready_time, now));
13960 }
13961
13962	/* An estimated maximum supported CoS number according to the chip
13963	 * version.
13964	 * We will try to roughly estimate the maximum number of CoSes this
13965	 * chip may support in order to minimize the memory allocated for Tx
13966	 * netdev_queue's.  This number will be accurately calculated during
13967	 * the initialization of bp->max_cos, based on the chip version and
13968	 * chip revision, in bnx2x_init_bp().
13969	 */
13970 max_cos_est = set_max_cos_est(ent->driver_data);
13971 if (max_cos_est < 0)
13972 return max_cos_est;
13973 is_vf = set_is_vf(ent->driver_data);
13974 cnic_cnt = is_vf ? 0 : 1;
13975
13976 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13977
13978
13979 max_non_def_sbs += is_vf ? 1 : 0;
13980
13981
13982 rss_count = max_non_def_sbs - cnic_cnt;
13983
13984 if (rss_count < 1)
13985 return -EINVAL;
13986
13987
13988 rx_count = rss_count + cnic_cnt;
13989
13990	/* Maximum number of netdev Tx queues:
13991	 * maximum RSS queues * maximum supported number of CoS + CNIC L2
13992	 */
13993 tx_count = rss_count * max_cos_est + cnic_cnt;
13994
13995
13996 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13997 if (!dev)
13998 return -ENOMEM;
13999
14000 bp = netdev_priv(dev);
14001
14002 bp->flags = 0;
14003 if (is_vf)
14004 bp->flags |= IS_VF_FLAG;
14005
14006 bp->igu_sb_cnt = max_non_def_sbs;
14007 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
14008 bp->msg_enable = debug;
14009 bp->cnic_support = cnic_cnt;
14010 bp->cnic_probe = bnx2x_cnic_probe;
14011
14012 pci_set_drvdata(pdev, dev);
14013
14014 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
14015 if (rc < 0) {
14016 free_netdev(dev);
14017 return rc;
14018 }
14019
14020 BNX2X_DEV_INFO("This is a %s function\n",
14021 IS_PF(bp) ? "physical" : "virtual");
14022 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
14023 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
14024 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
14025 tx_count, rx_count);
14026
14027 rc = bnx2x_init_bp(bp);
14028 if (rc)
14029 goto init_one_exit;
14030
14031	/* Map doorbells here, as we need the real value of bp->max_cos, which
14032	 * is initialized in bnx2x_init_bp(), to determine the number of
14033	 * L2 connections and hence the doorbell BAR size that must be mapped.
14034	 */
14035 if (IS_VF(bp)) {
14036 bp->doorbells = bnx2x_vf_doorbells(bp);
14037 rc = bnx2x_vf_pci_alloc(bp);
14038 if (rc)
14039 goto init_one_freemem;
14040 } else {
14041 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
14042 if (doorbell_size > pci_resource_len(pdev, 2)) {
14043 dev_err(&bp->pdev->dev,
14044 "Cannot map doorbells, bar size too small, aborting\n");
14045 rc = -ENOMEM;
14046 goto init_one_freemem;
14047 }
14048 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
14049 doorbell_size);
14050 }
14051 if (!bp->doorbells) {
14052 dev_err(&bp->pdev->dev,
14053 "Cannot map doorbell space, aborting\n");
14054 rc = -ENOMEM;
14055 goto init_one_freemem;
14056 }
14057
14058 if (IS_VF(bp)) {
14059 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
14060 if (rc)
14061 goto init_one_freemem;
14062
14063#ifdef CONFIG_BNX2X_SRIOV
14064
14065 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
14066 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14067 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14068 }
14069#endif
14070 }
14071
14072
14073 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
14074 if (rc)
14075 goto init_one_freemem;
14076
14077
14078 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
14079 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
14080
14081
14082 if (CHIP_IS_E1x(bp))
14083 bp->flags |= NO_FCOE_FLAG;
14084
14085
14086 bnx2x_set_num_queues(bp);
14087
14088	/* Configure interrupt mode: try to enable MSI-X/MSI if
14089	 * needed.
14090	 */
14091 rc = bnx2x_set_int_mode(bp);
14092 if (rc) {
14093 dev_err(&pdev->dev, "Cannot set interrupts\n");
14094 goto init_one_freemem;
14095 }
14096 BNX2X_DEV_INFO("set interrupts successfully\n");
14097
14098
14099 rc = register_netdev(dev);
14100 if (rc) {
14101 dev_err(&pdev->dev, "Cannot register net device\n");
14102 goto init_one_freemem;
14103 }
14104 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
14105
14106 if (!NO_FCOE(bp)) {
14107
14108 rtnl_lock();
14109 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14110 rtnl_unlock();
14111 }
14112 BNX2X_DEV_INFO(
14113 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
14114 board_info[ent->driver_data].name,
14115 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
14116 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14117 pcie_print_link_status(bp->pdev);
14118
14119 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14120 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14121
14122 return 0;
14123
14124init_one_freemem:
14125 bnx2x_free_mem_bp(bp);
14126
14127init_one_exit:
14128 bnx2x_disable_pcie_error_reporting(bp);
14129
14130 if (bp->regview)
14131 iounmap(bp->regview);
14132
14133 if (IS_PF(bp) && bp->doorbells)
14134 iounmap(bp->doorbells);
14135
14136 free_netdev(dev);
14137
14138 if (atomic_read(&pdev->enable_cnt) == 1)
14139 pci_release_regions(pdev);
14140
14141 pci_disable_device(pdev);
14142
14143 return rc;
14144}
14145
14146static void __bnx2x_remove(struct pci_dev *pdev,
14147 struct net_device *dev,
14148 struct bnx2x *bp,
14149 bool remove_netdev)
14150{
14151
14152 if (!NO_FCOE(bp)) {
14153 rtnl_lock();
14154 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14155 rtnl_unlock();
14156 }
14157
14158#ifdef BCM_DCBNL
14159
14160 bnx2x_dcbnl_update_applist(bp, true);
14161#endif
14162
14163 if (IS_PF(bp) &&
14164 !BP_NOMCP(bp) &&
14165 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14166 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14167
14168
14169 if (remove_netdev) {
14170 unregister_netdev(dev);
14171 } else {
14172 rtnl_lock();
14173 dev_close(dev);
14174 rtnl_unlock();
14175 }
14176
14177 bnx2x_iov_remove_one(bp);
14178
14179
14180 if (IS_PF(bp)) {
14181 bnx2x_set_power_state(bp, PCI_D0);
14182 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14183
14184
14185
14186
14187 bnx2x_reset_endianity(bp);
14188 }
14189
14190
14191 bnx2x_disable_msi(bp);
14192
14193
14194 if (IS_PF(bp))
14195 bnx2x_set_power_state(bp, PCI_D3hot);
14196
14197
14198 cancel_delayed_work_sync(&bp->sp_rtnl_task);
14199
14200
14201 if (IS_VF(bp))
14202 bnx2x_vfpf_release(bp);
14203
14204
14205 if (system_state == SYSTEM_POWER_OFF) {
14206 pci_wake_from_d3(pdev, bp->wol);
14207 pci_set_power_state(pdev, PCI_D3hot);
14208 }
14209
14210 bnx2x_disable_pcie_error_reporting(bp);
14211 if (remove_netdev) {
14212 if (bp->regview)
14213 iounmap(bp->regview);
14214
14215
14216
14217
14218 if (IS_PF(bp)) {
14219 if (bp->doorbells)
14220 iounmap(bp->doorbells);
14221
14222 bnx2x_release_firmware(bp);
14223 } else {
14224 bnx2x_vf_pci_dealloc(bp);
14225 }
14226 bnx2x_free_mem_bp(bp);
14227
14228 free_netdev(dev);
14229
14230 if (atomic_read(&pdev->enable_cnt) == 1)
14231 pci_release_regions(pdev);
14232
14233 pci_disable_device(pdev);
14234 }
14235}
14236
14237static void bnx2x_remove_one(struct pci_dev *pdev)
14238{
14239 struct net_device *dev = pci_get_drvdata(pdev);
14240 struct bnx2x *bp;
14241
14242 if (!dev) {
14243 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14244 return;
14245 }
14246 bp = netdev_priv(dev);
14247
14248 __bnx2x_remove(pdev, dev, bp, true);
14249}
14250
14251static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14252{
14253 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14254
14255 bp->rx_mode = BNX2X_RX_MODE_NONE;
14256
14257 if (CNIC_LOADED(bp))
14258 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14259
14260
14261 bnx2x_tx_disable(bp);
14262
14263 bnx2x_del_all_napi(bp);
14264 if (CNIC_LOADED(bp))
14265 bnx2x_del_all_napi_cnic(bp);
14266 netdev_reset_tc(bp->dev);
14267
14268 del_timer_sync(&bp->timer);
14269 cancel_delayed_work_sync(&bp->sp_task);
14270 cancel_delayed_work_sync(&bp->period_task);
14271
14272 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14273 bp->stats_state = STATS_STATE_DISABLED;
14274 up(&bp->stats_lock);
14275 }
14276
14277 bnx2x_save_statistics(bp);
14278
14279 netif_carrier_off(bp->dev);
14280
14281 return 0;
14282}
14283
14284/**
14285 * bnx2x_io_error_detected - called when a PCI error is detected
14286 * @pdev: Pointer to PCI device
14287 * @state: The current PCI connection state
14288 *
14289 * This function is called after a PCI bus error affecting
14290 * this device has been detected.
14291 */
14292static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
14293 pci_channel_state_t state)
14294{
14295 struct net_device *dev = pci_get_drvdata(pdev);
14296 struct bnx2x *bp = netdev_priv(dev);
14297
14298 rtnl_lock();
14299
14300 BNX2X_ERR("IO error detected\n");
14301
14302 netif_device_detach(dev);
14303
14304 if (state == pci_channel_io_perm_failure) {
14305 rtnl_unlock();
14306 return PCI_ERS_RESULT_DISCONNECT;
14307 }
14308
14309 if (netif_running(dev))
14310 bnx2x_eeh_nic_unload(bp);
14311
14312 bnx2x_prev_path_mark_eeh(bp);
14313
14314 pci_disable_device(pdev);
14315
14316 rtnl_unlock();
14317
14318
14319 return PCI_ERS_RESULT_NEED_RESET;
14320}
14321
14322/**
14323 * bnx2x_io_slot_reset - called after the PCI bus has been reset
14324 * @pdev: Pointer to PCI device
14325 *
14326 * Restart the card from scratch, as if from a cold-boot.
14327 */
14328static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14329{
14330 struct net_device *dev = pci_get_drvdata(pdev);
14331 struct bnx2x *bp = netdev_priv(dev);
14332 int i;
14333
14334 rtnl_lock();
14335 BNX2X_ERR("IO slot reset initializing...\n");
14336 if (pci_enable_device(pdev)) {
14337 dev_err(&pdev->dev,
14338 "Cannot re-enable PCI device after reset\n");
14339 rtnl_unlock();
14340 return PCI_ERS_RESULT_DISCONNECT;
14341 }
14342
14343 pci_set_master(pdev);
14344 pci_restore_state(pdev);
14345 pci_save_state(pdev);
14346
14347 if (netif_running(dev))
14348 bnx2x_set_power_state(bp, PCI_D0);
14349
14350 if (netif_running(dev)) {
14351 BNX2X_ERR("IO slot reset --> driver unload\n");
14352
14353
14354 if (bnx2x_init_shmem(bp)) {
14355 rtnl_unlock();
14356 return PCI_ERS_RESULT_DISCONNECT;
14357 }
14358
14359 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14360 u32 v;
14361
14362 v = SHMEM2_RD(bp,
14363 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14364 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14365 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14366 }
14367 bnx2x_drain_tx_queues(bp);
14368 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14369 bnx2x_netif_stop(bp, 1);
14370 bnx2x_free_irq(bp);
14371
14372
14373 bnx2x_send_unload_done(bp, true);
14374
14375 bp->sp_state = 0;
14376 bp->port.pmf = 0;
14377
14378 bnx2x_prev_unload(bp);
14379
14380
14381
14382
14383 bnx2x_squeeze_objects(bp);
14384 bnx2x_free_skbs(bp);
14385 for_each_rx_queue(bp, i)
14386 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14387 bnx2x_free_fp_mem(bp);
14388 bnx2x_free_mem(bp);
14389
14390 bp->state = BNX2X_STATE_CLOSED;
14391 }
14392
14393 rtnl_unlock();
14394
14395 return PCI_ERS_RESULT_RECOVERED;
14396}
14397
14398/**
14399 * bnx2x_io_resume - called when traffic can start flowing again
14400 * @pdev: Pointer to PCI device
14401 *
14402 * This callback is called when the error recovery driver tells us that
14403 * it is OK to resume normal operation.
14404 */
14405static void bnx2x_io_resume(struct pci_dev *pdev)
14406{
14407 struct net_device *dev = pci_get_drvdata(pdev);
14408 struct bnx2x *bp = netdev_priv(dev);
14409
14410 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14411 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14412 return;
14413 }
14414
14415 rtnl_lock();
14416
14417 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14418 DRV_MSG_SEQ_NUMBER_MASK;
14419
14420 if (netif_running(dev))
14421 bnx2x_nic_load(bp, LOAD_NORMAL);
14422
14423 netif_device_attach(dev);
14424
14425 rtnl_unlock();
14426}
14427
14428static const struct pci_error_handlers bnx2x_err_handler = {
14429 .error_detected = bnx2x_io_error_detected,
14430 .slot_reset = bnx2x_io_slot_reset,
14431 .resume = bnx2x_io_resume,
14432};
14433
14434static void bnx2x_shutdown(struct pci_dev *pdev)
14435{
14436 struct net_device *dev = pci_get_drvdata(pdev);
14437 struct bnx2x *bp;
14438
14439 if (!dev)
14440 return;
14441
14442 bp = netdev_priv(dev);
14443 if (!bp)
14444 return;
14445
14446 rtnl_lock();
14447 netif_device_detach(dev);
14448 rtnl_unlock();
14449
14450	/* Don't remove the netdevice, as there are scenarios which will cause
14451	 * the kernel to hang, e.g., when trying to remove bnx2i while the
14452	 * reboot cycle continues on...
14453	 */
14454 __bnx2x_remove(pdev, dev, bp, false);
14455}
14456
14457static struct pci_driver bnx2x_pci_driver = {
14458 .name = DRV_MODULE_NAME,
14459 .id_table = bnx2x_pci_tbl,
14460 .probe = bnx2x_init_one,
14461 .remove = bnx2x_remove_one,
14462 .suspend = bnx2x_suspend,
14463 .resume = bnx2x_resume,
14464 .err_handler = &bnx2x_err_handler,
14465#ifdef CONFIG_BNX2X_SRIOV
14466 .sriov_configure = bnx2x_sriov_configure,
14467#endif
14468 .shutdown = bnx2x_shutdown,
14469};
14470
14471static int __init bnx2x_init(void)
14472{
14473 int ret;
14474
14475 pr_info("%s", version);
14476
14477 bnx2x_wq = create_singlethread_workqueue("bnx2x");
14478 if (bnx2x_wq == NULL) {
14479 pr_err("Cannot create workqueue\n");
14480 return -ENOMEM;
14481 }
14482 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14483 if (!bnx2x_iov_wq) {
14484 pr_err("Cannot create iov workqueue\n");
14485 destroy_workqueue(bnx2x_wq);
14486 return -ENOMEM;
14487 }
14488
14489 ret = pci_register_driver(&bnx2x_pci_driver);
14490 if (ret) {
14491 pr_err("Cannot register driver\n");
14492 destroy_workqueue(bnx2x_wq);
14493 destroy_workqueue(bnx2x_iov_wq);
14494 }
14495 return ret;
14496}
14497
14498static void __exit bnx2x_cleanup(void)
14499{
14500 struct list_head *pos, *q;
14501
14502 pci_unregister_driver(&bnx2x_pci_driver);
14503
14504 destroy_workqueue(bnx2x_wq);
14505 destroy_workqueue(bnx2x_iov_wq);
14506
14507
14508 list_for_each_safe(pos, q, &bnx2x_prev_list) {
14509 struct bnx2x_prev_path_list *tmp =
14510 list_entry(pos, struct bnx2x_prev_path_list, list);
14511 list_del(pos);
14512 kfree(tmp);
14513 }
14514}
14515
14516void bnx2x_notify_link_changed(struct bnx2x *bp)
14517{
14518 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14519}
14520
14521module_init(bnx2x_init);
14522module_exit(bnx2x_cleanup);
14523
14524
14525
14526
14527
14528
14529
14530
14531
14532
14533static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14534{
14535 unsigned long ramrod_flags = 0;
14536
14537 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
14538 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14539 &bp->iscsi_l2_mac_obj, true,
14540 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
14541}
14542
14543
14544static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14545{
14546 struct eth_spe *spe;
14547 int cxt_index, cxt_offset;
14548
14549#ifdef BNX2X_STOP_ON_ERROR
14550 if (unlikely(bp->panic))
14551 return;
14552#endif
14553
14554 spin_lock_bh(&bp->spq_lock);
14555 BUG_ON(bp->cnic_spq_pending < count);
14556 bp->cnic_spq_pending -= count;
14557
14558 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14559 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14560 & SPE_HDR_CONN_TYPE) >>
14561 SPE_HDR_CONN_TYPE_SHIFT;
14562 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14563 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14564
14565
14566
14567
14568 if (type == ETH_CONNECTION_TYPE) {
14569 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14570 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14571 ILT_PAGE_CIDS;
14572 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14573 (cxt_index * ILT_PAGE_CIDS);
14574 bnx2x_set_ctx_validation(bp,
14575 &bp->context[cxt_index].
14576 vcxt[cxt_offset].eth,
14577 BNX2X_ISCSI_ETH_CID(bp));
14578 }
14579 }
14580
14581		/*
14582		 * Account for the available credit before posting: L2 ramrods
14583		 * consume SPQ (CQ) credit, COMMON ramrods consume EQ credit,
14584		 * and iSCSI/FCoE ramrods are bounded by the CNIC's
14585		 * max_kwqe_pending limit.
14586		 */
14587 if (type == ETH_CONNECTION_TYPE) {
14588 if (!atomic_read(&bp->cq_spq_left))
14589 break;
14590 else
14591 atomic_dec(&bp->cq_spq_left);
14592 } else if (type == NONE_CONNECTION_TYPE) {
14593 if (!atomic_read(&bp->eq_spq_left))
14594 break;
14595 else
14596 atomic_dec(&bp->eq_spq_left);
14597 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14598 (type == FCOE_CONNECTION_TYPE)) {
14599 if (bp->cnic_spq_pending >=
14600 bp->cnic_eth_dev.max_kwqe_pending)
14601 break;
14602 else
14603 bp->cnic_spq_pending++;
14604 } else {
14605 BNX2X_ERR("Unknown SPE type: %d\n", type);
14606 bnx2x_panic();
14607 break;
14608 }
14609
14610 spe = bnx2x_sp_get_next(bp);
14611 *spe = *bp->cnic_kwq_cons;
14612
14613 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14614 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14615
14616 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14617 bp->cnic_kwq_cons = bp->cnic_kwq;
14618 else
14619 bp->cnic_kwq_cons++;
14620 }
14621 bnx2x_sp_prod_update(bp);
14622 spin_unlock_bh(&bp->spq_lock);
14623}
14624
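/* Entry point for cnic to post 16-byte KWQEs; they are staged in the
 * cnic_kwq ring and drained onto the SPQ as credit becomes available.
 */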
14625static int bnx2x_cnic_sp_queue(struct net_device *dev,
14626 struct kwqe_16 *kwqes[], u32 count)
14627{
14628 struct bnx2x *bp = netdev_priv(dev);
14629 int i;
14630
14631#ifdef BNX2X_STOP_ON_ERROR
14632 if (unlikely(bp->panic)) {
14633 BNX2X_ERR("Can't post to SP queue while panic\n");
14634 return -EIO;
14635 }
14636#endif
14637
14638 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14639 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14640 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14641 return -EAGAIN;
14642 }
14643
14644 spin_lock_bh(&bp->spq_lock);
14645
14646 for (i = 0; i < count; i++) {
14647 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14648
14649 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14650 break;
14651
14652 *bp->cnic_kwq_prod = *spe;
14653
14654 bp->cnic_kwq_pending++;
14655
14656 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14657 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14658 spe->data.update_data_addr.hi,
14659 spe->data.update_data_addr.lo,
14660 bp->cnic_kwq_pending);
14661
14662 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14663 bp->cnic_kwq_prod = bp->cnic_kwq;
14664 else
14665 bp->cnic_kwq_prod++;
14666 }
14667
14668 spin_unlock_bh(&bp->spq_lock);
14669
14670 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14671 bnx2x_cnic_sp_post(bp, 0);
14672
14673 return i;
14674}
14675
14676static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14677{
14678 struct cnic_ops *c_ops;
14679 int rc = 0;
14680
14681 mutex_lock(&bp->cnic_mutex);
14682 c_ops = rcu_dereference_protected(bp->cnic_ops,
14683 lockdep_is_held(&bp->cnic_mutex));
14684 if (c_ops)
14685 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14686 mutex_unlock(&bp->cnic_mutex);
14687
14688 return rc;
14689}
14690
14691static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14692{
14693 struct cnic_ops *c_ops;
14694 int rc = 0;
14695
14696 rcu_read_lock();
14697 c_ops = rcu_dereference(bp->cnic_ops);
14698 if (c_ops)
14699 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14700 rcu_read_unlock();
14701
14702 return rc;
14703}
14704
14705
14706
14707
14708int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14709{
14710 struct cnic_ctl_info ctl = {0};
14711
14712 ctl.cmd = cmd;
14713
14714 return bnx2x_cnic_ctl_send(bp, &ctl);
14715}
14716
14717static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14718{
14719 struct cnic_ctl_info ctl = {0};
14720
14721
14722 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14723 ctl.data.comp.cid = cid;
14724 ctl.data.comp.error = err;
14725
14726 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14727 bnx2x_cnic_sp_post(bp, 0);
14728}
14729
14730/* Called with netif_addr_lock_bh() taken.
14731 * Sets an rx_mode config for the iSCSI ETH client.
14732 * Doesn't block.
14733 * Completion should be checked outside.
14734 */
14735static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14736{
14737 unsigned long accept_flags = 0, ramrod_flags = 0;
14738 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14739 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14740
14741 if (start) {
14742
14743
14744
14745
14746
14747
14748 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14749 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14750 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14751 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14752
14753
14754 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14755
14756 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14757 } else
14758
14759 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14760
14761 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14762 set_bit(sched_state, &bp->sp_state);
14763 else {
14764 __set_bit(RAMROD_RX, &ramrod_flags);
14765 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14766 ramrod_flags);
14767 }
14768}
14769
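/* Control hook exposed to the cnic module (ILT writes, SPQ credit returns,
 * iSCSI L2 client start/stop, ULP register/unregister, etc.).
 */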
14770static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14771{
14772 struct bnx2x *bp = netdev_priv(dev);
14773 int rc = 0;
14774
14775 switch (ctl->cmd) {
14776 case DRV_CTL_CTXTBL_WR_CMD: {
14777 u32 index = ctl->data.io.offset;
14778 dma_addr_t addr = ctl->data.io.dma_addr;
14779
14780 bnx2x_ilt_wr(bp, index, addr);
14781 break;
14782 }
14783
14784 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14785 int count = ctl->data.credit.credit_count;
14786
14787 bnx2x_cnic_sp_post(bp, count);
14788 break;
14789 }
14790
14791
14792 case DRV_CTL_START_L2_CMD: {
14793 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14794 unsigned long sp_bits = 0;
14795
14796
14797 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14798 cp->iscsi_l2_client_id,
14799 cp->iscsi_l2_cid, BP_FUNC(bp),
14800 bnx2x_sp(bp, mac_rdata),
14801 bnx2x_sp_mapping(bp, mac_rdata),
14802 BNX2X_FILTER_MAC_PENDING,
14803 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14804 &bp->macs_pool);
14805
14806
14807 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14808 if (rc)
14809 break;
14810
14811 barrier();
14812
14813
14814
14815 netif_addr_lock_bh(dev);
14816 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14817 netif_addr_unlock_bh(dev);
14818
14819
14820 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14821 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14822
14823 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14824 BNX2X_ERR("rx_mode completion timed out!\n");
14825
14826 break;
14827 }
14828
14829
14830 case DRV_CTL_STOP_L2_CMD: {
14831 unsigned long sp_bits = 0;
14832
14833
14834 netif_addr_lock_bh(dev);
14835 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14836 netif_addr_unlock_bh(dev);
14837
14838
14839 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14840 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14841
14842 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14843 BNX2X_ERR("rx_mode completion timed out!\n");
14844
14845 barrier();
14846
14847
14848 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14849 BNX2X_ISCSI_ETH_MAC, true);
14850 break;
14851 }
14852 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14853 int count = ctl->data.credit.credit_count;
14854
14855 smp_mb__before_atomic();
14856 atomic_add(count, &bp->cq_spq_left);
14857 smp_mb__after_atomic();
14858 break;
14859 }
14860 case DRV_CTL_ULP_REGISTER_CMD: {
14861 int ulp_type = ctl->data.register_data.ulp_type;
14862
14863 if (CHIP_IS_E3(bp)) {
14864 int idx = BP_FW_MB_IDX(bp);
14865 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14866 int path = BP_PATH(bp);
14867 int port = BP_PORT(bp);
14868 int i;
14869 u32 scratch_offset;
14870 u32 *host_addr;
14871
14872
14873 if (ulp_type == CNIC_ULP_ISCSI)
14874 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14875 else if (ulp_type == CNIC_ULP_FCOE)
14876 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14877 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14878
14879 if ((ulp_type != CNIC_ULP_FCOE) ||
14880 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14881 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14882 break;
14883
14884
14885 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14886 if (!scratch_offset)
14887 break;
14888 scratch_offset += offsetof(struct glob_ncsi_oem_data,
14889 fcoe_features[path][port]);
14890 host_addr = (u32 *) &(ctl->data.register_data.
14891 fcoe_features);
14892 for (i = 0; i < sizeof(struct fcoe_capabilities);
14893 i += 4)
14894 REG_WR(bp, scratch_offset + i,
14895 *(host_addr + i/4));
14896 }
14897 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14898 break;
14899 }
14900
14901 case DRV_CTL_ULP_UNREGISTER_CMD: {
14902 int ulp_type = ctl->data.ulp_type;
14903
14904 if (CHIP_IS_E3(bp)) {
14905 int idx = BP_FW_MB_IDX(bp);
14906 u32 cap;
14907
14908 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14909 if (ulp_type == CNIC_ULP_ISCSI)
14910 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14911 else if (ulp_type == CNIC_ULP_FCOE)
14912 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14913 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14914 }
14915 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14916 break;
14917 }
14918
14919 default:
14920 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14921 rc = -EINVAL;
14922 }
14923
14924
14925 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14926 switch (ctl->drv_state) {
14927 case DRV_NOP:
14928 break;
14929 case DRV_ACTIVE:
14930 bnx2x_set_os_driver_state(bp,
14931 OS_DRIVER_STATE_ACTIVE);
14932 break;
14933 case DRV_INACTIVE:
14934 bnx2x_set_os_driver_state(bp,
14935 OS_DRIVER_STATE_DISABLED);
14936 break;
14937 case DRV_UNLOADED:
14938 bnx2x_set_os_driver_state(bp,
14939 OS_DRIVER_STATE_NOT_LOADED);
14940 break;
14941 default:
14942 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14943 }
14944 }
14945
14946 return rc;
14947}
14948
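/* Read the FC-NPIV WWPN/WWNN table from NVRAM and copy it into the
 * cnic-provided table for the FCoE driver.
 */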
14949static int bnx2x_get_fc_npiv(struct net_device *dev,
14950 struct cnic_fc_npiv_tbl *cnic_tbl)
14951{
14952 struct bnx2x *bp = netdev_priv(dev);
14953 struct bdn_fc_npiv_tbl *tbl = NULL;
14954 u32 offset, entries;
14955 int rc = -EINVAL;
14956 int i;
14957
14958 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14959 goto out;
14960
14961 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14962
14963 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14964 if (!tbl) {
14965 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14966 goto out;
14967 }
14968
14969 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14970 if (!offset) {
14971 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14972 goto out;
14973 }
14974 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
14975
14976
14977 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
14978 BNX2X_ERR("Failed to read FC-NPIV table\n");
14979 goto out;
14980 }
14981
14982
14983
14984
14985 entries = tbl->fc_npiv_cfg.num_of_npiv;
14986 entries = (__force u32)be32_to_cpu((__force __be32)entries);
14987 tbl->fc_npiv_cfg.num_of_npiv = entries;
14988
14989 if (!tbl->fc_npiv_cfg.num_of_npiv) {
14990 DP(BNX2X_MSG_MCP,
14991 "No FC-NPIV table [valid, simply not present]\n");
14992 goto out;
14993 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
14994 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
14995 tbl->fc_npiv_cfg.num_of_npiv);
14996 goto out;
14997 } else {
14998 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
14999 tbl->fc_npiv_cfg.num_of_npiv);
15000 }
15001
15002
15003 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
15004 for (i = 0; i < cnic_tbl->count; i++) {
15005 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
15006 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
15007 }
15008
15009 rc = 0;
15010out:
15011 kfree(tbl);
15012 return rc;
15013}
15014
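/* Tell cnic which interrupt vectors and status blocks it should use,
 * depending on whether MSI-X is in use.
 */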
15015void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
15016{
15017 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15018
15019 if (bp->flags & USING_MSIX_FLAG) {
15020 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
15021 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
15022 cp->irq_arr[0].vector = bp->msix_table[1].vector;
15023 } else {
15024 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
15025 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
15026 }
15027 if (!CHIP_IS_E1x(bp))
15028 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
15029 else
15030 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
15031
15032 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
15033 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
15034 cp->irq_arr[1].status_blk = bp->def_status_blk;
15035 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
15036 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
15037
15038 cp->num_irq = 2;
15039}
15040
15041void bnx2x_setup_cnic_info(struct bnx2x *bp)
15042{
15043 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15044
15045 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15046 bnx2x_cid_ilt_lines(bp);
15047 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15048 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15049 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15050
15051 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
15052 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
15053 cp->iscsi_l2_cid);
15054
15055 if (NO_ISCSI_OOO(bp))
15056 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
15057}
15058
15059static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
15060 void *data)
15061{
15062 struct bnx2x *bp = netdev_priv(dev);
15063 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15064 int rc;
15065
15066 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
15067
15068 if (ops == NULL) {
15069 BNX2X_ERR("NULL ops received\n");
15070 return -EINVAL;
15071 }
15072
15073 if (!CNIC_SUPPORT(bp)) {
15074 BNX2X_ERR("Can't register CNIC when not supported\n");
15075 return -EOPNOTSUPP;
15076 }
15077
15078 if (!CNIC_LOADED(bp)) {
15079 rc = bnx2x_load_cnic(bp);
15080 if (rc) {
15081 BNX2X_ERR("CNIC-related load failed\n");
15082 return rc;
15083 }
15084 }
15085
15086 bp->cnic_enabled = true;
15087
15088 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
15089 if (!bp->cnic_kwq)
15090 return -ENOMEM;
15091
15092 bp->cnic_kwq_cons = bp->cnic_kwq;
15093 bp->cnic_kwq_prod = bp->cnic_kwq;
15094 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
15095
15096 bp->cnic_spq_pending = 0;
15097 bp->cnic_kwq_pending = 0;
15098
15099 bp->cnic_data = data;
15100
15101 cp->num_irq = 0;
15102 cp->drv_state |= CNIC_DRV_STATE_REGD;
15103 cp->iro_arr = bp->iro_arr;
15104
15105 bnx2x_setup_cnic_irq_info(bp);
15106
15107 rcu_assign_pointer(bp->cnic_ops, ops);
15109
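 /* Schedule a task to update the driver version information */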
15110 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
15111
15112 return 0;
15113}
15114
15115static int bnx2x_unregister_cnic(struct net_device *dev)
15116{
15117 struct bnx2x *bp = netdev_priv(dev);
15118 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15119
15120 mutex_lock(&bp->cnic_mutex);
15121 cp->drv_state = 0;
15122 RCU_INIT_POINTER(bp->cnic_ops, NULL);
15123 mutex_unlock(&bp->cnic_mutex);
15124 synchronize_rcu();
15125 bp->cnic_enabled = false;
15126 kfree(bp->cnic_kwq);
15127 bp->cnic_kwq = NULL;
15128
15129 return 0;
15130}
15131
15132static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
15133{
15134 struct bnx2x *bp = netdev_priv(dev);
15135 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15136
15140
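 /* If both iSCSI and FCoE are disabled, return NULL to indicate to
  * cnic that it should not attach to this device.
  */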
15141 if (NO_ISCSI(bp) && NO_FCOE(bp))
15142 return NULL;
15143
15144 cp->drv_owner = THIS_MODULE;
15145 cp->chip_id = CHIP_ID(bp);
15146 cp->pdev = bp->pdev;
15147 cp->io_base = bp->regview;
15148 cp->io_base2 = bp->doorbells;
15149 cp->max_kwqe_pending = 8;
15150 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
15151 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15152 bnx2x_cid_ilt_lines(bp);
15153 cp->ctx_tbl_len = CNIC_ILT_LINES;
15154 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15155 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
15156 cp->drv_ctl = bnx2x_drv_ctl;
15157 cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
15158 cp->drv_register_cnic = bnx2x_register_cnic;
15159 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
15160 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15161 cp->iscsi_l2_client_id =
15162 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
15163 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15164
15165 if (NO_ISCSI_OOO(bp))
15166 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
15167
15168 if (NO_ISCSI(bp))
15169 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
15170
15171 if (NO_FCOE(bp))
15172 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
15173
15174 BNX2X_DEV_INFO(
15175 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
15176 cp->ctx_blk_size,
15177 cp->ctx_tbl_offset,
15178 cp->ctx_tbl_len,
15179 cp->starting_cid);
15180 return cp;
15181}
15182
15183static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
15184{
15185 struct bnx2x *bp = fp->bp;
15186 u32 offset = BAR_USTRORM_INTMEM;
15187
15188 if (IS_VF(bp))
15189 return bnx2x_vf_ustorm_prods_offset(bp, fp);
15190 else if (!CHIP_IS_E1x(bp))
15191 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
15192 else
15193 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
15194
15195 return offset;
15196}
15202
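/* Set the "pretend" function: subsequent register accesses by this function
 * are treated as if issued by pretend_func_val. Fails for out-of-range
 * values on E1H.
 */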
15203int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
15204{
15205 u32 pretend_reg;
15206
15207 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
15208 return -1;
15209
15210
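 /* Update this function's pretend register */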
15211 pretend_reg = bnx2x_get_pretend_reg(bp);
15212 REG_WR(bp, pretend_reg, pretend_func_val);
15213 REG_RD(bp, pretend_reg);
15214 return 0;
15215}
15216
15217static void bnx2x_ptp_task(struct work_struct *work)
15218{
15219 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
15220 int port = BP_PORT(bp);
15221 u32 val_seq;
15222 u64 timestamp, ns;
15223 struct skb_shared_hwtstamps shhwtstamps;
15224 bool bail = true;
15225 int i;
15229
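 /* The FW may take a while to record the Tx timestamp; poll the
  * sequence-ID register with increasing delays before giving up.
  */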
15230 for (i = 0; i < 10; i++) {
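 /* Read the Tx timestamp sequence-ID register */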
15232 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15233 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
15234 if (val_seq & 0x10000) {
15235 bail = false;
15236 break;
15237 }
15238 msleep(1 << i);
15239 }
15240
15241 if (!bail) {
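 /* Read the recorded Tx timestamp (MSB and LSB registers) */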
15243 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
15244 NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
15245 timestamp <<= 32;
15246 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
15247 NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
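 /* Reset the sequence-ID register to allow a new timestamp to be captured */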
15249 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15250 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15251 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15252
15253 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
15254 shhwtstamps.hwtstamp = ns_to_ktime(ns);
15255 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
15256
15257 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
15258 timestamp, ns);
15259 } else {
15260 DP(BNX2X_MSG_PTP,
15261 "Tx timestamp is not recorded (register read=%u)\n",
15262 val_seq);
15263 bp->eth_stats.ptp_skip_tx_ts++;
15264 }
15265
15266 dev_kfree_skb_any(bp->ptp_tx_skb);
15267 bp->ptp_tx_skb = NULL;
15268}
15269
15270void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
15271{
15272 int port = BP_PORT(bp);
15273 u64 timestamp, ns;
15274
15275 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
15276 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
15277 timestamp <<= 32;
15278 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
15279 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
15281
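 /* Reset the sequence-ID register to allow a new Rx timestamp to be captured */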
15282 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15283 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15284
15285 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15286
15287 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
15288
15289 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
15290 timestamp, ns);
15291}
15293
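/* Read the PHC (PTP hardware clock) free-running counter */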
15294static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
15295{
15296 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
15297 int port = BP_PORT(bp);
15298 u32 wb_data[2];
15299 u64 phc_cycles;
15300
15301 REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
15302 NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
15303 phc_cycles = wb_data[1];
15304 phc_cycles = (phc_cycles << 32) + wb_data[0];
15305
15306 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
15307
15308 return phc_cycles;
15309}
15310
15311static void bnx2x_init_cyclecounter(struct bnx2x *bp)
15312{
15313 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
15314 bp->cyclecounter.read = bnx2x_cyclecounter_read;
15315 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
15316 bp->cyclecounter.shift = 0;
15317 bp->cyclecounter.mult = 1;
15318}
15319
15320static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
15321{
15322 struct bnx2x_func_state_params func_params = {NULL};
15323 struct bnx2x_func_set_timesync_params *set_timesync_params =
15324 &func_params.params.set_timesync;
15326
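 /* Prepare the ramrod flags: wait for completion and allow retries */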
15327 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
15328 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
15329
15330 func_params.f_obj = &bp->func_obj;
15331 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
15333
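 /* Reset the drift adjustment and keep the current offset */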
15334 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
15335 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
15336
15337 return bnx2x_func_state_change(bp, &func_params);
15338}
15339
15340static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
15341{
15342 struct bnx2x_queue_state_params q_params;
15343 int rc, i;
15345
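 /* Build a queue-update ramrod that enables PTP packet handling */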
15346 memset(&q_params, 0, sizeof(q_params));
15347 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
15348 q_params.cmd = BNX2X_Q_CMD_UPDATE;
15349 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
15350 &q_params.params.update.update_flags);
15351 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
15352 &q_params.params.update.update_flags);
15354
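 /* Send the update ramrod to every Ethernet queue of this PF */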
15355 for_each_eth_queue(bp, i) {
15356 struct bnx2x_fastpath *fp = &bp->fp[i];
15358
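 /* Set the appropriate Queue object */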
15359 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
15361
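 /* Update the Queue state */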
15362 rc = bnx2x_queue_state_change(bp, &q_params);
15363 if (rc) {
15364 BNX2X_ERR("Failed to enable PTP packets\n");
15365 return rc;
15366 }
15367 }
15368
15369 return 0;
15370}
15371
15372#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
15373#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
15374#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
15375#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
15376#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
15377#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
15378#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
15379#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
15380#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
15381#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
15382#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
15383#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
15384
15385int bnx2x_configure_ptp_filters(struct bnx2x *bp)
15386{
15387 int port = BP_PORT(bp);
15388 u32 param, rule;
15389 int rc;
15390
15391 if (!bp->hwtstamp_ioctl_called)
15392 return 0;
15393
15394 param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15395 NIG_REG_P0_TLLH_PTP_PARAM_MASK;
15396 rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15397 NIG_REG_P0_TLLH_PTP_RULE_MASK;
15398 switch (bp->tx_type) {
15399 case HWTSTAMP_TX_ON:
15400 bp->flags |= TX_TIMESTAMPING_EN;
15401 REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
15402 REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
15403 break;
15404 case HWTSTAMP_TX_ONESTEP_SYNC:
15405 BNX2X_ERR("One-step timestamping is not supported\n");
15406 return -ERANGE;
15407 }
15408
15409 param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15410 NIG_REG_P0_LLH_PTP_PARAM_MASK;
15411 rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15412 NIG_REG_P0_LLH_PTP_RULE_MASK;
15413 switch (bp->rx_filter) {
15414 case HWTSTAMP_FILTER_NONE:
15415 break;
15416 case HWTSTAMP_FILTER_ALL:
15417 case HWTSTAMP_FILTER_SOME:
15418 case HWTSTAMP_FILTER_NTP_ALL:
15419 bp->rx_filter = HWTSTAMP_FILTER_NONE;
15420 break;
15421 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
15422 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
15423 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
15424 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
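 /* Initialize PTP detection for PTPv1 over UDP (L4) events */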
15426 REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
15427 REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
15428 break;
15429 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
15430 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
15431 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
15432 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
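 /* Initialize PTP detection for PTPv2 over UDP (L4) events */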
15434 REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
15435 REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
15436 break;
15437 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
15438 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
15439 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
15440 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
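 /* Initialize PTP detection for PTPv2 over L2 (Ethernet) events */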
15442 REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
15443 REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
15444
15445 break;
15446 case HWTSTAMP_FILTER_PTP_V2_EVENT:
15447 case HWTSTAMP_FILTER_PTP_V2_SYNC:
15448 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
15449 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
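 /* Initialize PTP detection for PTPv2 events over L2 or UDP */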
15451 REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
15452 REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
15453 break;
15454 }
15456
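 /* Enable PTP packet handling on all the PF queues */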
15457 rc = bnx2x_enable_ptp_packets(bp);
15458 if (rc)
15459 return rc;
15461
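 /* Forward detected PTP Rx packets to the host */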
15462 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15463 NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
15464
15465 return 0;
15466}
15467
15468static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
15469{
15470 struct hwtstamp_config config;
15471 int rc;
15472
15473 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
15474
15475 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
15476 return -EFAULT;
15477
15478 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
15479 config.tx_type, config.rx_filter);
15480
15481 if (config.flags) {
15482 BNX2X_ERR("config.flags is reserved for future use\n");
15483 return -EINVAL;
15484 }
15485
15486 bp->hwtstamp_ioctl_called = 1;
15487 bp->tx_type = config.tx_type;
15488 bp->rx_filter = config.rx_filter;
15489
15490 rc = bnx2x_configure_ptp_filters(bp);
15491 if (rc)
15492 return rc;
15493
15494 config.rx_filter = bp->rx_filter;
15495
15496 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
15497 -EFAULT : 0;
15498}
15500
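/* Configure the HW for PTP */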
15501static int bnx2x_configure_ptp(struct bnx2x *bp)
15502{
15503 int rc, port = BP_PORT(bp);
15504 u32 wb_data[2];
15506
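 /* Reset the PTP event detection rules; the actual rules are programmed
  * later through the hwtstamp ioctl.
  */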
15507 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15508 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
15509 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15510 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
15511 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15512 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
15513 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15514 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
15516
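 /* Don't forward PTP packets to the host until filters are configured */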
15517 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15518 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
15520
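 /* Enable the PTP feature */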
15521 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
15522 NIG_REG_P0_PTP_EN, 0x3F);
15524
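 /* Enable the free-running counter */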
15525 wb_data[0] = 0;
15526 wb_data[1] = 0;
15527 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
15529
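 /* Reset the PHC drift register (the offset register is left unchanged) */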
15530 rc = bnx2x_send_reset_timesync_ramrod(bp);
15531 if (rc) {
15532 BNX2X_ERR("Failed to reset PHC drift register\n");
15533 return -EFAULT;
15534 }
15536
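 /* Clear any stale timestamps from the Rx and Tx buffers */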
15537 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15538 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15539 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15540 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15541
15542 return 0;
15543}
15545
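/* Called during load to initialize PTP-related state */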
15546void bnx2x_init_ptp(struct bnx2x *bp)
15547{
15548 int rc;
15550
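 /* Configure PTP in HW */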
15551 rc = bnx2x_configure_ptp(bp);
15552 if (rc) {
15553 BNX2X_ERR("Stopping PTP initialization\n");
15554 return;
15555 }
15557
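 /* Initialize the work item used for Tx timestamping */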
15558 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
15563
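 /* Initialize the cyclecounter and timecounter only on the first load,
  * so the PHC time is preserved across later unload/load cycles (e.g.
  * an MTU change) while a PTP application is running.
  */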
15564 if (!bp->timecounter_init_done) {
15565 bnx2x_init_cyclecounter(bp);
15566 timecounter_init(&bp->timecounter, &bp->cyclecounter,
15567 ktime_to_ns(ktime_get_real()));
15568 bp->timecounter_init_done = 1;
15569 }
15570
15571 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
15572}
15573