/* bnx2x_main.c: QLogic Everest network driver. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_VERSION \
	__stringify(BCM_5710_FW_MAJOR_VERSION) "." \
	__stringify(BCM_5710_FW_MINOR_VERSION) "." \
	__stringify(BCM_5710_FW_REVISION_VERSION) "." \
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

#define TX_TIMEOUT		(5*HZ)

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr[2];
	u32 umac_val[2];
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

static struct {
	char *name;
} board_info[] = {
	[BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
	[BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
	[BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF] = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
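
/* Helpers that write driver state into the storm processors' internal
 * fast memory (X/C/T/U-storm RAM), one DMA mapping or field at a time.
 */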
static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}
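
/* Indirect register access through the PCI config-space GRC window:
 * write the target address to PCICFG_GRC_ADDRESS, move the data via
 * PCICFG_GRC_DATA, then restore the window afterwards.
 */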
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"
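
/* Dump a DMAE command's decoded source/destination plus its raw words,
 * for debugging.
 */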
static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
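
/* Post the command and poll until the DMAE block writes the completion
 * value back to *comp; bp->dmae_lock serializes users of this channel.
 */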
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	spin_lock_bh(&bp->dmae_lock);

	*comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);

	return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
	XSTORM,
	TSTORM,
	CSTORM,
	USTORM,
	MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
					      enum storms storm,
					      int entry)
{
	switch (storm) {
	case XSTORM:
		return XSTORM_ASSERT_LIST_OFFSET(entry);
	case TSTORM:
		return TSTORM_ASSERT_LIST_OFFSET(entry);
	case CSTORM:
		return CSTORM_ASSERT_LIST_OFFSET(entry);
	case USTORM:
		return USTORM_ASSERT_LIST_OFFSET(entry);
	case MAX_STORMS:
	default:
		BNX2X_ERR("unknown storm\n");
	}
	return -EINVAL;
}
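
/* Walk each storm's assert list and print every entry whose opcode is
 * valid; returns the number of asserts found.
 */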
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	enum storms storm;
	u32 regs[REGS_IN_ENTRY];
	u32 bar_storm_intmem[STORMS_NUM] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};
	u32 storm_assert_list_index[STORMS_NUM] = {
		XSTORM_ASSERT_LIST_INDEX_OFFSET,
		TSTORM_ASSERT_LIST_INDEX_OFFSET,
		CSTORM_ASSERT_LIST_INDEX_OFFSET,
		USTORM_ASSERT_LIST_INDEX_OFFSET
	};
	char *storms_string[STORMS_NUM] = {
		"XSTORM",
		"TSTORM",
		"CSTORM",
		"USTORM"
	};

	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
				   storm_assert_list_index[storm]);
		if (last_idx)
			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
				  storms_string[storm], last_idx);

		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

			for (j = 0; j < REGS_IN_ENTRY; j++)
				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
					  bnx2x_get_assert_list_entry(bp,
								      storm,
								      i) +
					  sizeof(u32) * j);

			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storms_string[storm], i, regs[3],
					  regs[2], regs[1], regs[0]);
				rc++;
			} else {
				break;
			}
		}
	}

	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
		  CHIP_IS_E1(bp) ? "everest1" :
		  CHIP_IS_E1H(bp) ? "everest1h" :
		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
		  BCM_5710_FW_MAJOR_VERSION,
		  BCM_5710_FW_MINOR_VERSION,
		  BCM_5710_FW_REVISION_VERSION);

	return rc;
}

#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
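
/* Dump the MCP (management firmware) trace buffer, which sits just
 * below the shmem area in scratchpad RAM.
 */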
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		      (bp->common.bc_ver & 0xff0000) >> 16,
		      (bp->common.bc_ver & 0xff00) >> 8,
		      (bp->common.bc_ver & 0xff));

	if (pci_channel_offline(bp->pdev)) {
		BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
		return;
	}

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.\n");
		return;
	}

	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}
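
/* Crash-dump helper: print the status blocks, rings and event queue,
 * then the FW trace and idle-check output, for post-mortem debugging.
 */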
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	if (IS_PF(bp) && disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	if (IS_PF(bp)) {
		struct host_sp_status_block *def_sb = bp->def_status_blk;
		int data_size, cstorm_offset;

		BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
			  bp->def_idx, bp->def_att_idx, bp->attn_state,
			  bp->spq_prod_idx, bp->stats_counter);
		BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
			  def_sb->atten_status_block.attn_bits,
			  def_sb->atten_status_block.attn_bits_ack,
			  def_sb->atten_status_block.status_block_id,
			  def_sb->atten_status_block.attn_bits_index);
		BNX2X_ERR(" def (");
		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
			pr_cont("0x%x%s",
				def_sb->sp_sb.index_values[i],
				(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

		data_size = sizeof(struct hc_sp_status_block_data) /
			    sizeof(u32);
		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
		for (i = 0; i < data_size; i++)
			*((u32 *)&sp_sb_data + i) =
				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
				       i * sizeof(u32));

		pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
			sp_sb_data.igu_sb_id,
			sp_sb_data.igu_seg_id,
			sp_sb_data.p_func.pf_id,
			sp_sb_data.p_func.vnic_id,
			sp_sb_data.p_func.vf_id,
			sp_sb_data.p_func.vf_valid,
			sp_sb_data.state);
	}

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		for_each_cos_in_tx_queue(fp, cos) {
			if (!fp->txdata_ptr[cos])
				break;

			txdata = *fp->txdata_ptr[cos];

			if (!txdata.tx_cons_sb)
				continue;

			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");

		if (IS_VF(bp))
			continue;

		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;

		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (IS_PF(bp)) {
		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
		for (i = 0; i < NUM_EQ_DESC; i++) {
			u32 *data = (u32 *)&bp->eq_ring[i].message.data;

			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
				  i, bp->eq_ring[i].message.opcode,
				  bp->eq_ring[i].message.error);
			BNX2X_ERR("data: %x %x %x\n",
				  data[0], data[1], data[2]);
		}
	}

	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			if (!fp->txdata_ptr[cos])
				break;

			if (!txdata->tx_cons_sb)
				continue;

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	if (IS_PF(bp)) {
		int tmp_msg_en = bp->msg_enable;

		bnx2x_fw_dump(bp);
		bp->msg_enable |= NETIF_MSG_HW;
		BNX2X_ERR("Idle check (1st round) ----------\n");
		bnx2x_idle_chk(bp);
		BNX2X_ERR("Idle check (2nd round) ----------\n");
		bnx2x_idle_chk(bp);
		bp->msg_enable = tmp_msg_en;
		bnx2x_mc_assert(bp);
	}

	BNX2X_ERR("end crash dump -----------------\n");
}
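
/* FLR (Function Level Reset) support: poll the PBF and HW usage
 * counters until the function's outstanding work drains, then issue
 * the FW final-cleanup ramrod.
 */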
#define FLR_WAIT_USEC		10000
#define FLR_WAIT_INTERVAL	50
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL)

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_TQ_OCCUPANCY_Q0 :
		    PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_TQ_LINES_FREED_CNT_Q0 :
		    PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_TQ_OCCUPANCY_Q1 :
		    PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_TQ_LINES_FREED_CNT_Q1 :
		    PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_TQ_OCCUPANCY_LB_Q :
		    PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
		    PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_INIT_CRD_Q0 :
		    PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_CREDIT_Q0 :
		    PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
		    PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_INIT_CRD_Q1 :
		    PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_CREDIT_Q1 :
		    PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
		    PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_INIT_CRD_LB_Q :
		    PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_CREDIT_LB_Q :
		    PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
		    PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
		    PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}

	REG_WR(bp, comp_addr, 0);

	return ret;
}

u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    CFC_REG_NUM_LCIDS_INSIDE_PF,
					    "CFC PF usage counter timed out",
					    poll_cnt))
		return 1;

	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    DORQ_REG_PF_USAGE_CNT,
					    "DQ PF usage counter timed out",
					    poll_cnt))
		return 1;

	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
					    "QM PF usage counter timed out",
					    poll_cnt))
		return 1;

	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
					    "Timers VNIC usage counter timed out",
					    poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
					    "Timers NUM_SCANS usage counter timed out",
					    poll_cnt))
		return 1;

	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    dmae_reg_go_c[INIT_DMAE_C(bp)],
					    "DMAE command register timed out",
					    poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	bnx2x_tx_hw_flushed(bp, poll_cnt);

	msleep(100);

	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	bnx2x_hw_enable_status(bp);

	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}
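
/* Enable interrupts through the HC (host coalescing) block, honouring
 * the MSI-X/MSI/INTx mode currently set in bp->flags.
 */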
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	barrier();

	if (!CHIP_IS_E1(bp)) {
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	if ((!msix) || single_msix) {
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
		bnx2x_ack_int(bp);
	}

	val |= IGU_PF_CONF_FUNC_EN;

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}
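
/* Mask interrupts (optionally at the HW level), wait for any running
 * ISRs to finish, and flush the slowpath workqueue.
 */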
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		bnx2x_int_disable(bp);

	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}
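
/* Try to take a HW resource lock without blocking; returns true if the
 * lock was acquired. Used below to elect a recovery leader.
 */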
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
			(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
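
/* Note that an interrupt occurred and kick the slowpath task; the
 * write barrier makes the flag visible before the work runs.
 */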
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	atomic_set(&bp->interrupt_occurred, 1);

	smp_wmb();

	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}
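
/* Handle a ramrod completion CQE on a fastpath ring: translate the FW
 * command into the matching queue state-machine event and complete it.
 */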
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic();
	atomic_inc(&bp->cq_spq_left);
	smp_mb__after_atomic();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		bnx2x_schedule_sp_task(bp);
	}

	return;
}
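
/* Legacy INTx/MSI interrupt handler: decode the status bits, schedule
 * NAPI for each signalled queue, and forward CNIC and slowpath events.
 */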
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
		if (status & mask) {
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops && (bp->cnic_eth_dev.drv_state &
				      CNIC_DRV_STATE_HANDLES_IRQ))
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		bnx2x_schedule_sp_task(bp);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
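
/* Acquire a HW resource lock, retrying for several seconds before
 * giving up with -EAGAIN.
 */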
1988int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1989{
1990 u32 lock_status;
1991 u32 resource_bit = (1 << resource);
1992 int func = BP_FUNC(bp);
1993 u32 hw_lock_control_reg;
1994 int cnt;
1995
1996
1997 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1998 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1999 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2000 return -EINVAL;
2001 }
2002
2003 if (func <= 5) {
2004 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2005 } else {
2006 hw_lock_control_reg =
2007 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2008 }
2009
2010
2011 lock_status = REG_RD(bp, hw_lock_control_reg);
2012 if (lock_status & resource_bit) {
2013 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
2014 lock_status, resource_bit);
2015 return -EEXIST;
2016 }
2017
2018
2019 for (cnt = 0; cnt < 1000; cnt++) {
2020
2021 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2022 lock_status = REG_RD(bp, hw_lock_control_reg);
2023 if (lock_status & resource_bit)
2024 return 0;
2025
2026 usleep_range(5000, 10000);
2027 }
2028 BNX2X_ERR("Timeout\n");
2029 return -EAGAIN;
2030}
2031
2032int bnx2x_release_leader_lock(struct bnx2x *bp)
2033{
2034 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2035}
2036
2037int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2038{
2039 u32 lock_status;
2040 u32 resource_bit = (1 << resource);
2041 int func = BP_FUNC(bp);
2042 u32 hw_lock_control_reg;
2043
2044
2045 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2046 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2047 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2048 return -EINVAL;
2049 }
2050
2051 if (func <= 5) {
2052 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2053 } else {
2054 hw_lock_control_reg =
2055 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2056 }
2057
2058
2059 lock_status = REG_RD(bp, hw_lock_control_reg);
2060 if (!(lock_status & resource_bit)) {
2061 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2062 lock_status, resource_bit);
2063 return -EFAULT;
2064 }
2065
2066 REG_WR(bp, hw_lock_control_reg, resource_bit);
2067 return 0;
2068}
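
/* Illustrative sketch, not part of the original driver: the expected usage
 * pattern for the HW resource lock is acquire -> touch shared registers ->
 * release, checking the acquire result before any access. The helper name
 * below is hypothetical.
 */
static inline int bnx2x_example_with_hw_lock(struct bnx2x *bp)
{
	int rc;

	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	if (rc)
		return rc;

	/* ... access registers shared with other driver functions ... */

	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}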
2069
2070int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2071{
	/* The GPIO should be swapped if swap register is set and active */
2073 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2074 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2075 int gpio_shift = gpio_num +
2076 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2077 u32 gpio_mask = (1 << gpio_shift);
2078 u32 gpio_reg;
2079 int value;
2080
2081 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2082 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2083 return -EINVAL;
2084 }
2085
	/* read GPIO value */
2087 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2088
	/* get the requested pin value */
2090 if ((gpio_reg & gpio_mask) == gpio_mask)
2091 value = 1;
2092 else
2093 value = 0;
2094
2095 return value;
2096}
2097
2098int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2099{
	/* The GPIO should be swapped if swap register is set and active */
2101 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2102 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2103 int gpio_shift = gpio_num +
2104 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2105 u32 gpio_mask = (1 << gpio_shift);
2106 u32 gpio_reg;
2107
2108 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2109 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2110 return -EINVAL;
2111 }
2112
2113 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
2115 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2116
2117 switch (mode) {
2118 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2119 DP(NETIF_MSG_LINK,
2120 "Set GPIO %d (shift %d) -> output low\n",
2121 gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
2123 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2124 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2125 break;
2126
2127 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2128 DP(NETIF_MSG_LINK,
2129 "Set GPIO %d (shift %d) -> output high\n",
2130 gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
2132 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2133 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2134 break;
2135
2136 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2137 DP(NETIF_MSG_LINK,
2138 "Set GPIO %d (shift %d) -> input\n",
2139 gpio_num, gpio_shift);
		/* set FLOAT */
2141 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2142 break;
2143
2144 default:
2145 break;
2146 }
2147
2148 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2149 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2150
2151 return 0;
2152}
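
/* Illustrative sketch (hypothetical helper, not a real call site): reading a
 * pin with bnx2x_get_gpio() and driving it with bnx2x_set_gpio() using the
 * MISC_REGISTERS_GPIO_* mode constants handled above; bnx2x_set_gpio() takes
 * the GPIO HW lock internally.
 */
static inline void bnx2x_example_toggle_gpio(struct bnx2x *bp, u8 port)
{
	if (bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port) == 1)
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
	else
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}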
2153
2154int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2155{
2156 u32 gpio_reg = 0;
2157 int rc = 0;

	/* Any port swapping should be handled by caller. */

2161 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
2163 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2164 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2165 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2166 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2167
2168 switch (mode) {
2169 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2170 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
2172 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2173 break;
2174
2175 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2176 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
2178 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2179 break;
2180
2181 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2182 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
2184 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2185 break;
2186
2187 default:
2188 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2189 rc = -EINVAL;
2190 break;
2191 }
2192
2193 if (rc == 0)
2194 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2195
2196 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2197
2198 return rc;
2199}
2200
2201int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2202{
	/* The GPIO should be swapped if swap register is set and active */
2204 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2205 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2206 int gpio_shift = gpio_num +
2207 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2208 u32 gpio_mask = (1 << gpio_shift);
2209 u32 gpio_reg;
2210
2211 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2212 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2213 return -EINVAL;
2214 }
2215
2216 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
2218 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2219
2220 switch (mode) {
2221 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2222 DP(NETIF_MSG_LINK,
2223 "Clear GPIO INT %d (shift %d) -> output low\n",
2224 gpio_num, gpio_shift);
		/* clear SET and set CLR */
2226 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2227 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2228 break;
2229
2230 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2231 DP(NETIF_MSG_LINK,
2232 "Set GPIO INT %d (shift %d) -> output high\n",
2233 gpio_num, gpio_shift);
		/* clear CLR and set SET */
2235 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2236 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2237 break;
2238
2239 default:
2240 break;
2241 }
2242
2243 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2244 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2245
2246 return 0;
2247}
2248
2249static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2250{
2251 u32 spio_reg;

	/* Only 2 SPIOs are configurable */
2254 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2255 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2256 return -EINVAL;
2257 }
2258
2259 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
2261 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2262
2263 switch (mode) {
2264 case MISC_SPIO_OUTPUT_LOW:
2265 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
		/* clear FLOAT and set CLR */
2267 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2268 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2269 break;
2270
2271 case MISC_SPIO_OUTPUT_HIGH:
2272 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
		/* clear FLOAT and set SET */
2274 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2275 spio_reg |= (spio << MISC_SPIO_SET_POS);
2276 break;
2277
2278 case MISC_SPIO_INPUT_HI_Z:
2279 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
		/* set FLOAT */
2281 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2282 break;
2283
2284 default:
2285 break;
2286 }
2287
2288 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2289 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2290
2291 return 0;
2292}
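
/* Illustrative sketch (hypothetical call site): bnx2x_set_spio() accepts only
 * SPIO4 and SPIO5, so a caller in this file would drive one of those two
 * pins, e.g.:
 *
 *	bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_OUTPUT_HIGH);
 */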
2293
2294void bnx2x_calc_fc_adv(struct bnx2x *bp)
2295{
2296 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2297
2298 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2299 ADVERTISED_Pause);
2300 switch (bp->link_vars.ieee_fc &
2301 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2302 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2303 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2304 ADVERTISED_Pause);
2305 break;
2306
2307 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2308 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2309 break;
2310
2311 default:
2312 break;
2313 }
2314}
2315
2316static void bnx2x_set_requested_fc(struct bnx2x *bp)
2317{
	/* Initialize link parameters structure variables
	 * It is recommended to turn off RX FC for jumbo frames
	 * for better performance
	 */
2322 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2323 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2324 else
2325 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2326}
2327
2328static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2329{
2330 u32 pause_enabled = 0;
2331
2332 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2333 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2334 pause_enabled = 1;
2335
2336 REG_WR(bp, BAR_USTRORM_INTMEM +
2337 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2338 pause_enabled);
2339 }
2340
2341 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2342 pause_enabled ? "enabled" : "disabled");
2343}
2344
2345int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2346{
2347 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2348 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2349
2350 if (!BP_NOMCP(bp)) {
2351 bnx2x_set_requested_fc(bp);
2352 bnx2x_acquire_phy_lock(bp);
2353
2354 if (load_mode == LOAD_DIAG) {
2355 struct link_params *lp = &bp->link_params;
2356 lp->loopback_mode = LOOPBACK_XGXS;
			/* Prefer doing PHY loopback at the highest supported speed */
2358 if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2359 if (lp->speed_cap_mask[cfx_idx] &
2360 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2361 lp->req_line_speed[cfx_idx] =
2362 SPEED_20000;
2363 else if (lp->speed_cap_mask[cfx_idx] &
2364 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2365 lp->req_line_speed[cfx_idx] =
2366 SPEED_10000;
2367 else
2368 lp->req_line_speed[cfx_idx] =
2369 SPEED_1000;
2370 }
2371 }
2372
2373 if (load_mode == LOAD_LOOPBACK_EXT) {
2374 struct link_params *lp = &bp->link_params;
2375 lp->loopback_mode = LOOPBACK_EXT;
2376 }
2377
2378 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2379
2380 bnx2x_release_phy_lock(bp);
2381
2382 bnx2x_init_dropless_fc(bp);
2383
2384 bnx2x_calc_fc_adv(bp);
2385
2386 if (bp->link_vars.link_up) {
2387 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2388 bnx2x_link_report(bp);
2389 }
2390 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2391 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2392 return rc;
2393 }
2394 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2395 return -EINVAL;
2396}
2397
2398void bnx2x_link_set(struct bnx2x *bp)
2399{
2400 if (!BP_NOMCP(bp)) {
2401 bnx2x_acquire_phy_lock(bp);
2402 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2403 bnx2x_release_phy_lock(bp);
2404
2405 bnx2x_init_dropless_fc(bp);
2406
2407 bnx2x_calc_fc_adv(bp);
2408 } else
2409 BNX2X_ERR("Bootcode is missing - can not set link\n");
2410}
2411
2412static void bnx2x__link_reset(struct bnx2x *bp)
2413{
2414 if (!BP_NOMCP(bp)) {
2415 bnx2x_acquire_phy_lock(bp);
2416 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2417 bnx2x_release_phy_lock(bp);
2418 } else
2419 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2420}
2421
2422void bnx2x_force_link_reset(struct bnx2x *bp)
2423{
2424 bnx2x_acquire_phy_lock(bp);
2425 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2426 bnx2x_release_phy_lock(bp);
2427}
2428
2429u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2430{
2431 u8 rc = 0;
2432
2433 if (!BP_NOMCP(bp)) {
2434 bnx2x_acquire_phy_lock(bp);
2435 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2436 is_serdes);
2437 bnx2x_release_phy_lock(bp);
2438 } else
2439 BNX2X_ERR("Bootcode is missing - can not test link\n");
2440
2441 return rc;

/* Calculates the sum of vn_min_rates.
 * It's needed for further normalizing of the min_rates.
 * Returns:
 *   sum of vn_min_rates.
 *     or
 *   0 - if all the min_rates are 0.
 * In the latter case fairness algorithm should be deactivated.
 * If not all min_rates are zero then those that are zeroes will be set to 1.
 */
2453static void bnx2x_calc_vn_min(struct bnx2x *bp,
2454 struct cmng_init_input *input)
2455{
2456 int all_zero = 1;
2457 int vn;
2458
2459 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2460 u32 vn_cfg = bp->mf_config[vn];
2461 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2462 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
2465 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2466 vn_min_rate = 0;
		/* If min rate is zero - set it to 1 */
2468 else if (!vn_min_rate)
2469 vn_min_rate = DEF_MIN_RATE;
2470 else
2471 all_zero = 0;
2472
2473 input->vnic_min_rate[vn] = vn_min_rate;
2474 }
2475
	/* if ETS or all min rates are zeros - disable fairness */
2477 if (BNX2X_IS_ETS_ENABLED(bp)) {
2478 input->flags.cmng_enables &=
2479 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2480 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2481 } else if (all_zero) {
2482 input->flags.cmng_enables &=
2483 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2484 DP(NETIF_MSG_IFUP,
2485 "All MIN values are zeroes fairness will be disabled\n");
2486 } else
2487 input->flags.cmng_enables |=
2488 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2489}
2490
2491static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2492 struct cmng_init_input *input)
2493{
2494 u16 vn_max_rate;
2495 u32 vn_cfg = bp->mf_config[vn];
2496
2497 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2498 vn_max_rate = 0;
2499 else {
2500 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2501
2502 if (IS_MF_PERCENT_BW(bp)) {
			/* maxCfg in percents of linkspeed */
2504 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		} else
			/* maxCfg is absolute in 100Mb units (SD modes) */
2507 vn_max_rate = maxCfg * 100;
2508 }
2509
2510 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2511
2512 input->vnic_max_rate[vn] = vn_max_rate;
2513}
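
/* Worked example (illustrative): with a 10G link (line_speed == 10000 Mbps)
 * and maxCfg == 25, percent-BW mode yields 10000 * 25 / 100 == 2500 Mbps,
 * while the SD modes treat maxCfg as 100Mb units and yield 25 * 100 == 2500
 * Mbps as well; the two interpretations only coincide when the link runs at
 * exactly 10G.
 */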
2514
2515static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2516{
2517 if (CHIP_REV_IS_SLOW(bp))
2518 return CMNG_FNS_NONE;
2519 if (IS_MF(bp))
2520 return CMNG_FNS_MINMAX;
2521
2522 return CMNG_FNS_NONE;
2523}
2524
2525void bnx2x_read_mf_cfg(struct bnx2x *bp)
2526{
2527 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2528
2529 if (BP_NOMCP(bp))
2530 return;
2531
	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */
2543 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2544 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2545
2546 if (func >= E1H_FUNC_MAX)
2547 break;
2548
2549 bp->mf_config[vn] =
2550 MF_CFG_RD(bp, func_mf_config[func].config);
2551 }
2552 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2553 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2554 bp->flags |= MF_FUNC_DIS;
2555 } else {
2556 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2557 bp->flags &= ~MF_FUNC_DIS;
2558 }
2559}
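
/* Worked example (illustrative): in 2-port mode (n == 1), vn 1 on port 0,
 * path 0 maps to absolute function 1 * (2 * 1 + 0) + 0 == 2; in 4-port mode
 * (n == 2) the same vn on port 1 maps to 2 * (2 * 1 + 1) + 0 == 6, matching
 * the abs_func formulas in the comment above.
 */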
2560
2561static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2562{
2563 struct cmng_init_input input;
2564 memset(&input, 0, sizeof(struct cmng_init_input));
2565
2566 input.port_rate = bp->link_vars.line_speed;
2567
2568 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2569 int vn;
2570
		/* read mf conf from shmem */
2572 if (read_cfg)
2573 bnx2x_read_mf_cfg(bp);
2574
		/* vn_weight_sum and enable fairness if not 0 */
2576 bnx2x_calc_vn_min(bp, &input);
2577
		/* calculate and set min-max rate for each vn */
2579 if (bp->port.pmf)
2580 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2581 bnx2x_calc_vn_max(bp, vn, &input);
2582
		/* always enable rate shaping and fairness */
2584 input.flags.cmng_enables |=
2585 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2586
2587 bnx2x_init_cmng(&input, &bp->cmng);
2588 return;
2589 }
2590
	/* rate shaping and fairness are disabled */
2592 DP(NETIF_MSG_IFUP,
2593 "rate shaping and fairness are disabled\n");
2594}
2595
2596static void storm_memset_cmng(struct bnx2x *bp,
2597 struct cmng_init *cmng,
2598 u8 port)
2599{
2600 int vn;
2601 size_t size = sizeof(struct cmng_struct_per_port);
2602
2603 u32 addr = BAR_XSTRORM_INTMEM +
2604 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2605
2606 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2607
2608 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2609 int func = func_by_vn(bp, vn);
2610
2611 addr = BAR_XSTRORM_INTMEM +
2612 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2613 size = sizeof(struct rate_shaping_vars_per_vn);
2614 __storm_memset_struct(bp, addr, size,
2615 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2616
2617 addr = BAR_XSTRORM_INTMEM +
2618 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2619 size = sizeof(struct fairness_vars_per_vn);
2620 __storm_memset_struct(bp, addr, size,
2621 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2622 }
2623}
2624
/* init cmng mode in HW according to local configuration */
2626void bnx2x_set_local_cmng(struct bnx2x *bp)
2627{
2628 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2629
2630 if (cmng_fns != CMNG_FNS_NONE) {
2631 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2632 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2633 } else {
		/* rate shaping and fairness are disabled */
2635 DP(NETIF_MSG_IFUP,
2636 "single function mode without fairness\n");
2637 }
2638}
2639
/* This function is called upon link interrupt */
2641static void bnx2x_link_attn(struct bnx2x *bp)
2642{
	/* Make sure that we are synced with the current statistics */
2644 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2645
2646 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2647
2648 bnx2x_init_dropless_fc(bp);
2649
2650 if (bp->link_vars.link_up) {
2651
2652 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2653 struct host_port_stats *pstats;
2654
2655 pstats = bnx2x_sp(bp, port_stats);
			/* reset old mac stats */
2657 memset(&(pstats->mac_stx[0]), 0,
2658 sizeof(struct mac_stx));
2659 }
2660 if (bp->state == BNX2X_STATE_OPEN)
2661 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2662 }
2663
2664 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2665 bnx2x_set_local_cmng(bp);
2666
2667 __bnx2x_link_report(bp);
2668
2669 if (IS_MF(bp))
2670 bnx2x_link_sync_notify(bp);
2671}
2672
2673void bnx2x__link_status_update(struct bnx2x *bp)
2674{
2675 if (bp->state != BNX2X_STATE_OPEN)
2676 return;
2677
	/* read updated dcb configuration */
2679 if (IS_PF(bp)) {
2680 bnx2x_dcbx_pmf_update(bp);
2681 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2682 if (bp->link_vars.link_up)
2683 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2684 else
2685 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2686
2687 bnx2x_link_report(bp);
2688
2689 } else {
2690 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2691 SUPPORTED_10baseT_Full |
2692 SUPPORTED_100baseT_Half |
2693 SUPPORTED_100baseT_Full |
2694 SUPPORTED_1000baseT_Full |
2695 SUPPORTED_2500baseX_Full |
2696 SUPPORTED_10000baseT_Full |
2697 SUPPORTED_TP |
2698 SUPPORTED_FIBRE |
2699 SUPPORTED_Autoneg |
2700 SUPPORTED_Pause |
2701 SUPPORTED_Asym_Pause);
2702 bp->port.advertising[0] = bp->port.supported[0];
2703
2704 bp->link_params.bp = bp;
2705 bp->link_params.port = BP_PORT(bp);
2706 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2707 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2708 bp->link_params.req_line_speed[0] = SPEED_10000;
2709 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2710 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2711 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2712 bp->link_vars.line_speed = SPEED_10000;
2713 bp->link_vars.link_status =
2714 (LINK_STATUS_LINK_UP |
2715 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2716 bp->link_vars.link_up = 1;
2717 bp->link_vars.duplex = DUPLEX_FULL;
2718 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2719 __bnx2x_link_report(bp);
2720
2721 bnx2x_sample_bulletin(bp);
2722
		/* if bulletin board did not have an update for link status
		 * __bnx2x_link_report will report current status
		 * but it will NOT duplicate report in case of already reported
		 * during sampling bulletin board.
		 */
2728 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2729 }
2730}
2731
2732static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2733 u16 vlan_val, u8 allowed_prio)
2734{
2735 struct bnx2x_func_state_params func_params = {NULL};
2736 struct bnx2x_func_afex_update_params *f_update_params =
2737 &func_params.params.afex_update;
2738
2739 func_params.f_obj = &bp->func_obj;
2740 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;

	/* Fill the ramrod data with provided parameters */
2746 f_update_params->vif_id = vifid;
2747 f_update_params->afex_default_vlan = vlan_val;
2748 f_update_params->allowed_priorities = allowed_prio;
2749
	/* if ramrod cannot be sent, respond to MCP immediately */
2751 if (bnx2x_func_state_change(bp, &func_params) < 0)
2752 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2753
2754 return 0;
2755}
2756
2757static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2758 u16 vif_index, u8 func_bit_map)
2759{
2760 struct bnx2x_func_state_params func_params = {NULL};
2761 struct bnx2x_func_afex_viflists_params *update_params =
2762 &func_params.params.afex_viflists;
2763 int rc;
2764 u32 drv_msg_code;
2765
2766
2767 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2768 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2769 cmd_type);
2770
2771 func_params.f_obj = &bp->func_obj;
2772 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2773
	/* set parameters according to cmd_type */
2775 update_params->afex_vif_list_command = cmd_type;
2776 update_params->vif_list_index = vif_index;
2777 update_params->func_bit_map =
2778 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2779 update_params->func_to_clear = 0;
2780 drv_msg_code =
2781 (cmd_type == VIF_LIST_RULE_GET) ?
2782 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2783 DRV_MSG_CODE_AFEX_LISTSET_ACK;

	/* if ramrod cannot be sent, respond to MCP immediately for
	 * vif_list_rule_get request
	 */
2788 rc = bnx2x_func_state_change(bp, &func_params);
2789 if (rc < 0)
2790 bnx2x_fw_command(bp, drv_msg_code, 0);
2791
2792 return 0;
2793}
2794
2795static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2796{
2797 struct afex_stats afex_stats;
2798 u32 func = BP_ABS_FUNC(bp);
2799 u32 mf_config;
2800 u16 vlan_val;
2801 u32 vlan_prio;
2802 u16 vif_id;
2803 u8 allowed_prio;
2804 u8 vlan_mode;
2805 u32 addr_to_write, vifid, addrs, stats_type, i;
2806
2807 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2808 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2809 DP(BNX2X_MSG_MCP,
2810 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2811 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2812 }
2813
2814 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2815 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2816 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2817 DP(BNX2X_MSG_MCP,
2818 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2819 vifid, addrs);
2820 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2821 addrs);
2822 }
2823
2824 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2825 addr_to_write = SHMEM2_RD(bp,
2826 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2827 stats_type = SHMEM2_RD(bp,
2828 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2829
2830 DP(BNX2X_MSG_MCP,
2831 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2832 addr_to_write);
2833
2834 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2835
		/* write response to scratchpad, for MCP */
2837 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2838 REG_WR(bp, addr_to_write + i*sizeof(u32),
2839 *(((u32 *)(&afex_stats))+i));
2840
		/* send ack message to MCP */
2842 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2843 }
2844
2845 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2846 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2847 bp->mf_config[BP_VN(bp)] = mf_config;
2848 DP(BNX2X_MSG_MCP,
2849 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2850 mf_config);
2851
		/* if VIF_SET is "enabled" */
2853 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
			/* set rate limit directly to internal RAM */
2855 struct cmng_init_input cmng_input;
2856 struct rate_shaping_vars_per_vn m_rs_vn;
2857 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2858 u32 addr = BAR_XSTRORM_INTMEM +
2859 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2860
2861 bp->mf_config[BP_VN(bp)] = mf_config;
2862
2863 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2864 m_rs_vn.vn_counter.rate =
2865 cmng_input.vnic_max_rate[BP_VN(bp)];
2866 m_rs_vn.vn_counter.quota =
2867 (m_rs_vn.vn_counter.rate *
2868 RS_PERIODIC_TIMEOUT_USEC) / 8;
2869
2870 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2871
			/* read relevant values from mf_cfg struct in shmem */
2873 vif_id =
2874 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2875 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2876 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2877 vlan_val =
2878 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2879 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2880 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2881 vlan_prio = (mf_config &
2882 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2883 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2884 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2885 vlan_mode =
2886 (MF_CFG_RD(bp,
2887 func_mf_config[func].afex_config) &
2888 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2889 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2890 allowed_prio =
2891 (MF_CFG_RD(bp,
2892 func_mf_config[func].afex_config) &
2893 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2894 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2895
2896
2897 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2898 allowed_prio))
2899 return;
2900
2901 bp->afex_def_vlan_tag = vlan_val;
2902 bp->afex_vlan_mode = vlan_mode;
2903 } else {
			/* notify link down because BP->flags is disabled */
2905 bnx2x_link_report(bp);
2906
			/* send INVALID VIF ramrod to FW */
2908 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2909
			/* Reset the default afex VLAN */
2911 bp->afex_def_vlan_tag = -1;
2912 }
2913 }
2914}
2915
2916static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2917{
2918 struct bnx2x_func_switch_update_params *switch_update_params;
2919 struct bnx2x_func_state_params func_params;
2920
2921 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2922 switch_update_params = &func_params.params.switch_update;
2923 func_params.f_obj = &bp->func_obj;
2924 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2925
	/* Prepare parameters for function state transitions */
2927 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2928 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2929
2930 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2931 int func = BP_ABS_FUNC(bp);
2932 u32 val;
2933
		/* Re-learn the S-tag from shmem */
2935 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2936 FUNC_MF_CFG_E1HOV_TAG_MASK;
2937 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2938 bp->mf_ov = val;
2939 } else {
2940 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2941 goto fail;
2942 }
2943
		/* Configure new S-tag in LLH */
2945 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2946 bp->mf_ov);
2947
2948
2949 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2950 &switch_update_params->changes);
2951 switch_update_params->vlan = bp->mf_ov;
2952
2953 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2954 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2955 bp->mf_ov);
2956 goto fail;
2957 } else {
2958 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2959 bp->mf_ov);
2960 }
2961 } else {
2962 goto fail;
2963 }
2964
2965 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2966 return;
2967fail:
2968 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2969}
2970
2971static void bnx2x_pmf_update(struct bnx2x *bp)
2972{
2973 int port = BP_PORT(bp);
2974 u32 val;
2975
2976 bp->port.pmf = 1;
2977 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2978
	/*
	 * We need the mb() to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
	 */
2983 smp_mb();
2984
	/* queue a periodic task */
2986 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2987
2988 bnx2x_dcbx_pmf_update(bp);
2989
	/* enable nig attention */
2991 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2992 if (bp->common.int_block == INT_BLOCK_HC) {
2993 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2994 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2995 } else if (!CHIP_IS_E1x(bp)) {
2996 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2997 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2998 }
2999
3000 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3001}
3002
/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
3012u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3013{
3014 int mb_idx = BP_FW_MB_IDX(bp);
3015 u32 seq;
3016 u32 rc = 0;
3017 u32 cnt = 1;
3018 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3019
3020 mutex_lock(&bp->fw_mb_mutex);
3021 seq = ++bp->fw_seq;
3022 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3023 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3024
3025 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3026 (command | seq), param);
3027
3028 do {
		/* let the FW do its magic ... */
3030 msleep(delay);
3031
3032 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
3035 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3036
3037 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3038 cnt*delay, rc, seq);
3039
	/* is this a reply to our command? */
3041 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3042 rc &= FW_MSG_CODE_MASK;
3043 else {
		/* FW BUG! */
3045 BNX2X_ERR("FW failed to respond!\n");
3046 bnx2x_fw_dump(bp);
3047 rc = 0;
3048 }
3049 mutex_unlock(&bp->fw_mb_mutex);
3050
3051 return rc;
3052}
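
/* Illustrative sketch (hypothetical helper): callers need no lock of their
 * own - bnx2x_fw_command() serializes on fw_mb_mutex internally and returns
 * the masked FW response code, or 0 if the MCP never answered.
 */
static inline bool bnx2x_example_mcp_responded(struct bnx2x *bp, u32 cmd)
{
	u32 resp = bnx2x_fw_command(bp, cmd, 0);

	/* 0 means the sequence number never matched within ~5 seconds */
	return resp != 0;
}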
3053
3054static void storm_memset_func_cfg(struct bnx2x *bp,
3055 struct tstorm_eth_function_common_config *tcfg,
3056 u16 abs_fid)
3057{
3058 size_t size = sizeof(struct tstorm_eth_function_common_config);
3059
3060 u32 addr = BAR_TSTRORM_INTMEM +
3061 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3062
3063 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3064}
3065
3066void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3067{
3068 if (CHIP_IS_E1x(bp)) {
3069 struct tstorm_eth_function_common_config tcfg = {0};
3070
3071 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3072 }
3073
3074
3075 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3076 storm_memset_func_en(bp, p->func_id, 1);
3077
3078
3079 if (p->spq_active) {
3080 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3081 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3082 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3083 }
3084}
3085
/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */
3095static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3096 struct bnx2x_fastpath *fp,
3097 bool zero_stats)
3098{
3099 unsigned long flags = 0;

	/* PF driver will always initialize the Queue to an ACTIVE state */
3102 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3103
	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
	 */

3109 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3110 if (zero_stats)
3111 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3112
3113 if (bp->flags & TX_SWITCHING)
3114 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3115
3116 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3117 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3118
3119#ifdef BNX2X_STOP_ON_ERROR
3120 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3121#endif
3122
3123 return flags;
3124}
3125
3126static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3127 struct bnx2x_fastpath *fp,
3128 bool leading)
3129{
3130 unsigned long flags = 0;
3131
3132
3133 if (IS_MF_SD(bp))
3134 __set_bit(BNX2X_Q_FLG_OV, &flags);
3135
3136 if (IS_FCOE_FP(fp)) {
3137 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
		/* For FCoE - force usage of default priority (for afex) */
3139 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3140 }
3141
3142 if (fp->mode != TPA_MODE_DISABLED) {
3143 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3144 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3145 if (fp->mode == TPA_MODE_GRO)
3146 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3147 }
3148
3149 if (leading) {
3150 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3151 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3152 }
3153
	/* Always set HW VLAN stripping */
3155 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3156
	/* configure silent vlan removal */
3158 if (IS_MF_AFEX(bp))
3159 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3160
3161 return flags | bnx2x_get_common_flags(bp, fp, true);
3162}
3163
3164static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3165 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3166 u8 cos)
3167{
3168 gen_init->stat_id = bnx2x_stats_id(fp);
3169 gen_init->spcl_id = fp->cl_id;
3170
	/* Always use mini-jumbo MTU for FCoE L2 ring */
3172 if (IS_FCOE_FP(fp))
3173 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3174 else
3175 gen_init->mtu = bp->dev->mtu;
3176
3177 gen_init->cos = cos;
3178
3179 gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3180}
3181
3182static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3183 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3184 struct bnx2x_rxq_setup_params *rxq_init)
3185{
3186 u8 max_sge = 0;
3187 u16 sge_sz = 0;
3188 u16 tpa_agg_size = 0;
3189
3190 if (fp->mode != TPA_MODE_DISABLED) {
3191 pause->sge_th_lo = SGE_TH_LO(bp);
3192 pause->sge_th_hi = SGE_TH_HI(bp);
3193
		/* validate SGE ring has enough to cross high threshold */
3195 WARN_ON(bp->dropless_fc &&
3196 pause->sge_th_hi + FW_PREFETCH_CNT >
3197 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3198
3199 tpa_agg_size = TPA_AGG_SIZE;
3200 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3201 SGE_PAGE_SHIFT;
3202 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3203 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3204 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3205 }
3206
	/* pause - not for e1 */
3208 if (!CHIP_IS_E1(bp)) {
3209 pause->bd_th_lo = BD_TH_LO(bp);
3210 pause->bd_th_hi = BD_TH_HI(bp);
3211
3212 pause->rcq_th_lo = RCQ_TH_LO(bp);
3213 pause->rcq_th_hi = RCQ_TH_HI(bp);
3214
		/* validate rings have enough entries to cross
		 * high thresholds
		 */
3218 WARN_ON(bp->dropless_fc &&
3219 pause->bd_th_hi + FW_PREFETCH_CNT >
3220 bp->rx_ring_size);
3221 WARN_ON(bp->dropless_fc &&
3222 pause->rcq_th_hi + FW_PREFETCH_CNT >
3223 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3224
3225 pause->pri_map = 1;
3226 }
3227
	/* rxq setup */
3229 rxq_init->dscr_map = fp->rx_desc_mapping;
3230 rxq_init->sge_map = fp->rx_sge_mapping;
3231 rxq_init->rcq_map = fp->rx_comp_mapping;
3232 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3233
	/* This should be a maximum number of data bytes that may be
	 * placed on the BD (not including paddings).
	 */
3237 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3238 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3239
3240 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3241 rxq_init->tpa_agg_sz = tpa_agg_size;
3242 rxq_init->sge_buf_sz = sge_sz;
3243 rxq_init->max_sges_pkt = max_sge;
3244 rxq_init->rss_engine_id = BP_FUNC(bp);
3245 rxq_init->mcast_engine_id = BP_FUNC(bp);
3246
	/* Maximum number of simultaneous TPA aggregations for this Queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */
3252 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3253
3254 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3255 rxq_init->fw_sb_id = fp->fw_sb_id;
3256
3257 if (IS_FCOE_FP(fp))
3258 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3259 else
3260 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3261
	/* configure silent vlan removal
	 * if multi function mode is afex, then mask default vlan
	 */
3264 if (IS_MF_AFEX(bp)) {
3265 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3266 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3267 }
3268}
3269
3270static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3271 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3272 u8 cos)
3273{
3274 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3275 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3276 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3277 txq_init->fw_sb_id = fp->fw_sb_id;
3278
	/*
	 * set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
3283 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3284
3285 if (IS_FCOE_FP(fp)) {
3286 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3287 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3288 }
3289}
3290
3291static void bnx2x_pf_init(struct bnx2x *bp)
3292{
3293 struct bnx2x_func_init_params func_init = {0};
3294 struct event_ring_data eq_data = { {0} };
3295
3296 if (!CHIP_IS_E1x(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
3299 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3300 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3301 (CHIP_MODE_IS_4_PORT(bp) ?
3302 BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* VF */
3304 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3305 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3306 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3307 (CHIP_MODE_IS_4_PORT(bp) ?
3308 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3309 }
3310
3311 func_init.spq_active = true;
3312 func_init.pf_id = BP_FUNC(bp);
3313 func_init.func_id = BP_FUNC(bp);
3314 func_init.spq_map = bp->spq_mapping;
3315 func_init.spq_prod = bp->spq_prod_idx;
3316
3317 bnx2x_func_init(bp, &func_init);
3318
3319 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3320
	/* Congestion management values depend on the link rate.
	 * There is no active link so initial link rate is set to 10 Gbps.
	 * When the link comes up the congestion management values are
	 * re-calculated according to the actual link rate.
	 */
3327 bp->link_vars.line_speed = SPEED_10000;
3328 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3329
	/* Only the PMF sets the HW */
3331 if (bp->port.pmf)
3332 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3333
	/* init Event Queue - PCI bus guarantees correct endianity */
3335 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3336 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3337 eq_data.producer = bp->eq_prod;
3338 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3339 eq_data.sb_id = DEF_SB_ID;
3340 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3341}
3342
3343static void bnx2x_e1h_disable(struct bnx2x *bp)
3344{
3345 int port = BP_PORT(bp);
3346
3347 bnx2x_tx_disable(bp);
3348
3349 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3350}
3351
3352static void bnx2x_e1h_enable(struct bnx2x *bp)
3353{
3354 int port = BP_PORT(bp);
3355
3356 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3357 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3358
	/* Tx queue should be only re-enabled */
3360 netif_tx_wake_all_queues(bp->dev);
3361
	/* Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
3366}
3367
3368#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3369
3370static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3371{
3372 struct eth_stats_info *ether_stat =
3373 &bp->slowpath->drv_info_to_mcp.ether_stat;
3374 struct bnx2x_vlan_mac_obj *mac_obj =
3375 &bp->sp_objs->mac_obj;
3376 int i;
3377
3378 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3379 ETH_STAT_INFO_VERSION_LEN);
3380
	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
	 * mac_local field in ether_stat struct. The base address is offset by 2
	 * bytes to account for the field being 8 bytes but a mac address is
	 * only 6 bytes. Likewise, the stride for the get_n_elements function is
	 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
	 * allocated by the ether_stat struct, so the macs will land in their
	 * proper positions.
	 */
3389 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3390 memset(ether_stat->mac_local + i, 0,
3391 sizeof(ether_stat->mac_local[0]));
3392 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3393 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3394 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3395 ETH_ALEN);
3396 ether_stat->mtu_size = bp->dev->mtu;
3397 if (bp->dev->features & NETIF_F_RXCSUM)
3398 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3399 if (bp->dev->features & NETIF_F_TSO)
3400 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3401 ether_stat->feature_flags |= bp->common.boot_mode;
3402
3403 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3404
3405 ether_stat->txq_size = bp->tx_ring_size;
3406 ether_stat->rxq_size = bp->rx_ring_size;
3407
3408#ifdef CONFIG_BNX2X_SRIOV
3409 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3410#endif
3411}
3412
3413static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3414{
3415 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3416 struct fcoe_stats_info *fcoe_stat =
3417 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3418
3419 if (!CNIC_LOADED(bp))
3420 return;
3421
3422 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3423
3424 fcoe_stat->qos_priority =
3425 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3426
	/* insert FCoE stats from ramrod response */
3428 if (!NO_FCOE(bp)) {
3429 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3430 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3431 tstorm_queue_statistics;
3432
3433 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3434 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3435 xstorm_queue_statistics;
3436
3437 struct fcoe_statistics_params *fw_fcoe_stat =
3438 &bp->fw_stats_data->fcoe;
3439
3440 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3441 fcoe_stat->rx_bytes_lo,
3442 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3443
3444 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3445 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3446 fcoe_stat->rx_bytes_lo,
3447 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3448
3449 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3450 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3451 fcoe_stat->rx_bytes_lo,
3452 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3453
3454 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3455 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3456 fcoe_stat->rx_bytes_lo,
3457 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3458
3459 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3460 fcoe_stat->rx_frames_lo,
3461 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3462
3463 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3464 fcoe_stat->rx_frames_lo,
3465 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3466
3467 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3468 fcoe_stat->rx_frames_lo,
3469 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3470
3471 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3472 fcoe_stat->rx_frames_lo,
3473 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3474
3475 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3476 fcoe_stat->tx_bytes_lo,
3477 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3478
3479 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3480 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3481 fcoe_stat->tx_bytes_lo,
3482 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3483
3484 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3485 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3486 fcoe_stat->tx_bytes_lo,
3487 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3488
3489 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3490 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3491 fcoe_stat->tx_bytes_lo,
3492 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3493
3494 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3495 fcoe_stat->tx_frames_lo,
3496 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3497
3498 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3499 fcoe_stat->tx_frames_lo,
3500 fcoe_q_xstorm_stats->ucast_pkts_sent);
3501
3502 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3503 fcoe_stat->tx_frames_lo,
3504 fcoe_q_xstorm_stats->bcast_pkts_sent);
3505
3506 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3507 fcoe_stat->tx_frames_lo,
3508 fcoe_q_xstorm_stats->mcast_pkts_sent);
3509 }
3510
	/* ask L5 driver to add data to the struct */
3512 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3513}
3514
3515static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3516{
3517 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3518 struct iscsi_stats_info *iscsi_stat =
3519 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3520
3521 if (!CNIC_LOADED(bp))
3522 return;
3523
3524 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3525 ETH_ALEN);
3526
3527 iscsi_stat->qos_priority =
3528 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3529
	/* ask L5 driver to add data to the struct */
3531 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3532}
3533
/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify other functions about the change
 */
3539static void bnx2x_config_mf_bw(struct bnx2x *bp)
3540{
	/* Workaround for MFW bug.
	 * MFW is not supposed to generate BW attention in
	 * single function mode.
	 */
3545 if (!IS_MF(bp)) {
3546 DP(BNX2X_MSG_MCP,
3547 "Ignoring MF BW config in single function mode\n");
3548 return;
3549 }
3550
3551 if (bp->link_vars.link_up) {
3552 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3553 bnx2x_link_sync_notify(bp);
3554 }
3555 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3556}
3557
3558static void bnx2x_set_mf_bw(struct bnx2x *bp)
3559{
3560 bnx2x_config_mf_bw(bp);
3561 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3562}
3563
3564static void bnx2x_handle_eee_event(struct bnx2x *bp)
3565{
3566 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3567 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3568}
3569
3570#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3571#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3572
3573static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3574{
3575 enum drv_info_opcode op_code;
3576 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3577 bool release = false;
3578 int wait;
3579
	/* if drv_info version supported by MFW doesn't match - send NACK */
3581 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3582 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3583 return;
3584 }
3585
3586 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3587 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3588
	/* Must prevent other flows from accessing drv_info simultaneously */
3590 mutex_lock(&bp->drv_info_mutex);
3591
3592 memset(&bp->slowpath->drv_info_to_mcp, 0,
3593 sizeof(union drv_info_to_mcp));
3594
3595 switch (op_code) {
3596 case ETH_STATS_OPCODE:
3597 bnx2x_drv_info_ether_stat(bp);
3598 break;
3599 case FCOE_STATS_OPCODE:
3600 bnx2x_drv_info_fcoe_stat(bp);
3601 break;
3602 case ISCSI_STATS_OPCODE:
3603 bnx2x_drv_info_iscsi_stat(bp);
3604 break;
3605 default:
		/* if op code isn't supported - send NACK */
3607 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3608 goto out;
3609 }
3610
	/* if we got drv_info attn from MFW then these fields are defined in
	 * shmem2 for sure
	 */
3614 SHMEM2_WR(bp, drv_info_host_addr_lo,
3615 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3616 SHMEM2_WR(bp, drv_info_host_addr_hi,
3617 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3618
3619 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3620
	/* Management sets a READ_DONE bit in mfw_drv_indication once it has
	 * copied the data; poll for it and clear it before the drv_info
	 * buffer may be reused.
	 */
3625 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3626 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3627 } else if (!bp->drv_info_mng_owner) {
3628 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3629
3630 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3631 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3632
			/* Management is done; need to clear indication */
3634 if (indication & bit) {
3635 SHMEM2_WR(bp, mfw_drv_indication,
3636 indication & ~bit);
3637 release = true;
3638 break;
3639 }
3640
3641 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3642 }
3643 }
3644 if (!release) {
3645 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3646 bp->drv_info_mng_owner = true;
3647 }
3648
3649out:
3650 mutex_unlock(&bp->drv_info_mutex);
3651}
3652
3653static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3654{
3655 u8 vals[4];
3656 int i = 0;
3657
3658 if (bnx2x_format) {
3659 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3660 &vals[0], &vals[1], &vals[2], &vals[3]);
3661 if (i > 0)
3662 vals[0] -= '0';
3663 } else {
3664 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3665 &vals[0], &vals[1], &vals[2], &vals[3]);
3666 }
3667
3668 while (i < 4)
3669 vals[i++] = 0;
3670
3671 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3672}
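
/* Worked example (illustrative): a bnx2x-format string such as "1.712.30-0"
 * parses as vals[] = { '7' - '0', 12, 30, 0 } (the trailing "-0" stops the
 * last conversion and unparsed slots are zeroed), packing into
 * (7 << 24) | (12 << 16) | (30 << 8) | 0 == 0x070c1e00.
 */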
3673
3674void bnx2x_update_mng_version(struct bnx2x *bp)
3675{
3676 u32 iscsiver = DRV_VER_NOT_LOADED;
3677 u32 fcoever = DRV_VER_NOT_LOADED;
3678 u32 ethver = DRV_VER_NOT_LOADED;
3679 int idx = BP_FW_MB_IDX(bp);
3680 u8 *version;
3681
3682 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3683 return;
3684
3685 mutex_lock(&bp->drv_info_mutex);
3686
3687 if (bp->drv_info_mng_owner)
3688 goto out;
3689
3690 if (bp->state != BNX2X_STATE_OPEN)
3691 goto out;
3692
	/* Parse ethernet driver version */
3694 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3695 if (!CNIC_LOADED(bp))
3696 goto out;
3697
	/* Try getting storage driver version via cnic */
3699 memset(&bp->slowpath->drv_info_to_mcp, 0,
3700 sizeof(union drv_info_to_mcp));
3701 bnx2x_drv_info_iscsi_stat(bp);
3702 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3703 iscsiver = bnx2x_update_mng_version_utility(version, false);
3704
3705 memset(&bp->slowpath->drv_info_to_mcp, 0,
3706 sizeof(union drv_info_to_mcp));
3707 bnx2x_drv_info_fcoe_stat(bp);
3708 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3709 fcoever = bnx2x_update_mng_version_utility(version, false);
3710
3711out:
3712 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3713 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3714 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3715
3716 mutex_unlock(&bp->drv_info_mutex);
3717
3718 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3719 ethver, iscsiver, fcoever);
3720}
3721
3722void bnx2x_update_mfw_dump(struct bnx2x *bp)
3723{
3724 u32 drv_ver;
3725 u32 valid_dump;
3726
3727 if (!SHMEM2_HAS(bp, drv_info))
3728 return;
3729
	/* Update Driver load time, possibly broken in y2038 */
3731 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3732
3733 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3734 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3735
3736 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3737
	/* Check & notify On-Chip dump. */
3739 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3740
3741 if (valid_dump & FIRST_DUMP_VALID)
3742 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3743
3744 if (valid_dump & SECOND_DUMP_VALID)
3745 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3746}
3747
3748static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3749{
3750 u32 cmd_ok, cmd_fail;
3751
	/* sanity */
3753 if (event & DRV_STATUS_DCC_EVENT_MASK &&
3754 event & DRV_STATUS_OEM_EVENT_MASK) {
3755 BNX2X_ERR("Received simultaneous events %08x\n", event);
3756 return;
3757 }
3758
3759 if (event & DRV_STATUS_DCC_EVENT_MASK) {
3760 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3761 cmd_ok = DRV_MSG_CODE_DCC_OK;
3762 } else {
3763 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3764 cmd_ok = DRV_MSG_CODE_OEM_OK;
3765 }
3766
3767 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3768
3769 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3770 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
		/* This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
3775 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3776 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3777 bp->flags |= MF_FUNC_DIS;
3778
3779 bnx2x_e1h_disable(bp);
3780 } else {
3781 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3782 bp->flags &= ~MF_FUNC_DIS;
3783
3784 bnx2x_e1h_enable(bp);
3785 }
3786 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3787 DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3788 }
3789
3790 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3791 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3792 bnx2x_config_mf_bw(bp);
3793 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3794 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3795 }
3796
	/* Report results to MCP */
3798 if (event)
3799 bnx2x_fw_command(bp, cmd_fail, 0);
3800 else
3801 bnx2x_fw_command(bp, cmd_ok, 0);
3802}

/* the slow path queue is odd since completions arrive on the fastpath ring */
3805static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3806{
3807 struct eth_spe *next_spe = bp->spq_prod_bd;
3808
3809 if (bp->spq_prod_bd == bp->spq_last_bd) {
3810 bp->spq_prod_bd = bp->spq;
3811 bp->spq_prod_idx = 0;
3812 DP(BNX2X_MSG_SP, "end of spq\n");
3813 } else {
3814 bp->spq_prod_bd++;
3815 bp->spq_prod_idx++;
3816 }
3817 return next_spe;
3818}
3819
3820
3821static void bnx2x_sp_prod_update(struct bnx2x *bp)
3822{
3823 int func = BP_FUNC(bp);
3824
	/* Make sure that BD data is updated before writing the producer:
	 * BD data is written to the memory, the producer is read from the
	 * memory, thus we need a full memory barrier to ensure that all
	 * read/write operations are performed before we update the producer.
	 */
3830 mb();
3831
3832 REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3833 bp->spq_prod_idx);
3834}
3835
/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command type
 */
3842static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3843{
3844 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3845 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3846 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3847 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3848 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3849 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3850 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3851 return true;
3852 else
3853 return false;
3854}
3855
/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */
3870int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3871 u32 data_hi, u32 data_lo, int cmd_type)
3872{
3873 struct eth_spe *spe;
3874 u16 type;
3875 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3876
3877#ifdef BNX2X_STOP_ON_ERROR
3878 if (unlikely(bp->panic)) {
3879 BNX2X_ERR("Can't post SP when there is panic\n");
3880 return -EIO;
3881 }
3882#endif
3883
3884 spin_lock_bh(&bp->spq_lock);
3885
3886 if (common) {
3887 if (!atomic_read(&bp->eq_spq_left)) {
3888 BNX2X_ERR("BUG! EQ ring full!\n");
3889 spin_unlock_bh(&bp->spq_lock);
3890 bnx2x_panic();
3891 return -EBUSY;
3892 }
3893 } else if (!atomic_read(&bp->cq_spq_left)) {
3894 BNX2X_ERR("BUG! SPQ ring full!\n");
3895 spin_unlock_bh(&bp->spq_lock);
3896 bnx2x_panic();
3897 return -EBUSY;
3898 }
3899
3900 spe = bnx2x_sp_get_next(bp);
3901
	/* CID needs the port number to be encoded in it */
3903 spe->hdr.conn_and_cmd_data =
3904 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3905 HW_CID(bp, cid));

	/* In some cases, type may already contain the func-id
	 * mainly in SRIOV related use cases, so we add it here only
	 * if it's not already set.
	 */
3911 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3912 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3913 SPE_HDR_CONN_TYPE;
3914 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3915 SPE_HDR_FUNCTION_ID);
3916 } else {
3917 type = cmd_type;
3918 }
3919
3920 spe->hdr.type = cpu_to_le16(type);
3921
3922 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3923 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3924
	/* It's ok if the actual decrement is issued towards the memory
	 * somewhere between the spin_lock and spin_unlock. Thus no
	 * more explicit memory barrier is needed.
	 */
3930 if (common)
3931 atomic_dec(&bp->eq_spq_left);
3932 else
3933 atomic_dec(&bp->cq_spq_left);
3934
3935 DP(BNX2X_MSG_SP,
3936 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3937 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3938 (u32)(U64_LO(bp->spq_mapping) +
3939 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3940 HW_CID(bp, cid), data_hi, data_lo, type,
3941 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3942
3943 bnx2x_sp_prod_update(bp);
3944 spin_unlock_bh(&bp->spq_lock);
3945 return 0;
3946}
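
/* Illustrative sketch (not a real call site in this section): a slow-path
 * client posts a ramrod by CID and connection type, passing the DMA address
 * of its ramrod data split into two 32-bit halves; data_mapping here is a
 * hypothetical dma_addr_t:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES, cid,
 *			   U64_HI(data_mapping), U64_LO(data_mapping),
 *			   ETH_CONNECTION_TYPE);
 *	if (rc)
 *		return rc;
 */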
3947
/* acquire split MCP access lock register */
3949static int bnx2x_acquire_alr(struct bnx2x *bp)
3950{
3951 u32 j, val;
3952 int rc = 0;
3953
3954 might_sleep();
3955 for (j = 0; j < 1000; j++) {
3956 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3957 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3958 if (val & MCPR_ACCESS_LOCK_LOCK)
3959 break;
3960
3961 usleep_range(5000, 10000);
3962 }
3963 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3964 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3965 rc = -EBUSY;
3966 }
3967
3968 return rc;
3969}
3970
/* release split MCP access lock register */
3972static void bnx2x_release_alr(struct bnx2x *bp)
3973{
3974 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3975}
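
/* Illustrative sketch (hypothetical helper): the ALR is always taken and
 * released in a strict pair around accesses to the split MCP registers.
 */
static inline int bnx2x_example_with_alr(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_alr(bp);

	if (rc)
		return rc;

	/* ... read/write MCP split registers ... */

	bnx2x_release_alr(bp);
	return 0;
}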
3976
3977#define BNX2X_DEF_SB_ATT_IDX 0x0001
3978#define BNX2X_DEF_SB_IDX 0x0002
3979
3980static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3981{
3982 struct host_sp_status_block *def_sb = bp->def_status_blk;
3983 u16 rc = 0;
3984
3985 barrier();
3986 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3987 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3988 rc |= BNX2X_DEF_SB_ATT_IDX;
3989 }
3990
3991 if (bp->def_idx != def_sb->sp_sb.running_index) {
3992 bp->def_idx = def_sb->sp_sb.running_index;
3993 rc |= BNX2X_DEF_SB_IDX;
3994 }
3995
	/* Do not reorder: indices reading should complete before handling */
3997 barrier();
3998 return rc;
3999}
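
/* Illustrative sketch (hypothetical caller, modeled on the slow-path task):
 * the returned bitmask tells the caller which default status block indices
 * moved, so attentions and slow-path events can be handled separately:
 *
 *	u16 status = bnx2x_update_dsb_idx(bp);
 *
 *	if (status & BNX2X_DEF_SB_ATT_IDX)
 *		handle attentions;
 *	if (status & BNX2X_DEF_SB_IDX)
 *		handle slow-path / event queue work;
 */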
4000
/*
 * slow path service functions
 */

4005static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4006{
4007 int port = BP_PORT(bp);
4008 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4009 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4010 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4011 NIG_REG_MASK_INTERRUPT_PORT0;
4012 u32 aeu_mask;
4013 u32 nig_mask = 0;
4014 u32 reg_addr;
4015
4016 if (bp->attn_state & asserted)
4017 BNX2X_ERR("IGU ERROR\n");
4018
4019 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4020 aeu_mask = REG_RD(bp, aeu_addr);
4021
4022 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4023 aeu_mask, asserted);
4024 aeu_mask &= ~(asserted & 0x3ff);
4025 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4026
4027 REG_WR(bp, aeu_addr, aeu_mask);
4028 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4029
4030 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4031 bp->attn_state |= asserted;
4032 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4033
4034 if (asserted & ATTN_HARD_WIRED_MASK) {
4035 if (asserted & ATTN_NIG_FOR_FUNC) {
4036
4037 bnx2x_acquire_phy_lock(bp);
4038
			/* save nig interrupt mask */
4040 nig_mask = REG_RD(bp, nig_int_mask_addr);
4041
			/* If nig_mask is not set, no need to call the update
			 * function.
			 */
4045 if (nig_mask) {
4046 REG_WR(bp, nig_int_mask_addr, 0);
4047
4048 bnx2x_link_attn(bp);
4049 }
4050
4051
4052 }
4053 if (asserted & ATTN_SW_TIMER_4_FUNC)
4054 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4055
4056 if (asserted & GPIO_2_FUNC)
4057 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4058
4059 if (asserted & GPIO_3_FUNC)
4060 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4061
4062 if (asserted & GPIO_4_FUNC)
4063 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4064
4065 if (port == 0) {
4066 if (asserted & ATTN_GENERAL_ATTN_1) {
4067 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4068 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4069 }
4070 if (asserted & ATTN_GENERAL_ATTN_2) {
4071 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4072 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4073 }
4074 if (asserted & ATTN_GENERAL_ATTN_3) {
4075 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4076 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4077 }
4078 } else {
4079 if (asserted & ATTN_GENERAL_ATTN_4) {
4080 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4081 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4082 }
4083 if (asserted & ATTN_GENERAL_ATTN_5) {
4084 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4085 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4086 }
4087 if (asserted & ATTN_GENERAL_ATTN_6) {
4088 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4089 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4090 }
4091 }
4092
4093 }
4094
4095 if (bp->common.int_block == INT_BLOCK_HC)
4096 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4097 COMMAND_REG_ATTN_BITS_SET);
4098 else
4099 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4100
4101 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4102 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4103 REG_WR(bp, reg_addr, asserted);
4104
	/* now set back the mask */
4106 if (asserted & ATTN_NIG_FOR_FUNC) {
		/* Verify that IGU ack through BAR was written before restoring
		 * NIG mask. This loop should exit after 2-3 iterations max.
		 */
4110 if (bp->common.int_block != INT_BLOCK_HC) {
4111 u32 cnt = 0, igu_acked;
4112 do {
4113 igu_acked = REG_RD(bp,
4114 IGU_REG_ATTENTION_ACK_BITS);
4115 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4116 (++cnt < MAX_IGU_ATTN_ACK_TO));
4117 if (!igu_acked)
4118 DP(NETIF_MSG_HW,
4119 "Failed to verify IGU ack on time\n");
4120 barrier();
4121 }
4122 REG_WR(bp, nig_int_mask_addr, nig_mask);
4123 bnx2x_release_phy_lock(bp);
4124 }
4125}
4126
4127static void bnx2x_fan_failure(struct bnx2x *bp)
4128{
4129 int port = BP_PORT(bp);
4130 u32 ext_phy_config;
4131
4132 ext_phy_config =
4133 SHMEM_RD(bp,
4134 dev_info.port_hw_config[port].external_phy_config);
4135
4136 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4137 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4138 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4139 ext_phy_config);

	/* log the failure */
4142 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4143 "Please contact OEM Support for assistance\n");

	/* Schedule device reset (unload)
	 * This is due to some boards consuming sufficient power when driver
	 * is up to overheat if fan fails.
	 */
4149 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4150}
4151
4152static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4153{
4154 int port = BP_PORT(bp);
4155 int reg_offset;
4156 u32 val;
4157
4158 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4159 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4160
4161 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4162
4163 val = REG_RD(bp, reg_offset);
4164 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4165 REG_WR(bp, reg_offset, val);
4166
4167 BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
4170 bnx2x_hw_reset_phy(&bp->link_params);
4171 bnx2x_fan_failure(bp);
4172 }
4173
4174 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4175 bnx2x_acquire_phy_lock(bp);
4176 bnx2x_handle_module_detect_int(&bp->link_params);
4177 bnx2x_release_phy_lock(bp);
4178 }
4179
4180 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4181
4182 val = REG_RD(bp, reg_offset);
4183 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4184 REG_WR(bp, reg_offset, val);
4185
4186 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4187 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4188 bnx2x_panic();
4189 }
4190}
4191
4192static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4193{
4194 u32 val;
4195
4196 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4197
4198 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4199 BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
4201 if (val & 0x2)
4202 BNX2X_ERR("FATAL error from DORQ\n");
4203 }
4204
4205 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4206
4207 int port = BP_PORT(bp);
4208 int reg_offset;
4209
4210 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4211 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4212
4213 val = REG_RD(bp, reg_offset);
4214 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4215 REG_WR(bp, reg_offset, val);
4216
4217 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4218 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4219 bnx2x_panic();
4220 }
4221}
4222
4223static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4224{
4225 u32 val;
4226
4227 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4228
4229 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4230 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4231
4232 if (val & 0x2)
4233 BNX2X_ERR("FATAL error from CFC\n");
4234 }
4235
4236 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4237 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4238 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4239
4240 if (val & 0x18000)
4241 BNX2X_ERR("FATAL error from PXP\n");
4242
4243 if (!CHIP_IS_E1x(bp)) {
4244 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4245 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4246 }
4247 }
4248
4249 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4250
4251 int port = BP_PORT(bp);
4252 int reg_offset;
4253
4254 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4255 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4256
4257 val = REG_RD(bp, reg_offset);
4258 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4259 REG_WR(bp, reg_offset, val);
4260
4261 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4262 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4263 bnx2x_panic();
4264 }
4265}
4266
4267static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4268{
4269 u32 val;
4270
4271 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4272
4273 if (attn & BNX2X_PMF_LINK_ASSERT) {
4274 int func = BP_FUNC(bp);
4275
4276 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4277 bnx2x_read_mf_cfg(bp);
4278 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4279 func_mf_config[BP_ABS_FUNC(bp)].config);
4280 val = SHMEM_RD(bp,
4281 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4282
4283 if (val & (DRV_STATUS_DCC_EVENT_MASK |
4284 DRV_STATUS_OEM_EVENT_MASK))
4285 bnx2x_oem_event(bp,
4286 (val & (DRV_STATUS_DCC_EVENT_MASK |
4287 DRV_STATUS_OEM_EVENT_MASK)));
4288
4289 if (val & DRV_STATUS_SET_MF_BW)
4290 bnx2x_set_mf_bw(bp);
4291
4292 if (val & DRV_STATUS_DRV_INFO_REQ)
4293 bnx2x_handle_drv_info_req(bp);
4294
4295 if (val & DRV_STATUS_VF_DISABLED)
4296 bnx2x_schedule_iov_task(bp,
4297 BNX2X_IOV_HANDLE_FLR);
4298
4299 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4300 bnx2x_pmf_update(bp);
4301
			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
				bp->dcbx_enabled > 0)
				/* start dcbx state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
4308 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4309 bnx2x_handle_afex_cmd(bp,
4310 val & DRV_STATUS_AFEX_EVENT_MASK);
4311 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4312 bnx2x_handle_eee_event(bp);
4313
4314 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4315 bnx2x_schedule_sp_rtnl(bp,
4316 BNX2X_SP_RTNL_UPDATE_SVID, 0);
4317
4318 if (bp->link_vars.periodic_flags &
			    PERIODIC_FLAGS_LINK_EVENT) {
				/* sync with link */
				bnx2x_acquire_phy_lock(bp);
4322 bp->link_vars.periodic_flags &=
4323 ~PERIODIC_FLAGS_LINK_EVENT;
4324 bnx2x_release_phy_lock(bp);
4325 if (IS_MF(bp))
4326 bnx2x_link_sync_notify(bp);
4327 bnx2x_link_report(bp);
4328 }
4329
4330
4331
4332 bnx2x__link_status_update(bp);
4333 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4334
4335 BNX2X_ERR("MC assert!\n");
4336 bnx2x_mc_assert(bp);
4337 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4338 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4339 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4340 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4341 bnx2x_panic();
4342
4343 } else if (attn & BNX2X_MCP_ASSERT) {
4344
4345 BNX2X_ERR("MCP assert!\n");
4346 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4347 bnx2x_fw_dump(bp);
4348
4349 } else
4350 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4351 }
4352
4353 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4354 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4355 if (attn & BNX2X_GRC_TIMEOUT) {
4356 val = CHIP_IS_E1(bp) ? 0 :
4357 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4358 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4359 }
4360 if (attn & BNX2X_GRC_RSV) {
4361 val = CHIP_IS_E1(bp) ? 0 :
4362 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4363 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4364 }
4365 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4366 }
4367}
4368
4382
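/* BNX2X_RECOVERY_GLOB_REG bits map (per the defines below):
 * 0-7   - engine0 (path0) PF load mask, one bit per PF.
 * 8-15  - engine1 (path1) PF load mask.
 * 16    - engine0 RESET_IN_PROGRESS bit.
 * 17    - engine1 RESET_IN_PROGRESS bit.
 * 18    - GLOBAL_RESET bit, set while a chip-wide recovery is in progress.
 */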
4383#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4384
4385#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4386#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4387#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4388#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4389#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4390#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4391#define BNX2X_GLOBAL_RESET_BIT 0x00040000
4392
4393
4394
4395
4396
4397
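/*
 * Set the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */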
4398void bnx2x_set_reset_global(struct bnx2x *bp)
4399{
4400 u32 val;
4401 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4402 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4403 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4404 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4405}
4406
4407
4408
4409
4410
4411
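/*
 * Clear the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */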
4412static void bnx2x_clear_reset_global(struct bnx2x *bp)
4413{
4414 u32 val;
4415 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4416 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4417 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4418 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4419}
4420
4421
4422
4423
4424
4425
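/*
 * Checks the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */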
4426static bool bnx2x_reset_is_global(struct bnx2x *bp)
4427{
4428 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4429
4430 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4431 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4432}
4433
4434
4435
4436
4437
4438
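/*
 * Clear the RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock
 */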
4439static void bnx2x_set_reset_done(struct bnx2x *bp)
4440{
4441 u32 val;
4442 u32 bit = BP_PATH(bp) ?
4443 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4444 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Clear the bit */
	val &= ~bit;
4449 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4450
4451 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4452}
4453
4454
4455
4456
4457
4458
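/*
 * Set RESET_IN_PROGRESS for the current engine.
 *
 * Should be run under rtnl lock
 */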
4459void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4460{
4461 u32 val;
4462 u32 bit = BP_PATH(bp) ?
4463 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4464 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Set the bit */
	val |= bit;
4469 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4470 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4471}
4472
4473
4474
4475
4476
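/*
 * Checks the RESET_IN_PROGRESS bit for the given engine.
 *
 * Should be run under rtnl lock
 */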
4477bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4478{
4479 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4480 u32 bit = engine ?
4481 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4482
4483
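	/* return false if the bit is set */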
4484 return (val & bit) ? false : true;
4485}
4486
4487
4488
4489
4490
4491
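/*
 * bnx2x_set_pf_load - mark the current PF as loaded in the recovery register
 *
 * Should be run under rtnl lock
 */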
4492void bnx2x_set_pf_load(struct bnx2x *bp)
4493{
4494 u32 val1, val;
4495 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4496 BNX2X_PATH0_LOAD_CNT_MASK;
4497 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4498 BNX2X_PATH0_LOAD_CNT_SHIFT;
4499
4500 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4501 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4502
	DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* set the bit of this PF */
	val1 |= (1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);
4516
4517 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4518 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4519}
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
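/**
 * bnx2x_clear_pf_load - clear the PF load mark
 *
 * @bp:		driver handle
 *
 * Should be run under rtnl lock.
 * Clears the load mark of the current PF and returns whether other
 * functions on the engine are still loaded.
 */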
4530bool bnx2x_clear_pf_load(struct bnx2x *bp)
4531{
4532 u32 val1, val;
4533 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4534 BNX2X_PATH0_LOAD_CNT_MASK;
4535 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4536 BNX2X_PATH0_LOAD_CNT_SHIFT;
4537
4538 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4539 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* clear the bit of this PF */
	val1 &= ~(1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);
4553
4554 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4555 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4556 return val1 != 0;
4557}
4558
4559
4560
4561
4562
4563
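/**
 * bnx2x_get_load_status - get the load status for the engine
 *
 * @bp:		driver handle
 * @engine:	engine to check
 *
 * Returns true if there is a load mark for any PF on the given engine.
 */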
4564static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4565{
4566 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4567 BNX2X_PATH0_LOAD_CNT_MASK);
4568 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4569 BNX2X_PATH0_LOAD_CNT_SHIFT);
4570 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4571
4572 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4573
4574 val = (val & mask) >> shift;
4575
4576 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4577 engine, val);
4578
4579 return val != 0;
4580}
4581
4582static void _print_parity(struct bnx2x *bp, u32 reg)
4583{
4584 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4585}
4586
4587static void _print_next_block(int idx, const char *blk)
4588{
4589 pr_cont("%s%s", idx ? ", " : "", blk);
4590}
4591
4592static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4593 int *par_num, bool print)
4594{
4595 u32 cur_bit;
4596 bool res;
4597 int i;
4598
4599 res = false;
4600
4601 for (i = 0; sig; i++) {
4602 cur_bit = (0x1UL << i);
4603 if (sig & cur_bit) {
4604 res |= true;
4605
4606 if (print) {
4607 switch (cur_bit) {
4608 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4609 _print_next_block((*par_num)++, "BRB");
4610 _print_parity(bp,
4611 BRB1_REG_BRB1_PRTY_STS);
4612 break;
4613 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4614 _print_next_block((*par_num)++,
4615 "PARSER");
4616 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4617 break;
4618 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4619 _print_next_block((*par_num)++, "TSDM");
4620 _print_parity(bp,
4621 TSDM_REG_TSDM_PRTY_STS);
4622 break;
4623 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4624 _print_next_block((*par_num)++,
4625 "SEARCHER");
4626 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4627 break;
4628 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4629 _print_next_block((*par_num)++, "TCM");
4630 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4631 break;
4632 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4633 _print_next_block((*par_num)++,
4634 "TSEMI");
4635 _print_parity(bp,
4636 TSEM_REG_TSEM_PRTY_STS_0);
4637 _print_parity(bp,
4638 TSEM_REG_TSEM_PRTY_STS_1);
4639 break;
4640 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4641 _print_next_block((*par_num)++, "XPB");
4642 _print_parity(bp, GRCBASE_XPB +
4643 PB_REG_PB_PRTY_STS);
4644 break;
4645 }
4646 }
4647
4648
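			/* Clear the bit */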
4649 sig &= ~cur_bit;
4650 }
4651 }
4652
4653 return res;
4654}
4655
4656static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4657 int *par_num, bool *global,
4658 bool print)
4659{
4660 u32 cur_bit;
4661 bool res;
4662 int i;
4663
4664 res = false;
4665
4666 for (i = 0; sig; i++) {
4667 cur_bit = (0x1UL << i);
4668 if (sig & cur_bit) {
4669 res |= true;
4670 switch (cur_bit) {
4671 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4672 if (print) {
4673 _print_next_block((*par_num)++, "PBF");
4674 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4675 }
4676 break;
4677 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4678 if (print) {
4679 _print_next_block((*par_num)++, "QM");
4680 _print_parity(bp, QM_REG_QM_PRTY_STS);
4681 }
4682 break;
4683 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4684 if (print) {
4685 _print_next_block((*par_num)++, "TM");
4686 _print_parity(bp, TM_REG_TM_PRTY_STS);
4687 }
4688 break;
4689 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4690 if (print) {
4691 _print_next_block((*par_num)++, "XSDM");
4692 _print_parity(bp,
4693 XSDM_REG_XSDM_PRTY_STS);
4694 }
4695 break;
4696 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4697 if (print) {
4698 _print_next_block((*par_num)++, "XCM");
4699 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4700 }
4701 break;
4702 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4703 if (print) {
4704 _print_next_block((*par_num)++,
4705 "XSEMI");
4706 _print_parity(bp,
4707 XSEM_REG_XSEM_PRTY_STS_0);
4708 _print_parity(bp,
4709 XSEM_REG_XSEM_PRTY_STS_1);
4710 }
4711 break;
4712 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4713 if (print) {
4714 _print_next_block((*par_num)++,
4715 "DOORBELLQ");
4716 _print_parity(bp,
4717 DORQ_REG_DORQ_PRTY_STS);
4718 }
4719 break;
4720 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4721 if (print) {
4722 _print_next_block((*par_num)++, "NIG");
4723 if (CHIP_IS_E1x(bp)) {
4724 _print_parity(bp,
4725 NIG_REG_NIG_PRTY_STS);
4726 } else {
4727 _print_parity(bp,
4728 NIG_REG_NIG_PRTY_STS_0);
4729 _print_parity(bp,
4730 NIG_REG_NIG_PRTY_STS_1);
4731 }
4732 }
4733 break;
4734 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4735 if (print)
4736 _print_next_block((*par_num)++,
4737 "VAUX PCI CORE");
4738 *global = true;
4739 break;
4740 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4741 if (print) {
4742 _print_next_block((*par_num)++,
4743 "DEBUG");
4744 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4745 }
4746 break;
4747 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4748 if (print) {
4749 _print_next_block((*par_num)++, "USDM");
4750 _print_parity(bp,
4751 USDM_REG_USDM_PRTY_STS);
4752 }
4753 break;
4754 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4755 if (print) {
4756 _print_next_block((*par_num)++, "UCM");
4757 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4758 }
4759 break;
4760 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4761 if (print) {
4762 _print_next_block((*par_num)++,
4763 "USEMI");
4764 _print_parity(bp,
4765 USEM_REG_USEM_PRTY_STS_0);
4766 _print_parity(bp,
4767 USEM_REG_USEM_PRTY_STS_1);
4768 }
4769 break;
4770 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4771 if (print) {
4772 _print_next_block((*par_num)++, "UPB");
4773 _print_parity(bp, GRCBASE_UPB +
4774 PB_REG_PB_PRTY_STS);
4775 }
4776 break;
4777 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4778 if (print) {
4779 _print_next_block((*par_num)++, "CSDM");
4780 _print_parity(bp,
4781 CSDM_REG_CSDM_PRTY_STS);
4782 }
4783 break;
4784 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4785 if (print) {
4786 _print_next_block((*par_num)++, "CCM");
4787 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4788 }
4789 break;
4790 }
4791
4792
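			/* Clear the bit */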
4793 sig &= ~cur_bit;
4794 }
4795 }
4796
4797 return res;
4798}
4799
4800static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4801 int *par_num, bool print)
4802{
4803 u32 cur_bit;
4804 bool res;
4805 int i;
4806
4807 res = false;
4808
4809 for (i = 0; sig; i++) {
4810 cur_bit = (0x1UL << i);
4811 if (sig & cur_bit) {
4812 res = true;
4813 if (print) {
4814 switch (cur_bit) {
4815 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4816 _print_next_block((*par_num)++,
4817 "CSEMI");
4818 _print_parity(bp,
4819 CSEM_REG_CSEM_PRTY_STS_0);
4820 _print_parity(bp,
4821 CSEM_REG_CSEM_PRTY_STS_1);
4822 break;
4823 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4824 _print_next_block((*par_num)++, "PXP");
4825 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4826 _print_parity(bp,
4827 PXP2_REG_PXP2_PRTY_STS_0);
4828 _print_parity(bp,
4829 PXP2_REG_PXP2_PRTY_STS_1);
4830 break;
4831 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4832 _print_next_block((*par_num)++,
4833 "PXPPCICLOCKCLIENT");
4834 break;
4835 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4836 _print_next_block((*par_num)++, "CFC");
4837 _print_parity(bp,
4838 CFC_REG_CFC_PRTY_STS);
4839 break;
4840 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4841 _print_next_block((*par_num)++, "CDU");
4842 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4843 break;
4844 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4845 _print_next_block((*par_num)++, "DMAE");
4846 _print_parity(bp,
4847 DMAE_REG_DMAE_PRTY_STS);
4848 break;
4849 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4850 _print_next_block((*par_num)++, "IGU");
4851 if (CHIP_IS_E1x(bp))
4852 _print_parity(bp,
4853 HC_REG_HC_PRTY_STS);
4854 else
4855 _print_parity(bp,
4856 IGU_REG_IGU_PRTY_STS);
4857 break;
4858 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4859 _print_next_block((*par_num)++, "MISC");
4860 _print_parity(bp,
4861 MISC_REG_MISC_PRTY_STS);
4862 break;
4863 }
4864 }
4865
4866
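			/* Clear the bit */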
4867 sig &= ~cur_bit;
4868 }
4869 }
4870
4871 return res;
4872}
4873
4874static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4875 int *par_num, bool *global,
4876 bool print)
4877{
4878 bool res = false;
4879 u32 cur_bit;
4880 int i;
4881
4882 for (i = 0; sig; i++) {
4883 cur_bit = (0x1UL << i);
4884 if (sig & cur_bit) {
4885 switch (cur_bit) {
4886 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4887 if (print)
4888 _print_next_block((*par_num)++,
4889 "MCP ROM");
4890 *global = true;
4891 res = true;
4892 break;
4893 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4894 if (print)
4895 _print_next_block((*par_num)++,
4896 "MCP UMP RX");
4897 *global = true;
4898 res = true;
4899 break;
4900 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4901 if (print)
4902 _print_next_block((*par_num)++,
4903 "MCP UMP TX");
4904 *global = true;
4905 res = true;
4906 break;
4907 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				(*par_num)++;
				/* Clear latched SCPAD PARITY from MCP */
				REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
				       1UL << 10);
4912 break;
4913 }
4914
4915
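			/* Clear the bit */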
4916 sig &= ~cur_bit;
4917 }
4918 }
4919
4920 return res;
4921}
4922
4923static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4924 int *par_num, bool print)
4925{
4926 u32 cur_bit;
4927 bool res;
4928 int i;
4929
4930 res = false;
4931
4932 for (i = 0; sig; i++) {
4933 cur_bit = (0x1UL << i);
4934 if (sig & cur_bit) {
4935 res = true;
4936 if (print) {
4937 switch (cur_bit) {
4938 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4939 _print_next_block((*par_num)++,
4940 "PGLUE_B");
4941 _print_parity(bp,
4942 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4943 break;
4944 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4945 _print_next_block((*par_num)++, "ATC");
4946 _print_parity(bp,
4947 ATC_REG_ATC_PRTY_STS);
4948 break;
4949 }
4950 }
4951
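			/* Clear the bit */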
4952 sig &= ~cur_bit;
4953 }
4954 }
4955
4956 return res;
4957}
4958
4959static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4960 u32 *sig)
4961{
4962 bool res = false;
4963
4964 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4965 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4966 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4967 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4968 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4969 int par_num = 0;
4970
4971 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4972 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4973 sig[0] & HW_PRTY_ASSERT_SET_0,
4974 sig[1] & HW_PRTY_ASSERT_SET_1,
4975 sig[2] & HW_PRTY_ASSERT_SET_2,
4976 sig[3] & HW_PRTY_ASSERT_SET_3,
4977 sig[4] & HW_PRTY_ASSERT_SET_4);
4978 if (print) {
4979 if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4980 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4981 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4982 (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4983 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4984 netdev_err(bp->dev,
4985 "Parity errors detected in blocks: ");
4986 } else {
4987 print = false;
4988 }
4989 }
4990 res |= bnx2x_check_blocks_with_parity0(bp,
4991 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4992 res |= bnx2x_check_blocks_with_parity1(bp,
4993 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4994 res |= bnx2x_check_blocks_with_parity2(bp,
4995 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4996 res |= bnx2x_check_blocks_with_parity3(bp,
4997 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4998 res |= bnx2x_check_blocks_with_parity4(bp,
4999 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
5000
5001 if (print)
5002 pr_cont("\n");
5003 }
5004
5005 return res;
5006}
5007
5008
5009
5010
5011
5012
5013
5014
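/**
 * bnx2x_chk_parity_attn - checks for parity attentions.
 *
 * @bp:		driver handle
 * @global:	set to true if a global parity attention was found
 * @print:	show parity attention in syslog
 */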
5015bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5016{
5017 struct attn_route attn = { {0} };
5018 int port = BP_PORT(bp);
5019
5020 attn.sig[0] = REG_RD(bp,
5021 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5022 port*4);
5023 attn.sig[1] = REG_RD(bp,
5024 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5025 port*4);
5026 attn.sig[2] = REG_RD(bp,
5027 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5028 port*4);
5029 attn.sig[3] = REG_RD(bp,
5030 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5031 port*4);
5032
5033
5034
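	/* Since MCP attentions can't be disabled inside the block, read the
	 * AEU enable registers to see whether the MCP parity bits are
	 * currently masked, and mask sig[3] accordingly.
	 */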
5035 attn.sig[3] &= ((REG_RD(bp,
5036 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5037 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5038 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5039 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5040
5041 if (!CHIP_IS_E1x(bp))
5042 attn.sig[4] = REG_RD(bp,
5043 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5044 port*4);
5045
5046 return bnx2x_parity_attn(bp, global, print, attn.sig);
5047}
5048
5049static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5050{
5051 u32 val;
5052 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5053
5054 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5055 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5056 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5057 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5058 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5059 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5060 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5061 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5062 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5063 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5064 if (val &
5065 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5066 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5067 if (val &
5068 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5069 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5070 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5071 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5072 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5073 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5074 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5075 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5076 }
5077 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5078 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5079 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5080 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5081 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5082 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5083 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5084 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5085 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5086 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5087 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5088 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5089 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5090 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5091 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5092 }
5093
5094 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5095 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5096 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5097 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5098 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5099 }
5100}
5101
5102static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5103{
5104 struct attn_route attn, *group_mask;
5105 int port = BP_PORT(bp);
5106 int index;
5107 u32 reg_addr;
5108 u32 val;
5109 u32 aeu_mask;
5110 bool global = false;
5111
5112
5113
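	/* need to take the HW lock because MCP or the other port might also
	 * try to handle this event
	 */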
5114 bnx2x_acquire_alr(bp);
5115
5116 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5117#ifndef BNX2X_STOP_ON_ERROR
5118 bp->recovery_state = BNX2X_RECOVERY_INIT;
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		/* In case of parity errors don't handle attentions, so that
		 * the other function will also "see" the parity errors.
		 */
5125#else
5126 bnx2x_panic();
5127#endif
5128 bnx2x_release_alr(bp);
5129 return;
5130 }
5131
5132 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5133 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5134 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5135 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5136 if (!CHIP_IS_E1x(bp))
5137 attn.sig[4] =
5138 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5139 else
5140 attn.sig[4] = 0;
5141
5142 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5143 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5144
5145 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5146 if (deasserted & (1 << index)) {
5147 group_mask = &bp->attn_group[index];
5148
5149 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5150 index,
5151 group_mask->sig[0], group_mask->sig[1],
5152 group_mask->sig[2], group_mask->sig[3],
5153 group_mask->sig[4]);
5154
5155 bnx2x_attn_int_deasserted4(bp,
5156 attn.sig[4] & group_mask->sig[4]);
5157 bnx2x_attn_int_deasserted3(bp,
5158 attn.sig[3] & group_mask->sig[3]);
5159 bnx2x_attn_int_deasserted1(bp,
5160 attn.sig[1] & group_mask->sig[1]);
5161 bnx2x_attn_int_deasserted2(bp,
5162 attn.sig[2] & group_mask->sig[2]);
5163 bnx2x_attn_int_deasserted0(bp,
5164 attn.sig[0] & group_mask->sig[0]);
5165 }
5166 }
5167
5168 bnx2x_release_alr(bp);
5169
5170 if (bp->common.int_block == INT_BLOCK_HC)
5171 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5172 COMMAND_REG_ATTN_BITS_CLR);
5173 else
5174 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5175
5176 val = ~deasserted;
5177 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5178 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5179 REG_WR(bp, reg_addr, val);
5180
5181 if (~bp->attn_state & deasserted)
5182 BNX2X_ERR("IGU ERROR\n");
5183
5184 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5185 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5186
5187 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5188 aeu_mask = REG_RD(bp, reg_addr);
5189
5190 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
5191 aeu_mask, deasserted);
5192 aeu_mask |= (deasserted & 0x3ff);
5193 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5194
5195 REG_WR(bp, reg_addr, aeu_mask);
5196 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5197
5198 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5199 bp->attn_state &= ~deasserted;
5200 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5201}
5202
5203static void bnx2x_attn_int(struct bnx2x *bp)
5204{
5205
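	/* read the local default status block */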
5206 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5207 attn_bits);
5208 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5209 attn_bits_ack);
5210 u32 attn_state = bp->attn_state;
5211
5212
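	/* look for changed bits */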
5213 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
5214 u32 deasserted = ~attn_bits & attn_ack & attn_state;
5215
5216 DP(NETIF_MSG_HW,
5217 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
5218 attn_bits, attn_ack, asserted, deasserted);
5219
5220 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5221 BNX2X_ERR("BAD attention state\n");
5222
5223
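	/* handle bits that were raised */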
5224 if (asserted)
5225 bnx2x_attn_int_asserted(bp, asserted);
5226
5227 if (deasserted)
5228 bnx2x_attn_int_deasserted(bp, deasserted);
5229}
5230
5231void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5232 u16 index, u8 op, u8 update)
5233{
5234 u32 igu_addr = bp->igu_base_addr;
5235 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5236 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5237 igu_addr);
5238}
5239
5240static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5241{
5242
5243 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5244}
5245
5246static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5247 union event_ring_elem *elem)
5248{
5249 u8 err = elem->message.error;
5250
5251 if (!bp->cnic_eth_dev.starting_cid ||
5252 (cid < bp->cnic_eth_dev.starting_cid &&
5253 cid != bp->cnic_eth_dev.iscsi_l2_cid))
5254 return 1;
5255
5256 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5257
5258 if (unlikely(err)) {
5259
5260 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5261 cid);
5262 bnx2x_panic_dump(bp, false);
5263 }
5264 bnx2x_cnic_cfc_comp(bp, cid, err);
5265 return 0;
5266}
5267
5268static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5269{
5270 struct bnx2x_mcast_ramrod_params rparam;
5271 int rc;
5272
5273 memset(&rparam, 0, sizeof(rparam));
5274
5275 rparam.mcast_obj = &bp->mcast_obj;
5276
	netif_addr_lock_bh(bp->dev);

	/* Clear pending state for the last command */
	bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5284 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5285 if (rc < 0)
5286 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5287 rc);
5288 }
5289
5290 netif_addr_unlock_bh(bp->dev);
5291}
5292
5293static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5294 union event_ring_elem *elem)
5295{
5296 unsigned long ramrod_flags = 0;
5297 int rc = 0;
5298 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5299 u32 cid = echo & BNX2X_SWCID_MASK;
5300 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5301
5302
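	/* Always push the next commands out, don't wait here */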
5303 __set_bit(RAMROD_CONT, &ramrod_flags);
5304
5305 switch (echo >> BNX2X_SWCID_SHIFT) {
5306 case BNX2X_FILTER_MAC_PENDING:
5307 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5308 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5309 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5310 else
5311 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5312
5313 break;
5314 case BNX2X_FILTER_VLAN_PENDING:
5315 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5316 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5317 break;
5318 case BNX2X_FILTER_MCAST_PENDING:
5319 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5320
5321
5322
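		/* This is only relevant for 57710 where multicast MACs are
		 * configured as unicast MACs using the same ramrod.
		 */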
5323 bnx2x_handle_mcast_eqe(bp);
5324 return;
5325 default:
5326 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5327 return;
5328 }
5329
5330 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5331
5332 if (rc < 0)
5333 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5334 else if (rc > 0)
5335 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5336}
5337
5338static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5339
5340static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5341{
5342 netif_addr_lock_bh(bp->dev);
5343
5344 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5345
5346
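	/* Send the rx_mode command again if it was requested */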
5347 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5348 bnx2x_set_storm_rx_mode(bp);
5349 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5350 &bp->sp_state))
5351 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5352 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5353 &bp->sp_state))
5354 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5355
5356 netif_addr_unlock_bh(bp->dev);
5357}
5358
5359static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5360 union event_ring_elem *elem)
5361{
5362 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5363 DP(BNX2X_MSG_SP,
5364 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5365 elem->message.data.vif_list_event.func_bit_map);
5366 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5367 elem->message.data.vif_list_event.func_bit_map);
5368 } else if (elem->message.data.vif_list_event.echo ==
5369 VIF_LIST_RULE_SET) {
5370 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5371 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5372 }
5373}
5374
5375
5376static void bnx2x_after_function_update(struct bnx2x *bp)
5377{
5378 int q, rc;
5379 struct bnx2x_fastpath *fp;
5380 struct bnx2x_queue_state_params queue_params = {NULL};
5381 struct bnx2x_queue_update_params *q_update_params =
5382 &queue_params.params.update;
5383
5384
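	/* Send a Q update command with afex vlan removal values for all Qs */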
5385 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5386
5387
5388 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5389 &q_update_params->update_flags);
5390 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5391 &q_update_params->update_flags);
5392 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5393
5394
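	/* in access mode, mask and value are 0 so all vlans are stripped */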
5395 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5396 q_update_params->silent_removal_value = 0;
5397 q_update_params->silent_removal_mask = 0;
5398 } else {
5399 q_update_params->silent_removal_value =
5400 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5401 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5402 }
5403
5404 for_each_eth_queue(bp, q) {
5405
5406 fp = &bp->fp[q];
5407 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5408
5409
5410 rc = bnx2x_queue_state_change(bp, &queue_params);
5411 if (rc < 0)
5412 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5413 q);
5414 }
5415
5416 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5417 fp = &bp->fp[FCOE_IDX(bp)];
5418 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5419
5420
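		/* clear pending completion bit */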
5421 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5422
5423
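		/* mark latest Q bit */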
5424 smp_mb__before_atomic();
5425 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5426 smp_mb__after_atomic();
5427
5428
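		/* send Q update ramrod for FCoE Q */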
5429 rc = bnx2x_queue_state_change(bp, &queue_params);
5430 if (rc < 0)
5431 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5432 q);
5433 } else {
5434
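		/* If there is no FCoE ring - ACK MCP now */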
5435 bnx2x_link_report(bp);
5436 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5437 }
5438}
5439
5440static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5441 struct bnx2x *bp, u32 cid)
5442{
5443 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5444
5445 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5446 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5447 else
5448 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5449}
5450
5451static void bnx2x_eq_int(struct bnx2x *bp)
5452{
5453 u16 hw_cons, sw_cons, sw_prod;
5454 union event_ring_elem *elem;
5455 u8 echo;
5456 u32 cid;
5457 u8 opcode;
5458 int rc, spqe_cnt = 0;
5459 struct bnx2x_queue_sp_obj *q_obj;
5460 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5461 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5462
5463 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5464
5465
5466
5467
5468
5469
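	/* When the index points at a "next page" descriptor, bump hw_cons by
	 * one so the loop condition against sw_cons below is met (the
	 * next-page element is the size of a regular element).
	 */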
5470 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5471 hw_cons++;
5472
5473
5474
5475
5476
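	/* This function never runs in parallel with itself for a given bp,
	 * so no extra synchronization of eq_cons/eq_prod is needed here.
	 */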
5477 sw_cons = bp->eq_cons;
5478 sw_prod = bp->eq_prod;
5479
5480 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5481 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5482
5483 for (; sw_cons != hw_cons;
5484 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5485
5486 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5487
5488 rc = bnx2x_iov_eq_sp_event(bp, elem);
5489 if (!rc) {
5490 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5491 rc);
5492 goto next_spqe;
5493 }
5494
5495 opcode = elem->message.opcode;
5496
5497
5498 switch (opcode) {
5499 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5500 bnx2x_vf_mbx_schedule(bp,
5501 &elem->message.data.vf_pf_event);
5502 continue;
5503
5504 case EVENT_RING_OPCODE_STAT_QUERY:
5505 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5506 "got statistics comp event %d\n",
5507 bp->stats_comp++);
5508
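			/* nothing more to do with a stats comp event */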
5509 goto next_spqe;
5510
5511 case EVENT_RING_OPCODE_CFC_DEL:
5512
5513
5514
5515
5516
5517
5518
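			/* handle according to cid range; we may want to
			 * verify here that the bp state is HALTING
			 */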
5519 cid = SW_CID(elem->message.data.cfc_del_event.cid);
5520
5521 DP(BNX2X_MSG_SP,
5522 "got delete ramrod for MULTI[%d]\n", cid);
5523
5524 if (CNIC_LOADED(bp) &&
5525 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5526 goto next_spqe;
5527
5528 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5529
5530 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5531 break;
5532
5533 goto next_spqe;
5534
5535 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5536 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5537 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5538 if (f_obj->complete_cmd(bp, f_obj,
5539 BNX2X_F_CMD_TX_STOP))
5540 break;
5541 goto next_spqe;
5542
5543 case EVENT_RING_OPCODE_START_TRAFFIC:
5544 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5545 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5546 if (f_obj->complete_cmd(bp, f_obj,
5547 BNX2X_F_CMD_TX_START))
5548 break;
5549 goto next_spqe;
5550
5551 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5552 echo = elem->message.data.function_update_event.echo;
5553 if (echo == SWITCH_UPDATE) {
5554 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5555 "got FUNC_SWITCH_UPDATE ramrod\n");
5556 if (f_obj->complete_cmd(
5557 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5558 break;
5559
5560 } else {
5561 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5562
5563 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5564 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5565 f_obj->complete_cmd(bp, f_obj,
5566 BNX2X_F_CMD_AFEX_UPDATE);
5567
5568
5569
5570
5571
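				/* We will perform the Queues update from
				 * the sp_rtnl task, as all Queue SP
				 * operations should run under rtnl_lock.
				 */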
5572 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5573 }
5574
5575 goto next_spqe;
5576
5577 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5578 f_obj->complete_cmd(bp, f_obj,
5579 BNX2X_F_CMD_AFEX_VIFLISTS);
5580 bnx2x_after_afex_vif_lists(bp, elem);
5581 goto next_spqe;
5582 case EVENT_RING_OPCODE_FUNCTION_START:
5583 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5584 "got FUNC_START ramrod\n");
5585 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5586 break;
5587
5588 goto next_spqe;
5589
5590 case EVENT_RING_OPCODE_FUNCTION_STOP:
5591 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5592 "got FUNC_STOP ramrod\n");
5593 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5594 break;
5595
5596 goto next_spqe;
5597
5598 case EVENT_RING_OPCODE_SET_TIMESYNC:
5599 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5600 "got set_timesync ramrod completion\n");
5601 if (f_obj->complete_cmd(bp, f_obj,
5602 BNX2X_F_CMD_SET_TIMESYNC))
5603 break;
5604 goto next_spqe;
5605 }
5606
5607 switch (opcode | bp->state) {
5608 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5609 BNX2X_STATE_OPEN):
5610 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5611 BNX2X_STATE_OPENING_WAIT4_PORT):
5612 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5613 BNX2X_STATE_CLOSING_WAIT4_HALT):
5614 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5615 SW_CID(elem->message.data.eth_event.echo));
5616 rss_raw->clear_pending(rss_raw);
5617 break;
5618
5619 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5620 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5621 case (EVENT_RING_OPCODE_SET_MAC |
5622 BNX2X_STATE_CLOSING_WAIT4_HALT):
5623 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5624 BNX2X_STATE_OPEN):
5625 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5626 BNX2X_STATE_DIAG):
5627 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5628 BNX2X_STATE_CLOSING_WAIT4_HALT):
5629 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5630 bnx2x_handle_classification_eqe(bp, elem);
5631 break;
5632
5633 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5634 BNX2X_STATE_OPEN):
5635 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5636 BNX2X_STATE_DIAG):
5637 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5638 BNX2X_STATE_CLOSING_WAIT4_HALT):
5639 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5640 bnx2x_handle_mcast_eqe(bp);
5641 break;
5642
5643 case (EVENT_RING_OPCODE_FILTERS_RULES |
5644 BNX2X_STATE_OPEN):
5645 case (EVENT_RING_OPCODE_FILTERS_RULES |
5646 BNX2X_STATE_DIAG):
5647 case (EVENT_RING_OPCODE_FILTERS_RULES |
5648 BNX2X_STATE_CLOSING_WAIT4_HALT):
5649 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5650 bnx2x_handle_rx_mode_eqe(bp);
5651 break;
5652 default:
5653
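			/* unknown event: log the error and continue */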
5654 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5655 elem->message.opcode, bp->state);
5656 }
5657next_spqe:
5658 spqe_cnt++;
5659 }
5660
5661 smp_mb__before_atomic();
5662 atomic_add(spqe_cnt, &bp->eq_spq_left);
5663
5664 bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;

	/* make sure that the above memory writes were issued towards memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
5671}
5672
5673static void bnx2x_sp_task(struct work_struct *work)
5674{
5675 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5676
5677 DP(BNX2X_MSG_SP, "sp task invoked\n");
5678
5679
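	/* make sure the atomic interrupt_occurred has been written */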
5680 smp_rmb();
5681 if (atomic_read(&bp->interrupt_occurred)) {
5682
5683
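		/* what work needs to be performed? */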
5684 u16 status = bnx2x_update_dsb_idx(bp);
5685
5686 DP(BNX2X_MSG_SP, "status %x\n", status);
5687 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5688 atomic_set(&bp->interrupt_occurred, 0);
5689
5690
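		/* HW attentions */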
5691 if (status & BNX2X_DEF_SB_ATT_IDX) {
5692 bnx2x_attn_int(bp);
5693 status &= ~BNX2X_DEF_SB_ATT_IDX;
5694 }
5695
5696
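		/* SP events: STAT_QUERY and others */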
5697 if (status & BNX2X_DEF_SB_IDX) {
5698 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5699
5700 if (FCOE_INIT(bp) &&
5701 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5702
5703
5704
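				/* Prevent local bottom-halves from running as
				 * we are about to change the local NAPI list.
				 */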
5705 local_bh_disable();
5706 napi_schedule(&bnx2x_fcoe(bp, napi));
5707 local_bh_enable();
5708 }
5709
5710
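			/* Handle EQ completions */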
5711 bnx2x_eq_int(bp);
5712 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5713 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5714
5715 status &= ~BNX2X_DEF_SB_IDX;
5716 }
5717
5718
5719 if (unlikely(status))
5720 DP(BNX2X_MSG_SP,
5721 "got an unknown interrupt! (status 0x%x)\n", status);
5722
5723
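		/* ack the default status block and re-enable attention
		 * interrupts
		 */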
5724 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5725 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5726 }
5727
5728
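	/* afex - poll to check if VIFSET_ACK should be sent to MFW */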
5729 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5730 &bp->sp_state)) {
5731 bnx2x_link_report(bp);
5732 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5733 }
5734}
5735
5736irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5737{
5738 struct net_device *dev = dev_instance;
5739 struct bnx2x *bp = netdev_priv(dev);
5740
5741 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5742 IGU_INT_DISABLE, 0);
5743
5744#ifdef BNX2X_STOP_ON_ERROR
5745 if (unlikely(bp->panic))
5746 return IRQ_HANDLED;
5747#endif
5748
5749 if (CNIC_LOADED(bp)) {
5750 struct cnic_ops *c_ops;
5751
5752 rcu_read_lock();
5753 c_ops = rcu_dereference(bp->cnic_ops);
5754 if (c_ops)
5755 c_ops->cnic_handler(bp->cnic_data, NULL);
5756 rcu_read_unlock();
5757 }
5758
5759
5760
5761
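	/* schedule the sp task to perform default status block work and
	 * ack attention interrupts, if any
	 */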
5762 bnx2x_schedule_sp_task(bp);
5763
5764 return IRQ_HANDLED;
5765}
5766
5767
5768
5769void bnx2x_drv_pulse(struct bnx2x *bp)
5770{
5771 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5772 bp->fw_drv_pulse_wr_seq);
5773}
5774
5775static void bnx2x_timer(struct timer_list *t)
5776{
5777 struct bnx2x *bp = from_timer(bp, t, timer);
5778
5779 if (!netif_running(bp->dev))
5780 return;
5781
5782 if (IS_PF(bp) &&
5783 !BP_NOMCP(bp)) {
5784 int mb_idx = BP_FW_MB_IDX(bp);
5785 u16 drv_pulse;
5786 u16 mcp_pulse;
5787
5788 ++bp->fw_drv_pulse_wr_seq;
5789 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5790 drv_pulse = bp->fw_drv_pulse_wr_seq;
5791 bnx2x_drv_pulse(bp);
5792
5793 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5794 MCP_PULSE_SEQ_MASK);
5795
5796
5797
5798
5799
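		/* The delta between the driver pulse and the MCP response
		 * should not get too big; if the MFW is more than 5 pulses
		 * behind, assume it is hung.
		 */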
		if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
			BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
5803 }
5804
5805 if (bp->state == BNX2X_STATE_OPEN)
5806 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5807
5808
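	/* sample the pf/vf bulletin board for new posts from the pf */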
5809 if (IS_VF(bp))
5810 bnx2x_timer_sriov(bp);
5811
5812 mod_timer(&bp->timer, jiffies + bp->current_interval);
5813}
5814
5815
5816
5817
5818
5819
5820
5821
5822
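/* nic init service functions */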
5823static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5824{
5825 u32 i;
5826 if (!(len%4) && !(addr%4))
5827 for (i = 0; i < len; i += 4)
5828 REG_WR(bp, addr + i, fill);
5829 else
5830 for (i = 0; i < len; i++)
5831 REG_WR8(bp, addr + i, fill);
5832}
5833
5834
5835static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5836 int fw_sb_id,
5837 u32 *sb_data_p,
5838 u32 data_size)
5839{
5840 int index;
5841 for (index = 0; index < data_size; index++)
5842 REG_WR(bp, BAR_CSTRORM_INTMEM +
5843 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5844 sizeof(u32)*index,
5845 *(sb_data_p + index));
5846}
5847
5848static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5849{
5850 u32 *sb_data_p;
5851 u32 data_size = 0;
5852 struct hc_status_block_data_e2 sb_data_e2;
5853 struct hc_status_block_data_e1x sb_data_e1x;
5854
5855
5856 if (!CHIP_IS_E1x(bp)) {
5857 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5858 sb_data_e2.common.state = SB_DISABLED;
5859 sb_data_e2.common.p_func.vf_valid = false;
5860 sb_data_p = (u32 *)&sb_data_e2;
5861 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5862 } else {
5863 memset(&sb_data_e1x, 0,
5864 sizeof(struct hc_status_block_data_e1x));
5865 sb_data_e1x.common.state = SB_DISABLED;
5866 sb_data_e1x.common.p_func.vf_valid = false;
5867 sb_data_p = (u32 *)&sb_data_e1x;
5868 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5869 }
5870 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5871
5872 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5873 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5874 CSTORM_STATUS_BLOCK_SIZE);
5875 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5876 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5877 CSTORM_SYNC_BLOCK_SIZE);
5878}
5879
5880
5881static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5882 struct hc_sp_status_block_data *sp_sb_data)
5883{
5884 int func = BP_FUNC(bp);
5885 int i;
5886 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5887 REG_WR(bp, BAR_CSTRORM_INTMEM +
5888 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5889 i*sizeof(u32),
5890 *((u32 *)sp_sb_data + i));
5891}
5892
5893static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5894{
5895 int func = BP_FUNC(bp);
5896 struct hc_sp_status_block_data sp_sb_data;
5897 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5898
5899 sp_sb_data.state = SB_DISABLED;
5900 sp_sb_data.p_func.vf_valid = false;
5901
5902 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5903
5904 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5905 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5906 CSTORM_SP_STATUS_BLOCK_SIZE);
5907 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5908 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5909 CSTORM_SP_SYNC_BLOCK_SIZE);
5910}
5911
5912static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5913 int igu_sb_id, int igu_seg_id)
5914{
5915 hc_sm->igu_sb_id = igu_sb_id;
5916 hc_sm->igu_seg_id = igu_seg_id;
5917 hc_sm->timer_value = 0xFF;
5918 hc_sm->time_to_expire = 0xFFFFFFFF;
5919}
5920
5921
5922static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5923{
5924
5925
5926 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5927
5928
5929 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5930 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5931 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5932 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5933
5934
5935
5936 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5937 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5938
5939
5940 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5941 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5942 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5943 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5944 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5945 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5946 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5947 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5948}
5949
5950void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5951 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5952{
5953 int igu_seg_id;
5954
5955 struct hc_status_block_data_e2 sb_data_e2;
5956 struct hc_status_block_data_e1x sb_data_e1x;
5957 struct hc_status_block_sm *hc_sm_p;
5958 int data_size;
5959 u32 *sb_data_p;
5960
5961 if (CHIP_INT_MODE_IS_BC(bp))
5962 igu_seg_id = HC_SEG_ACCESS_NORM;
5963 else
5964 igu_seg_id = IGU_SEG_ACCESS_NORM;
5965
5966 bnx2x_zero_fp_sb(bp, fw_sb_id);
5967
5968 if (!CHIP_IS_E1x(bp)) {
5969 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5970 sb_data_e2.common.state = SB_ENABLED;
5971 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5972 sb_data_e2.common.p_func.vf_id = vfid;
5973 sb_data_e2.common.p_func.vf_valid = vf_valid;
5974 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5975 sb_data_e2.common.same_igu_sb_1b = true;
5976 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5977 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5978 hc_sm_p = sb_data_e2.common.state_machine;
5979 sb_data_p = (u32 *)&sb_data_e2;
5980 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5981 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5982 } else {
5983 memset(&sb_data_e1x, 0,
5984 sizeof(struct hc_status_block_data_e1x));
5985 sb_data_e1x.common.state = SB_ENABLED;
5986 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5987 sb_data_e1x.common.p_func.vf_id = 0xff;
5988 sb_data_e1x.common.p_func.vf_valid = false;
5989 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5990 sb_data_e1x.common.same_igu_sb_1b = true;
5991 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5992 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5993 hc_sm_p = sb_data_e1x.common.state_machine;
5994 sb_data_p = (u32 *)&sb_data_e1x;
5995 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5996 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5997 }
5998
5999 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
6000 igu_sb_id, igu_seg_id);
6001 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
6002 igu_sb_id, igu_seg_id);
6003
6004 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
6005
6006
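	/* write indices to HW - PCI guarantees endianity of regpairs */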
6007 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
6008}
6009
6010static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
6011 u16 tx_usec, u16 rx_usec)
6012{
6013 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6014 false, rx_usec);
6015 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6016 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
6017 tx_usec);
6018 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6019 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6020 tx_usec);
6021 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6022 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6023 tx_usec);
6024}
6025
6026static void bnx2x_init_def_sb(struct bnx2x *bp)
6027{
6028 struct host_sp_status_block *def_sb = bp->def_status_blk;
6029 dma_addr_t mapping = bp->def_status_blk_mapping;
6030 int igu_sp_sb_index;
6031 int igu_seg_id;
6032 int port = BP_PORT(bp);
6033 int func = BP_FUNC(bp);
6034 int reg_offset, reg_offset_en5;
6035 u64 section;
6036 int index;
6037 struct hc_sp_status_block_data sp_sb_data;
6038 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6039
6040 if (CHIP_INT_MODE_IS_BC(bp)) {
6041 igu_sp_sb_index = DEF_SB_IGU_ID;
6042 igu_seg_id = HC_SEG_ACCESS_DEF;
6043 } else {
6044 igu_sp_sb_index = bp->igu_dsb_id;
6045 igu_seg_id = IGU_SEG_ACCESS_DEF;
6046 }
6047
6048
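	/* ATTN */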
6049 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6050 atten_status_block);
6051 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6052
6053 bp->attn_state = 0;
6054
6055 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6056 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6057 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6058 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6059 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6060 int sindex;
6061
6062 for (sindex = 0; sindex < 4; sindex++)
6063 bp->attn_group[index].sig[sindex] =
6064 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6065
		if (!CHIP_IS_E1x(bp))
			/* enable5 is separate from the rest of the attention
			 * registers, and therefore the address skip is 4 and
			 * not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset_en5 + 0x4*index);
6074 else
6075 bp->attn_group[index].sig[4] = 0;
6076 }
6077
6078 if (bp->common.int_block == INT_BLOCK_HC) {
6079 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6080 HC_REG_ATTN_MSG0_ADDR_L);
6081
6082 REG_WR(bp, reg_offset, U64_LO(section));
6083 REG_WR(bp, reg_offset + 4, U64_HI(section));
6084 } else if (!CHIP_IS_E1x(bp)) {
6085 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6086 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6087 }
6088
6089 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6090 sp_sb);
6091
6092 bnx2x_zero_sp_sb(bp);
6093
6094
6095 sp_sb_data.state = SB_ENABLED;
6096 sp_sb_data.host_sb_addr.lo = U64_LO(section);
6097 sp_sb_data.host_sb_addr.hi = U64_HI(section);
6098 sp_sb_data.igu_sb_id = igu_sp_sb_index;
6099 sp_sb_data.igu_seg_id = igu_seg_id;
6100 sp_sb_data.p_func.pf_id = func;
6101 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6102 sp_sb_data.p_func.vf_id = 0xff;
6103
6104 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6105
6106 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6107}
6108
6109void bnx2x_update_coalesce(struct bnx2x *bp)
6110{
6111 int i;
6112
6113 for_each_eth_queue(bp, i)
6114 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6115 bp->tx_ticks, bp->rx_ticks);
6116}
6117
6118static void bnx2x_init_sp_ring(struct bnx2x *bp)
6119{
6120 spin_lock_init(&bp->spq_lock);
6121 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6122
6123 bp->spq_prod_idx = 0;
6124 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6125 bp->spq_prod_bd = bp->spq;
6126 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6127}
6128
6129static void bnx2x_init_eq_ring(struct bnx2x *bp)
6130{
6131 int i;
6132 for (i = 1; i <= NUM_EQ_PAGES; i++) {
6133 union event_ring_elem *elem =
6134 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6135
6136 elem->next_page.addr.hi =
6137 cpu_to_le32(U64_HI(bp->eq_mapping +
6138 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6139 elem->next_page.addr.lo =
6140 cpu_to_le32(U64_LO(bp->eq_mapping +
6141 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6142 }
6143 bp->eq_cons = 0;
6144 bp->eq_prod = NUM_EQ_DESC;
6145 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6146
6147 atomic_set(&bp->eq_spq_left,
6148 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6149}
6150
6151
6152static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6153 unsigned long rx_mode_flags,
6154 unsigned long rx_accept_flags,
6155 unsigned long tx_accept_flags,
6156 unsigned long ramrod_flags)
6157{
6158 struct bnx2x_rx_mode_ramrod_params ramrod_param;
6159 int rc;
6160
6161 memset(&ramrod_param, 0, sizeof(ramrod_param));
6162
6163
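	/* Prepare ramrod parameters */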
6164 ramrod_param.cid = 0;
6165 ramrod_param.cl_id = cl_id;
6166 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6167 ramrod_param.func_id = BP_FUNC(bp);
6168
6169 ramrod_param.pstate = &bp->sp_state;
6170 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6171
6172 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6173 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6174
6175 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6176
6177 ramrod_param.ramrod_flags = ramrod_flags;
6178 ramrod_param.rx_mode_flags = rx_mode_flags;
6179
6180 ramrod_param.rx_accept_flags = rx_accept_flags;
6181 ramrod_param.tx_accept_flags = tx_accept_flags;
6182
6183 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6184 if (rc < 0) {
6185 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6186 return rc;
6187 }
6188
6189 return 0;
6190}
6191
6192static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6193 unsigned long *rx_accept_flags,
6194 unsigned long *tx_accept_flags)
6195{
6196
6197 *rx_accept_flags = 0;
6198 *tx_accept_flags = 0;
6199
6200 switch (rx_mode) {
6201 case BNX2X_RX_MODE_NONE:
6202
6203
6204
6205
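		/* 'drop all' supersedes any accept flags that may have been
		 * passed to the function.
		 */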
6206 break;
6207 case BNX2X_RX_MODE_NORMAL:
6208 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6209 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6210 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6211
6212
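		/* internal switching mode */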
6213 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6214 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6215 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6216
6217 if (bp->accept_any_vlan) {
6218 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6219 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6220 }
6221
6222 break;
6223 case BNX2X_RX_MODE_ALLMULTI:
6224 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6225 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6226 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6227
6228
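		/* internal switching mode */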
6229 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6230 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6231 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6232
6233 if (bp->accept_any_vlan) {
6234 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6235 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6236 }
6237
6238 break;
6239 case BNX2X_RX_MODE_PROMISC:
6240
6241
6242
6243
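		/* According to the definition of SI mode, an interface in
		 * promisc mode should receive matched and unmatched (in
		 * resolution of the port) unicast packets.
		 */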
6244 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6245 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6246 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6247 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6248
6249
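		/* internal switching mode */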
6250 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6251 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6252
6253 if (IS_MF_SI(bp))
6254 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6255 else
6256 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6257
6258 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6259 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6260
6261 break;
6262 default:
6263 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6264 return -EINVAL;
6265 }
6266
6267 return 0;
6268}
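/* Example (derived from the switch above): with rx_mode ==
 * BNX2X_RX_MODE_NORMAL the function yields
 *	rx_accept = UNICAST | MULTICAST | BROADCAST
 *	tx_accept = UNICAST | MULTICAST | BROADCAST
 * i.e. only classified traffic passes, while BNX2X_RX_MODE_PROMISC
 * additionally sets UNMATCHED / ALL_MULTICAST / ANY_VLAN on rx.
 */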
6269
6270
6271static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6272{
6273 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6274 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6275 int rc;
6276
6277 if (!NO_FCOE(bp))
6278 /* Configure rx_mode of FCoE Queue */
6279 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6280
6281 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6282 &tx_accept_flags);
6283 if (rc)
6284 return rc;
6285
6286 __set_bit(RAMROD_RX, &ramrod_flags);
6287 __set_bit(RAMROD_TX, &ramrod_flags);
6288
6289 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6290 rx_accept_flags, tx_accept_flags,
6291 ramrod_flags);
6292}
6293
6294static void bnx2x_init_internal_common(struct bnx2x *bp)
6295{
6296 int i;
6297
6298 /* Zero this manually as its initialization is
6299 currently missing in the initTool */
6300 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6301 REG_WR(bp, BAR_USTRORM_INTMEM +
6302 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6303 if (!CHIP_IS_E1x(bp)) {
6304 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6305 CHIP_INT_MODE_IS_BC(bp) ?
6306 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6307 }
6308}
6309
6310static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6311{
6312 switch (load_code) {
6313 case FW_MSG_CODE_DRV_LOAD_COMMON:
6314 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6315 bnx2x_init_internal_common(bp);
6316 fallthrough;
6317
6318 case FW_MSG_CODE_DRV_LOAD_PORT:
6319 /* nothing to do */
6320 fallthrough;
6321
6322 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6323 /* internal memory per function is
6324 initialized inside bnx2x_pf_init */
6325 break;
6326
6327 default:
6328 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6329 break;
6330 }
6331}
6332
6333static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6334{
6335 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6336}
6337
6338static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6339{
6340 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6341}
6342
6343static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6344{
6345 if (CHIP_IS_E1x(fp->bp))
6346 return BP_L_ID(fp->bp) + fp->index;
6347 else
6348 return bnx2x_fp_igu_sb_id(fp);
6349}
6350
6351static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6352{
6353 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6354 u8 cos;
6355 unsigned long q_type = 0;
6356 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6357 fp->rx_queue = fp_idx;
6358 fp->cid = fp_idx;
6359 fp->cl_id = bnx2x_fp_cl_id(fp);
6360 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6361 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6362 /* qZone id equals to FW (per path) client id */
6363 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
6364
6365 /* init shortcut */
6366 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6367
6368 /* Setup SB indices */
6369 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6370
6371 /* Configure Queue State object */
6372 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6373 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6374
6375 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6376
6377 /* init tx data */
6378 for_each_cos_in_tx_queue(fp, cos) {
6379 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6380 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6381 FP_COS_TO_TXQ(fp, cos, bp),
6382 BNX2X_TX_SB_INDEX_BASE + cos, fp);
6383 cids[cos] = fp->txdata_ptr[cos]->cid;
6384 }
6385
6386 /* nothing more for vf to do here */
6387 if (IS_VF(bp))
6388 return;
6389
6390 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6391 fp->fw_sb_id, fp->igu_sb_id);
6392 bnx2x_update_fpsb_idx(fp);
6393 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6394 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6395 bnx2x_sp_mapping(bp, q_rdata), q_type);
6396
6397 /**
6398 * Configure classification DBs: Always enable Tx switching
6399 */
6400 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6401
6402 DP(NETIF_MSG_IFUP,
6403 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6404 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6405 fp->igu_sb_id);
6406}
6407
6408static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6409{
6410 int i;
6411
6412 for (i = 1; i <= NUM_TX_RINGS; i++) {
6413 struct eth_tx_next_bd *tx_next_bd =
6414 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6415
6416 tx_next_bd->addr_hi =
6417 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6418 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6419 tx_next_bd->addr_lo =
6420 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6421 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6422 }
6423
6424 *txdata->tx_cons_sb = cpu_to_le16(0);
6425
6426 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6427 txdata->tx_db.data.zero_fill1 = 0;
6428 txdata->tx_db.data.prod = 0;
6429
6430 txdata->tx_pkt_prod = 0;
6431 txdata->tx_pkt_cons = 0;
6432 txdata->tx_bd_prod = 0;
6433 txdata->tx_bd_cons = 0;
6434 txdata->tx_pkt = 0;
6435}
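/* Note: the loop above chains the TX BD pages into a ring - the last
 * BD of page (i - 1) is a "next" BD whose address points at page
 * (i % NUM_TX_RINGS), so page NUM_TX_RINGS - 1 links back to page 0.
 */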
6436
6437static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6438{
6439 int i;
6440
6441 for_each_tx_queue_cnic(bp, i)
6442 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6443}
6444
6445static void bnx2x_init_tx_rings(struct bnx2x *bp)
6446{
6447 int i;
6448 u8 cos;
6449
6450 for_each_eth_queue(bp, i)
6451 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6452 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6453}
6454
6455static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6456{
6457 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6458 unsigned long q_type = 0;
6459
6460 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6461 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6462 BNX2X_FCOE_ETH_CL_ID_IDX);
6463 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6464 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6465 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6466 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6467 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6468 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6469 fp);
6470
6471 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6472
6473 /* qZone id equals to FW (per path) client id */
6474 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6475 /* init shortcut */
6476 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6477 bnx2x_rx_ustorm_prods_offset(fp);
6478
6479 /* Configure Queue State object */
6480 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6481 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6482
6483 /* No multi-CoS for FCoE L2 client */
6484 BUG_ON(fp->max_cos != 1);
6485
6486 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6487 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6488 bnx2x_sp_mapping(bp, q_rdata), q_type);
6489
6490 DP(NETIF_MSG_IFUP,
6491 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6492 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6493 fp->igu_sb_id);
6494}
6495
6496void bnx2x_nic_init_cnic(struct bnx2x *bp)
6497{
6498 if (!NO_FCOE(bp))
6499 bnx2x_init_fcoe_fp(bp);
6500
6501 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6502 BNX2X_VF_ID_INVALID, false,
6503 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6504
6505 /* ensure status block indices were read */
6506 rmb();
6507 bnx2x_init_rx_rings_cnic(bp);
6508 bnx2x_init_tx_rings_cnic(bp);
6509
6510 /* flush all */
6511 mb();
6512}
6513
6514void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6515{
6516 int i;
6517
6518 /* Setup NIC internals and enable interrupts */
6519 for_each_eth_queue(bp, i)
6520 bnx2x_init_eth_fp(bp, i);
6521
6522 /* ensure status block indices were read */
6523 rmb();
6524 bnx2x_init_rx_rings(bp);
6525 bnx2x_init_tx_rings(bp);
6526
6527 if (IS_PF(bp)) {
6528 /* Initialize MOD_ABS interrupts */
6529 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6530 bp->common.shmem_base,
6531 bp->common.shmem2_base, BP_PORT(bp));
6532
6533 /* initialize the default status block and sp ring */
6534 bnx2x_init_def_sb(bp);
6535 bnx2x_update_dsb_idx(bp);
6536 bnx2x_init_sp_ring(bp);
6537 } else {
6538 bnx2x_memset_stats(bp);
6539 }
6540}
6541
6542void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6543{
6544 bnx2x_init_eq_ring(bp);
6545 bnx2x_init_internal(bp, load_code);
6546 bnx2x_pf_init(bp);
6547 bnx2x_stats_init(bp);
6548
6549 /* flush all before enabling interrupts */
6550 mb();
6551
6552 bnx2x_int_enable(bp);
6553
6554 /* Check for SPIO5 */
6555 bnx2x_attn_int_deasserted0(bp,
6556 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6557 AEU_INPUTS_ATTN_BITS_SPIO5);
6558}
6559
6560/* gzip service functions */
6561static int bnx2x_gunzip_init(struct bnx2x *bp)
6562{
6563 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6564 &bp->gunzip_mapping, GFP_KERNEL);
6565 if (bp->gunzip_buf == NULL)
6566 goto gunzip_nomem1;
6567
6568 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6569 if (bp->strm == NULL)
6570 goto gunzip_nomem2;
6571
6572 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6573 if (bp->strm->workspace == NULL)
6574 goto gunzip_nomem3;
6575
6576 return 0;
6577
6578gunzip_nomem3:
6579 kfree(bp->strm);
6580 bp->strm = NULL;
6581
6582gunzip_nomem2:
6583 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6584 bp->gunzip_mapping);
6585 bp->gunzip_buf = NULL;
6586
6587gunzip_nomem1:
6588 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
6589 return -ENOMEM;
6590}
6591
6592static void bnx2x_gunzip_end(struct bnx2x *bp)
6593{
6594 if (bp->strm) {
6595 vfree(bp->strm->workspace);
6596 kfree(bp->strm);
6597 bp->strm = NULL;
6598 }
6599
6600 if (bp->gunzip_buf) {
6601 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6602 bp->gunzip_mapping);
6603 bp->gunzip_buf = NULL;
6604 }
6605}
6606
6607static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6608{
6609 int n, rc;
6610
6611 /* check gzip header */
6612 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6613 BNX2X_ERR("Bad gzip header\n");
6614 return -EINVAL;
6615 }
6616
6617 n = 10;
6618
6619#define FNAME 0x8
6620
6621 if (zbuf[3] & FNAME)
6622 while ((zbuf[n++] != 0) && (n < len));
6623
6624 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6625 bp->strm->avail_in = len - n;
6626 bp->strm->next_out = bp->gunzip_buf;
6627 bp->strm->avail_out = FW_BUF_SIZE;
6628
6629 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6630 if (rc != Z_OK)
6631 return rc;
6632
6633 rc = zlib_inflate(bp->strm, Z_FINISH);
6634 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6635 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6636 bp->strm->msg);
6637
6638 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6639 if (bp->gunzip_outlen & 0x3)
6640 netdev_err(bp->dev,
6641 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6642 bp->gunzip_outlen);
6643 bp->gunzip_outlen >>= 2;
6644
6645 zlib_inflateEnd(bp->strm);
6646
6647 if (rc == Z_STREAM_END)
6648 return 0;
6649
6650 return rc;
6651}
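/* On success (Z_STREAM_END) the decompressed image is left in
 * bp->gunzip_buf, and bp->gunzip_outlen holds its length in 32-bit
 * words (the byte count is shifted right by 2 above).
 */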
6652
6653
6654
6655
6656
6657
6658
6659/* send a NIG loopback debug packet */
6660static void bnx2x_lb_pckt(struct bnx2x *bp)
6661{
6662 u32 wb_write[3];
6663
6664 /* Ethernet source and destination addresses */
6665 wb_write[0] = 0x55555555;
6666 wb_write[1] = 0x55555555;
6667 wb_write[2] = 0x20; /* SOP */
6668 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6669
6670 /* NON-IP protocol */
6671 wb_write[0] = 0x09000000;
6672 wb_write[1] = 0x55555555;
6673 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
6674 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6675}
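/* Each debug packet is 0x10 bytes on the wire, which is why the
 * memory test below expects a NIG byte count of 0x10 after a single
 * packet and 0xb0 (11 * 0x10) after ten more have been sent.
 */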
6676
6677/* some of the internal memories
6678 * are not directly readable from the driver
6679 * to test them we send debug packets
6680 */
6681static int bnx2x_int_mem_test(struct bnx2x *bp)
6682{
6683 int factor;
6684 int count, i;
6685 u32 val = 0;
6686
6687 if (CHIP_REV_IS_FPGA(bp))
6688 factor = 120;
6689 else if (CHIP_REV_IS_EMUL(bp))
6690 factor = 200;
6691 else
6692 factor = 1;
6693
6694 /* Disable inputs of parser neighbor blocks */
6695 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6696 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6697 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6698 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6699
6700 /* Write 0 to parser credits for CFC search request */
6701 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6702
6703 /* send Ethernet packet */
6704 bnx2x_lb_pckt(bp);
6705
6706
6707 /* Wait until NIG register shows 1 packet of size 0x10 */
6708 count = 1000 * factor;
6709 while (count) {
6710
6711 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6712 val = *bnx2x_sp(bp, wb_data[0]);
6713 if (val == 0x10)
6714 break;
6715
6716 usleep_range(10000, 20000);
6717 count--;
6718 }
6719 if (val != 0x10) {
6720 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6721 return -1;
6722 }
6723
6724 /* Wait until PRS register shows 1 packet */
6725 count = 1000 * factor;
6726 while (count) {
6727 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6728 if (val == 1)
6729 break;
6730
6731 usleep_range(10000, 20000);
6732 count--;
6733 }
6734 if (val != 0x1) {
6735 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6736 return -2;
6737 }
6738
6739 /* Reset and init BRB, PRS */
6740 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6741 msleep(50);
6742 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6743 msleep(50);
6744 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6745 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6746
6747 DP(NETIF_MSG_HW, "part2\n");
6748
6749 /* Disable inputs of parser neighbor blocks */
6750 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6751 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6752 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6753 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6754
6755 /* Write 0 to parser credits for CFC search request */
6756 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6757
6758 /* send 10 Ethernet packets */
6759 for (i = 0; i < 10; i++)
6760 bnx2x_lb_pckt(bp);
6761
6762 /* Wait until NIG register shows 10 + 1
6763 packets of size 11*0x10 = 0xb0 */
6764 count = 1000 * factor;
6765 while (count) {
6766
6767 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6768 val = *bnx2x_sp(bp, wb_data[0]);
6769 if (val == 0xb0)
6770 break;
6771
6772 usleep_range(10000, 20000);
6773 count--;
6774 }
6775 if (val != 0xb0) {
6776 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6777 return -3;
6778 }
6779
6780 /* Wait until PRS register shows 2 packets */
6781 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6782 if (val != 2)
6783 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6784
6785 /* Write 1 to parser credits for CFC search request */
6786 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6787
6788 /* Wait until PRS register shows 3 packets */
6789 msleep(10 * factor);
6790
6791 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6792 if (val != 3)
6793 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6794
6795 /* clear NIG EOP FIFO */
6796 for (i = 0; i < 11; i++)
6797 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6798 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6799 if (val != 1) {
6800 BNX2X_ERR("clear of NIG failed\n");
6801 return -4;
6802 }
6803
6804 /* Reset and init BRB, PRS, NIG */
6805 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6806 msleep(50);
6807 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6808 msleep(50);
6809 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6810 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6811 if (!CNIC_SUPPORT(bp))
6812 /* set NIC mode */
6813 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6814
6815 /* Enable inputs of parser neighbor blocks */
6816 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6817 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6818 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6819 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6820
6821 DP(NETIF_MSG_HW, "done\n");
6822
6823 return 0;
6824}
6825
6826static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6827{
6828 u32 val;
6829
6830 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6831 if (!CHIP_IS_E1x(bp))
6832 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6833 else
6834 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6835 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6836 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6837
6838 /* mask read length error interrupts in brb for parser
6839 * (parsing unit and 'checksum and crc' unit)
6840 * these errors are legal (PU reads fixed length and CAC can cause
6841 * read length error on truncated packets)
6842 */
6843 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6844 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6845 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6846 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6847 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6848 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6849
6850
6851 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6852 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6853 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6854
6855
6856 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6857 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6858 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6859 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6860
6861
6862
6863 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6864 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6865 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6866 if (!CHIP_IS_E1x(bp))
6867 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6868 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6869 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6870
6871 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6872 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6873 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6874
6875
6876 if (!CHIP_IS_E1x(bp))
6877 /* enable VFC attentions: bits 11 and 12, bits 31:13 masked */
6878 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6879
6880 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6881 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6882
6883 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
6884}
6885
6886static void bnx2x_reset_common(struct bnx2x *bp)
6887{
6888 u32 val = 0x1400;
6889
6890 /* reset_common */
6891 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6892 0xd3ffff7f);
6893
6894 if (CHIP_IS_E3(bp)) {
6895 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6896 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6897 }
6898
6899 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6900}
6901
6902static void bnx2x_setup_dmae(struct bnx2x *bp)
6903{
6904 bp->dmae_ready = 0;
6905 spin_lock_init(&bp->dmae_lock);
6906}
6907
6908static void bnx2x_init_pxp(struct bnx2x *bp)
6909{
6910 u16 devctl;
6911 int r_order, w_order;
6912
6913 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6914 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6915 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6916 if (bp->mrrs == -1)
6917 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6918 else {
6919 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6920 r_order = bp->mrrs;
6921 }
6922
6923 bnx2x_init_pxp_arb(bp, r_order, w_order);
6924}
6925
6926static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6927{
6928 int is_required;
6929 u32 val;
6930 int port;
6931
6932 if (BP_NOMCP(bp))
6933 return;
6934
6935 is_required = 0;
6936 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6937 SHARED_HW_CFG_FAN_FAILURE_MASK;
6938
6939 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6940 is_required = 1;
6941
6942 /*
6943 * The fan failure mechanism is usually related to the PHY type since
6944 * the power consumption of the board is affected by the PHY. Currently,
6945 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6946 */
6947 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6948 for (port = PORT_0; port < PORT_MAX; port++) {
6949 is_required |=
6950 bnx2x_fan_failure_det_req(
6951 bp,
6952 bp->common.shmem_base,
6953 bp->common.shmem2_base,
6954 port);
6955 }
6956
6957 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6958
6959 if (is_required == 0)
6960 return;
6961
6962 /* Fan failure is indicated by SPIO 5 */
6963 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6964
6965 /* set to active low mode */
6966 val = REG_RD(bp, MISC_REG_SPIO_INT);
6967 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6968 REG_WR(bp, MISC_REG_SPIO_INT, val);
6969
6970 /* enable interrupt to signal the IGU */
6971 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6972 val |= MISC_SPIO_SPIO5;
6973 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6974}
6975
6976void bnx2x_pf_disable(struct bnx2x *bp)
6977{
6978 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6979 val &= ~IGU_PF_CONF_FUNC_EN;
6980
6981 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6982 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6983 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6984}
6985
6986static void bnx2x__common_init_phy(struct bnx2x *bp)
6987{
6988 u32 shmem_base[2], shmem2_base[2];
6989 /* Avoid common init in case MFW supports LFA */
6990 if (SHMEM2_RD(bp, size) >
6991 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6992 return;
6993 shmem_base[0] = bp->common.shmem_base;
6994 shmem2_base[0] = bp->common.shmem2_base;
6995 if (!CHIP_IS_E1x(bp)) {
6996 shmem_base[1] =
6997 SHMEM2_RD(bp, other_shmem_base_addr);
6998 shmem2_base[1] =
6999 SHMEM2_RD(bp, other_shmem2_base_addr);
7000 }
7001 bnx2x_acquire_phy_lock(bp);
7002 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
7003 bp->common.chip_id);
7004 bnx2x_release_phy_lock(bp);
7005}
7006
7007static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
7008{
7009 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
7010 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7011 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7012 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7013 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7014
7015 /* make sure this value is 0 */
7016 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7017
7018 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7019 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7020 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7021 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7022}
7023
7024static void bnx2x_set_endianity(struct bnx2x *bp)
7025{
7026#ifdef __BIG_ENDIAN
7027 bnx2x_config_endianity(bp, 1);
7028#else
7029 bnx2x_config_endianity(bp, 0);
7030#endif
7031}
7032
7033static void bnx2x_reset_endianity(struct bnx2x *bp)
7034{
7035 bnx2x_config_endianity(bp, 0);
7036}
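/* Presumably the reset path returns the PXP swap mode to the
 * little-endian default so that a subsequently booted kernel (e.g.
 * kdump) starts from known settings; the probe path re-applies the
 * host byte order via bnx2x_set_endianity().
 */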
7037
7038/**
7039 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
7040 *
7041 * @bp:	driver handle
7042 */
7043static int bnx2x_init_hw_common(struct bnx2x *bp)
7044{
7045 u32 val;
7046
7047 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
7048
7049 /*
7050 * take the RESET lock to protect undi_unload flow from accessing
7051 * registers while we're resetting the chip
7052 */
7053 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7054
7055 bnx2x_reset_common(bp);
7056 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7057
7058 val = 0xfffc;
7059 if (CHIP_IS_E3(bp)) {
7060 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7061 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7062 }
7063 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7064
7065 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7066
7067 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7068
7069 if (!CHIP_IS_E1x(bp)) {
7070 u8 abs_func_id;
7071
7072
7073 /* 4-port mode or 2-port mode we need to turn off master-enable
7074 * for everyone, after that, turn it back on for self.
7075 * so, we disregard multi-function or not, and always disable
7076 * for all functions on the given path, this means 0,2,4,6 for
7077 * path 0 and 1,3,5,7 for path 1
7078 */
7079 for (abs_func_id = BP_PATH(bp);
7080 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7081 if (abs_func_id == BP_ABS_FUNC(bp)) {
7082 REG_WR(bp,
7083 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7084 1);
7085 continue;
7086 }
7087
7088 bnx2x_pretend_func(bp, abs_func_id);
7089
7090 bnx2x_pf_disable(bp);
7091 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7092 }
7093 }
7094
7095 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7096 if (CHIP_IS_E1(bp)) {
7097 /* enable HW interrupt from PXP on USDM overflow
7098 bit 16 on INT_MASK_0 */
7099 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7100 }
7101
7102 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7103 bnx2x_init_pxp(bp);
7104 bnx2x_set_endianity(bp);
7105 bnx2x_ilt_init_page_size(bp, INITOP_SET);
7106
7107 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7108 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7109
7110 /* let the HW do it's magic ... */
7111 msleep(100);
7112 /* finish PXP init */
7113 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7114 if (val != 1) {
7115 BNX2X_ERR("PXP2 CFG failed\n");
7116 return -EBUSY;
7117 }
7118 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7119 if (val != 1) {
7120 BNX2X_ERR("PXP2 RD_INIT failed\n");
7121 return -EBUSY;
7122 }
7123
7124
7125 /* Timers bug workaround E2 only. We need to set the entire ILT to
7126 * have entries with value "0" and valid bit on.
7127 * This needs to be done by the first PF that is loaded in a path
7128 * (i.e. common phase) */
7129 if (!CHIP_IS_E1x(bp)) {
7130/* In E2 there is a bug in the timers block that can cause function
7131 * 6 / 7 (i.e. vnic3) to start even if it is marked as "scan-off".
7132 * This occurs when a different function (func2,3) is being marked
7133 * as "scan-off". Real-life scenario for example: if a driver is being
7134 * load-unloaded while func6,7 are down. This will cause the timer to access
7135 * the ilt, translate to a logical address and send a request to read/write.
7136 * Since the ilt for the function that is down is not valid, this will cause
7137 * a translation error which is unrecoverable.
7138 * The Workaround is intended to make sure that when this happens nothing
7139 * fatal will occur. The workaround:
7140 * 1. First PF driver which loads on a path will, using pretend,
7141 * clear the master-enable and the CFC weak/strong enables of the
7142 * other vnics, write zero+valid to the entire ILT and init the
7143 * timers first/last ILT entries of VNIC3 to cover the whole ILT,
7144 * preventing an ILT range error.
7145 * 2. Any PF driver load flow updates the ILT with the physical
7146 * addresses of its allocated logical pages, waits 20msec so that
7147 * no PXP internal queue still holds "old" ILT addresses, and only
7148 * then re-enables itself in the PGLC and CFC and enables the
7149 * timers scan.
7150 * 3. On unload the reverse is done: timers scan disabled and polled
7151 * to zero, PF disabled in the PXP and CFC, and zero+valid written
7152 * back to all of the function's ILT entries.
7153 */
7192 struct ilt_client_info ilt_cli;
7193 struct bnx2x_ilt ilt;
7194 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7195 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7196
7197 /* initialize dummy TM client */
7198 ilt_cli.start = 0;
7199 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7200 ilt_cli.client_num = ILT_CLIENT_TM;
7201
7202
7203 /* Step 1: set zeroes to all ilt page entries with valid bit on
7204 * Step 2: set the timers first/last ilt entry to point
7205 * to the entire range to prevent ILT range error for 3rd/4th
7206 * vnic (this code assumes existence of the vnic)
7207 *
7208 * both steps performed by call to bnx2x_ilt_client_init_op()
7209 * with dummy TM client
7210 *
7211 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
7212 * and his brother are split registers inside the ILT */
7213 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7214 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7215 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7216
7217 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7218 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7219 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7220 }
7221
7222 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7223 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7224
7225 if (!CHIP_IS_E1x(bp)) {
7226 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7227 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7228 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7229
7230 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7231
7232 /* let the HW do it's magic ... */
7233 do {
7234 msleep(200);
7235 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7236 } while (factor-- && (val != 1));
7237
7238 if (val != 1) {
7239 BNX2X_ERR("ATC_INIT failed\n");
7240 return -EBUSY;
7241 }
7242 }
7243
7244 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7245
7246 bnx2x_iov_init_dmae(bp);
7247
7248 /* clean the DMAE memory */
7249 bp->dmae_ready = 1;
7250 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7251
7252 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7253
7254 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7255
7256 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7257
7258 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7259
7260 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7261 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7262 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7263 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7264
7265 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7266
7267 /* QM queues pointers table */
7268 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7269
7270 /* soft reset pulse */
7271 REG_WR(bp, QM_REG_SOFT_RESET, 1);
7272 REG_WR(bp, QM_REG_SOFT_RESET, 0);
7273
7274 if (CNIC_SUPPORT(bp))
7275 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7276
7277 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7278
7279 if (!CHIP_REV_IS_SLOW(bp))
7280 /* enable hw interrupt from doorbell Q */
7281 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7282
7283 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7284
7285 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7286 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7287
7288 if (!CHIP_IS_E1(bp))
7289 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7290
7291 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7292 if (IS_MF_AFEX(bp)) {
7293 /* configure that VNTag and VLAN headers must be
7294 * received in afex mode
7295 */
7296 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7297 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7298 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7299 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7300 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7301 } else {
7302 /* Bit-map indicating which L2 hdrs may appear
7303 * after the basic Ethernet header
7304 */
7305 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7306 bp->path_has_ovlan ? 7 : 6);
7307 }
7308 }
7309
7310 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7311 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7312 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7313 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7314
7315 if (!CHIP_IS_E1x(bp)) {
7316 /* reset VFC memories */
7317 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7318 VFC_MEMORIES_RST_REG_CAM_RST |
7319 VFC_MEMORIES_RST_REG_RAM_RST);
7320 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7321 VFC_MEMORIES_RST_REG_CAM_RST |
7322 VFC_MEMORIES_RST_REG_RAM_RST);
7323
7324 msleep(20);
7325 }
7326
7327 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7328 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7329 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7330 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7331
7332 /* sync semi rtc */
7333 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7334 0x80000000);
7335 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7336 0x80000000);
7337
7338 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7339 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7340 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7341
7342 if (!CHIP_IS_E1x(bp)) {
7343 if (IS_MF_AFEX(bp)) {
7344 /* configure that VNTag and VLAN headers must be
7345 * sent in afex mode
7346 */
7347 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7348 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7349 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7350 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7351 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7352 } else {
7353 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7354 bp->path_has_ovlan ? 7 : 6);
7355 }
7356 }
7357
7358 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7359
7360 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7361
7362 if (CNIC_SUPPORT(bp)) {
7363 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7364 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7365 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7366 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7367 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7368 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7369 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7370 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7371 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7372 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7373 }
7374 REG_WR(bp, SRC_REG_SOFT_RST, 0);
7375
7376 if (sizeof(union cdu_context) != 1024)
7377 /* we currently assume that a context is 1024 bytes */
7378 dev_alert(&bp->pdev->dev,
7379 "please adjust the size of cdu_context(%ld)\n",
7380 (long)sizeof(union cdu_context));
7381
7382 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7383 val = (4 << 24) + (0 << 12) + 1024;
7384 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7385
7386 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7387 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7388 /* enable context validation interrupt from CFC */
7389 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7390
7391 /* set the thresholds to prevent CFC/CDU race */
7392 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7393
7394 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7395
7396 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7397 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7398
7399 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7400 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7401
7402 /* Reset PCIE errors for debug */
7403 REG_WR(bp, 0x2814, 0xffffffff);
7404 REG_WR(bp, 0x3820, 0xffffffff);
7405
7406 if (!CHIP_IS_E1x(bp)) {
7407 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7408 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7409 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7410 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7411 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7412 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7413 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7414 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7415 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7416 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7417 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7418 }
7419
7420 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7421 if (!CHIP_IS_E1(bp)) {
7422 /* in E3 this done in per-port section */
7423 if (!CHIP_IS_E3(bp))
7424 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7425 }
7426 if (CHIP_IS_E1H(bp))
7427 /* not applicable for E2 (and above ...) */
7428 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7429
7430 if (CHIP_REV_IS_SLOW(bp))
7431 msleep(200);
7432
7433 /* finish CFC init */
7434 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7435 if (val != 1) {
7436 BNX2X_ERR("CFC LL_INIT failed\n");
7437 return -EBUSY;
7438 }
7439 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7440 if (val != 1) {
7441 BNX2X_ERR("CFC AC_INIT failed\n");
7442 return -EBUSY;
7443 }
7444 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7445 if (val != 1) {
7446 BNX2X_ERR("CFC CAM_INIT failed\n");
7447 return -EBUSY;
7448 }
7449 REG_WR(bp, CFC_REG_DEBUG0, 0);
7450
7451 if (CHIP_IS_E1(bp)) {
7452 /* read NIG statistic
7453 to see if this is our first up since powerup */
7454 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7455 val = *bnx2x_sp(bp, wb_data[0]);
7456
7457 /* do internal memory self test */
7458 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7459 BNX2X_ERR("internal mem self test failed\n");
7460 return -EBUSY;
7461 }
7462 }
7463
7464 bnx2x_setup_fan_failure_detection(bp);
7465
7466 /* clear PXP2 attentions */
7467 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7468
7469 bnx2x_enable_blocks_attention(bp);
7470 bnx2x_enable_blocks_parity(bp);
7471
7472 if (!BP_NOMCP(bp)) {
7473 if (CHIP_IS_E1x(bp))
7474 bnx2x__common_init_phy(bp);
7475 } else
7476 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7477
7478 if (SHMEM2_HAS(bp, netproc_fw_ver))
7479 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7480
7481 return 0;
7482}
7483
7484/**
7485 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7486 *
7487 * @bp:	driver handle
7488 */
7489static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7490{
7491 int rc = bnx2x_init_hw_common(bp);
7492
7493 if (rc)
7494 return rc;
7495
7496 /* In E2 2-PORT mode, same ext phy is used for the two paths */
7497 if (!BP_NOMCP(bp))
7498 bnx2x__common_init_phy(bp);
7499
7500 return 0;
7501}
7502
7503static int bnx2x_init_hw_port(struct bnx2x *bp)
7504{
7505 int port = BP_PORT(bp);
7506 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7507 u32 low, high;
7508 u32 val, reg;
7509
7510 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7511
7512 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7513
7514 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7515 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7516 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7517
7518 /* Timers bug workaround: disables the pf_master bit in pglue at
7519 * common phase, we need to enable it here before any dmae access are
7520 * attempted. Therefore we manually added the enable-master to the
7521 * port phase (it also happens in the function phase)
7522 */
7523 if (!CHIP_IS_E1x(bp))
7524 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7525
7526 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7527 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7528 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7529 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7530
7531 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7532 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7533 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7534 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7535
7536 /* QM cid (connection) count */
7537 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7538
7539 if (CNIC_SUPPORT(bp)) {
7540 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7541 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7542 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7543 }
7544
7545 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7546
7547 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7548
7549 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7550
7551 if (IS_MF(bp))
7552 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7553 else if (bp->dev->mtu > 4096) {
7554 if (bp->flags & ONE_PORT_FLAG)
7555 low = 160;
7556 else {
7557 val = bp->dev->mtu;
7558 /* (24*1024 + val*4)/256 */
7559 low = 96 + (val/64) +
7560 ((val % 64) ? 1 : 0);
7561 }
7562 } else
7563 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7564 high = low + 56;
7565 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7566 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7567 }
7568
7569 if (CHIP_MODE_IS_4_PORT(bp))
7570 REG_WR(bp, (BP_PORT(bp) ?
7571 BRB1_REG_MAC_GUARANTIED_1 :
7572 BRB1_REG_MAC_GUARANTIED_0), 40);
7573
7574 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7575 if (CHIP_IS_E3B0(bp)) {
7576 if (IS_MF_AFEX(bp)) {
7577 /* configure headers for AFEX mode */
7578 REG_WR(bp, BP_PORT(bp) ?
7579 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7580 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7581 REG_WR(bp, BP_PORT(bp) ?
7582 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7583 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7584 REG_WR(bp, BP_PORT(bp) ?
7585 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7586 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7587 } else {
7588 /* Ovlan exists only if we are in multi-function +
7589 * switch-dependent mode, in switch-independent there
7590 * is no ovlan headers
7591 */
7592 REG_WR(bp, BP_PORT(bp) ?
7593 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7594 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7595 (bp->path_has_ovlan ? 7 : 6));
7596 }
7597 }
7598
7599 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7600 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7601 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7602 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7603
7604 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7605 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7606 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7607 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7608
7609 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7610 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7611
7612 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7613
7614 if (CHIP_IS_E1x(bp)) {
7615 /* configure PBF to work without PAUSE mtu 9000 */
7616 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7617
7618 /* update threshold */
7619 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7620 /* update init credit */
7621 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7622
7623 /* probe changes */
7624 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7625 udelay(50);
7626 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7627 }
7628
7629 if (CNIC_SUPPORT(bp))
7630 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7631
7632 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7633 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7634
7635 if (CHIP_IS_E1(bp)) {
7636 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7637 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7638 }
7639 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7640
7641 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7642
7643 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7644
7645 /* init aeu_mask_attn_func_0/1:
7646 * SF mode: bits 3-7 are masked; only bits 0-2 are in use
7647 * MF mode: bit 3 is masked; bits 4-7 carry per-vn group attention */
7648 val = IS_MF(bp) ? 0xF7 : 0x7;
7649 /* Enable DCBX attention for all but E1 */
7650 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7651 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7652
7653 /* SCPAD_PARITY should NOT trigger close the gates */
7654 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7655 REG_WR(bp, reg,
7656 REG_RD(bp, reg) &
7657 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7658
7659 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7660 REG_WR(bp, reg,
7661 REG_RD(bp, reg) &
7662 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7663
7664 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7665
7666 if (!CHIP_IS_E1x(bp)) {
7667 /* Bit-map indicating which L2 hdrs may appear after the
7668 * basic Ethernet header
7669 */
7670 if (IS_MF_AFEX(bp))
7671 REG_WR(bp, BP_PORT(bp) ?
7672 NIG_REG_P1_HDRS_AFTER_BASIC :
7673 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7674 else
7675 REG_WR(bp, BP_PORT(bp) ?
7676 NIG_REG_P1_HDRS_AFTER_BASIC :
7677 NIG_REG_P0_HDRS_AFTER_BASIC,
7678 IS_MF_SD(bp) ? 7 : 6);
7679
7680 if (CHIP_IS_E3(bp))
7681 REG_WR(bp, BP_PORT(bp) ?
7682 NIG_REG_LLH1_MF_MODE :
7683 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7684 }
7685 if (!CHIP_IS_E3(bp))
7686 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7687
7688 if (!CHIP_IS_E1(bp)) {
7689 /* 0x2 disable mf_ov, 0x1 enable */
7690 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7691 (IS_MF_SD(bp) ? 0x1 : 0x2));
7692
7693 if (!CHIP_IS_E1x(bp)) {
7694 val = 0;
7695 switch (bp->mf_mode) {
7696 case MULTI_FUNCTION_SD:
7697 val = 1;
7698 break;
7699 case MULTI_FUNCTION_SI:
7700 case MULTI_FUNCTION_AFEX:
7701 val = 2;
7702 break;
7703 }
7704
7705 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7706 NIG_REG_LLH0_CLS_TYPE), val);
7707 }
7708 {
7709 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7710 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7711 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7712 }
7713 }
7714
7715 /* If SPIO5 is set to generate interrupts, enable it for this port */
7716 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7717 if (val & MISC_SPIO_SPIO5) {
7718 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7719 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7720 val = REG_RD(bp, reg_addr);
7721 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7722 REG_WR(bp, reg_addr, val);
7723 }
7724
7725 if (CHIP_IS_E3B0(bp))
7726 bp->flags |= PTP_SUPPORTED;
7727
7728 return 0;
7729}
7730
7731static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7732{
7733 int reg;
7734 u32 wb_write[2];
7735
7736 if (CHIP_IS_E1(bp))
7737 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7738 else
7739 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7740
7741 wb_write[0] = ONCHIP_ADDR1(addr);
7742 wb_write[1] = ONCHIP_ADDR2(addr);
7743 REG_WR_DMAE(bp, reg, wb_write, 2);
7744}
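/* The two words written above form one ILT entry; going by the
 * ONCHIP_ADDR1()/ONCHIP_ADDR2() macros in bnx2x.h, the low word
 * carries the 4K-aligned page address (addr >> 12) and the high word
 * the remaining address bits plus a valid bit, so writing addr == 0
 * (as bnx2x_clear_func_ilt() does) leaves "zero + valid" entries.
 */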
7745
7746void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7747{
7748 u32 data, ctl, cnt = 100;
7749 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7750 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7751 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7752 u32 sb_bit = 1 << (idu_sb_id%32);
7753 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7754 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7755
7756 /* Not supported in BC mode */
7757 if (CHIP_INT_MODE_IS_BC(bp))
7758 return;
7759
7760 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7761 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7762 IGU_REGULAR_CLEANUP_SET |
7763 IGU_REGULAR_BCLEANUP;
7764
7765 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7766 func_encode << IGU_CTRL_REG_FID_SHIFT |
7767 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7768
7769 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7770 data, igu_addr_data);
7771 REG_WR(bp, igu_addr_data, data);
7772 barrier();
7773 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7774 ctl, igu_addr_ctl);
7775 REG_WR(bp, igu_addr_ctl, ctl);
7776 barrier();
7777
7778 /* wait for clean up to finish */
7779 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7780 msleep(20);
7781
7782 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7783 DP(NETIF_MSG_HW,
7784 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7785 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7786 }
7787}
7788
7789static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7790{
7791 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7792}
7793
7794static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7795{
7796 u32 i, base = FUNC_ILT_BASE(func);
7797 for (i = base; i < base + ILT_PER_FUNC; i++)
7798 bnx2x_ilt_wr(bp, i, 0);
7799}
7800
7801static void bnx2x_init_searcher(struct bnx2x *bp)
7802{
7803 int port = BP_PORT(bp);
7804 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7805 /* T1 hash bits value determines the T1 number of entries */
7806 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7807}
7808
7809static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7810{
7811 int rc;
7812 struct bnx2x_func_state_params func_params = {NULL};
7813 struct bnx2x_func_switch_update_params *switch_update_params =
7814 &func_params.params.switch_update;
7815
7816 /* Prepare parameters for function state transitions */
7817 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7818 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7819
7820 func_params.f_obj = &bp->func_obj;
7821 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7822
7823 /* Function parameters */
7824 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7825 &switch_update_params->changes);
7826 if (suspend)
7827 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7828 &switch_update_params->changes);
7829
7830 rc = bnx2x_func_state_change(bp, &func_params);
7831
7832 return rc;
7833}
7834
7835static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7836{
7837 int rc, i, port = BP_PORT(bp);
7838 int vlan_en = 0, mac_en[NUM_MACS];
7839
7840 /* Close input from network */
7841 if (bp->mf_mode == SINGLE_FUNCTION) {
7842 bnx2x_set_rx_filter(&bp->link_params, 0);
7843 } else {
7844 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7845 NIG_REG_LLH0_FUNC_EN);
7846 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7847 NIG_REG_LLH0_FUNC_EN, 0);
7848 for (i = 0; i < NUM_MACS; i++) {
7849 mac_en[i] = REG_RD(bp, port ?
7850 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7851 4 * i) :
7852 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7853 4 * i));
7854 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7855 4 * i) :
7856 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7857 }
7858 }
7859
7860 /* Close BMC to host */
7861 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7862 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7863
7864 /* Suspend Tx switching to the PF. Completion of this ramrod
7865 * further guarantees that all the packets of that PF / child
7866 * VFs in BRB were processed by the Parser, so it is safe to
7867 * change the NIC_MODE register.
7868 */
7869 rc = bnx2x_func_switch_update(bp, 1);
7870 if (rc) {
7871 BNX2X_ERR("Can't suspend tx-switching!\n");
7872 return rc;
7873 }
7874
7875 /* Change NIC mode */
7876 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7877
7878 /* Open input from network */
7879 if (bp->mf_mode == SINGLE_FUNCTION) {
7880 bnx2x_set_rx_filter(&bp->link_params, 1);
7881 } else {
7882 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7883 NIG_REG_LLH0_FUNC_EN, vlan_en);
7884 for (i = 0; i < NUM_MACS; i++) {
7885 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7886 4 * i) :
7887 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7888 mac_en[i]);
7889 }
7890 }
7891
7892 /* Enable BMC to host */
7893 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7894 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7895
7896 /* Resume Tx switching to the PF */
7897 rc = bnx2x_func_switch_update(bp, 0);
7898 if (rc) {
7899 BNX2X_ERR("Can't resume tx-switching!\n");
7900 return rc;
7901 }
7902
7903 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7904 return 0;
7905}
7906
7907int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7908{
7909 int rc;
7910
7911 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7912
7913 if (CONFIGURE_NIC_MODE(bp)) {
7914 /* Configure searcher as part of function hw init */
7915 bnx2x_init_searcher(bp);
7916
7917 /* Reset NIC mode */
7918 rc = bnx2x_reset_nic_mode(bp);
7919 if (rc)
7920 BNX2X_ERR("Can't change NIC mode!\n");
7921 return rc;
7922 }
7923
7924 return 0;
7925}
7926
7927/* previous driver DMAE transaction may have occurred when pre-boot stage
7928 * ended and boot began, or when kdump kernel was loaded. Either case would
7929 * invalidate the addresses of the transaction, resulting in was-error bit
7930 * set in the pci causing all hw-to-host pcie transactions to timeout. If
7931 * this happened we want to clear the interrupt which detected this from
7932 * the pglueb and the was-done bit
7933 */
7934static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7935{
7936 if (!CHIP_IS_E1x(bp))
7937 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7938 1 << BP_ABS_FUNC(bp));
7939}
7940
7941static int bnx2x_init_hw_func(struct bnx2x *bp)
7942{
7943 int port = BP_PORT(bp);
7944 int func = BP_FUNC(bp);
7945 int init_phase = PHASE_PF0 + func;
7946 struct bnx2x_ilt *ilt = BP_ILT(bp);
7947 u16 cdu_ilt_start;
7948 u32 addr, val;
7949 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7950 int i, main_mem_width, rc;
7951
7952 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7953
7954 /* FLR cleanup */
7955 if (!CHIP_IS_E1x(bp)) {
7956 rc = bnx2x_pf_flr_clnup(bp);
7957 if (rc) {
7958 bnx2x_fw_dump(bp);
7959 return rc;
7960 }
7961 }
7962
7963 /* set MSI reconfigure capability */
7964 if (bp->common.int_block == INT_BLOCK_HC) {
7965 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7966 val = REG_RD(bp, addr);
7967 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7968 REG_WR(bp, addr, val);
7969 }
7970
7971 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7972 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7973
7974 ilt = BP_ILT(bp);
7975 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7976
7977 if (IS_SRIOV(bp))
7978 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7979 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7980
7981 /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
7982 * those of the VFs, so start line should be reset
7983 */
7984 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7985 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7986 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7987 ilt->lines[cdu_ilt_start + i].page_mapping =
7988 bp->context[i].cxt_mapping;
7989 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7990 }
7991
7992 bnx2x_ilt_init_op(bp, INITOP_SET);
7993
7994 if (!CONFIGURE_NIC_MODE(bp)) {
7995 bnx2x_init_searcher(bp);
7996 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7997 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7998 } else {
7999 /* Set NIC mode */
8000 REG_WR(bp, PRS_REG_NIC_MODE, 1);
8001 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
8002 }
8003
8004 if (!CHIP_IS_E1x(bp)) {
8005 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
8006
8007
8008 /* Turn on a single ISR mode in IGU if driver is going to use
8009 * INT#x or MSI */
8010 if (!(bp->flags & USING_MSIX_FLAG))
8011 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
8012
8013
8014 /* Timers workaround bug: function init part.
8015 * Need to wait 20msec after initializing ILT,
8016 * needed to make sure there are no requests in
8017 * one of the PXP internal queues with "old" ILT addresses */
8018 msleep(20);
8019
8020 /* Master enable - Due to WB DMAE writes performed before this
8021 * register is re-initialized as part of the regular function
8022 * init
8023 */
8024 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8025 /* Enable the function in IGU */
8026 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8027 }
8028
8029 bp->dmae_ready = 1;
8030
8031 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8032
8033 bnx2x_clean_pglue_errors(bp);
8034
8035 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8036 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8037 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8038 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8039 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8040 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8041 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8042 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8043 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8044 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8045 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8046 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8047 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8048
8049 if (!CHIP_IS_E1x(bp))
8050 REG_WR(bp, QM_REG_PF_EN, 1);
8051
8052 if (!CHIP_IS_E1x(bp)) {
8053 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8054 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8055 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8056 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8057 }
8058 bnx2x_init_block(bp, BLOCK_QM, init_phase);
8059
8060 bnx2x_init_block(bp, BLOCK_TM, init_phase);
8061 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8062 REG_WR(bp, DORQ_REG_MODE_ACT, 1);
8063
8064 bnx2x_iov_init_dq(bp);
8065
8066 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8067 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8068 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8069 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8070 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8071 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8072 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8073 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8074 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8075 if (!CHIP_IS_E1x(bp))
8076 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8077
8078 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8079
8080 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8081
8082 if (!CHIP_IS_E1x(bp))
8083 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8084
8085 if (IS_MF(bp)) {
8086 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8087 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8088 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8089 bp->mf_ov);
8090 }
8091 }
8092
8093 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8094
8095 /* HC init per function */
8096 if (bp->common.int_block == INT_BLOCK_HC) {
8097 if (CHIP_IS_E1H(bp)) {
8098 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8099
8100 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8101 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8102 }
8103 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8104
8105 } else {
8106 int num_segs, sb_idx, prod_offset;
8107
8108 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8109
8110 if (!CHIP_IS_E1x(bp)) {
8111 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8112 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8113 }
8114
8115 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8116
8117 if (!CHIP_IS_E1x(bp)) {
8118 int dsb_idx = 0;
8119
8120 /* Producer memory:
8121 * E2 mode: address 0-135 match to the mapping memory;
8122 * 136 - PF0 default prod; 137 - PF1 default prod;
8123 * 138 - PF2 default prod; 139 - PF3 default prod;
8124 * 140 - PF0 attn prod; 141 - PF1 attn prod;
8125 * 142 - PF2 attn prod; 143 - PF3 attn prod;
8126 * 144-147 reserved.
8127 *
8128 * E1.5 mode - In backward compatible mode;
8129 * for non default SB; each even line in the memory
8130 * holds the U producer and each odd line hold
8131 * the C producer. The first 128 producers are for
8132 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
8133 * producers are for the DSB for each PF.
8134 * Each PF has five segments: (the order inside each
8135 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
8136 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
8137 * 144-147 attn prods;
8138 */
8139 /* non-default-status-blocks */
8140 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8141 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8142 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8143 prod_offset = (bp->igu_base_sb + sb_idx) *
8144 num_segs;
8145
8146 for (i = 0; i < num_segs; i++) {
8147 addr = IGU_REG_PROD_CONS_MEMORY +
8148 (prod_offset + i) * 4;
8149 REG_WR(bp, addr, 0);
8150 }
8151 /* send consumer update with value 0 */
8152 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8153 USTORM_ID, 0, IGU_INT_NOP, 1);
8154 bnx2x_igu_clear_sb(bp,
8155 bp->igu_base_sb + sb_idx);
8156 }
8157
8158 /* default-status-blocks */
8159 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8160 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8161
8162 if (CHIP_MODE_IS_4_PORT(bp))
8163 dsb_idx = BP_FUNC(bp);
8164 else
8165 dsb_idx = BP_VN(bp);
8166
8167 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8168 IGU_BC_BASE_DSB_PROD + dsb_idx :
8169 IGU_NORM_BASE_DSB_PROD + dsb_idx);
8170
8171 /*
8172 * igu prods come in chunks of E1HVN_MAX (4) -
8173 * does not matters what is the current chip mode
8174 */
8175 for (i = 0; i < (num_segs * E1HVN_MAX);
8176 i += E1HVN_MAX) {
8177 addr = IGU_REG_PROD_CONS_MEMORY +
8178 (prod_offset + i)*4;
8179 REG_WR(bp, addr, 0);
8180 }
8181
8182 if (CHIP_INT_MODE_IS_BC(bp)) {
8183 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8184 USTORM_ID, 0, IGU_INT_NOP, 1);
8185 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8186 CSTORM_ID, 0, IGU_INT_NOP, 1);
8187 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8188 XSTORM_ID, 0, IGU_INT_NOP, 1);
8189 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8190 TSTORM_ID, 0, IGU_INT_NOP, 1);
8191 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8192 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8193 } else {
8194 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8195 USTORM_ID, 0, IGU_INT_NOP, 1);
8196 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8197 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8198 }
8199 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8200
8201 /* !!! These should become driver const once
8202 rf-tool supports split-68 const */
8203 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8204 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8205 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8206 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8207 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8208 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8209 }
8210 }
8211
8212 /* Reset PCIE errors for debug */
8213 REG_WR(bp, 0x2114, 0xffffffff);
8214 REG_WR(bp, 0x2120, 0xffffffff);
8215
8216 if (CHIP_IS_E1x(bp)) {
8217 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
8218 main_mem_base = HC_REG_MAIN_MEMORY +
8219 BP_PORT(bp) * (main_mem_size * 4);
8220 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8221 main_mem_width = 8;
8222
8223 val = REG_RD(bp, main_mem_prty_clr);
8224 if (val)
8225 DP(NETIF_MSG_HW,
8226 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8227 val);
8228
8229 /* Clear "false" parity errors in MSI-X table */
8230 for (i = main_mem_base;
8231 i < main_mem_base + main_mem_size * 4;
8232 i += main_mem_width) {
8233 bnx2x_read_dmae(bp, i, main_mem_width / 4);
8234 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8235 i, main_mem_width / 4);
8236 }
8237 /* Clear HC parity attention */
8238 REG_RD(bp, main_mem_prty_clr);
8239 }
8240
8241#ifdef BNX2X_STOP_ON_ERROR
8242 /* Enable STORMs SP logging */
8243 REG_WR8(bp, BAR_USTRORM_INTMEM +
8244 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8245 REG_WR8(bp, BAR_TSTRORM_INTMEM +
8246 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8247 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8248 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8249 REG_WR8(bp, BAR_XSTRORM_INTMEM +
8250 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8251#endif
8252
8253 bnx2x_phy_probe(&bp->link_params);
8254
8255 return 0;
8256}
8257
8258void bnx2x_free_mem_cnic(struct bnx2x *bp)
8259{
8260 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8261
8262 if (!CHIP_IS_E1x(bp))
8263 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8264 sizeof(struct host_hc_status_block_e2));
8265 else
8266 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8267 sizeof(struct host_hc_status_block_e1x));
8268
8269 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8270}
8271
8272void bnx2x_free_mem(struct bnx2x *bp)
8273{
8274 int i;
8275
8276 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8277 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8278
8279 if (IS_VF(bp))
8280 return;
8281
8282 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8283 sizeof(struct host_sp_status_block));
8284
8285 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8286 sizeof(struct bnx2x_slowpath));
8287
8288 for (i = 0; i < L2_ILT_LINES(bp); i++)
8289 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8290 bp->context[i].size);
8291 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8292
8293 BNX2X_FREE(bp->ilt->lines);
8294
8295 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8296
8297 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8298 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8299
8300 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8301
8302 bnx2x_iov_free_mem(bp);
8303}
8304
8305int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8306{
8307 if (!CHIP_IS_E1x(bp)) {
8308 /* size = the status block + ramrod buffers */
8309 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8310 sizeof(struct host_hc_status_block_e2));
8311 if (!bp->cnic_sb.e2_sb)
8312 goto alloc_mem_err;
8313 } else {
8314 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8315 sizeof(struct host_hc_status_block_e1x));
8316 if (!bp->cnic_sb.e1x_sb)
8317 goto alloc_mem_err;
8318 }
8319
8320 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8321 /* allocate searcher T2 table, as it wasn't allocated before */
8322 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8323 if (!bp->t2)
8324 goto alloc_mem_err;
8325 }
8326
8327 /* write address to which L5 should insert its values */
8328 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8329 &bp->slowpath->drv_info_to_mcp;
8330
8331 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8332 goto alloc_mem_err;
8333
8334 return 0;
8335
8336alloc_mem_err:
8337 bnx2x_free_mem_cnic(bp);
8338 BNX2X_ERR("Can't allocate memory\n");
8339 return -ENOMEM;
8340}
8341
8342int bnx2x_alloc_mem(struct bnx2x *bp)
8343{
8344 int i, allocated, context_size;
8345
8346 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8347 /* allocate searcher T2 table */
8348 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8349 if (!bp->t2)
8350 goto alloc_mem_err;
8351 }
8352
8353 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8354 sizeof(struct host_sp_status_block));
8355 if (!bp->def_status_blk)
8356 goto alloc_mem_err;
8357
8358 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8359 sizeof(struct bnx2x_slowpath));
8360 if (!bp->slowpath)
8361 goto alloc_mem_err;
8362
8363 /* Allocate memory for CDU context:
8364 * This memory is allocated separately and not in the generic ILT
8365 * functions because CDU differs in few aspects:
8366 * 1. There are multiple entities allocating memory for context -
8367 * regular L2, CNIC, and SRIOV - and each separately controls its
8368 * own set of ILT lines.
8369 * 2. Since CNIC can acquire extra ILT lines for its pages, the
8370 * number of lines the L2 contexts need is not known up front, so
8371 * the memory is requested in CDU_ILT_PAGE_SZ chunks below, as
8372 * many as are needed to cover BNX2X_L2_CID_COUNT(bp) contexts
8373 * (the last chunk may be smaller than CDU_ILT_PAGE_SZ).
8374 */
8376 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8377
8378 for (i = 0, allocated = 0; allocated < context_size; i++) {
8379 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8380 (context_size - allocated));
8381 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8382 bp->context[i].size);
8383 if (!bp->context[i].vcxt)
8384 goto alloc_mem_err;
8385 allocated += bp->context[i].size;
8386 }
8387 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8388 GFP_KERNEL);
8389 if (!bp->ilt->lines)
8390 goto alloc_mem_err;
8391
8392 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8393 goto alloc_mem_err;
8394
8395 if (bnx2x_iov_alloc_mem(bp))
8396 goto alloc_mem_err;
8397
8398 /* Slow path ring */
8399 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8400 if (!bp->spq)
8401 goto alloc_mem_err;
8402
8403 /* EQ */
8404 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8405 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8406 if (!bp->eq_ring)
8407 goto alloc_mem_err;
8408
8409 return 0;
8410
8411alloc_mem_err:
8412 bnx2x_free_mem(bp);
8413 BNX2X_ERR("Can't allocate memory\n");
8414 return -ENOMEM;
8415}
8416
8417
8418/*
8419 * Init service functions
8420 */
8421int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8422 struct bnx2x_vlan_mac_obj *obj, bool set,
8423 int mac_type, unsigned long *ramrod_flags)
8424{
8425 int rc;
8426 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8427
8428 memset(&ramrod_param, 0, sizeof(ramrod_param));
8429
8430 /* Fill general parameters */
8431 ramrod_param.vlan_mac_obj = obj;
8432 ramrod_param.ramrod_flags = *ramrod_flags;
8433
8434 /* Fill a user request section if needed */
8435 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8436 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8437
8438 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8439
8440 /* Set the command: ADD or DEL */
8441 if (set)
8442 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8443 else
8444 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8445 }
8446
8447 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8448
8449 if (rc == -EEXIST) {
8450 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8451 /* do not treat adding same MAC as error */
8452 rc = 0;
8453 } else if (rc < 0)
8454 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8455
8456 return rc;
8457}
8458
8459int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8460 struct bnx2x_vlan_mac_obj *obj, bool set,
8461 unsigned long *ramrod_flags)
8462{
8463 int rc;
8464 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8465
8466 memset(&ramrod_param, 0, sizeof(ramrod_param));
8467
8468 /* Fill general parameters */
8469 ramrod_param.vlan_mac_obj = obj;
8470 ramrod_param.ramrod_flags = *ramrod_flags;
8471
8472 /* Fill a user request section if needed */
8473 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8474 ramrod_param.user_req.u.vlan.vlan = vlan;
8475 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
8476 /* Set the command: ADD or DEL */
8477 if (set)
8478 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8479 else
8480 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8481 }
8482
8483 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8484
8485 if (rc == -EEXIST) {
8486 /* do not treat adding same vlan as error */
8487 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8488 rc = 0;
8489 } else if (rc < 0) {
8490 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8491 }
8492
8493 return rc;
8494}
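/* Typical (illustrative) usage mirrors bnx2x_set_eth_mac() below:
 *
 *	unsigned long ramrod_flags = 0;
 *
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs[0].vlan_obj,
 *				true, &ramrod_flags);
 *
 * i.e. callers needing synchronous behaviour set RAMROD_COMP_WAIT so
 * the call only returns once the ramrod completes.
 */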
8495
8496void bnx2x_clear_vlan_info(struct bnx2x *bp)
8497{
8498 struct bnx2x_vlan_entry *vlan;
8499
8500 /* Mark that hw forgot all entries */
8501 list_for_each_entry(vlan, &bp->vlan_reg, link)
8502 vlan->hw = false;
8503
8504 bp->vlan_cnt = 0;
8505}
8506
8507static int bnx2x_del_all_vlans(struct bnx2x *bp)
8508{
8509 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8510 unsigned long ramrod_flags = 0, vlan_flags = 0;
8511 int rc;
8512
8513 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8514 __set_bit(BNX2X_VLAN, &vlan_flags);
8515 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
8516 if (rc)
8517 return rc;
8518
8519 bnx2x_clear_vlan_info(bp);
8520
8521 return 0;
8522}
8523
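/**
 * bnx2x_del_all_macs - delete all configured MACs of a given type
 *
 * @bp:			driver handle
 * @mac_obj:		MAC object to flush
 * @mac_type:		type of MACs to delete (e.g. BNX2X_ETH_MAC)
 * @wait_for_comp:	if 'true' block until the deletion completes
 */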
8524int bnx2x_del_all_macs(struct bnx2x *bp,
8525 struct bnx2x_vlan_mac_obj *mac_obj,
8526 int mac_type, bool wait_for_comp)
8527{
8528 int rc;
8529 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;

	/* Wait for completion of requested */
	if (wait_for_comp)
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	/* Set the mac type of addresses we want to clear */
	__set_bit(mac_type, &vlan_mac_flags);
8537
8538 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8539 if (rc < 0)
8540 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8541
8542 return rc;
8543}
8544
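/* Configure the device's primary Ethernet MAC: a PF programs it through the
 * MAC object, a VF requests it from the PF over the VF-PF channel.
 */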
8545int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8546{
8547 if (IS_PF(bp)) {
8548 unsigned long ramrod_flags = 0;
8549
8550 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8551 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8552 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8553 &bp->sp_objs->mac_obj, set,
8554 BNX2X_ETH_MAC, &ramrod_flags);
8555 } else {
8556 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8557 bp->fp->index, set);
8558 }
8559}
8560
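/* Bring up the leading queue (fp[0]); VFs do it over the VF-PF channel */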
8561int bnx2x_setup_leading(struct bnx2x *bp)
8562{
8563 if (IS_PF(bp))
8564 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8565 else
8566 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8567}
8568
/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 */
8576int bnx2x_set_int_mode(struct bnx2x *bp)
8577{
8578 int rc = 0;
8579
8580 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
		BNX2X_ERR("VF not loaded since interrupt mode is not MSI-X\n");
8582 return -EINVAL;
8583 }
8584
8585 switch (int_mode) {
8586 case BNX2X_INT_MODE_MSIX:
		/* attempt to enable msix */
8588 rc = bnx2x_enable_msix(bp);

		/* msix attained */
8591 if (!rc)
8592 return 0;

		/* VFs are only allowed to use MSI-X */
8595 if (rc && IS_VF(bp))
8596 return rc;
8597
		/* failed to enable multiple MSI-X */
8599 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8600 bp->num_queues,
8601 1 + bp->num_cnic_queues);
8602
8603 fallthrough;
8604 case BNX2X_INT_MODE_MSI:
8605 bnx2x_enable_msi(bp);
8606
8607 fallthrough;
8608 case BNX2X_INT_MODE_INTX:
8609 bp->num_ethernet_queues = 1;
8610 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8611 BNX2X_DEV_INFO("set number of queues to 1\n");
8612 break;
8613 default:
8614 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8615 return -EINVAL;
8616 }
8617 return 0;
8618}
8619
/* Number of ILT lines needed to cover this function's CIDs (incl. VF CIDs) */
8621static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8622{
8623 if (IS_SRIOV(bp))
8624 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8625 return L2_ILT_LINES(bp);
8626}
8627
8628void bnx2x_ilt_set_info(struct bnx2x *bp)
8629{
8630 struct ilt_client_info *ilt_client;
8631 struct bnx2x_ilt *ilt = BP_ILT(bp);
8632 u16 line = 0;
8633
8634 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8635 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8636
	/* CDU */
8638 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8639 ilt_client->client_num = ILT_CLIENT_CDU;
8640 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8641 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8642 ilt_client->start = line;
8643 line += bnx2x_cid_ilt_lines(bp);
8644
8645 if (CNIC_SUPPORT(bp))
8646 line += CNIC_ILT_LINES;
8647 ilt_client->end = line - 1;
8648
8649 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8650 ilt_client->start,
8651 ilt_client->end,
8652 ilt_client->page_size,
8653 ilt_client->flags,
8654 ilog2(ilt_client->page_size >> 12));
8655
	/* QM */
8657 if (QM_INIT(bp->qm_cid_count)) {
8658 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8659 ilt_client->client_num = ILT_CLIENT_QM;
8660 ilt_client->page_size = QM_ILT_PAGE_SZ;
8661 ilt_client->flags = 0;
8662 ilt_client->start = line;
8663
		/* 4 bytes for each cid */
8665 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8666 QM_ILT_PAGE_SZ);
8667
8668 ilt_client->end = line - 1;
8669
8670 DP(NETIF_MSG_IFUP,
8671 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8672 ilt_client->start,
8673 ilt_client->end,
8674 ilt_client->page_size,
8675 ilt_client->flags,
8676 ilog2(ilt_client->page_size >> 12));
8677 }
8678
8679 if (CNIC_SUPPORT(bp)) {
		/* SRC */
8681 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8682 ilt_client->client_num = ILT_CLIENT_SRC;
8683 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8684 ilt_client->flags = 0;
8685 ilt_client->start = line;
8686 line += SRC_ILT_LINES;
8687 ilt_client->end = line - 1;
8688
8689 DP(NETIF_MSG_IFUP,
8690 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8691 ilt_client->start,
8692 ilt_client->end,
8693 ilt_client->page_size,
8694 ilt_client->flags,
8695 ilog2(ilt_client->page_size >> 12));
8696
		/* TM */
8698 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8699 ilt_client->client_num = ILT_CLIENT_TM;
8700 ilt_client->page_size = TM_ILT_PAGE_SZ;
8701 ilt_client->flags = 0;
8702 ilt_client->start = line;
8703 line += TM_ILT_LINES;
8704 ilt_client->end = line - 1;
8705
8706 DP(NETIF_MSG_IFUP,
8707 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8708 ilt_client->start,
8709 ilt_client->end,
8710 ilt_client->page_size,
8711 ilt_client->flags,
8712 ilog2(ilt_client->page_size >> 12));
8713 }
8714
8715 BUG_ON(line > ILT_MAX_LINES);
8716}
8717
/**
 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @init_params: pointer to parameters structure
 *
 * parameters configured:
 *      - HC configuration
 *      - Queue's CDU context
 */
8729static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8730 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8731{
8732 u8 cos;
8733 int cxt_index, cxt_offset;
8734
	/* the FCoE L2 queue does not use host coalescing */
8736 if (!IS_FCOE_FP(fp)) {
8737 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8738 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8739
		/* If HC is supported, enable host coalescing in the transition
		 * to INIT state.
		 */
8743 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8744 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8745
		/* HC rate */
8747 init_params->rx.hc_rate = bp->rx_ticks ?
8748 (1000000 / bp->rx_ticks) : 0;
8749 init_params->tx.hc_rate = bp->tx_ticks ?
8750 (1000000 / bp->tx_ticks) : 0;
8751
		/* FW SB ID */
8753 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8754 fp->fw_sb_id;
8755
		/* CQ index among the SB indices: FCoE clients use the default
		 * SB, therefore it's different.
		 */
8760 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8761 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8762 }
8763
	/* set maximum number of COSs supported by this queue */
8765 init_params->max_cos = fp->max_cos;
8766
8767 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8768 fp->index, init_params->max_cos);
8769
	/* set the context pointers queue object */
8771 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8772 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8773 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8774 ILT_PAGE_CIDS);
8775 init_params->cxts[cos] =
8776 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8777 }
8778}
8779
8780static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8781 struct bnx2x_queue_state_params *q_params,
8782 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8783 int tx_index, bool leading)
8784{
8785 memset(tx_only_params, 0, sizeof(*tx_only_params));
8786
	/* Set the command */
8788 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8789
	/* Set tx-only QUEUE flags: don't zero statistics */
8791 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8792
	/* choose the index of the cid to send the slow path on */
8794 tx_only_params->cid_index = tx_index;
8795
	/* Set general TX_ONLY_SETUP parameters */
8797 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8798
	/* Set Tx TX_ONLY_SETUP parameters */
8800 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8801
8802 DP(NETIF_MSG_IFUP,
8803 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8804 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8805 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8806 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8807
	/* send the ramrod */
8809 return bnx2x_queue_state_change(bp, q_params);
8810}
8811
/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	is leading
 *
 * This function performs 2 steps in a Queue state machine
 *      actually: 1) RESET->INIT 2) INIT->SETUP
 */
8823int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8824 bool leading)
8825{
8826 struct bnx2x_queue_state_params q_params = {NULL};
8827 struct bnx2x_queue_setup_params *setup_params =
8828 &q_params.params.setup;
8829 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8830 &q_params.params.tx_only;
8831 int rc;
8832 u8 tx_index;
8833
8834 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8835
	/* reset IGU state; skip the FCoE L2 queue */
8837 if (!IS_FCOE_FP(fp))
8838 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8839 IGU_INT_ENABLE, 0);
8840
8841 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
8843 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8844
	/* Prepare the INIT parameters */
8846 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8847
	/* Set the command */
8849 q_params.cmd = BNX2X_Q_CMD_INIT;
8850
	/* Change the state to INIT */
8852 rc = bnx2x_queue_state_change(bp, &q_params);
8853 if (rc) {
8854 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8855 return rc;
8856 }
8857
8858 DP(NETIF_MSG_IFUP, "init complete\n");
8859
	/* Now move the Queue to the SETUP state */
8861 memset(setup_params, 0, sizeof(*setup_params));
8862
	/* Set QUEUE flags */
8864 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8865
	/* Set general SETUP parameters */
8867 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8868 FIRST_TX_COS_INDEX);
8869
8870 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8871 &setup_params->rxq_params);
8872
8873 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8874 FIRST_TX_COS_INDEX);
8875
	/* Set the command */
8877 q_params.cmd = BNX2X_Q_CMD_SETUP;
8878
8879 if (IS_FCOE_FP(fp))
8880 bp->fcoe_init = true;
8881
	/* Change the state to SETUP */
8883 rc = bnx2x_queue_state_change(bp, &q_params);
8884 if (rc) {
8885 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8886 return rc;
8887 }
8888
	/* loop through the relevant tx-only indices */
8890 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8891 tx_index < fp->max_cos;
8892 tx_index++) {
8893
		/* prepare and send tx-only ramrod */
8895 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8896 tx_only_params, tx_index, leading);
8897 if (rc) {
8898 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8899 fp->index, tx_index);
8900 return rc;
8901 }
8902 }
8903
8904 return rc;
8905}
8906
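/* Shut down an eth queue: close the tx-only connections first, then send
 * HALT, TERMINATE and CFC_DEL on the primary connection.
 */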
8907static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8908{
8909 struct bnx2x_fastpath *fp = &bp->fp[index];
8910 struct bnx2x_fp_txdata *txdata;
8911 struct bnx2x_queue_state_params q_params = {NULL};
8912 int rc, tx_index;
8913
8914 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8915
8916 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
8918 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8919
	/* close tx-only connections */
8921 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8922 tx_index < fp->max_cos;
8923 tx_index++){
8924
		/* get the txdata of this tx-only cos */
8926 txdata = fp->txdata_ptr[tx_index];
8927
8928 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8929 txdata->txq_index);
8930
		/* send halt terminate on the tx-only connection */
8932 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8933 memset(&q_params.params.terminate, 0,
8934 sizeof(q_params.params.terminate));
8935 q_params.params.terminate.cid_index = tx_index;
8936
8937 rc = bnx2x_queue_state_change(bp, &q_params);
8938 if (rc)
8939 return rc;
8940
		/* send cfc del ramrod on the tx-only connection */
8942 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8943 memset(&q_params.params.cfc_del, 0,
8944 sizeof(q_params.params.cfc_del));
8945 q_params.params.cfc_del.cid_index = tx_index;
8946 rc = bnx2x_queue_state_change(bp, &q_params);
8947 if (rc)
8948 return rc;
8949 }
8950
	/* Stop the primary connection: halt the connection */
8952 q_params.cmd = BNX2X_Q_CMD_HALT;
8953 rc = bnx2x_queue_state_change(bp, &q_params);
8954 if (rc)
8955 return rc;
8956
	/* ...terminate the connection */
8958 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8959 memset(&q_params.params.terminate, 0,
8960 sizeof(q_params.params.terminate));
8961 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8962 rc = bnx2x_queue_state_change(bp, &q_params);
8963 if (rc)
8964 return rc;

	/* ...delete cfc entry */
8966 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8967 memset(&q_params.params.cfc_del, 0,
8968 sizeof(q_params.params.cfc_del));
8969 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8970 return bnx2x_queue_state_change(bp, &q_params);
8971}
8972
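/* Disable the function in FW and HW: storm function-enable bits, status
 * blocks, IGU/HC edges, CNIC timer scan and the function's ILT range.
 */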
8973static void bnx2x_reset_func(struct bnx2x *bp)
8974{
8975 int port = BP_PORT(bp);
8976 int func = BP_FUNC(bp);
8977 int i;
8978
	/* Disable the function in the FW */
8980 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8981 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8982 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8983 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8984
	/* FP SBs */
8986 for_each_eth_queue(bp, i) {
8987 struct bnx2x_fastpath *fp = &bp->fp[i];
8988 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8989 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8990 SB_DISABLED);
8991 }
8992
8993 if (CNIC_LOADED(bp))
		/* CNIC SB */
8995 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8996 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8997 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8998
	/* SP SB */
9000 REG_WR8(bp, BAR_CSTRORM_INTMEM +
9001 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
9002 SB_DISABLED);
9003
9004 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
9005 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
9006 0);
9007
	/* Configure IGU */
9009 if (bp->common.int_block == INT_BLOCK_HC) {
9010 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
9011 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
9012 } else {
9013 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
9014 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
9015 }
9016
9017 if (CNIC_LOADED(bp)) {
		/* Disable Timer scan */
9019 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9020
		/*
		 * Wait for at least 10ms and up to 2 seconds for the timers
		 * scan to complete.
		 */
9024 for (i = 0; i < 200; i++) {
9025 usleep_range(10000, 20000);
9026 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
9027 break;
9028 }
9029 }

	/* Clear ILT */
9031 bnx2x_clear_func_ilt(bp, func);
9032
	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
9036 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9037 struct ilt_client_info ilt_cli;
9038
9039 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
9040 ilt_cli.start = 0;
9041 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9042 ilt_cli.client_num = ILT_CLIENT_TM;
9043
9044 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9045 }
9046
	/* this assumes that reset_port() was called before reset_func() */
9048 if (!CHIP_IS_E1x(bp))
9049 bnx2x_pf_disable(bp);
9050
9051 bp->dmae_ready = 0;
9052}
9053
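/* Quiesce the port: reset the link, mask NIG interrupts, stop Rx towards
 * the BRB and give it time to drain.
 */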
9054static void bnx2x_reset_port(struct bnx2x *bp)
9055{
9056 int port = BP_PORT(bp);
9057 u32 val;
9058
	/* Reset physical Link */
9060 bnx2x__link_reset(bp);
9061
9062 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9063
	/* Do not receive packets to BRB */
9065 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct receive packets that are not for MCP to the BRB */
9067 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9068 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9069
	/* Configure AEU */
9071 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9072
9073 msleep(100);
	/* Check for BRB port occupancy */
9075 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9076 if (val)
9077 DP(NETIF_MSG_IFDOWN,
9078 "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
9081}
9082
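/* Send the HW_RESET ramrod using the load_code previously returned by MCP */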
9083static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9084{
9085 struct bnx2x_func_state_params func_params = {NULL};
9086
	/* Prepare parameters for function state transitions */
9088 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9089
9090 func_params.f_obj = &bp->func_obj;
9091 func_params.cmd = BNX2X_F_CMD_HW_RESET;
9092
9093 func_params.params.hw_init.load_phase = load_code;
9094
9095 return bnx2x_func_state_change(bp, &func_params);
9096}
9097
9098static int bnx2x_func_stop(struct bnx2x *bp)
9099{
9100 struct bnx2x_func_state_params func_params = {NULL};
9101 int rc;
9102
	/* Prepare parameters for function state transitions */
9104 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9105 func_params.f_obj = &bp->func_obj;
9106 func_params.cmd = BNX2X_F_CMD_STOP;
9107
	/*
	 * Try to stop the function the 'good way'. If it fails (in case
	 * of a parity error during bnx2x_chip_cleanup()) and we are
	 * not in a debug mode, perform a state transaction in order to
	 * enable further HW_RESET transaction.
	 */
9114 rc = bnx2x_func_state_change(bp, &func_params);
9115 if (rc) {
9116#ifdef BNX2X_STOP_ON_ERROR
9117 return rc;
9118#else
9119 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9120 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9121 return bnx2x_func_state_change(bp, &func_params);
9122#endif
9123 }
9124
9125 return 0;
9126}
9127
/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
9136u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9137{
9138 u32 reset_code = 0;
9139 int port = BP_PORT(bp);
9140
	/* Select the UNLOAD request mode */
9142 if (unload_mode == UNLOAD_NORMAL)
9143 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9144
9145 else if (bp->flags & NO_WOL_FLAG)
9146 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9147
9148 else if (bp->wol) {
9149 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9150 u8 *mac_addr = bp->dev->dev_addr;
9151 struct pci_dev *pdev = bp->pdev;
9152 u32 val;
9153 u16 pmc;
9154
		/* The mac address is written to entries 1-4 to
		 * preserve entry 0 which is used by the PMF
		 */
9158 u8 entry = (BP_VN(bp) + 1)*8;
9159
9160 val = (mac_addr[0] << 8) | mac_addr[1];
9161 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9162
9163 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9164 (mac_addr[4] << 8) | mac_addr[5];
9165 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9166
		/* Enable the PME and clear the status */
9168 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9169 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9170 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9171
9172 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9173
9174 } else
9175 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9176
	/* Send the request to the MCP */
9178 if (!BP_NOMCP(bp))
9179 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9180 else {
9181 int path = BP_PATH(bp);
9182
9183 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
9184 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9185 bnx2x_load_count[path][2]);
9186 bnx2x_load_count[path][0]--;
9187 bnx2x_load_count[path][1 + port]--;
9188 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
9189 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9190 bnx2x_load_count[path][2]);
9191 if (bnx2x_load_count[path][0] == 0)
9192 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9193 else if (bnx2x_load_count[path][1 + port] == 0)
9194 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9195 else
9196 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9197 }
9198
9199 return reset_code;
9200}
9201
/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */
9208void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9209{
9210 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9211
	/* Report UNLOAD_DONE to MCP */
9213 if (!BP_NOMCP(bp))
9214 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9215}
9216
9217static int bnx2x_func_wait_started(struct bnx2x *bp)
9218{
9219 int tout = 50;
9220 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9221
9222 if (!bp->port.pmf)
9223 return 0;
9224
	/* Wait for any pending DCBx-driven function state transitions
	 * (STARTED <-> TX_STOPPED) to complete:
	 *  - make sure the slow-path interrupt handler is not running
	 *    (synchronize_irq),
	 *  - flush the workqueues that may issue further state changes,
	 *  - then poll the function state for up to ~1 second.
	 */
9240 if (msix)
9241 synchronize_irq(bp->msix_table[0].vector);
9242 else
9243 synchronize_irq(bp->pdev->irq);
9244
9245 flush_workqueue(bnx2x_wq);
9246 flush_workqueue(bnx2x_iov_wq);
9247
9248 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9249 BNX2X_F_STATE_STARTED && tout--)
9250 msleep(20);
9251
9252 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9253 BNX2X_F_STATE_STARTED) {
9254#ifdef BNX2X_STOP_ON_ERROR
9255 BNX2X_ERR("Wrong function state\n");
9256 return -EBUSY;
9257#else
		/*
		 * Failed to complete the transaction in a "good way"
		 * Force both transactions with CLR bit
		 */
9262 struct bnx2x_func_state_params func_params = {NULL};
9263
9264 DP(NETIF_MSG_IFDOWN,
9265 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9266
9267 func_params.f_obj = &bp->func_obj;
9268 __set_bit(RAMROD_DRV_CLR_ONLY,
9269 &func_params.ramrod_flags);
9270
		/* STARTED --> TX_STOPPED */
9272 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9273 bnx2x_func_state_change(bp, &func_params);
9274
		/* TX_STOPPED --> STARTED */
9276 func_params.cmd = BNX2X_F_CMD_TX_START;
9277 return bnx2x_func_state_change(bp, &func_params);
9278#endif
9279 }
9280
9281 return 0;
9282}
9283
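/* Stop PTP packet detection in the NIG and disable the PTP feature */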
9284static void bnx2x_disable_ptp(struct bnx2x *bp)
9285{
9286 int port = BP_PORT(bp);
9287
	/* Disable sending PTP packets to host */
9289 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9290 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9291
	/* Reset PTP event detection rules */
9293 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9294 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9295 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9296 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9297 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9298 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9299 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9300 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9301
	/* Disable the PTP feature */
9303 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9304 NIG_REG_P0_PTP_EN, 0x0);
9305}
9306
/* Called during unload, to stop PTP-related stuff */
9308static void bnx2x_stop_ptp(struct bnx2x *bp)
9309{
	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling of the work queue.
	 */
9313 cancel_work_sync(&bp->ptp_task);
9314
9315 if (bp->ptp_tx_skb) {
9316 dev_kfree_skb_any(bp->ptp_tx_skb);
9317 bp->ptp_tx_skb = NULL;
9318 }
9319
	/* Disable PTP in HW */
9321 bnx2x_disable_ptp(bp);
9322
9323 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9324}
9325
9326void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9327{
9328 int port = BP_PORT(bp);
9329 int i, rc = 0;
9330 u8 cos;
9331 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9332 u32 reset_code;
9333
	/* Wait until tx fastpath tasks complete */
9335 for_each_tx_queue(bp, i) {
9336 struct bnx2x_fastpath *fp = &bp->fp[i];
9337
9338 for_each_cos_in_tx_queue(fp, cos)
9339 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9340#ifdef BNX2X_STOP_ON_ERROR
9341 if (rc)
9342 return;
9343#endif
9344 }
9345
	/* Give HW time to discard old tx messages */
9347 usleep_range(1000, 2000);
9348
	/* Clean all ETH MACs */
9350 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9351 false);
9352 if (rc < 0)
9353 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9354
	/* Clean up UC list */
9356 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9357 true);
9358 if (rc < 0)
9359 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9360 rc);
9361
9362
	/* The whole *vlan_obj structure may be not initialized if VLAN
	 * filtering offload is not supported by hardware. Currently this is
	 * true for all hardware covered by CHIP_IS_E1x().
	 */
9366 if (!CHIP_IS_E1x(bp)) {
		/* Remove all currently configured VLANs */
9368 rc = bnx2x_del_all_vlans(bp);
9369 if (rc < 0)
9370 BNX2X_ERR("Failed to delete all VLANs\n");
9371 }
9372
	/* Disable LLH */
9374 if (!CHIP_IS_E1(bp))
9375 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9376
	/* Set "drop all" (stop Rx).
	 * We need to take a netif_addr_lock() here in order to prevent
	 * a race between the completion code and this code.
	 */
9381 netif_addr_lock_bh(bp->dev);
	/* Schedule the rx_mode command */
9383 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9384 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9385 else if (bp->slowpath)
9386 bnx2x_set_storm_rx_mode(bp);
9387
	/* Cleanup multicast configuration */
9389 rparam.mcast_obj = &bp->mcast_obj;
9390 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9391 if (rc < 0)
9392 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9393
9394 netif_addr_unlock_bh(bp->dev);
9395
9396 bnx2x_iov_chip_cleanup(bp);
9397
	/*
	 * Send the UNLOAD_REQUEST to the MCP. This will return if
	 * this function should perform FUNC, PORT or COMMON HW
	 * reset.
	 */
9403 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9404
	/*
	 * (assumption: No Attention from MCP at this stage)
	 * PMF probably in the middle of TX disable/enable transaction
	 */
9409 rc = bnx2x_func_wait_started(bp);
9410 if (rc) {
9411 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9412#ifdef BNX2X_STOP_ON_ERROR
9413 return;
9414#endif
9415 }
9416
	/* Close multi and leading connections
	 * Completions for ramrods are collected in a synchronous way
	 */
9420 for_each_eth_queue(bp, i)
9421 if (bnx2x_stop_queue(bp, i))
9422#ifdef BNX2X_STOP_ON_ERROR
9423 return;
9424#else
9425 goto unload_error;
9426#endif
9427
9428 if (CNIC_LOADED(bp)) {
9429 for_each_cnic_queue(bp, i)
9430 if (bnx2x_stop_queue(bp, i))
9431#ifdef BNX2X_STOP_ON_ERROR
9432 return;
9433#else
9434 goto unload_error;
9435#endif
9436 }
9437
	/* If SP settings didn't get completed so far - something
	 * very wrong has happened.
	 */
9441 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9442 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9443
9444#ifndef BNX2X_STOP_ON_ERROR
9445unload_error:
9446#endif
9447 rc = bnx2x_func_stop(bp);
9448 if (rc) {
9449 BNX2X_ERR("Function stop failed!\n");
9450#ifdef BNX2X_STOP_ON_ERROR
9451 return;
9452#endif
9453 }
9454
	/* stop_ptp should be after the Tx queues are drained to prevent
	 * scheduling to the cancelled PTP work queue. It should also be after
	 * function stop ramrod is sent, since as part of this ramrod FW access
	 * PTP registers.
	 */
9460 if (bp->flags & PTP_SUPPORTED) {
9461 bnx2x_stop_ptp(bp);
9462 if (bp->ptp_clock) {
9463 ptp_clock_unregister(bp->ptp_clock);
9464 bp->ptp_clock = NULL;
9465 }
9466 }
9467
	/* Disable HW interrupts, NAPI */
9469 bnx2x_netif_stop(bp, 1);
9470
9471 bnx2x_del_all_napi(bp);
9472 if (CNIC_LOADED(bp))
9473 bnx2x_del_all_napi_cnic(bp);
9474
	/* Release IRQs */
9476 bnx2x_free_irq(bp);
9477
	/* Reset the chip, unless PCI function is offline. If we reach this
	 * point following a PCI error handling, it means device is really
	 * in a bad state and we're about to remove it, so reset the chip
	 * is not a good idea.
	 */
9483 if (!pci_channel_offline(bp->pdev)) {
9484 rc = bnx2x_reset_hw(bp, reset_code);
9485 if (rc)
9486 BNX2X_ERR("HW_RESET failed\n");
9487 }
9488
	/* Report UNLOAD_DONE to MCP */
9490 bnx2x_send_unload_done(bp, keep_link);
9491}
9492
9493void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9494{
9495 u32 val;
9496
9497 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9498
9499 if (CHIP_IS_E1(bp)) {
9500 int port = BP_PORT(bp);
9501 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9502 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9503
9504 val = REG_RD(bp, addr);
9505 val &= ~(0x300);
9506 REG_WR(bp, addr, val);
9507 } else {
9508 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9509 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9510 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9511 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9512 }
9513}
9514
/* Close/open gates #2, #3 and #4 */
9516static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9517{
9518 u32 val;
9519
	/* Gates #2 and #4a are closed/opened for "not E1" only */
9521 if (!CHIP_IS_E1(bp)) {
		/* #4 */
9523 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
		/* #2 */
9525 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9526 }
9527
	/* #3 */
9529 if (CHIP_IS_E1x(bp)) {
		/* Prevent interrupts from HC on both ports */
9531 val = REG_RD(bp, HC_REG_CONFIG_1);
9532 REG_WR(bp, HC_REG_CONFIG_1,
9533 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9534 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9535
9536 val = REG_RD(bp, HC_REG_CONFIG_0);
9537 REG_WR(bp, HC_REG_CONFIG_0,
9538 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9539 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9540 } else {
		/* Prevent incoming interrupts in IGU */
9542 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9543
9544 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9545 (!close) ?
9546 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9547 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9548 }
9549
9550 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9551 close ? "closing" : "opening");
9552}
9553
9554#define SHARED_MF_CLP_MAGIC 0x80000000

/* Prepare for MCP reset: save the current CLP 'magic' bit value in
 * @magic_val and then set the bit.
 */
9556static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9557{
9558
9559 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9560 *magic_val = val & SHARED_MF_CLP_MAGIC;
9561 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9562}
9563
/* Restore the value of the 'magic' bit.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the 'magic' bit.
 */
9570static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9571{
	/* Restore the 'magic' bit value */
9573 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9574 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9575 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9576}
9577
/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of 'magic' bit.
 *
 * Takes care of CLP configurations.
 */
9586static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9587{
9588 u32 shmem;
9589 u32 validity_offset;
9590
9591 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9592
	/* Take care of CLP configurations */
9594 if (!CHIP_IS_E1(bp))
9595 bnx2x_clp_reset_prep(bp, magic_val);
9596
	/* Get shmem offset */
9598 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9599 validity_offset =
9600 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9601
	/* Clear validity map flags */
9603 if (shmem > 0)
9604 REG_WR(bp, shmem + validity_offset, 0);
9605}
9606
9607#define MCP_TIMEOUT 5000
9608#define MCP_ONE_TIMEOUT 100
9609
/**
 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
 *
 * @bp:	driver handle
 */
9615static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9616{
	/* special handling for emulation and FPGA (wait 10 times longer) */
9619 if (CHIP_REV_IS_SLOW(bp))
9620 msleep(MCP_ONE_TIMEOUT*10);
9621 else
9622 msleep(MCP_ONE_TIMEOUT);
9623}
9624
9625
/* Initialize shmem_base and wait for the validity signature to appear */
9628static int bnx2x_init_shmem(struct bnx2x *bp)
9629{
9630 int cnt = 0;
9631 u32 val = 0;
9632
9633 do {
9634 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9635
		/* If we read all 0xFFs, means we are in PCI error state and
		 * should bail out to avoid crashes on adapter's FW reads
		 */
9639 if (bp->common.shmem_base == 0xFFFFFFFF) {
9640 bp->flags |= NO_MCP_FLAG;
9641 return -ENODEV;
9642 }
9643
9644 if (bp->common.shmem_base) {
9645 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9646 if (val & SHR_MEM_VALIDITY_MB)
9647 return 0;
9648 }
9649
9650 bnx2x_mcp_wait_one(bp);
9651
9652 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9653
9654 BNX2X_ERR("BAD MCP validity signature\n");
9655
9656 return -ENODEV;
9657}
9658
9659static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9660{
9661 int rc = bnx2x_init_shmem(bp);
9662
	/* Restore the 'magic' bit value */
9664 if (!CHIP_IS_E1(bp))
9665 bnx2x_clp_reset_done(bp, magic_val);
9666
9667 return rc;
9668}
9669
9670static void bnx2x_pxp_prep(struct bnx2x *bp)
9671{
9672 if (!CHIP_IS_E1(bp)) {
9673 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9674 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9675 }
9676}
9677
9678
/* Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
9688static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9689{
9690 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9691 u32 global_bits2, stay_reset2;
9692
	/*
	 * Bits that have to be set in reset_mask2 if we want to reset 'global'
	 * (per chip) blocks.
	 */
9697 global_bits2 =
9698 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9699 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9700
	/* Don't reset the following blocks.
	 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
	 *            reset, as in 4 port device they might still be owned
	 *            by the MCP (there is only one leader per path).
	 */
9706 not_reset_mask1 =
9707 MISC_REGISTERS_RESET_REG_1_RST_HC |
9708 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9709 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9710
9711 not_reset_mask2 =
9712 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9713 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9714 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9715 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9716 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9717 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9718 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9719 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9720 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9721 MISC_REGISTERS_RESET_REG_2_PGLC |
9722 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9723 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9724 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9725 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9726 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9727 MISC_REGISTERS_RESET_REG_2_UMAC1;
9728
	/*
	 * Keep the following blocks in reset:
	 *  - all xxMACs are handled by the bnx2x link code.
	 */
9733 stay_reset2 =
9734 MISC_REGISTERS_RESET_REG_2_XMAC |
9735 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9736
	/* Full reset masks according to the chip */
9738 reset_mask1 = 0xffffffff;
9739
9740 if (CHIP_IS_E1(bp))
9741 reset_mask2 = 0xffff;
9742 else if (CHIP_IS_E1H(bp))
9743 reset_mask2 = 0x1ffff;
9744 else if (CHIP_IS_E2(bp))
9745 reset_mask2 = 0xfffff;
9746 else
9747 reset_mask2 = 0x3ffffff;
9748
	/* Don't reset global blocks unless we need to */
9750 if (!global)
9751 reset_mask2 &= ~global_bits2;
9752
	/*
	 * In case of attention in the QM, we need to reset PXP
	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
	 * because otherwise QM reset would release 'close the gates' shortly
	 * before resetting the PXP, then the PSWRQ would send a write
	 * request to PGLUE. Then when PXP is reset, PGLUE would try to
	 * read the payload data from PSWWR, but PSWWR would not
	 * respond. The write queue in PGLUE would stuck, dmae commands
	 * would not return. Therefore it's important to reset the second
	 * reset register (containing the
	 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
	 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
	 * bit).
	 */
9767 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9768 reset_mask2 & (~not_reset_mask2));
9769
9770 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9771 reset_mask1 & (~not_reset_mask1));
9772
9773 barrier();
9774
9775 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9776 reset_mask2 & (~stay_reset2));
9777
9778 barrier();
9779
9780 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9781}
9782

/**
 * bnx2x_er_poll_igu_vq - poll for pending writes bit.
 *
 * @bp:	driver handle
 *
 * It should get cleared in no more than 1s. Return 0 if
 * the pending writes bit gets cleared.
 */
9792static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9793{
	int cnt = 1000;	/* must be signed: the timeout test below checks cnt <= 0 */
9795 u32 pend_bits = 0;
9796
9797 do {
9798 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9799
9800 if (pend_bits == 0)
9801 break;
9802
9803 usleep_range(1000, 2000);
9804 } while (cnt-- > 0);
9805
9806 if (cnt <= 0) {
9807 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9808 pend_bits);
9809 return -EBUSY;
9810 }
9811
9812 return 0;
9813}
9814
9815static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9816{
9817 int cnt = 1000;
9818 u32 val = 0;
9819 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9820 u32 tags_63_32 = 0;
9821
	/* Empty the Tetris buffer, wait for 1s */
9823 do {
9824 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9825 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9826 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9827 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9828 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9829 if (CHIP_IS_E3(bp))
9830 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9831
9832 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9833 ((port_is_idle_0 & 0x1) == 0x1) &&
9834 ((port_is_idle_1 & 0x1) == 0x1) &&
9835 (pgl_exp_rom2 == 0xffffffff) &&
9836 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9837 break;
9838 usleep_range(1000, 2000);
9839 } while (cnt-- > 0);
9840
9841 if (cnt <= 0) {
9842 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9843 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9844 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9845 pgl_exp_rom2);
9846 return -EAGAIN;
9847 }
9848
9849 barrier();

	/* Close gates #2, #3 and #4 */
9852 bnx2x_set_234_gates(bp, true);
9853
	/* Poll for IGU VQs for 57712 and newer chips */
9855 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9856 return -EAGAIN;

	/* Clear "unprepared" bit */
9861 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9862 barrier();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
9867 usleep_range(1000, 2000);

	/* Prepare to chip reset: */
	/* MCP */
9871 if (global)
9872 bnx2x_reset_mcp_prep(bp, &val);
9873
	/* PXP */
9875 bnx2x_pxp_prep(bp);
9876 barrier();
9877
	/* reset the chip */
9879 bnx2x_process_kill_chip_reset(bp, global);
9880 barrier();
9881
	/* clear errors in PGB */
9883 if (!CHIP_IS_E1x(bp))
9884 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);

	/* Recover after reset: */
	/* MCP */
9888 if (global && bnx2x_reset_mcp_comp(bp, val))
9889 return -EAGAIN;

	/* Open the gates #2, #3 and #4 */
9894 bnx2x_set_234_gates(bp, false);
9899 return 0;
9900}
9901
9902static int bnx2x_leader_reset(struct bnx2x *bp)
9903{
9904 int rc = 0;
9905 bool global = bnx2x_reset_is_global(bp);
9906 u32 load_code;
9907
	/*
	 * If not going to reset MCP, load "fake" driver to reset HW while
	 * driver is owner of the HW.
	 */
9911 if (!global && !BP_NOMCP(bp)) {
9912 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9913 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9914 if (!load_code) {
9915 BNX2X_ERR("MCP response failure, aborting\n");
9916 rc = -EAGAIN;
9917 goto exit_leader_reset;
9918 }
9919 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9920 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9921 BNX2X_ERR("MCP unexpected resp, aborting\n");
9922 rc = -EAGAIN;
9923 goto exit_leader_reset2;
9924 }
9925 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9926 if (!load_code) {
9927 BNX2X_ERR("MCP response failure, aborting\n");
9928 rc = -EAGAIN;
9929 goto exit_leader_reset2;
9930 }
9931 }
9932
	/* Try to recover after the failure */
9934 if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad happened on engine %d!\n",
9936 BP_PATH(bp));
9937 rc = -EAGAIN;
9938 goto exit_leader_reset2;
9939 }
9940
9941
	/*
	 * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
	 * state.
	 */
9945 bnx2x_set_reset_done(bp);
9946 if (global)
9947 bnx2x_clear_reset_global(bp);
9948
9949exit_leader_reset2:
	/* unload the "fake driver" if it was loaded */
9951 if (!global && !BP_NOMCP(bp)) {
9952 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9953 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9954 }
9955exit_leader_reset:
9956 bp->is_leader = 0;
9957 bnx2x_release_leader_lock(bp);
9958 smp_mb();
9959 return rc;
9960}
9961
9962static void bnx2x_recovery_failed(struct bnx2x *bp)
9963{
9964 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9965
	/* Disconnect this device */
9967 netif_device_detach(bp->dev);
9968
	/*
	 * Block ifup for all functions of this ASIC until "process kill"
	 * or power cycle.
	 */
9973 bnx2x_set_reset_in_progress(bp);
9974
	/* Shut down the power */
9976 bnx2x_set_power_state(bp, PCI_D3hot);
9977
9978 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9979
9980 smp_mb();
9981}
9982
9983
/*
 * Assumption: runs serialized by bnx2x_sp_rtnl_task()
 * scheduled on a general queue in order to prevent a dead lock.
 */
9988static void bnx2x_parity_recover(struct bnx2x *bp)
9989{
9990 u32 error_recovered, error_unrecovered;
9991 bool is_parity, global = false;
9992#ifdef CONFIG_BNX2X_SRIOV
9993 int vf_idx;
9994
9995 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
9996 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
9997
9998 if (vf)
9999 vf->state = VF_LOST;
10000 }
10001#endif
10002 DP(NETIF_MSG_HW, "Handling parity\n");
10003 while (1) {
10004 switch (bp->recovery_state) {
10005 case BNX2X_RECOVERY_INIT:
10006 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
10007 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
10008 WARN_ON(!is_parity);
10009
			/* Try to get a LEADER_LOCK HW lock */
10011 if (bnx2x_trylock_leader_lock(bp)) {
10012 bnx2x_set_reset_in_progress(bp);

				/*
				 * Check if there is a global attention and if
				 * there was a global attention, set the global
				 * reset bit.
				 */
10019 if (global)
10020 bnx2x_set_reset_global(bp);
10021
10022 bp->is_leader = 1;
10023 }

			/* Stop the driver; if the interface has been removed -
			 * return.
			 */
10027 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
10028 return;
10029
10030 bp->recovery_state = BNX2X_RECOVERY_WAIT;
10031
			/* Ensure "is_leader", MCP command sequence and
			 * "recovery_state" update values are seen on other
			 * CPUs.
			 */
10036 smp_mb();
10037 break;
10038
10039 case BNX2X_RECOVERY_WAIT:
10040 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
10041 if (bp->is_leader) {
10042 int other_engine = BP_PATH(bp) ? 0 : 1;
10043 bool other_load_status =
10044 bnx2x_get_load_status(bp, other_engine);
10045 bool load_status =
10046 bnx2x_get_load_status(bp, BP_PATH(bp));
10047 global = bnx2x_reset_is_global(bp);
10048
				/*
				 * In case of a parity in a global block, let
				 * the first leader that performs a
				 * leader_reset() reset the global blocks in
				 * order to clear global attentions. Otherwise
				 * the gates will remain closed for that
				 * engine.
				 */
10057 if (load_status ||
10058 (global && other_load_status)) {

					/* Wait until all other functions get
					 * down.
					 */
10062 schedule_delayed_work(&bp->sp_rtnl_task,
10063 HZ/10);
10064 return;
10065 } else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
10071 if (bnx2x_leader_reset(bp)) {
10072 bnx2x_recovery_failed(bp);
10073 return;
10074 }
10075
					/* If we are here, means that the
					 * leader has succeeded and doesn't
					 * want to be a leader any more. Try
					 * to continue as a none-leader.
					 */
10081 break;
10082 }
10083 } else {
10084 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
10091 if (bnx2x_trylock_leader_lock(bp)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
10095 bp->is_leader = 1;
10096 break;
10097 }
10098
10099 schedule_delayed_work(&bp->sp_rtnl_task,
10100 HZ/10);
10101 return;
10102
10103 } else {
					/*
					 * If there was a global attention, wait
					 * for it to be cleared.
					 */
10108 if (bnx2x_reset_is_global(bp)) {
10109 schedule_delayed_work(
10110 &bp->sp_rtnl_task,
10111 HZ/10);
10112 return;
10113 }
10114
10115 error_recovered =
10116 bp->eth_stats.recoverable_error;
10117 error_unrecovered =
10118 bp->eth_stats.unrecoverable_error;
10119 bp->recovery_state =
10120 BNX2X_RECOVERY_NIC_LOADING;
10121 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10122 error_unrecovered++;
10123 netdev_err(bp->dev,
10124 "Recovery failed. Power cycle needed\n");
						/* Disconnect this device */
10126 netif_device_detach(bp->dev);
						/* Shut down the power */
10128 bnx2x_set_power_state(
10129 bp, PCI_D3hot);
10130 smp_mb();
10131 } else {
10132 bp->recovery_state =
10133 BNX2X_RECOVERY_DONE;
10134 error_recovered++;
10135 smp_mb();
10136 }
10137 bp->eth_stats.recoverable_error =
10138 error_recovered;
10139 bp->eth_stats.unrecoverable_error =
10140 error_unrecovered;
10141
10142 return;
10143 }
10144 }
10145 default:
10146 return;
10147 }
10148 }
10149}
10150
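/* Push the currently known VXLAN/GENEVE UDP destination ports to the FW via
 * a SWITCH_UPDATE ramrod.
 */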
10151static int bnx2x_udp_port_update(struct bnx2x *bp)
10152{
10153 struct bnx2x_func_switch_update_params *switch_update_params;
10154 struct bnx2x_func_state_params func_params = {NULL};
10155 u16 vxlan_port = 0, geneve_port = 0;
10156 int rc;
10157
10158 switch_update_params = &func_params.params.switch_update;
10159
	/* Prepare parameters for function state transitions */
10161 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10162 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10163
10164 func_params.f_obj = &bp->func_obj;
10165 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10166
	/* Function parameters */
10168 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10169 &switch_update_params->changes);
10170
10171 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
10172 geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10173 switch_update_params->geneve_dst_port = geneve_port;
10174 }
10175
10176 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
10177 vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10178 switch_update_params->vxlan_dst_port = vxlan_port;
10179 }
10180
	/* Re-enable inner-rss for the offloaded UDP tunnels */
10182 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10183 &switch_update_params->changes);
10184
10185 rc = bnx2x_func_state_change(bp, &func_params);
10186 if (rc)
10187 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10188 vxlan_port, geneve_port, rc);
10189 else
10190 DP(BNX2X_MSG_SP,
10191 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10192 vxlan_port, geneve_port);
10193
10194 return rc;
10195}
10196
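/* udp_tunnel_nic sync_table callback: cache the port of this table entry
 * and push the new configuration to the FW.
 */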
10197static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
10198{
10199 struct bnx2x *bp = netdev_priv(netdev);
10200 struct udp_tunnel_info ti;
10201
10202 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
10203 bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port);
10204
10205 return bnx2x_udp_port_update(bp);
10206}
10207
10208static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
10209 .sync_table = bnx2x_udp_tunnel_sync,
10210 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
10211 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
10212 .tables = {
10213 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
10214 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
10215 },
10216};
10217
10218static int bnx2x_close(struct net_device *dev);
10219
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */
10223static void bnx2x_sp_rtnl_task(struct work_struct *work)
10224{
10225 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10226
10227 rtnl_lock();
10228
10229 if (!netif_running(bp->dev)) {
10230 rtnl_unlock();
10231 return;
10232 }
10233
10234 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10235#ifdef BNX2X_STOP_ON_ERROR
10236 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10237 "you will need to reboot when done\n");
10238 goto sp_rtnl_not_reset;
10239#endif

		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
10244 bp->sp_rtnl_state = 0;
10245 smp_mb();
10246
10247 bnx2x_parity_recover(bp);
10248
10249 rtnl_unlock();
10250 return;
10251 }
10252
10253 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10254#ifdef BNX2X_STOP_ON_ERROR
10255 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10256 "you will need to reboot when done\n");
10257 goto sp_rtnl_not_reset;
10258#endif

		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
10264 bp->sp_rtnl_state = 0;
10265 smp_mb();
10266
		/* Immediately indicate link as down */
10268 bp->link_vars.link_up = 0;
10269 bp->force_link_down = true;
10270 netif_carrier_off(bp->dev);
10271 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10272
10273 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10274
		/* If the load failed due to a memory allocation failure,
		 * reboot the nic once more; if open still fails, log an
		 * error to notify the user.
		 */
10278 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
10279 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10280 if (bnx2x_nic_load(bp, LOAD_NORMAL))
10281 BNX2X_ERR("Open the NIC fails again!\n");
10282 }
10283 rtnl_unlock();
10284 return;
10285 }
10286#ifdef BNX2X_STOP_ON_ERROR
10287sp_rtnl_not_reset:
10288#endif
10289 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10290 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10291 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10292 bnx2x_after_function_update(bp);
10293
10294
	/* In case of fan failure we need to unload the driver, since we are
	 * trying to prevent permanent overheating damage.
	 */
10298 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10299 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10300 netif_device_detach(bp->dev);
10301 bnx2x_close(bp->dev);
10302 rtnl_unlock();
10303 return;
10304 }
10305
10306 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10307 DP(BNX2X_MSG_SP,
10308 "sending set mcast vf pf channel message from rtnl sp-task\n");
10309 bnx2x_vfpf_set_mcast(bp->dev);
10310 }
10311 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10312 &bp->sp_rtnl_state)){
10313 if (netif_carrier_ok(bp->dev)) {
10314 bnx2x_tx_disable(bp);
			BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10316 }
10317 }
10318
10319 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10320 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10321 bnx2x_set_rx_mode_inner(bp);
10322 }
10323
10324 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10325 &bp->sp_rtnl_state))
10326 bnx2x_pf_set_vfs_vlan(bp);
10327
10328 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10329 bnx2x_dcbx_stop_hw_tx(bp);
10330 bnx2x_dcbx_resume_hw_tx(bp);
10331 }
10332
10333 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10334 &bp->sp_rtnl_state))
10335 bnx2x_update_mng_version(bp);
10336
10337 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10338 bnx2x_handle_update_svid_cmd(bp);
10339
	/* work which needs rtnl lock not-taken (as it takes the lock itself and
	 * can be called from other contexts as well)
	 */
10343 rtnl_unlock();
10344
	/* enable SR-IOV if applicable */
10346 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10347 &bp->sp_rtnl_state)) {
10348 bnx2x_disable_sriov(bp);
10349 bnx2x_enable_sriov(bp);
10350 }
10351}
10352
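/* Periodic link maintenance; re-queues itself once a second for as long as
 * this function is the PMF.
 */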
10353static void bnx2x_period_task(struct work_struct *work)
10354{
10355 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10356
10357 if (!netif_running(bp->dev))
10358 goto period_task_exit;
10359
10360 if (CHIP_REV_IS_SLOW(bp)) {
10361 BNX2X_ERR("period task called on emulation, ignoring\n");
10362 goto period_task_exit;
10363 }
10364
10365 bnx2x_acquire_phy_lock(bp);
	/*
	 * The barrier is needed to ensure the ordering between the writing to
	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
	 * the reading here.
	 */
10371 smp_mb();
10372 if (bp->port.pmf) {
10373 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10374
		/* Re-queue task in 1 sec */
10376 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10377 }
10378
10379 bnx2x_release_phy_lock(bp);
10380period_task_exit:
10381 return;
10382}
10383
/*
 * Init service functions
 */

10388static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10389{
10390 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10391 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10392 return base + (BP_ABS_FUNC(bp)) * stride;
10393}
10394
10395static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10396 u8 port, u32 reset_reg,
10397 struct bnx2x_mac_vals *vals)
10398{
10399 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10400 u32 base_addr;
10401
10402 if (!(mask & reset_reg))
10403 return false;
10404
10405 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10406 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10407 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10408 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10409 REG_WR(bp, vals->umac_addr[port], 0);
10410
10411 return true;
10412}
10413
10414static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10415 struct bnx2x_mac_vals *vals)
10416{
10417 u32 val, base_addr, offset, mask, reset_reg;
10418 bool mac_stopped = false;
10419 u8 port = BP_PORT(bp);
10420
	/* reset addresses as they also mark which values were changed */
10422 memset(vals, 0, sizeof(*vals));
10423
10424 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10425
10426 if (!CHIP_IS_E3(bp)) {
10427 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10428 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10429 if ((mask & reset_reg) && val) {
10430 u32 wb_data[2];
10431 BNX2X_DEV_INFO("Disable bmac Rx\n");
10432 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10433 : NIG_REG_INGRESS_BMAC0_MEM;
10434 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10435 : BIGMAC_REGISTER_BMAC_CONTROL;
10436
			/*
			 * use rd/wr since we cannot use dmae. This is safe
			 * since MCP won't access the bus due to the request
			 * to unload, and no function on the path can be
			 * loaded at this time.
			 */
10443 wb_data[0] = REG_RD(bp, base_addr + offset);
10444 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10445 vals->bmac_addr = base_addr + offset;
10446 vals->bmac_val[0] = wb_data[0];
10447 vals->bmac_val[1] = wb_data[1];
10448 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10449 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10450 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10451 }
10452 BNX2X_DEV_INFO("Disable emac Rx\n");
10453 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10454 vals->emac_val = REG_RD(bp, vals->emac_addr);
10455 REG_WR(bp, vals->emac_addr, 0);
10456 mac_stopped = true;
10457 } else {
10458 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10459 BNX2X_DEV_INFO("Disable xmac Rx\n");
10460 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10461 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10462 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10463 val & ~(1 << 1));
10464 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10465 val | (1 << 1));
10466 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10467 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10468 REG_WR(bp, vals->xmac_addr, 0);
10469 mac_stopped = true;
10470 }
10471
10472 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10473 reset_reg, vals);
10474 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10475 reset_reg, vals);
10476 }
10477
10478 if (mac_stopped)
10479 msleep(20);
10480}
10481
10482#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10483#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10484 0x1848 + ((f) << 4))
10485#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10486#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10487#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
10488
10489#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10490#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10491#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10492
10493static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10494{
	/* UNDI marks its presence in DORQ -
	 * it initializes CID offset for normal bell to 0x7
	 */
10498 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10499 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10500 return false;
10501
10502 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10503 BNX2X_DEV_INFO("UNDI previously loaded\n");
10504 return true;
10505 }
10506
10507 return false;
10508}
10509
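/* Bump the UNDI Rx BD and RCQ producers so that a residing UNDI firmware
 * keeps draining packets from the BRB.
 */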
10510static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10511{
10512 u16 rcq, bd;
10513 u32 addr, tmp_reg;
10514
10515 if (BP_FUNC(bp) < 2)
10516 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10517 else
10518 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10519
10520 tmp_reg = REG_RD(bp, addr);
10521 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10522 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10523
10524 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10525 REG_WR(bp, addr, tmp_reg);
10526
10527 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10528 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10529}
10530
10531static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10532{
10533 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10534 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10535 if (!rc) {
10536 BNX2X_ERR("MCP response failure, aborting\n");
10537 return -EBUSY;
10538 }
10539
10540 return 0;
10541}
10542
10543static struct bnx2x_prev_path_list *
10544 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10545{
10546 struct bnx2x_prev_path_list *tmp_list;
10547
10548 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10549 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10550 bp->pdev->bus->number == tmp_list->bus &&
10551 BP_PATH(bp) == tmp_list->path)
10552 return tmp_list;
10553
10554 return NULL;
10555}
10556
10557static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10558{
10559 struct bnx2x_prev_path_list *tmp_list;
10560 int rc;
10561
10562 rc = down_interruptible(&bnx2x_prev_sem);
10563 if (rc) {
10564 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10565 return rc;
10566 }
10567
10568 tmp_list = bnx2x_prev_path_get_entry(bp);
10569 if (tmp_list) {
10570 tmp_list->aer = 1;
10571 rc = 0;
10572 } else {
		BNX2X_ERR("path %d: Entry does not exist for eeh; does the flow occur before initial insmod is over?\n",
10574 BP_PATH(bp));
10575 }
10576
10577 up(&bnx2x_prev_sem);
10578
10579 return rc;
10580}
10581
10582static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10583{
10584 struct bnx2x_prev_path_list *tmp_list;
10585 bool rc = false;
10586
10587 if (down_trylock(&bnx2x_prev_sem))
10588 return false;
10589
10590 tmp_list = bnx2x_prev_path_get_entry(bp);
10591 if (tmp_list) {
10592 if (tmp_list->aer) {
10593 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10594 BP_PATH(bp));
10595 } else {
10596 rc = true;
10597 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10598 BP_PATH(bp));
10599 }
10600 }
10601
10602 up(&bnx2x_prev_sem);
10603
10604 return rc;
10605}
10606
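/* Report whether this port was flagged as running UNDI by the previous
 * unload flow.
 */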
10607bool bnx2x_port_after_undi(struct bnx2x *bp)
10608{
10609 struct bnx2x_prev_path_list *entry;
10610 bool val;
10611
10612 down(&bnx2x_prev_sem);
10613
10614 entry = bnx2x_prev_path_get_entry(bp);
10615 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10616
10617 up(&bnx2x_prev_sem);
10618
10619 return val;
10620}
10621
10622static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10623{
10624 struct bnx2x_prev_path_list *tmp_list;
10625 int rc;
10626
10627 rc = down_interruptible(&bnx2x_prev_sem);
10628 if (rc) {
10629 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10630 return rc;
10631 }
10632
	/* Check whether the entry for this path already exists */
10634 tmp_list = bnx2x_prev_path_get_entry(bp);
10635 if (tmp_list) {
10636 if (!tmp_list->aer) {
10637 BNX2X_ERR("Re-Marking the path.\n");
10638 } else {
10639 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10640 BP_PATH(bp));
10641 tmp_list->aer = 0;
10642 }
10643 up(&bnx2x_prev_sem);
10644 return 0;
10645 }
10646 up(&bnx2x_prev_sem);
10647
	/* Create an entry for this path and add it */
10649 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10650 if (!tmp_list) {
10651 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10652 return -ENOMEM;
10653 }
10654
10655 tmp_list->bus = bp->pdev->bus->number;
10656 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10657 tmp_list->path = BP_PATH(bp);
10658 tmp_list->aer = 0;
10659 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10660
10661 rc = down_interruptible(&bnx2x_prev_sem);
10662 if (rc) {
10663 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10664 kfree(tmp_list);
10665 } else {
10666 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10667 BP_PATH(bp));
10668 list_add(&tmp_list->list, &bnx2x_prev_list);
10669 up(&bnx2x_prev_sem);
10670 }
10671
10672 return rc;
10673}
10674
10675static int bnx2x_do_flr(struct bnx2x *bp)
10676{
10677 struct pci_dev *dev = bp->pdev;
10678
10679 if (CHIP_IS_E1x(bp)) {
10680 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10681 return -EINVAL;
10682 }
10683
	/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
10685 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10686 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10687 bp->common.bc_ver);
10688 return -EINVAL;
10689 }
10690
10691 if (!pci_wait_for_pending_transaction(dev))
10692 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10693
10694 BNX2X_DEV_INFO("Initiating FLR\n");
10695 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10696
10697 return 0;
10698}
10699
10700static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10701{
10702 int rc;
10703
10704 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10705
	/* Test if previous unload process was already done for this path */
10707 if (bnx2x_prev_is_path_marked(bp))
10708 return bnx2x_prev_mcp_done(bp);
10709
10710 BNX2X_DEV_INFO("Path is unmarked\n");
10711
	/* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
10713 if (bnx2x_prev_is_after_undi(bp))
10714 goto out;
10715
	/* If function has FLR capabilities, and existing FW version matches
	 * the one required, then FLR will be sufficient to clean any residue
	 * left by previous driver
	 */
10720 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10721
10722 if (!rc) {
		/* fw version is good */
10724 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10725 rc = bnx2x_do_flr(bp);
10726 }
10727
10728 if (!rc) {
		/* FLR was performed */
10730 BNX2X_DEV_INFO("FLR successful\n");
10731 return 0;
10732 }
10733
10734 BNX2X_DEV_INFO("Could not FLR\n");
10735
10736out:
	/* Close the MCP request, return failure */
10738 rc = bnx2x_prev_mcp_done(bp);
10739 if (!rc)
10740 rc = BNX2X_PREV_WAIT_NEEDED;
10741
10742 return rc;
10743}
10744
10745static int bnx2x_prev_unload_common(struct bnx2x *bp)
10746{
10747 u32 reset_reg, tmp_reg = 0, rc;
10748 bool prev_undi = false;
10749 struct bnx2x_mac_vals mac_vals;
10750
	/* It is possible a previous function received 'common' answer,
	 * but hasn't loaded yet, therefore creating a scenario of
	 * multiple functions receiving 'common' on the same path.
	 */
10755 BNX2X_DEV_INFO("Common unload Flow\n");
10756
10757 memset(&mac_vals, 0, sizeof(mac_vals));
10758
10759 if (bnx2x_prev_is_path_marked(bp))
10760 return bnx2x_prev_mcp_done(bp);
10761
10762 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10763
	/* Reset should be performed after BRB is emptied */
10765 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10766 u32 timer_count = 1000;
10767
		/* Close the MAC Rx to prevent BRB from filling up */
10769 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10770
		/* close LLH filters towards the BRB for both ports */
10772 bnx2x_set_rx_filter(&bp->link_params, 0);
10773 bp->link_params.port ^= 1;
10774 bnx2x_set_rx_filter(&bp->link_params, 0);
10775 bp->link_params.port ^= 1;
10776
		/* Check if the UNDI driver was previously loaded */
10778 if (bnx2x_prev_is_after_undi(bp)) {
10779 prev_undi = true;
			/* clear the UNDI indication */
10781 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
			/* clear possible idle check errors */
10783 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10784 }
10785 if (!CHIP_IS_E1x(bp))
			/* block FW from writing to host */
10787 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10788
		/* wait until BRB is empty */
10790 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10791 while (timer_count) {
10792 u32 prev_brb = tmp_reg;
10793
10794 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10795 if (!tmp_reg)
10796 break;
10797
10798 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10799
			/* reset timer as long as BRB actually gets emptied */
10801 if (prev_brb > tmp_reg)
10802 timer_count = 1000;
10803 else
10804 timer_count--;
10805
			/* If UNDI resides in memory, manually increment it */
10807 if (prev_undi)
10808 bnx2x_prev_unload_undi_inc(bp, 1);
10809
10810 udelay(10);
10811 }
10812
10813 if (!timer_count)
10814 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10815 }
10816
	/* No packets are in the pipeline, path is ready for reset */
10818 bnx2x_reset_common(bp);
10819
10820 if (mac_vals.xmac_addr)
10821 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10822 if (mac_vals.umac_addr[0])
10823 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10824 if (mac_vals.umac_addr[1])
10825 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10826 if (mac_vals.emac_addr)
10827 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10828 if (mac_vals.bmac_addr) {
10829 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10830 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10831 }
10832
10833 rc = bnx2x_prev_mark_path(bp, prev_undi);
10834 if (rc) {
10835 bnx2x_prev_mcp_done(bp);
10836 return rc;
10837 }
10838
10839 return bnx2x_prev_mcp_done(bp);
10840}
10841
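/* Detect and clean up after a previous driver instance (or UNDI) that left
 * the device initialized: release stale HW locks, query the MCP and run the
 * common/uncommon previous-unload flows.
 */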
10842static int bnx2x_prev_unload(struct bnx2x *bp)
10843{
10844 int time_counter = 10;
10845 u32 rc, fw, hw_lock_reg, hw_lock_val;
10846 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10847
	/* clear hw from errors which may have resulted from an interrupted
	 * DMAE transaction.
	 */
10851 bnx2x_clean_pglue_errors(bp);
10852
	/* Release previously held locks */
10854 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10855 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10856 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10857
10858 hw_lock_val = REG_RD(bp, hw_lock_reg);
10859 if (hw_lock_val) {
10860 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10861 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10862 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10863 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10864 }
10865
10866 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10867 REG_WR(bp, hw_lock_reg, 0xffffffff);
10868 } else
10869 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10870
10871 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10872 BNX2X_DEV_INFO("Release previously held alr\n");
10873 bnx2x_release_alr(bp);
10874 }
10875
10876 do {
10877 int aer = 0;
		/* Lock MCP using an unload request */
10879 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10880 if (!fw) {
10881 BNX2X_ERR("MCP response failure, aborting\n");
10882 rc = -EBUSY;
10883 break;
10884 }
10885
10886 rc = down_interruptible(&bnx2x_prev_sem);
10887 if (rc) {
10888 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10889 rc);
10890 } else {
			/* If Path is marked by EEH, ignore unload status */
10892 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10893 bnx2x_prev_path_get_entry(bp)->aer);
10894 up(&bnx2x_prev_sem);
10895 }
10896
10897 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10898 rc = bnx2x_prev_unload_common(bp);
10899 break;
10900 }

		/* non-common reply from MCP might require looping */
10903 rc = bnx2x_prev_unload_uncommon(bp);
10904 if (rc != BNX2X_PREV_WAIT_NEEDED)
10905 break;
10906
10907 msleep(20);
10908 } while (--time_counter);
10909
10910 if (!time_counter || rc) {
10911 BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
10912 rc = -EPROBE_DEFER;
10913 }

	/* Mark function if its port was used to boot from SAN */
10916 if (bnx2x_port_after_undi(bp))
10917 bp->link_params.feature_config_flags |=
10918 FEATURE_CONFIG_BOOT_FROM_SAN;
10919
10920 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10921
10922 return rc;
10923}
10924
10925static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10926{
10927 u32 val, val2, val3, val4, id, boot_mode;
10928 u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
10932 val = REG_RD(bp, MISC_REG_CHIP_NUM);
10933 id = ((val & 0xffff) << 16);
10934 val = REG_RD(bp, MISC_REG_CHIP_REV);
10935 id |= ((val & 0xf) << 12);

	/* Metal is read from PCI regs, but we can't access >=0x400 from
	 * the configuration space (so we need to reg_rd)
	 */
10940 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10941 id |= (((val >> 24) & 0xf) << 4);
10942 val = REG_RD(bp, MISC_REG_BOND_ID);
10943 id |= (val & 0xf);
10944 bp->common.chip_id = id;

	/* force 57811 according to MISC register */
10947 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10948 if (CHIP_IS_57810(bp))
10949 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10950 (bp->common.chip_id & 0x0000FFFF);
10951 else if (CHIP_IS_57810_MF(bp))
10952 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10953 (bp->common.chip_id & 0x0000FFFF);
10954 bp->common.chip_id |= 0x1;
10955 }

	/* Set doorbell size */
10958 bp->db_size = (1 << BNX2X_DB_SHIFT);
10959
10960 if (!CHIP_IS_E1x(bp)) {
10961 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10962 if ((val & 1) == 0)
10963 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10964 else
10965 val = (val >> 1) & 1;
10966 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10967 "2_PORT_MODE");
10968 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10969 CHIP_2_PORT_MODE;
10970
		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
10975 } else {
10976 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
10977 bp->pfid = bp->pf_num;
10978 }
10979
	BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
10981
10982 bp->link_params.chip_id = bp->common.chip_id;
10983 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10984
10985 val = (REG_RD(bp, 0x2874) & 0x55);
10986 if ((bp->common.chip_id & 0x1) ||
10987 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10988 bp->flags |= ONE_PORT_FLAG;
10989 BNX2X_DEV_INFO("single port device\n");
10990 }
10991
10992 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
10993 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
10994 (val & MCPR_NVM_CFG4_FLASH_SIZE));
10995 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10996 bp->common.flash_size, bp->common.flash_size);
10997
10998 bnx2x_init_shmem(bp);
10999
11000 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11001 MISC_REG_GENERIC_CR_1 :
11002 MISC_REG_GENERIC_CR_0));
11003
11004 bp->link_params.shmem_base = bp->common.shmem_base;
11005 bp->link_params.shmem2_base = bp->common.shmem2_base;
11006 if (SHMEM2_RD(bp, size) >
11007 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11008 bp->link_params.lfa_base =
11009 REG_RD(bp, bp->common.shmem2_base +
11010 (u32)offsetof(struct shmem2_region,
11011 lfa_host_addr[BP_PORT(bp)]));
11012 else
11013 bp->link_params.lfa_base = 0;
11014 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
11015 bp->common.shmem_base, bp->common.shmem2_base);
11016
11017 if (!bp->common.shmem_base) {
11018 BNX2X_DEV_INFO("MCP not active\n");
11019 bp->flags |= NO_MCP_FLAG;
11020 return;
11021 }
11022
11023 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11024 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11025
11026 bp->link_params.hw_led_mode = ((bp->common.hw_config &
11027 SHARED_HW_CFG_LED_MODE_MASK) >>
11028 SHARED_HW_CFG_LED_MODE_SHIFT);
11029
11030 bp->link_params.feature_config_flags = 0;
11031 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11032 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
11033 bp->link_params.feature_config_flags |=
11034 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11035 else
11036 bp->link_params.feature_config_flags &=
11037 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11038
11039 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11040 bp->common.bc_ver = val;
11041 BNX2X_DEV_INFO("bc_ver %X\n", val);
11042 if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
11045 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11046 BNX2X_BC_VER, val);
11047 }
11048 bp->link_params.feature_config_flags |=
11049 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11050 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11051
11052 bp->link_params.feature_config_flags |=
11053 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11054 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11055 bp->link_params.feature_config_flags |=
11056 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11057 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11058 bp->link_params.feature_config_flags |=
11059 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11060 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11061
11062 bp->link_params.feature_config_flags |=
11063 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11064 FEATURE_CONFIG_MT_SUPPORT : 0;
11065
11066 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11067 BC_SUPPORTS_PFC_STATS : 0;
11068
11069 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11070 BC_SUPPORTS_FCOE_FEATURES : 0;
11071
11072 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11073 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11074
11075 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11076 BC_SUPPORTS_RMMOD_CMD : 0;
11077
11078 boot_mode = SHMEM_RD(bp,
11079 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11080 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11081 switch (boot_mode) {
11082 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11083 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11084 break;
11085 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11086 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11087 break;
11088 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11089 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11090 break;
11091 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11092 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11093 break;
11094 }
11095
11096 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11097 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11098
11099 BNX2X_DEV_INFO("%sWoL capable\n",
11100 (bp->flags & NO_WOL_FLAG) ? "not " : "");
11101
11102 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11103 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11104 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11105 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11106
11107 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11108 val, val2, val3, val4);
11109}
11110
11111#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11112#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11113
11114static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11115{
11116 int pfid = BP_FUNC(bp);
11117 int igu_sb_id;
11118 u32 val;
11119 u8 fid, igu_sb_cnt = 0;
11120
11121 bp->igu_base_sb = 0xff;
11122 if (CHIP_INT_MODE_IS_BC(bp)) {
11123 int vn = BP_VN(bp);
11124 igu_sb_cnt = bp->igu_sb_cnt;
11125 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11126 FP_SB_MAX_E1x;
11127
11128 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
11129 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11130
11131 return 0;
11132 }

	/* IGU in normal mode - read CAM */
11135 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11136 igu_sb_id++) {
11137 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11138 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11139 continue;
11140 fid = IGU_FID(val);
11141 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11142 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11143 continue;
11144 if (IGU_VEC(val) == 0)
				/* default status block */
11146 bp->igu_dsb_id = igu_sb_id;
11147 else {
11148 if (bp->igu_base_sb == 0xff)
11149 bp->igu_base_sb = igu_sb_id;
11150 igu_sb_cnt++;
11151 }
11152 }
11153 }
11154
11155#ifdef CONFIG_PCI_MSI
	/* Due to new PF resource allocation by MFW T7.4 and above, it's
	 * optional that number of CAM entries will not be equal to the value
	 * advertised in PCI.
	 * Driver should use the minimal value of both as the actual status
	 * block count
	 */
11162 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11163#endif
11164
11165 if (igu_sb_cnt == 0) {
11166 BNX2X_ERR("CAM configuration error\n");
11167 return -EINVAL;
11168 }
11169
11170 return 0;
11171}
11172
11173static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11174{
11175 int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
11178 bp->port.supported[0] = 0;
11179 bp->port.supported[1] = 0;
11180 switch (bp->link_params.num_phys) {
11181 case 1:
11182 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11183 cfg_size = 1;
11184 break;
11185 case 2:
11186 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11187 cfg_size = 1;
11188 break;
11189 case 3:
11190 if (bp->link_params.multi_phy_config &
11191 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11192 bp->port.supported[1] =
11193 bp->link_params.phy[EXT_PHY1].supported;
11194 bp->port.supported[0] =
11195 bp->link_params.phy[EXT_PHY2].supported;
11196 } else {
11197 bp->port.supported[0] =
11198 bp->link_params.phy[EXT_PHY1].supported;
11199 bp->port.supported[1] =
11200 bp->link_params.phy[EXT_PHY2].supported;
11201 }
11202 cfg_size = 2;
11203 break;
11204 }
11205
11206 if (!(bp->port.supported[0] || bp->port.supported[1])) {
11207 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11208 SHMEM_RD(bp,
11209 dev_info.port_hw_config[port].external_phy_config),
11210 SHMEM_RD(bp,
11211 dev_info.port_hw_config[port].external_phy_config2));
11212 return;
11213 }
11214
11215 if (CHIP_IS_E3(bp))
11216 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11217 else {
11218 switch (switch_cfg) {
11219 case SWITCH_CFG_1G:
11220 bp->port.phy_addr = REG_RD(
11221 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11222 break;
11223 case SWITCH_CFG_10G:
11224 bp->port.phy_addr = REG_RD(
11225 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11226 break;
11227 default:
11228 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11229 bp->port.link_config[0]);
11230 return;
11231 }
11232 }
11233 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11234
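	/* mask what we support according to speed_cap_mask per configuration */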
11235 for (idx = 0; idx < cfg_size; idx++) {
11236 if (!(bp->link_params.speed_cap_mask[idx] &
11237 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11238 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11239
11240 if (!(bp->link_params.speed_cap_mask[idx] &
11241 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11242 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11243
11244 if (!(bp->link_params.speed_cap_mask[idx] &
11245 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11246 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11247
11248 if (!(bp->link_params.speed_cap_mask[idx] &
11249 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11250 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11251
11252 if (!(bp->link_params.speed_cap_mask[idx] &
11253 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11254 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11255 SUPPORTED_1000baseT_Full);
11256
11257 if (!(bp->link_params.speed_cap_mask[idx] &
11258 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11259 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11260
11261 if (!(bp->link_params.speed_cap_mask[idx] &
11262 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11263 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11264
11265 if (!(bp->link_params.speed_cap_mask[idx] &
11266 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11267 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11268 }
11269
11270 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11271 bp->port.supported[1]);
11272}
11273
11274static void bnx2x_link_settings_requested(struct bnx2x *bp)
11275{
11276 u32 link_config, idx, cfg_size = 0;
11277 bp->port.advertising[0] = 0;
11278 bp->port.advertising[1] = 0;
11279 switch (bp->link_params.num_phys) {
11280 case 1:
11281 case 2:
11282 cfg_size = 1;
11283 break;
11284 case 3:
11285 cfg_size = 2;
11286 break;
11287 }
11288 for (idx = 0; idx < cfg_size; idx++) {
11289 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11290 link_config = bp->port.link_config[idx];
11291 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11292 case PORT_FEATURE_LINK_SPEED_AUTO:
11293 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11294 bp->link_params.req_line_speed[idx] =
11295 SPEED_AUTO_NEG;
11296 bp->port.advertising[idx] |=
11297 bp->port.supported[idx];
11298 if (bp->link_params.phy[EXT_PHY1].type ==
11299 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11300 bp->port.advertising[idx] |=
11301 (SUPPORTED_100baseT_Half |
11302 SUPPORTED_100baseT_Full);
11303 } else {
				/* force 10G, no AN */
11305 bp->link_params.req_line_speed[idx] =
11306 SPEED_10000;
11307 bp->port.advertising[idx] |=
11308 (ADVERTISED_10000baseT_Full |
11309 ADVERTISED_FIBRE);
11310 continue;
11311 }
11312 break;
11313
11314 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11315 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11316 bp->link_params.req_line_speed[idx] =
11317 SPEED_10;
11318 bp->port.advertising[idx] |=
11319 (ADVERTISED_10baseT_Full |
11320 ADVERTISED_TP);
11321 } else {
11322 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11323 link_config,
11324 bp->link_params.speed_cap_mask[idx]);
11325 return;
11326 }
11327 break;
11328
11329 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11330 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11331 bp->link_params.req_line_speed[idx] =
11332 SPEED_10;
11333 bp->link_params.req_duplex[idx] =
11334 DUPLEX_HALF;
11335 bp->port.advertising[idx] |=
11336 (ADVERTISED_10baseT_Half |
11337 ADVERTISED_TP);
11338 } else {
11339 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11340 link_config,
11341 bp->link_params.speed_cap_mask[idx]);
11342 return;
11343 }
11344 break;
11345
11346 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11347 if (bp->port.supported[idx] &
11348 SUPPORTED_100baseT_Full) {
11349 bp->link_params.req_line_speed[idx] =
11350 SPEED_100;
11351 bp->port.advertising[idx] |=
11352 (ADVERTISED_100baseT_Full |
11353 ADVERTISED_TP);
11354 } else {
11355 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11356 link_config,
11357 bp->link_params.speed_cap_mask[idx]);
11358 return;
11359 }
11360 break;
11361
11362 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11363 if (bp->port.supported[idx] &
11364 SUPPORTED_100baseT_Half) {
11365 bp->link_params.req_line_speed[idx] =
11366 SPEED_100;
11367 bp->link_params.req_duplex[idx] =
11368 DUPLEX_HALF;
11369 bp->port.advertising[idx] |=
11370 (ADVERTISED_100baseT_Half |
11371 ADVERTISED_TP);
11372 } else {
11373 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11374 link_config,
11375 bp->link_params.speed_cap_mask[idx]);
11376 return;
11377 }
11378 break;
11379
11380 case PORT_FEATURE_LINK_SPEED_1G:
11381 if (bp->port.supported[idx] &
11382 SUPPORTED_1000baseT_Full) {
11383 bp->link_params.req_line_speed[idx] =
11384 SPEED_1000;
11385 bp->port.advertising[idx] |=
11386 (ADVERTISED_1000baseT_Full |
11387 ADVERTISED_TP);
11388 } else if (bp->port.supported[idx] &
11389 SUPPORTED_1000baseKX_Full) {
11390 bp->link_params.req_line_speed[idx] =
11391 SPEED_1000;
11392 bp->port.advertising[idx] |=
11393 ADVERTISED_1000baseKX_Full;
11394 } else {
11395 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11396 link_config,
11397 bp->link_params.speed_cap_mask[idx]);
11398 return;
11399 }
11400 break;
11401
11402 case PORT_FEATURE_LINK_SPEED_2_5G:
11403 if (bp->port.supported[idx] &
11404 SUPPORTED_2500baseX_Full) {
11405 bp->link_params.req_line_speed[idx] =
11406 SPEED_2500;
11407 bp->port.advertising[idx] |=
11408 (ADVERTISED_2500baseX_Full |
11409 ADVERTISED_TP);
11410 } else {
11411 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11412 link_config,
11413 bp->link_params.speed_cap_mask[idx]);
11414 return;
11415 }
11416 break;
11417
11418 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11419 if (bp->port.supported[idx] &
11420 SUPPORTED_10000baseT_Full) {
11421 bp->link_params.req_line_speed[idx] =
11422 SPEED_10000;
11423 bp->port.advertising[idx] |=
11424 (ADVERTISED_10000baseT_Full |
11425 ADVERTISED_FIBRE);
11426 } else if (bp->port.supported[idx] &
11427 SUPPORTED_10000baseKR_Full) {
11428 bp->link_params.req_line_speed[idx] =
11429 SPEED_10000;
11430 bp->port.advertising[idx] |=
11431 (ADVERTISED_10000baseKR_Full |
11432 ADVERTISED_FIBRE);
11433 } else {
11434 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11435 link_config,
11436 bp->link_params.speed_cap_mask[idx]);
11437 return;
11438 }
11439 break;
11440 case PORT_FEATURE_LINK_SPEED_20G:
11441 bp->link_params.req_line_speed[idx] = SPEED_20000;
11442
11443 break;
11444 default:
11445 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11446 link_config);
11447 bp->link_params.req_line_speed[idx] =
11448 SPEED_AUTO_NEG;
11449 bp->port.advertising[idx] =
11450 bp->port.supported[idx];
11451 break;
11452 }
11453
11454 bp->link_params.req_flow_ctrl[idx] = (link_config &
11455 PORT_FEATURE_FLOW_CONTROL_MASK);
11456 if (bp->link_params.req_flow_ctrl[idx] ==
11457 BNX2X_FLOW_CTRL_AUTO) {
11458 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11459 bp->link_params.req_flow_ctrl[idx] =
11460 BNX2X_FLOW_CTRL_NONE;
11461 else
11462 bnx2x_set_requested_fc(bp);
11463 }
11464
11465 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11466 bp->link_params.req_line_speed[idx],
11467 bp->link_params.req_duplex[idx],
11468 bp->link_params.req_flow_ctrl[idx],
11469 bp->port.advertising[idx]);
11470 }
11471}
11472
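/* Assemble the 6-byte MAC address from the hi/lo words kept in shmem
 * (stored there in big-endian order)
 */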
11473static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11474{
11475 __be16 mac_hi_be = cpu_to_be16(mac_hi);
11476 __be32 mac_lo_be = cpu_to_be32(mac_lo);
11477 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11478 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11479}
11480
11481static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11482{
11483 int port = BP_PORT(bp);
11484 u32 config;
11485 u32 ext_phy_type, ext_phy_config, eee_mode;
11486
11487 bp->link_params.bp = bp;
11488 bp->link_params.port = port;
11489
11490 bp->link_params.lane_config =
11491 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11492
11493 bp->link_params.speed_cap_mask[0] =
11494 SHMEM_RD(bp,
11495 dev_info.port_hw_config[port].speed_capability_mask) &
11496 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11497 bp->link_params.speed_cap_mask[1] =
11498 SHMEM_RD(bp,
11499 dev_info.port_hw_config[port].speed_capability_mask2) &
11500 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11501 bp->port.link_config[0] =
11502 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11503
11504 bp->port.link_config[1] =
11505 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11506
11507 bp->link_params.multi_phy_config =
11508 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11509
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
11512 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11513 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11514 (config & PORT_FEATURE_WOL_ENABLED));
11515
11516 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11517 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11518 bp->flags |= NO_ISCSI_FLAG;
11519 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11520 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11521 bp->flags |= NO_FCOE_FLAG;
11522
11523 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
11524 bp->link_params.lane_config,
11525 bp->link_params.speed_cap_mask[0],
11526 bp->port.link_config[0]);
11527
11528 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11529 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11530 bnx2x_phy_probe(&bp->link_params);
11531 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11532
11533 bnx2x_link_settings_requested(bp);
11534
	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
11539 ext_phy_config =
11540 SHMEM_RD(bp,
11541 dev_info.port_hw_config[port].external_phy_config);
11542 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11543 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11544 bp->mdio.prtad = bp->port.phy_addr;
11545
11546 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11547 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11548 bp->mdio.prtad =
11549 XGXS_EXT_PHY_ADDR(ext_phy_config);

	/* Configure link feature according to nvram value */
11552 eee_mode = (((SHMEM_RD(bp, dev_info.
11553 port_feature_config[port].eee_power_mode)) &
11554 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11555 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11556 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11557 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11558 EEE_MODE_ENABLE_LPI |
11559 EEE_MODE_OUTPUT_TIME;
11560 } else {
11561 bp->link_params.eee_mode = 0;
11562 }
11563}
11564
11565void bnx2x_get_iscsi_info(struct bnx2x *bp)
11566{
11567 u32 no_flags = NO_ISCSI_FLAG;
11568 int port = BP_PORT(bp);
11569 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11570 drv_lic_key[port].max_iscsi_conn);
11571
11572 if (!CNIC_SUPPORT(bp)) {
11573 bp->flags |= no_flags;
11574 return;
11575 }

	/* Get the number of maximum allowed iSCSI connections */
11578 bp->cnic_eth_dev.max_iscsi_conn =
11579 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11580 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11581
11582 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11583 bp->cnic_eth_dev.max_iscsi_conn);

	/* If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
11589 if (!bp->cnic_eth_dev.max_iscsi_conn)
11590 bp->flags |= no_flags;
11591}
11592
11593static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11594{
	/* Port info */
11596 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11597 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11598 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11599 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);

	/* Node info */
11602 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11603 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11604 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11605 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11606}
11607
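/**
 * bnx2x_shared_fcoe_funcs - returns the number of functions sharing FCoE
 *
 * @bp:		driver handle
 */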
11608static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11609{
11610 u8 count = 0;
11611
11612 if (IS_MF(bp)) {
11613 u8 fid;

		/* iterate over absolute function ids for this path: */
11616 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11617 if (IS_MF_SD(bp)) {
11618 u32 cfg = MF_CFG_RD(bp,
11619 func_mf_config[fid].config);
11620
11621 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11622 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11623 FUNC_MF_CFG_PROTOCOL_FCOE))
11624 count++;
11625 } else {
11626 u32 cfg = MF_CFG_RD(bp,
11627 func_ext_config[fid].
11628 func_cfg);
11629
11630 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11631 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11632 count++;
11633 }
11634 }
11635 } else {
11636 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11637
11638 for (port = 0; port < port_cnt; port++) {
11639 u32 lic = SHMEM_RD(bp,
11640 drv_lic_key[port].max_fcoe_conn) ^
11641 FW_ENCODE_32BIT_PATTERN;
11642 if (lic)
11643 count++;
11644 }
11645 }
11646
11647 return count;
11648}
11649
11650static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11651{
11652 int port = BP_PORT(bp);
11653 int func = BP_ABS_FUNC(bp);
11654 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11655 drv_lic_key[port].max_fcoe_conn);
11656 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11657
11658 if (!CNIC_SUPPORT(bp)) {
11659 bp->flags |= NO_FCOE_FLAG;
11660 return;
11661 }

	/* Get the number of maximum allowed FCoE connections */
11664 bp->cnic_eth_dev.max_fcoe_conn =
11665 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11666 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;

	/* Calculate the number of maximum allowed FCoE tasks */
11669 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;

	/* check if FCoE resources must be shared between different functions */
11672 if (num_fcoe_func)
11673 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;

	/* Read the WWN: */
11676 if (!IS_MF(bp)) {
		/* Port info */
11678 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11679 SHMEM_RD(bp,
11680 dev_info.port_hw_config[port].
11681 fcoe_wwn_port_name_upper);
11682 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11683 SHMEM_RD(bp,
11684 dev_info.port_hw_config[port].
11685 fcoe_wwn_port_name_lower);

		/* Node info */
11688 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11689 SHMEM_RD(bp,
11690 dev_info.port_hw_config[port].
11691 fcoe_wwn_node_name_upper);
11692 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11693 SHMEM_RD(bp,
11694 dev_info.port_hw_config[port].
11695 fcoe_wwn_node_name_lower);
11696 } else if (!IS_MF_SD(bp)) {
		/* Read the WWN info only if the FCoE feature is enabled for
		 * this function.
		 */
11700 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11701 bnx2x_get_ext_wwn_info(bp, func);
11702 } else {
11703 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11704 bnx2x_get_ext_wwn_info(bp, func);
11705 }
11706
11707 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);

	/* If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
11713 if (!bp->cnic_eth_dev.max_fcoe_conn) {
11714 bp->flags |= NO_FCOE_FLAG;
11715 eth_zero_addr(bp->fip_mac);
11716 }
11717}
11718
11719static void bnx2x_get_cnic_info(struct bnx2x *bp)
11720{
	/* iSCSI and FCoE may be dynamically disabled, but the info is read
	 * here anyway so the driver can reduce its memory usage when a
	 * feature is off.
	 */
11726 bnx2x_get_iscsi_info(bp);
11727 bnx2x_get_fcoe_info(bp);
11728}
11729
11730static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11731{
11732 u32 val, val2;
11733 int func = BP_ABS_FUNC(bp);
11734 int port = BP_PORT(bp);
11735 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11736 u8 *fip_mac = bp->fip_mac;
11737
11738 if (IS_MF(bp)) {
		/* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
		 * FCoE MAC then the appropriate feature should be disabled.
		 * In non SD mode features configuration comes from struct
		 * func_ext_config.
		 */
11744 if (!IS_MF_SD(bp)) {
11745 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11746 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11747 val2 = MF_CFG_RD(bp, func_ext_config[func].
11748 iscsi_mac_addr_upper);
11749 val = MF_CFG_RD(bp, func_ext_config[func].
11750 iscsi_mac_addr_lower);
11751 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11752 BNX2X_DEV_INFO
11753 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11754 } else {
11755 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11756 }
11757
11758 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11759 val2 = MF_CFG_RD(bp, func_ext_config[func].
11760 fcoe_mac_addr_upper);
11761 val = MF_CFG_RD(bp, func_ext_config[func].
11762 fcoe_mac_addr_lower);
11763 bnx2x_set_mac_buf(fip_mac, val, val2);
11764 BNX2X_DEV_INFO
11765 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11766 } else {
11767 bp->flags |= NO_FCOE_FLAG;
11768 }
11769
11770 bp->mf_ext_config = cfg;
11771
11772 } else {
11773 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
				/* use primary mac as iscsi mac */
11775 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11776
11777 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11778 BNX2X_DEV_INFO
11779 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11780 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
				/* use primary mac as fip mac */
11782 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11783 BNX2X_DEV_INFO("SD FCoE MODE\n");
11784 BNX2X_DEV_INFO
11785 ("Read FIP MAC: %pM\n", fip_mac);
11786 }
11787 }

		/* If this is a storage-only interface, use SAN mac as
		 * primary MAC. Notice that for SD this is already the case,
		 * as the SAN mac was copied from the primary MAC.
		 */
11793 if (IS_MF_FCOE_AFEX(bp))
11794 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11795 } else {
11796 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11797 iscsi_mac_upper);
11798 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11799 iscsi_mac_lower);
11800 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11801
11802 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11803 fcoe_fip_mac_upper);
11804 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11805 fcoe_fip_mac_lower);
11806 bnx2x_set_mac_buf(fip_mac, val, val2);
11807 }

	/* Disable iSCSI OOO if MAC configuration is invalid. */
11810 if (!is_valid_ether_addr(iscsi_mac)) {
11811 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11812 eth_zero_addr(iscsi_mac);
11813 }

	/* Disable FCoE if MAC configuration is invalid. */
11816 if (!is_valid_ether_addr(fip_mac)) {
11817 bp->flags |= NO_FCOE_FLAG;
11818 eth_zero_addr(bp->fip_mac);
11819 }
11820}
11821
11822static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11823{
11824 u32 val, val2;
11825 int func = BP_ABS_FUNC(bp);
11826 int port = BP_PORT(bp);

	/* Zero primary MAC configuration */
11829 eth_zero_addr(bp->dev->dev_addr);
11830
11831 if (BP_NOMCP(bp)) {
11832 BNX2X_ERROR("warning: random MAC workaround active\n");
11833 eth_hw_addr_random(bp->dev);
11834 } else if (IS_MF(bp)) {
11835 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11836 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11837 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11838 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11839 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11840
11841 if (CNIC_SUPPORT(bp))
11842 bnx2x_get_cnic_mac_hwinfo(bp);
11843 } else {
		/* in SF read MACs from port configuration */
11845 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11846 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11847 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11848
11849 if (CNIC_SUPPORT(bp))
11850 bnx2x_get_cnic_mac_hwinfo(bp);
11851 }
11852
11853 if (!BP_NOMCP(bp)) {
		/* Read physical port identifier from shmem */
11855 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11856 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11857 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11858 bp->flags |= HAS_PHYS_PORT_ID;
11859 }
11860
11861 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11862
11863 if (!is_valid_ether_addr(bp->dev->dev_addr))
11864 dev_err(&bp->pdev->dev,
11865 "bad Ethernet MAC address configuration: %pM\n"
11866 "change it manually before bringing up the appropriate network interface\n",
11867 bp->dev->dev_addr);
11868}
11869
11870static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11871{
11872 int tmp;
11873 u32 cfg;
11874
11875 if (IS_VF(bp))
11876 return false;
11877
11878 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
		/* Take function: tmp = func */
11880 tmp = BP_ABS_FUNC(bp);
11881 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11882 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11883 } else {
		/* Take port: tmp = port */
11885 tmp = BP_PORT(bp);
11886 cfg = SHMEM_RD(bp,
11887 dev_info.port_hw_config[tmp].generic_features);
11888 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11889 }
11890 return cfg;
11891}
11892
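/* Enable switch-independent (SI) MF mode only if the upper MAC bytes read
 * from the MF configuration are legal (not all-ones)
 */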
11893static void validate_set_si_mode(struct bnx2x *bp)
11894{
11895 u8 func = BP_ABS_FUNC(bp);
11896 u32 val;
11897
11898 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);

	/* check for legal mac (upper bytes) */
11901 if (val != 0xffff) {
11902 bp->mf_mode = MULTI_FUNCTION_SI;
11903 bp->mf_config[BP_VN(bp)] =
11904 MF_CFG_RD(bp, func_mf_config[func].config);
11905 } else
11906 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11907}
11908
11909static int bnx2x_get_hwinfo(struct bnx2x *bp)
11910{
11911 int func = BP_ABS_FUNC(bp);
11912 int vn;
11913 u32 val = 0, val2 = 0;
11914 int rc = 0;

	/* validate that chip access is feasible */
11917 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11918 dev_err(&bp->pdev->dev,
11919 "Chip read returns all Fs. Preventing probe from continuing\n");
11920 return -EINVAL;
11921 }
11922
11923 bnx2x_get_common_hwinfo(bp);
11924
	/*
	 * initialize IGU parameters
	 */
11928 if (CHIP_IS_E1x(bp)) {
11929 bp->common.int_block = INT_BLOCK_HC;
11930
11931 bp->igu_dsb_id = DEF_SB_IGU_ID;
11932 bp->igu_base_sb = 0;
11933 } else {
11934 bp->common.int_block = INT_BLOCK_IGU;

		/* do not allow device reset during IGU info processing */
11937 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11938
11939 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11940
11941 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11942 int tout = 5000;
11943
11944 BNX2X_DEV_INFO("FORCING Normal Mode\n");
11945
11946 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11947 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11948 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11949
11950 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11951 tout--;
11952 usleep_range(1000, 2000);
11953 }
11954
11955 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11956 dev_err(&bp->pdev->dev,
11957 "FORCING Normal Mode failed!!!\n");
11958 bnx2x_release_hw_lock(bp,
11959 HW_LOCK_RESOURCE_RESET);
11960 return -EPERM;
11961 }
11962 }
11963
11964 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11965 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11966 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11967 } else
11968 BNX2X_DEV_INFO("IGU Normal Mode\n");
11969
11970 rc = bnx2x_get_igu_cam_info(bp);
11971 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11972 if (rc)
11973 return rc;
11974 }
11975
	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
11981 if (CHIP_IS_E1x(bp))
11982 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11983 else
		/*
		 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
		 * the same queue are indicated on the same IGU SB). So we
		 * prefer FW and IGU SBs to be the same value.
		 */
11988 bp->base_fw_ndsb = bp->igu_base_sb;
11989
11990 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
11991 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11992 bp->igu_sb_cnt, bp->base_fw_ndsb);
11993
	/*
	 * Initialize MF configuration
	 */
11997 bp->mf_ov = 0;
11998 bp->mf_mode = 0;
11999 bp->mf_sub_mode = 0;
12000 vn = BP_VN(bp);
12001
12002 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12003 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
12004 bp->common.shmem2_base, SHMEM2_RD(bp, size),
12005 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
12006
12007 if (SHMEM2_HAS(bp, mf_cfg_addr))
12008 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12009 else
12010 bp->common.mf_cfg_base = bp->common.shmem_base +
12011 offsetof(struct shmem_region, func_mb) +
12012 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
12013
		/*
		 * get mf configuration:
		 * 1. Existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE determines the switch mode
		 */
12021 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
12023 val = SHMEM_RD(bp,
12024 dev_info.shared_feature_config.config);
12025 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
12026
12027 switch (val) {
12028 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
12029 validate_set_si_mode(bp);
12030 break;
12031 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
12032 if ((!CHIP_IS_E1x(bp)) &&
12033 (MF_CFG_RD(bp, func_mf_config[func].
12034 mac_upper) != 0xffff) &&
12035 (SHMEM2_HAS(bp,
12036 afex_driver_support))) {
12037 bp->mf_mode = MULTI_FUNCTION_AFEX;
12038 bp->mf_config[vn] = MF_CFG_RD(bp,
12039 func_mf_config[func].config);
12040 } else {
12041 BNX2X_DEV_INFO("can not configure afex mode\n");
12042 }
12043 break;
12044 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
12046 val = MF_CFG_RD(bp,
12047 func_mf_config[FUNC_0].e1hov_tag);
12048 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12049
12050 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12051 bp->mf_mode = MULTI_FUNCTION_SD;
12052 bp->mf_config[vn] = MF_CFG_RD(bp,
12053 func_mf_config[func].config);
12054 } else
12055 BNX2X_DEV_INFO("illegal OV for SD\n");
12056 break;
12057 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12058 bp->mf_mode = MULTI_FUNCTION_SD;
12059 bp->mf_sub_mode = SUB_MF_MODE_BD;
12060 bp->mf_config[vn] =
12061 MF_CFG_RD(bp,
12062 func_mf_config[func].config);
12063
12064 if (SHMEM2_HAS(bp, mtu_size)) {
12065 int mtu_idx = BP_FW_MB_IDX(bp);
12066 u16 mtu_size;
12067 u32 mtu;
12068
12069 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12070 mtu_size = (u16)mtu;
12071 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12072 mtu_size, mtu);

					/* if valid: update device mtu */
12075 if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
12076 (mtu_size <=
12077 ETH_MAX_JUMBO_PACKET_SIZE))
12078 bp->dev->mtu = mtu_size;
12079 }
12080 break;
12081 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12082 bp->mf_mode = MULTI_FUNCTION_SD;
12083 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12084 bp->mf_config[vn] =
12085 MF_CFG_RD(bp,
12086 func_mf_config[func].config);
12087 break;
12088 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12089 bp->mf_config[vn] = 0;
12090 break;
12091 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12092 val2 = SHMEM_RD(bp,
12093 dev_info.shared_hw_config.config_3);
12094 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12095 switch (val2) {
12096 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12097 validate_set_si_mode(bp);
12098 bp->mf_sub_mode =
12099 SUB_MF_MODE_NPAR1_DOT_5;
12100 break;
12101 default:
					/* Unknown configuration: reset mf_config */
					bp->mf_config[vn] = 0;
					BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
						       val2);
12106 }
12107 break;
12108 default:
			/* Unknown configuration: reset mf_config */
12110 bp->mf_config[vn] = 0;
12111 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12112 }
12113 }
12114
12115 BNX2X_DEV_INFO("%s function mode\n",
12116 IS_MF(bp) ? "multi" : "single");
12117
12118 switch (bp->mf_mode) {
12119 case MULTI_FUNCTION_SD:
12120 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12121 FUNC_MF_CFG_E1HOV_TAG_MASK;
12122 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12123 bp->mf_ov = val;
12124 bp->path_has_ovlan = true;
12125
12126 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12127 func, bp->mf_ov, bp->mf_ov);
12128 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12129 (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12130 dev_err(&bp->pdev->dev,
12131 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12132 func);
12133 bp->path_has_ovlan = true;
12134 } else {
12135 dev_err(&bp->pdev->dev,
12136 "No valid MF OV for func %d, aborting\n",
12137 func);
12138 return -EPERM;
12139 }
12140 break;
12141 case MULTI_FUNCTION_AFEX:
12142 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12143 break;
12144 case MULTI_FUNCTION_SI:
12145 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12146 func);
12147 break;
12148 default:
12149 if (vn) {
12150 dev_err(&bp->pdev->dev,
12151 "VN %d is in a single function mode, aborting\n",
12152 vn);
12153 return -EPERM;
12154 }
12155 break;
12156 }
12157
		/* check if other port on the path needs ovlan:
		 * Since MF configuration is shared between ports
		 * Possible mixed modes are only
		 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
		 */
12163 if (CHIP_MODE_IS_4_PORT(bp) &&
12164 !bp->path_has_ovlan &&
12165 !IS_MF(bp) &&
12166 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12167 u8 other_port = !BP_PORT(bp);
12168 u8 other_func = BP_PATH(bp) + 2*other_port;
12169 val = MF_CFG_RD(bp,
12170 func_mf_config[other_func].e1hov_tag);
12171 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12172 bp->path_has_ovlan = true;
12173 }
12174 }

	/* adjust igu_sb_cnt to MF for E1H */
12177 if (CHIP_IS_E1H(bp) && IS_MF(bp))
12178 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);

	/* port info */
12181 bnx2x_get_port_hwinfo(bp);

	/* Get MAC addresses */
12184 bnx2x_get_mac_hwinfo(bp);
12185
12186 bnx2x_get_cnic_info(bp);
12187
12188 return rc;
12189}
12190
12191static void bnx2x_read_fwinfo(struct bnx2x *bp)
12192{
12193 int cnt, i, block_end, rodi;
12194 char vpd_start[BNX2X_VPD_LEN+1];
12195 char str_id_reg[VENDOR_ID_LEN+1];
12196 char str_id_cap[VENDOR_ID_LEN+1];
12197 char *vpd_data;
12198 char *vpd_extended_data = NULL;
12199 u8 len;
12200
12201 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
12202 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12203
12204 if (cnt < BNX2X_VPD_LEN)
12205 goto out_not_found;
12206
	/* VPD RO tag should be first tag after identifier string, hence
	 * we should be able to find it in first BNX2X_VPD_LEN chars
	 */
12210 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
12211 PCI_VPD_LRDT_RO_DATA);
12212 if (i < 0)
12213 goto out_not_found;
12214
12215 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
12216 pci_vpd_lrdt_size(&vpd_start[i]);
12217
12218 i += PCI_VPD_LRDT_TAG_SIZE;
12219
12220 if (block_end > BNX2X_VPD_LEN) {
12221 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
12222 if (vpd_extended_data == NULL)
12223 goto out_not_found;

		/* read rest of vpd image into vpd_extended_data */
12226 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
12227 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
12228 block_end - BNX2X_VPD_LEN,
12229 vpd_extended_data + BNX2X_VPD_LEN);
12230 if (cnt < (block_end - BNX2X_VPD_LEN))
12231 goto out_not_found;
12232 vpd_data = vpd_extended_data;
12233 } else
12234 vpd_data = vpd_start;
12235
12236
12237
12238 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12239 PCI_VPD_RO_KEYWORD_MFR_ID);
12240 if (rodi < 0)
12241 goto out_not_found;
12242
12243 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12244
12245 if (len != VENDOR_ID_LEN)
12246 goto out_not_found;
12247
12248 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
12251 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12252 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
12253 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
12254 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
12255
12256 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12257 PCI_VPD_RO_KEYWORD_VENDOR0);
12258 if (rodi >= 0) {
12259 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12260
12261 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12262
12263 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
12264 memcpy(bp->fw_ver, &vpd_data[rodi], len);
12265 bp->fw_ver[len] = ' ';
12266 }
12267 }
12268 kfree(vpd_extended_data);
12269 return;
12270 }
12271out_not_found:
12272 kfree(vpd_extended_data);
12273 return;
12274}
12275
12276static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12277{
12278 u32 flags = 0;
12279
12280 if (CHIP_REV_IS_FPGA(bp))
12281 SET_FLAGS(flags, MODE_FPGA);
12282 else if (CHIP_REV_IS_EMUL(bp))
12283 SET_FLAGS(flags, MODE_EMUL);
12284 else
12285 SET_FLAGS(flags, MODE_ASIC);
12286
12287 if (CHIP_MODE_IS_4_PORT(bp))
12288 SET_FLAGS(flags, MODE_PORT4);
12289 else
12290 SET_FLAGS(flags, MODE_PORT2);
12291
12292 if (CHIP_IS_E2(bp))
12293 SET_FLAGS(flags, MODE_E2);
12294 else if (CHIP_IS_E3(bp)) {
12295 SET_FLAGS(flags, MODE_E3);
12296 if (CHIP_REV(bp) == CHIP_REV_Ax)
12297 SET_FLAGS(flags, MODE_E3_A0);
12298 else
12299 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12300 }
12301
12302 if (IS_MF(bp)) {
12303 SET_FLAGS(flags, MODE_MF);
12304 switch (bp->mf_mode) {
12305 case MULTI_FUNCTION_SD:
12306 SET_FLAGS(flags, MODE_MF_SD);
12307 break;
12308 case MULTI_FUNCTION_SI:
12309 SET_FLAGS(flags, MODE_MF_SI);
12310 break;
12311 case MULTI_FUNCTION_AFEX:
12312 SET_FLAGS(flags, MODE_MF_AFEX);
12313 break;
12314 }
12315 } else
12316 SET_FLAGS(flags, MODE_SF);
12317
12318#if defined(__LITTLE_ENDIAN)
12319 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12320#else
12321 SET_FLAGS(flags, MODE_BIG_ENDIAN);
12322#endif
12323 INIT_MODE_FLAGS(bp) = flags;
12324}
12325
12326static int bnx2x_init_bp(struct bnx2x *bp)
12327{
12328 int func;
12329 int rc;
12330
12331 mutex_init(&bp->port.phy_mutex);
12332 mutex_init(&bp->fw_mb_mutex);
12333 mutex_init(&bp->drv_info_mutex);
12334 sema_init(&bp->stats_lock, 1);
12335 bp->drv_info_mng_owner = false;
12336 INIT_LIST_HEAD(&bp->vlan_reg);
12337
12338 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12339 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12340 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12341 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12342 if (IS_PF(bp)) {
12343 rc = bnx2x_get_hwinfo(bp);
12344 if (rc)
12345 return rc;
12346 } else {
12347 eth_zero_addr(bp->dev->dev_addr);
12348 }
12349
12350 bnx2x_set_modes_bitmap(bp);
12351
12352 rc = bnx2x_alloc_mem_bp(bp);
12353 if (rc)
12354 return rc;
12355
12356 bnx2x_read_fwinfo(bp);
12357
12358 func = BP_FUNC(bp);
12359
	/* need to reset chip if undi was active */
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		/* init fw_seq */
12363 bp->fw_seq =
12364 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12365 DRV_MSG_SEQ_NUMBER_MASK;
12366 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12367
12368 rc = bnx2x_prev_unload(bp);
12369 if (rc) {
12370 bnx2x_free_mem_bp(bp);
12371 return rc;
12372 }
12373 }
12374
12375 if (CHIP_REV_IS_FPGA(bp))
12376 dev_err(&bp->pdev->dev, "FPGA detected\n");
12377
12378 if (BP_NOMCP(bp) && (func == 0))
12379 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12380
12381 bp->disable_tpa = disable_tpa;
12382 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
	/* Reduce memory usage in kdump environment by disabling TPA */
12384 bp->disable_tpa |= is_kdump_kernel();

	/* Set TPA flags */
12387 if (bp->disable_tpa) {
12388 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12389 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12390 }
12391
12392 if (CHIP_IS_E1(bp))
12393 bp->dropless_fc = false;
12394 else
12395 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12396
12397 bp->mrrs = mrrs;
12398
12399 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12400 if (IS_VF(bp))
12401 bp->rx_ring_size = MAX_RX_AVAIL;

	/* make sure that the numbers are in the right granularity */
12404 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12405 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12406
12407 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12408
12409 timer_setup(&bp->timer, bnx2x_timer, 0);
12410 bp->timer.expires = jiffies + bp->current_interval;
12411
12412 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12413 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12414 SHMEM2_HAS(bp, dcbx_en) &&
12415 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12416 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12417 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12418 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12419 bnx2x_dcbx_init_params(bp);
12420 } else {
12421 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12422 }
12423
12424 if (CHIP_IS_E1x(bp))
12425 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12426 else
12427 bp->cnic_base_cl_id = FP_SB_MAX_E2;

	/* multiple tx priority */
12430 if (IS_VF(bp))
12431 bp->max_cos = 1;
12432 else if (CHIP_IS_E1x(bp))
12433 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12434 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12435 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12436 else if (CHIP_IS_E3B0(bp))
12437 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12438 else
12439 BNX2X_ERR("unknown chip %x revision %x\n",
12440 CHIP_NUM(bp), CHIP_REV(bp));
12441 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12442
	/* We need at least one default status block for slow-path events,
	 * second status block for the L2 queue, and a third status block for
	 * CNIC if supported.
	 */
12447 if (IS_VF(bp))
12448 bp->min_msix_vec_cnt = 1;
12449 else if (CNIC_SUPPORT(bp))
12450 bp->min_msix_vec_cnt = 3;
12451 else
12452 bp->min_msix_vec_cnt = 2;
	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
12454
12455 bp->dump_preset_idx = 1;
12456
12457 return rc;
12458}
12459
/****************************************************************************
* General service functions
****************************************************************************/

/* called with rtnl_lock */
12469static int bnx2x_open(struct net_device *dev)
12470{
12471 struct bnx2x *bp = netdev_priv(dev);
12472 int rc;
12473
12474 bp->stats_init = true;
12475
12476 netif_carrier_off(dev);
12477
12478 bnx2x_set_power_state(bp, PCI_D0);
12479
	/* If parity had happened during the unload, then attentions and/or
	 * RECOVERY_IN_PROGRESS may still be set. In this case we want the
	 * first function loaded on the current engine to complete the
	 * recovery.
	 * Parity recovery is only relevant for PF driver.
	 */
12486 if (IS_PF(bp)) {
12487 int other_engine = BP_PATH(bp) ? 0 : 1;
12488 bool other_load_status, load_status;
12489 bool global = false;
12490
12491 other_load_status = bnx2x_get_load_status(bp, other_engine);
12492 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12493 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12494 bnx2x_chk_parity_attn(bp, &global, true)) {
12495 do {
				/* If there are attentions and they are in a
				 * global blocks, set the GLOBAL_RESET bit
				 * regardless whether it will be this function
				 * that will complete the recovery or not.
				 */
12501 if (global)
12502 bnx2x_set_reset_global(bp);
12503
				/* Only the first function on the current
				 * engine should try to recover in open. In
				 * case of attentions in global blocks only
				 * the first in the chip should try to recover.
				 */
12509 if ((!load_status &&
12510 (!global || !other_load_status)) &&
12511 bnx2x_trylock_leader_lock(bp) &&
12512 !bnx2x_leader_reset(bp)) {
12513 netdev_info(bp->dev,
12514 "Recovered in open\n");
12515 break;
12516 }

				/* recovery has failed... */
12519 bnx2x_set_power_state(bp, PCI_D3hot);
12520 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12521
12522 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12523 "If you still see this message after a few retries then power cycle is required.\n");
12524
12525 return -EAGAIN;
12526 } while (0);
12527 }
12528 }
12529
12530 bp->recovery_state = BNX2X_RECOVERY_DONE;
12531 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12532 if (rc)
12533 return rc;
12534
12535 return 0;
12536}
12537
12538
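/* called with rtnl_lock */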
12539static int bnx2x_close(struct net_device *dev)
12540{
12541 struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
12544 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12545
12546 return 0;
12547}
12548
12549struct bnx2x_mcast_list_elem_group
12550{
12551 struct list_head mcast_group_link;
12552 struct bnx2x_mcast_list_elem mcast_elems[];
12553};
12554
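/* number of bnx2x_mcast_list_elem entries that fit in one page-sized group */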
12555#define MCAST_ELEMS_PER_PG \
12556 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12557 sizeof(struct bnx2x_mcast_list_elem))
12558
12559static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
12560{
12561 struct bnx2x_mcast_list_elem_group *current_mcast_group;
12562
12563 while (!list_empty(mcast_group_list)) {
12564 current_mcast_group = list_first_entry(mcast_group_list,
12565 struct bnx2x_mcast_list_elem_group,
12566 mcast_group_link);
		list_del(&current_mcast_group->mcast_group_link);
12568 free_page((unsigned long)current_mcast_group);
12569 }
12570}
12571
12572static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12573 struct bnx2x_mcast_ramrod_params *p,
12574 struct list_head *mcast_group_list)
12575{
12576 struct bnx2x_mcast_list_elem *mc_mac;
12577 struct netdev_hw_addr *ha;
12578 struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
12579 int mc_count = netdev_mc_count(bp->dev);
12580 int offset = 0;
12581
12582 INIT_LIST_HEAD(&p->mcast_list);
12583 netdev_for_each_mc_addr(ha, bp->dev) {
12584 if (!offset) {
12585 current_mcast_group =
12586 (struct bnx2x_mcast_list_elem_group *)
12587 __get_free_page(GFP_ATOMIC);
12588 if (!current_mcast_group) {
12589 bnx2x_free_mcast_macs_list(mcast_group_list);
12590 BNX2X_ERR("Failed to allocate mc MAC list\n");
12591 return -ENOMEM;
12592 }
			list_add(&current_mcast_group->mcast_group_link,
12594 mcast_group_list);
12595 }
		mc_mac = &current_mcast_group->mcast_elems[offset];
12597 mc_mac->mac = bnx2x_mc_addr(ha);
12598 list_add_tail(&mc_mac->link, &p->mcast_list);
12599 offset++;
12600 if (offset == MCAST_ELEMS_PER_PG)
12601 offset = 0;
12602 }
12603 p->mcast_list_len = mc_count;
12604 return 0;
12605}
12606
/**
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 *
 * We will use zero (0) as a filter type for these MACs.
 */
12614static int bnx2x_set_uc_list(struct bnx2x *bp)
12615{
12616 int rc;
12617 struct net_device *dev = bp->dev;
12618 struct netdev_hw_addr *ha;
12619 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12620 unsigned long ramrod_flags = 0;

	/* first schedule a cleanup up of old configuration */
12623 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12624 if (rc < 0) {
12625 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12626 return rc;
12627 }
12628
12629 netdev_for_each_uc_addr(ha, dev) {
12630 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12631 BNX2X_UC_LIST_MAC, &ramrod_flags);
12632 if (rc == -EEXIST) {
12633 DP(BNX2X_MSG_SP,
12634 "Failed to schedule ADD operations: %d\n", rc);
			/* do not treat adding same MAC as error */
			rc = 0;
		} else if (rc < 0) {
12640 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12641 rc);
12642 return rc;
12643 }
12644 }
12645
12646
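	/* Execute the pending commands */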
12647 __set_bit(RAMROD_CONT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12649 BNX2X_UC_LIST_MAC, &ramrod_flags);
12650}
12651
12652static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
12653{
12654 LIST_HEAD(mcast_group_list);
12655 struct net_device *dev = bp->dev;
12656 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12657 int rc = 0;
12658
12659 rparam.mcast_obj = &bp->mcast_obj;

	/* first, clear all configured multicast MACs */
12662 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12663 if (rc < 0) {
12664 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12665 return rc;
12666 }

	/* then, configure a new MACs list */
12669 if (netdev_mc_count(dev)) {
12670 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12671 if (rc)
12672 return rc;

		/* Now add the new MACs */
12675 rc = bnx2x_config_mcast(bp, &rparam,
12676 BNX2X_MCAST_CMD_ADD);
12677 if (rc < 0)
12678 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12679 rc);
12680
12681 bnx2x_free_mcast_macs_list(&mcast_group_list);
12682 }
12683
12684 return rc;
12685}
12686
12687static int bnx2x_set_mc_list(struct bnx2x *bp)
12688{
12689 LIST_HEAD(mcast_group_list);
12690 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12691 struct net_device *dev = bp->dev;
12692 int rc = 0;
12693
	/* On older adapters, we need to flush and re-add filters */
12695 if (CHIP_IS_E1x(bp))
12696 return bnx2x_set_mc_list_e1x(bp);
12697
12698 rparam.mcast_obj = &bp->mcast_obj;
12699
12700 if (netdev_mc_count(dev)) {
12701 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12702 if (rc)
12703 return rc;

		/* Override the currently configured set of mc filters */
12706 rc = bnx2x_config_mcast(bp, &rparam,
12707 BNX2X_MCAST_CMD_SET);
12708 if (rc < 0)
12709 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12710 rc);
12711
12712 bnx2x_free_mcast_macs_list(&mcast_group_list);
12713 } else {
		/* If no mc addresses are required, flush the configuration */
12715 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12716 if (rc < 0)
12717 BNX2X_ERR("Failed to clear multicast configuration %d\n",
12718 rc);
12719 }
12720
12721 return rc;
12722}
12723
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12725static void bnx2x_set_rx_mode(struct net_device *dev)
12726{
12727 struct bnx2x *bp = netdev_priv(dev);
12728
12729 if (bp->state != BNX2X_STATE_OPEN) {
12730 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12731 return;
12732 } else {
		/* Schedule an SP task to handle rest of change */
12734 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12735 NETIF_MSG_IFUP);
12736 }
12737}
12738
12739void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12740{
12741 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12742
12743 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12744
12745 netif_addr_lock_bh(bp->dev);
12746
12747 if (bp->dev->flags & IFF_PROMISC) {
12748 rx_mode = BNX2X_RX_MODE_PROMISC;
12749 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12750 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12751 CHIP_IS_E1(bp))) {
12752 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12753 } else {
12754 if (IS_PF(bp)) {
			/* fall back to ALLMULTI if the mc list fails to apply */
12756 if (bnx2x_set_mc_list(bp) < 0)
12757 rx_mode = BNX2X_RX_MODE_ALLMULTI;

			/* release bh lock, as bnx2x_set_uc_list might sleep */
12760 netif_addr_unlock_bh(bp->dev);
12761 if (bnx2x_set_uc_list(bp) < 0)
12762 rx_mode = BNX2X_RX_MODE_PROMISC;
12763 netif_addr_lock_bh(bp->dev);
12764 } else {
			/* configuring mcast to a vf involves sleeping (when we
			 * wait for the pf's response).
			 */
12768 bnx2x_schedule_sp_rtnl(bp,
12769 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12770 }
12771 }
12772
12773 bp->rx_mode = rx_mode;
12774
12775 if (IS_MF_ISCSI_ONLY(bp))
12776 bp->rx_mode = BNX2X_RX_MODE_NONE;

	/* Schedule the rx_mode command */
12779 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12780 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12781 netif_addr_unlock_bh(bp->dev);
12782 return;
12783 }
12784
12785 if (IS_PF(bp)) {
12786 bnx2x_set_storm_rx_mode(bp);
12787 netif_addr_unlock_bh(bp->dev);
12788 } else {
		/* VF will need to request the PF to make this change, and so
		 * the VF needs to release the bottom-half lock prior to the
		 * request (as it will likely require resched, and hence will
		 * likely result in a deadlock otherwise)
		 */
12793 netif_addr_unlock_bh(bp->dev);
12794 bnx2x_vfpf_storm_rx_mode(bp);
12795 }
12796}
12797
12798
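/* called with rtnl_lock */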
12799static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12800 int devad, u16 addr)
12801{
12802 struct bnx2x *bp = netdev_priv(netdev);
12803 u16 value;
12804 int rc;
12805
12806 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12807 prtad, devad, addr);
12808
	/* The HW expects different devad if CL22 is used */
12810 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12811
12812 bnx2x_acquire_phy_lock(bp);
12813 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12814 bnx2x_release_phy_lock(bp);
12815 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12816
12817 if (!rc)
12818 rc = value;
12819 return rc;
12820}
12821
12822
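/* called with rtnl_lock */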
12823static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12824 u16 addr, u16 value)
12825{
12826 struct bnx2x *bp = netdev_priv(netdev);
12827 int rc;
12828
12829 DP(NETIF_MSG_LINK,
12830 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12831 prtad, devad, addr, value);
12832
	/* The HW expects different devad if CL22 is used */
12834 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12835
12836 bnx2x_acquire_phy_lock(bp);
12837 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12838 bnx2x_release_phy_lock(bp);
12839 return rc;
12840}
12841
12842
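/* called with rtnl_lock */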
12843static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12844{
12845 struct bnx2x *bp = netdev_priv(dev);
12846 struct mii_ioctl_data *mdio = if_mii(ifr);
12847
12848 if (!netif_running(dev))
12849 return -EAGAIN;
12850
12851 switch (cmd) {
12852 case SIOCSHWTSTAMP:
12853 return bnx2x_hwtstamp_ioctl(bp, ifr);
12854 default:
12855 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12856 mdio->phy_id, mdio->reg_num, mdio->val_in);
12857 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12858 }
12859}
12860
12861static int bnx2x_validate_addr(struct net_device *dev)
12862{
12863 struct bnx2x *bp = netdev_priv(dev);
12864
	/* query the bulletin board for MAC address configured by the PF */
12866 if (IS_VF(bp))
12867 bnx2x_sample_bulletin(bp);
12868
12869 if (!is_valid_ether_addr(dev->dev_addr)) {
12870 BNX2X_ERR("Non-valid Ethernet address\n");
12871 return -EADDRNOTAVAIL;
12872 }
12873 return 0;
12874}
12875
12876static int bnx2x_get_phys_port_id(struct net_device *netdev,
12877 struct netdev_phys_item_id *ppid)
12878{
12879 struct bnx2x *bp = netdev_priv(netdev);
12880
12881 if (!(bp->flags & HAS_PHYS_PORT_ID))
12882 return -EOPNOTSUPP;
12883
12884 ppid->id_len = sizeof(bp->phys_port_id);
12885 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12886
12887 return 0;
12888}
12889
12890static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12891 struct net_device *dev,
12892 netdev_features_t features)
12893{
	/* A skb with gso_size + header length > 9700 will cause a
	 * firmware panic. Drop GSO support.
	 *
	 * Eventually the upper layer should not pass these packets down.
	 *
	 * For speed, if the gso_size is <= 9000, assume there will
	 * not be 700 bytes of headers and pass it through. Only do a
	 * full (slow) validation if the gso_size is > 9000.
	 *
	 * (Due to the way SKB_BY_FRAGS works this will also do a full
	 * validation in that case.)
	 */
12907 if (unlikely(skb_is_gso(skb) &&
12908 (skb_shinfo(skb)->gso_size > 9000) &&
12909 !skb_gso_validate_mac_len(skb, 9700)))
12910 features &= ~NETIF_F_GSO_MASK;
12911
12912 features = vlan_features_check(skb, features);
12913 return vxlan_features_check(skb, features);
12914}
12915
12916static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12917{
12918 int rc;
12919
12920 if (IS_PF(bp)) {
12921 unsigned long ramrod_flags = 0;
12922
12923 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12924 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
12925 add, &ramrod_flags);
12926 } else {
12927 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
12928 }
12929
12930 return rc;
12931}
12932
12933static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
12934{
12935 struct bnx2x_vlan_entry *vlan;
12936 int rc = 0;
12937
	/* Configure all non-configured entries */
12939 list_for_each_entry(vlan, &bp->vlan_reg, link) {
12940 if (vlan->hw)
12941 continue;
12942
12943 if (bp->vlan_cnt >= bp->vlan_credit)
12944 return -ENOBUFS;
12945
12946 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
12947 if (rc) {
12948 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
12949 return rc;
12950 }
12951
12952 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
12953 vlan->hw = true;
12954 bp->vlan_cnt++;
12955 }
12956
12957 return 0;
12958}
12959
12960static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
12961{
12962 bool need_accept_any_vlan;
12963
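	/* If not all requested VLANs could be configured in HW, fall back to
	 * accepting any VLAN.
	 */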
12964 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
12965
12966 if (bp->accept_any_vlan != need_accept_any_vlan) {
12967 bp->accept_any_vlan = need_accept_any_vlan;
12968 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
12969 bp->accept_any_vlan ? "raised" : "cleared");
12970 if (set_rx_mode) {
12971 if (IS_PF(bp))
12972 bnx2x_set_rx_mode_inner(bp);
12973 else
12974 bnx2x_vfpf_storm_rx_mode(bp);
12975 }
12976 }
12977}
12978
12979int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
12980{
	/* Don't set rx mode here. Our caller will do it. */
12982 bnx2x_vlan_configure(bp, false);
12983
12984 return 0;
12985}
12986
12987static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
12988{
12989 struct bnx2x *bp = netdev_priv(dev);
12990 struct bnx2x_vlan_entry *vlan;
12991
12992 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
12993
12994 vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
12995 if (!vlan)
12996 return -ENOMEM;
12997
12998 vlan->vid = vid;
12999 vlan->hw = false;
13000 list_add_tail(&vlan->link, &bp->vlan_reg);
13001
13002 if (netif_running(dev))
13003 bnx2x_vlan_configure(bp, true);
13004
13005 return 0;
13006}
13007
13008static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
13009{
13010 struct bnx2x *bp = netdev_priv(dev);
13011 struct bnx2x_vlan_entry *vlan;
13012 bool found = false;
13013 int rc = 0;
13014
13015 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
13016
13017 list_for_each_entry(vlan, &bp->vlan_reg, link)
13018 if (vlan->vid == vid) {
13019 found = true;
13020 break;
13021 }
13022
13023 if (!found) {
13024 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
13025 return -EINVAL;
13026 }
13027
13028 if (netif_running(dev) && vlan->hw) {
13029 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
13030 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
13031 bp->vlan_cnt--;
13032 }
13033
13034 list_del(&vlan->link);
13035 kfree(vlan);
13036
13037 if (netif_running(dev))
13038 bnx2x_vlan_configure(bp, true);
13039
13040 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
13041
13042 return rc;
13043}
13044
13045static const struct net_device_ops bnx2x_netdev_ops = {
13046 .ndo_open = bnx2x_open,
13047 .ndo_stop = bnx2x_close,
13048 .ndo_start_xmit = bnx2x_start_xmit,
13049 .ndo_select_queue = bnx2x_select_queue,
13050 .ndo_set_rx_mode = bnx2x_set_rx_mode,
13051 .ndo_set_mac_address = bnx2x_change_mac_addr,
13052 .ndo_validate_addr = bnx2x_validate_addr,
13053 .ndo_do_ioctl = bnx2x_ioctl,
13054 .ndo_change_mtu = bnx2x_change_mtu,
13055 .ndo_fix_features = bnx2x_fix_features,
13056 .ndo_set_features = bnx2x_set_features,
13057 .ndo_tx_timeout = bnx2x_tx_timeout,
13058 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
13059 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
13060 .ndo_setup_tc = __bnx2x_setup_tc,
13061#ifdef CONFIG_BNX2X_SRIOV
13062 .ndo_set_vf_mac = bnx2x_set_vf_mac,
13063 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
13064 .ndo_get_vf_config = bnx2x_get_vf_config,
13065 .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk,
13066#endif
13067#ifdef NETDEV_FCOE_WWNN
13068 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
13069#endif
13070
13071 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
13072 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
13073 .ndo_features_check = bnx2x_features_check,
13074 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
13075 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
13076};
13077
13078static int bnx2x_set_coherency_mask(struct bnx2x *bp)
13079{
13080 struct device *dev = &bp->pdev->dev;
13081
13082 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
13083 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
13084 dev_err(dev, "System does not support DMA, aborting\n");
13085 return -EIO;
13086 }
13087
13088 return 0;
13089}
13090
13091static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13092{
13093 if (bp->flags & AER_ENABLED) {
13094 pci_disable_pcie_error_reporting(bp->pdev);
13095 bp->flags &= ~AER_ENABLED;
13096 }
13097}
13098
13099static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13100 struct net_device *dev, unsigned long board_type)
13101{
13102 int rc;
13103 u32 pci_cfg_dword;
13104 bool chip_is_e1x = (board_type == BCM57710 ||
13105 board_type == BCM57711 ||
13106 board_type == BCM57711E);
13107
13108 SET_NETDEV_DEV(dev, &pdev->dev);
13109
13110 bp->dev = dev;
13111 bp->pdev = pdev;
13112
13113 rc = pci_enable_device(pdev);
13114 if (rc) {
13115 dev_err(&bp->pdev->dev,
13116 "Cannot enable PCI device, aborting\n");
13117 goto err_out;
13118 }
13119
13120 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13121 dev_err(&bp->pdev->dev,
13122 "Cannot find PCI device base address, aborting\n");
13123 rc = -ENODEV;
13124 goto err_out_disable;
13125 }
13126
13127 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13128 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13129 rc = -ENODEV;
13130 goto err_out_disable;
13131 }
13132
13133 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13134 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13135 PCICFG_REVESION_ID_ERROR_VAL) {
13136 pr_err("PCI device error, probably due to fan failure, aborting\n");
13137 rc = -ENODEV;
13138 goto err_out_disable;
13139 }
13140
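	/* Request the PCI regions only once, when the first function of the
	 * device is probed.
	 */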
13141 if (atomic_read(&pdev->enable_cnt) == 1) {
13142 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13143 if (rc) {
13144 dev_err(&bp->pdev->dev,
13145 "Cannot obtain PCI resources, aborting\n");
13146 goto err_out_disable;
13147 }
13148
13149 pci_set_master(pdev);
13150 pci_save_state(pdev);
13151 }
13152
13153 if (IS_PF(bp)) {
13154 if (!pdev->pm_cap) {
13155 dev_err(&bp->pdev->dev,
13156 "Cannot find power management capability, aborting\n");
13157 rc = -EIO;
13158 goto err_out_release;
13159 }
13160 }
13161
13162 if (!pci_is_pcie(pdev)) {
13163 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13164 rc = -EIO;
13165 goto err_out_release;
13166 }
13167
13168 rc = bnx2x_set_coherency_mask(bp);
13169 if (rc)
13170 goto err_out_release;
13171
13172 dev->mem_start = pci_resource_start(pdev, 0);
13173 dev->base_addr = dev->mem_start;
13174 dev->mem_end = pci_resource_end(pdev, 0);
13175
13176 dev->irq = pdev->irq;
13177
13178 bp->regview = pci_ioremap_bar(pdev, 0);
13179 if (!bp->regview) {
13180 dev_err(&bp->pdev->dev,
13181 "Cannot map register space, aborting\n");
13182 rc = -ENOMEM;
13183 goto err_out_release;
13184 }
13185
	/* In E1/E1H use pci device function given by kernel.
	 * In E2/E3 read physical function from ME register since these chips
	 * support Physical Device Assignment where kernel BDF may be arbitrary
	 * (depending on hypervisor).
	 */
13191 if (chip_is_e1x) {
13192 bp->pf_num = PCI_FUNC(pdev->devfn);
13193 } else {
		/* chip is E2/E3 */
13195 pci_read_config_dword(bp->pdev,
13196 PCICFG_ME_REGISTER, &pci_cfg_dword);
13197 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13198 ME_REG_ABS_PF_NUM_SHIFT);
13199 }
13200 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
13201
	/* clean indirect addresses */
13203 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13204 PCICFG_VENDOR_ID_OFFSET);
13205
	/* Set PCIe reset type to fundamental for EEH recovery */
13207 pdev->needs_freset = 1;
13208
	/* AER (Advanced Error reporting) configuration */
13210 rc = pci_enable_pcie_error_reporting(pdev);
13211 if (!rc)
13212 bp->flags |= AER_ENABLED;
13213 else
13214 BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
13215
	/* Clean the following indirect addresses for all functions since they
	 * are not used by the driver.
	 */
13220 if (IS_PF(bp)) {
13221 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13222 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13223 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13224 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13225
13226 if (chip_is_e1x) {
13227 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13228 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13229 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13230 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13231 }
13232
		/* Enable internal target-read (in case we are probed after PF
		 * FLR). Must be done prior to any BAR read access. Only for
		 * 57712 and up.
		 */
13237 if (!chip_is_e1x)
13238 REG_WR(bp,
13239 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13240 }
13241
13242 dev->watchdog_timeo = TX_TIMEOUT;
13243
13244 dev->netdev_ops = &bnx2x_netdev_ops;
13245 bnx2x_set_ethtool_ops(bp, dev);
13246
13247 dev->priv_flags |= IFF_UNICAST_FLT;
13248
13249 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13250 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13251 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
13252 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
13253 if (!chip_is_e1x) {
13254 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13255 NETIF_F_GSO_IPXIP4 |
13256 NETIF_F_GSO_UDP_TUNNEL |
13257 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13258 NETIF_F_GSO_PARTIAL;
13259
13260 dev->hw_enc_features =
13261 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13262 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13263 NETIF_F_GSO_IPXIP4 |
13264 NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13265 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
13266 NETIF_F_GSO_PARTIAL;
13267
13268 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
13269 NETIF_F_GSO_UDP_TUNNEL_CSUM;
13270
13271 if (IS_PF(bp))
13272 dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels;
13273 }
13274
13275 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13276 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13277
13278 if (IS_PF(bp)) {
13279 if (chip_is_e1x)
13280 bp->accept_any_vlan = true;
13281 else
13282 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13283 }
13284
	/* For VF we'll know whether to enable VLAN filtering after
	 * getting a response to CHANNEL_TLV_ACQUIRE from PF.
	 */

13288 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13289 dev->features |= NETIF_F_HIGHDMA;
13290 if (dev->features & NETIF_F_LRO)
13291 dev->features &= ~NETIF_F_GRO_HW;
13292
	/* Add Loopback capability to the device */
13294 dev->hw_features |= NETIF_F_LOOPBACK;
13295
13296#ifdef BCM_DCBNL
13297 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13298#endif
13299
	/* MTU range, 46 - 9600 */
13301 dev->min_mtu = ETH_MIN_PACKET_SIZE;
13302 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
13303
13304
13305 bp->mdio.prtad = MDIO_PRTAD_NONE;
13306 bp->mdio.mmds = 0;
13307 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13308 bp->mdio.dev = dev;
13309 bp->mdio.mdio_read = bnx2x_mdio_read;
13310 bp->mdio.mdio_write = bnx2x_mdio_write;
13311
13312 return 0;
13313
13314err_out_release:
13315 if (atomic_read(&pdev->enable_cnt) == 1)
13316 pci_release_regions(pdev);
13317
13318err_out_disable:
13319 pci_disable_device(pdev);
13320
13321err_out:
13322 return rc;
13323}
13324
13325static int bnx2x_check_firmware(struct bnx2x *bp)
13326{
13327 const struct firmware *firmware = bp->firmware;
13328 struct bnx2x_fw_file_hdr *fw_hdr;
13329 struct bnx2x_fw_file_section *sections;
13330 u32 offset, len, num_ops;
13331 __be16 *ops_offsets;
13332 int i;
13333 const u8 *fw_ver;
13334
13335 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
13336 BNX2X_ERR("Wrong FW size\n");
13337 return -EINVAL;
13338 }
13339
13340 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13341 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13342
	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware file.
	 */
13345 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13346 offset = be32_to_cpu(sections[i].offset);
13347 len = be32_to_cpu(sections[i].len);
13348 if (offset + len > firmware->size) {
13349 BNX2X_ERR("Section %d length is out of bounds\n", i);
13350 return -EINVAL;
13351 }
13352 }
13353
	/* Likewise for the init_ops offsets */
13355 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13356 ops_offsets = (__force __be16 *)(firmware->data + offset);
13357 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13358
13359 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13360 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13361 BNX2X_ERR("Section offset %d is out of bounds\n", i);
13362 return -EINVAL;
13363 }
13364 }
13365
	/* Check FW version */
13367 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13368 fw_ver = firmware->data + offset;
13369 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13370 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13371 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13372 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13373 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13374 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
13375 BCM_5710_FW_MAJOR_VERSION,
13376 BCM_5710_FW_MINOR_VERSION,
13377 BCM_5710_FW_REVISION_VERSION,
13378 BCM_5710_FW_ENGINEERING_VERSION);
13379 return -EINVAL;
13380 }
13381
13382 return 0;
13383}
13384
13385static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13386{
13387 const __be32 *source = (const __be32 *)_source;
13388 u32 *target = (u32 *)_target;
13389 u32 i;
13390
13391 for (i = 0; i < n/4; i++)
13392 target[i] = be32_to_cpu(source[i]);
13393}
13394
/* Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), raw_data(32bit, big endian)}
 */
13399static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13400{
13401 const __be32 *source = (const __be32 *)_source;
13402 struct raw_op *target = (struct raw_op *)_target;
13403 u32 i, j, tmp;
13404
13405 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13406 tmp = be32_to_cpu(source[j]);
13407 target[i].op = (tmp >> 24) & 0xff;
13408 target[i].offset = tmp & 0xffffff;
13409 target[i].raw_data = be32_to_cpu(source[j + 1]);
13410 }
13411}
13412
/* IRO array is stored in the following format:
 * {base(24bit):pad(8bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 */
13416static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13417{
13418 const __be32 *source = (const __be32 *)_source;
13419 struct iro *target = (struct iro *)_target;
13420 u32 i, j, tmp;
13421
13422 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13423 target[i].base = be32_to_cpu(source[j]);
13424 j++;
13425 tmp = be32_to_cpu(source[j]);
13426 target[i].m1 = (tmp >> 16) & 0xffff;
13427 target[i].m2 = tmp & 0xffff;
13428 j++;
13429 tmp = be32_to_cpu(source[j]);
13430 target[i].m3 = (tmp >> 16) & 0xffff;
13431 target[i].size = tmp & 0xffff;
13432 j++;
13433 }
13434}
13435
13436static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13437{
13438 const __be16 *source = (const __be16 *)_source;
13439 u16 *target = (u16 *)_target;
13440 u32 i;
13441
13442 for (i = 0; i < n/2; i++)
13443 target[i] = be16_to_cpu(source[i]);
13444}
13445
13446#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13447do { \
13448 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13449 bp->arr = kmalloc(len, GFP_KERNEL); \
13450 if (!bp->arr) \
13451 goto lbl; \
13452 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13453 (u8 *)bp->arr, len); \
13454} while (0)
13455
13456static int bnx2x_init_firmware(struct bnx2x *bp)
13457{
13458 const char *fw_file_name;
13459 struct bnx2x_fw_file_hdr *fw_hdr;
13460 int rc;
13461
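	/* Firmware already requested and parsed - nothing to do */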
13462 if (bp->firmware)
13463 return 0;
13464
13465 if (CHIP_IS_E1(bp))
13466 fw_file_name = FW_FILE_NAME_E1;
13467 else if (CHIP_IS_E1H(bp))
13468 fw_file_name = FW_FILE_NAME_E1H;
13469 else if (!CHIP_IS_E1x(bp))
13470 fw_file_name = FW_FILE_NAME_E2;
13471 else {
13472 BNX2X_ERR("Unsupported chip revision\n");
13473 return -EINVAL;
13474 }
13475 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13476
13477 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13478 if (rc) {
13479 BNX2X_ERR("Can't load firmware file %s\n",
13480 fw_file_name);
13481 goto request_firmware_exit;
13482 }
13483
13484 rc = bnx2x_check_firmware(bp);
13485 if (rc) {
13486 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13487 goto request_firmware_exit;
13488 }
13489
13490 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13491
	/* Initialize the pointers to the init arrays */
	/* Blob */
13494 rc = -ENOMEM;
13495 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13496
	/* Opcodes */
13498 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13499
	/* Offsets */
13501 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13502 be16_to_cpu_n);
13503
	/* STORMs firmware */
13505 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13506 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13507 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13508 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13509 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13510 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13511 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13512 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13513 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13514 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13515 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13516 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13517 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13518 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13519 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13520 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13521
13522 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13523
13524 return 0;
13525
13526iro_alloc_err:
13527 kfree(bp->init_ops_offsets);
13528init_offsets_alloc_err:
13529 kfree(bp->init_ops);
13530init_ops_alloc_err:
13531 kfree(bp->init_data);
13532request_firmware_exit:
13533 release_firmware(bp->firmware);
13534 bp->firmware = NULL;
13535
13536 return rc;
13537}
13538
13539static void bnx2x_release_firmware(struct bnx2x *bp)
13540{
13541 kfree(bp->init_ops_offsets);
13542 kfree(bp->init_ops);
13543 kfree(bp->init_data);
13544 release_firmware(bp->firmware);
13545 bp->firmware = NULL;
13546}
13547
13548static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13549 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13550 .init_hw_cmn = bnx2x_init_hw_common,
13551 .init_hw_port = bnx2x_init_hw_port,
13552 .init_hw_func = bnx2x_init_hw_func,
13553
13554 .reset_hw_cmn = bnx2x_reset_common,
13555 .reset_hw_port = bnx2x_reset_port,
13556 .reset_hw_func = bnx2x_reset_func,
13557
13558 .gunzip_init = bnx2x_gunzip_init,
13559 .gunzip_end = bnx2x_gunzip_end,
13560
13561 .init_fw = bnx2x_init_firmware,
13562 .release_fw = bnx2x_release_firmware,
13563};
13564
13565void bnx2x__init_func_obj(struct bnx2x *bp)
13566{
13567
13568 bnx2x_setup_dmae(bp);
13569
13570 bnx2x_init_func_obj(bp, &bp->func_obj,
13571 bnx2x_sp(bp, func_rdata),
13572 bnx2x_sp_mapping(bp, func_rdata),
13573 bnx2x_sp(bp, func_afex_rdata),
13574 bnx2x_sp_mapping(bp, func_afex_rdata),
13575 &bnx2x_func_sp_drv);
13576}
13577
13578
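/* must be called after sriov-enable, as it accounts for the VF CIDs */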
13579static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13580{
13581 int cid_count = BNX2X_L2_MAX_CID(bp);
13582
13583 if (IS_SRIOV(bp))
13584 cid_count += BNX2X_VF_CIDS;
13585
13586 if (CNIC_SUPPORT(bp))
13587 cid_count += CNIC_CID_MAX;
13588
13589 return roundup(cid_count, QM_CID_ROUND);
13590}
13591
/**
 * bnx2x_get_num_non_def_sbs - return the number of none default SBs
 * @pdev: pci device
 * @cnic_cnt: number of SBs reserved for CNIC
 *
 * Reads the MSI-X table size from PCI configuration space to determine
 * how many non-default status blocks the device supports.
 */
13598static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13599{
13600 int index;
13601 u16 control = 0;
13602
	/* If MSI-X is not supported - return number of SBs needed to support
	 * one fast path queue: one FP queue + SB for CNIC
	 */
13607 if (!pdev->msix_cap) {
13608 dev_info(&pdev->dev, "no msix capability found\n");
13609 return 1 + cnic_cnt;
13610 }
13611 dev_info(&pdev->dev, "msix capability found\n");
13612
	/* The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: the number of
	 * all SBs without the default SB.
	 */
13620 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13621
13622 index = control & PCI_MSIX_FLAGS_QSIZE;
13623
13624 return index;
13625}
13626
13627static int set_max_cos_est(int chip_id)
13628{
13629 switch (chip_id) {
13630 case BCM57710:
13631 case BCM57711:
13632 case BCM57711E:
13633 return BNX2X_MULTI_TX_COS_E1X;
13634 case BCM57712:
13635 case BCM57712_MF:
13636 return BNX2X_MULTI_TX_COS_E2_E3A0;
13637 case BCM57800:
13638 case BCM57800_MF:
13639 case BCM57810:
13640 case BCM57810_MF:
13641 case BCM57840_4_10:
13642 case BCM57840_2_20:
13643 case BCM57840_O:
13644 case BCM57840_MFO:
13645 case BCM57840_MF:
13646 case BCM57811:
13647 case BCM57811_MF:
13648 return BNX2X_MULTI_TX_COS_E3B0;
13649 case BCM57712_VF:
13650 case BCM57800_VF:
13651 case BCM57810_VF:
13652 case BCM57840_VF:
13653 case BCM57811_VF:
13654 return 1;
13655 default:
13656 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13657 return -ENODEV;
13658 }
13659}
13660
13661static int set_is_vf(int chip_id)
13662{
13663 switch (chip_id) {
13664 case BCM57712_VF:
13665 case BCM57800_VF:
13666 case BCM57810_VF:
13667 case BCM57840_VF:
13668 case BCM57811_VF:
13669 return true;
13670 default:
13671 return false;
13672 }
13673}
13674
/* NIG timesync generator (TSGEN) register offsets, used for the PHC */
13676#define tsgen_ctrl 0x0
13677#define tsgen_freecount 0x10
13678#define tsgen_synctime_t0 0x20
13679#define tsgen_offset_t0 0x28
13680#define tsgen_drift_t0 0x30
13681#define tsgen_synctime_t1 0x58
13682#define tsgen_offset_t1 0x60
13683#define tsgen_drift_t1 0x68
13684
13685
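/* Post a SET_TIMESYNC ramrod to configure the PHC drift adjustment */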
13686static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13687 int best_val, int best_period)
13688{
13689 struct bnx2x_func_state_params func_params = {NULL};
13690 struct bnx2x_func_set_timesync_params *set_timesync_params =
13691 &func_params.params.set_timesync;
13692
	/* Prepare parameters for function state transitions */
13694 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13695 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13696
13697 func_params.f_obj = &bp->func_obj;
13698 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13699
	/* Function parameters */
13701 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13702 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13703 set_timesync_params->add_sub_drift_adjust_value =
13704 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13705 set_timesync_params->drift_adjust_value = best_val;
13706 set_timesync_params->drift_adjust_period = best_period;
13707
13708 return bnx2x_func_state_change(bp, &func_params);
13709}
13710
13711static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13712{
13713 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13714 int rc;
13715 int drift_dir = 1;
13716 int val, period, period1, period2, dif, dif1, dif2;
13717 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13718
13719 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13720
13721 if (!netif_running(bp->dev)) {
13722 DP(BNX2X_MSG_PTP,
13723 "PTP adjfreq called while the interface is down\n");
13724 return -ENETDOWN;
13725 }
13726
13727 if (ppb < 0) {
13728 ppb = -ppb;
13729 drift_dir = 0;
13730 }
13731
13732 if (ppb == 0) {
13733 best_val = 1;
13734 best_period = 0x1FFFFFF;
13735 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13736 best_val = 31;
13737 best_period = 1;
13738 } else {
		/* Find the (value, period) pair whose resulting drift is
		 * closest to the requested ppb; values whose low three bits
		 * are all zero are skipped as unsupported.
		 */
13742 for (val = 0; val <= 31; val++) {
13743 if ((val & 0x7) == 0)
13744 continue;
13745 period1 = val * 1000000 / ppb;
13746 period2 = period1 + 1;
13747 if (period1 != 0)
13748 dif1 = ppb - (val * 1000000 / period1);
13749 else
13750 dif1 = BNX2X_MAX_PHC_DRIFT;
13751 if (dif1 < 0)
13752 dif1 = -dif1;
13753 dif2 = ppb - (val * 1000000 / period2);
13754 if (dif2 < 0)
13755 dif2 = -dif2;
13756 dif = (dif1 < dif2) ? dif1 : dif2;
13757 period = (dif1 < dif2) ? period1 : period2;
13758 if (dif < best_dif) {
13759 best_dif = dif;
13760 best_val = val;
13761 best_period = period;
13762 }
13763 }
13764 }
13765
13766 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13767 best_period);
13768 if (rc) {
13769 BNX2X_ERR("Failed to set drift\n");
13770 return -EFAULT;
13771 }
13772
13773 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13774 best_period);
13775
13776 return 0;
13777}
13778
13779static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13780{
13781 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13782
13783 if (!netif_running(bp->dev)) {
13784 DP(BNX2X_MSG_PTP,
13785 "PTP adjtime called while the interface is down\n");
13786 return -ENETDOWN;
13787 }
13788
13789 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13790
13791 timecounter_adjtime(&bp->timecounter, delta);
13792
13793 return 0;
13794}
13795
13796static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13797{
13798 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13799 u64 ns;
13800
13801 if (!netif_running(bp->dev)) {
13802 DP(BNX2X_MSG_PTP,
13803 "PTP gettime called while the interface is down\n");
13804 return -ENETDOWN;
13805 }
13806
13807 ns = timecounter_read(&bp->timecounter);
13808
13809 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13810
13811 *ts = ns_to_timespec64(ns);
13812
13813 return 0;
13814}
13815
13816static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13817 const struct timespec64 *ts)
13818{
13819 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13820 u64 ns;
13821
13822 if (!netif_running(bp->dev)) {
13823 DP(BNX2X_MSG_PTP,
13824 "PTP settime called while the interface is down\n");
13825 return -ENETDOWN;
13826 }
13827
13828 ns = timespec64_to_ns(ts);
13829
13830 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13831
	/* Re-init the timecounter */
13833 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13834
13835 return 0;
13836}
13837

/* The HW exposes no ancillary pin functionality (PPS, external timestamps) */
static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
			    struct ptp_clock_request *rq, int on)
{
	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);

	BNX2X_ERR("PHC ancillary features are not supported\n");
	return -EOPNOTSUPP;
}
13847
13848void bnx2x_register_phc(struct bnx2x *bp)
13849{
	/* Fill the ptp_clock_info struct and register PTP clock */
13851 bp->ptp_clock_info.owner = THIS_MODULE;
13852 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13853 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT;
13854 bp->ptp_clock_info.n_alarm = 0;
13855 bp->ptp_clock_info.n_ext_ts = 0;
13856 bp->ptp_clock_info.n_per_out = 0;
13857 bp->ptp_clock_info.pps = 0;
13858 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13859 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13860 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13861 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13862 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13863
13864 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13865 if (IS_ERR(bp->ptp_clock)) {
13866 bp->ptp_clock = NULL;
13867 BNX2X_ERR("PTP clock registration failed\n");
13868 }
13869}
13870
13871static int bnx2x_init_one(struct pci_dev *pdev,
13872 const struct pci_device_id *ent)
13873{
13874 struct net_device *dev = NULL;
13875 struct bnx2x *bp;
13876 int rc, max_non_def_sbs;
13877 int rx_count, tx_count, rss_count, doorbell_size;
13878 int max_cos_est;
13879 bool is_vf;
13880 int cnic_cnt;
13881
	/* In a kdump kernel the device may not have been cleanly shut down;
	 * give the management FW up to 5 seconds from boot to settle before
	 * touching the device.
	 */
13885 if (is_kdump_kernel()) {
13886 ktime_t now = ktime_get_boottime();
13887 ktime_t fw_ready_time = ktime_set(5, 0);
13888
13889 if (ktime_before(now, fw_ready_time))
13890 msleep(ktime_ms_delta(fw_ready_time, now));
13891 }
13892
	/* An estimated maximum supported CoS number according to the chip
	 * version. Used below to size the number of netdev Tx queues to
	 * allocate; the precise value is established once the chip is
	 * identified.
	 */
13901 max_cos_est = set_max_cos_est(ent->driver_data);
13902 if (max_cos_est < 0)
13903 return max_cos_est;
13904 is_vf = set_is_vf(ent->driver_data);
13905 cnic_cnt = is_vf ? 0 : 1;
13906
13907 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13908
	/* add another SB for VF as it has no default SB */
13910 max_non_def_sbs += is_vf ? 1 : 0;
13911
	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
13913 rss_count = max_non_def_sbs - cnic_cnt;
13914
13915 if (rss_count < 1)
13916 return -EINVAL;
13917
	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
13919 rx_count = rss_count + cnic_cnt;
13920
	/* Maximum number of netdev Tx queues:
	 * Maximum TSS queues * max_cos_est + FCoE L2
	 */
13924 tx_count = rss_count * max_cos_est + cnic_cnt;
13925
13926
13927 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13928 if (!dev)
13929 return -ENOMEM;
13930
13931 bp = netdev_priv(dev);
13932
13933 bp->flags = 0;
13934 if (is_vf)
13935 bp->flags |= IS_VF_FLAG;
13936
13937 bp->igu_sb_cnt = max_non_def_sbs;
13938 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
13939 bp->msg_enable = debug;
13940 bp->cnic_support = cnic_cnt;
13941 bp->cnic_probe = bnx2x_cnic_probe;
13942
13943 pci_set_drvdata(pdev, dev);
13944
13945 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
13946 if (rc < 0) {
13947 free_netdev(dev);
13948 return rc;
13949 }
13950
13951 BNX2X_DEV_INFO("This is a %s function\n",
13952 IS_PF(bp) ? "physical" : "virtual");
13953 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
13954 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
13955 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
13956 tx_count, rx_count);
13957
13958 rc = bnx2x_init_bp(bp);
13959 if (rc)
13960 goto init_one_exit;
13961
	/* Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * L2 connections.
	 */
13966 if (IS_VF(bp)) {
13967 bp->doorbells = bnx2x_vf_doorbells(bp);
13968 rc = bnx2x_vf_pci_alloc(bp);
13969 if (rc)
13970 goto init_one_freemem;
13971 } else {
13972 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
13973 if (doorbell_size > pci_resource_len(pdev, 2)) {
13974 dev_err(&bp->pdev->dev,
13975 "Cannot map doorbells, bar size too small, aborting\n");
13976 rc = -ENOMEM;
13977 goto init_one_freemem;
13978 }
13979 bp->doorbells = ioremap(pci_resource_start(pdev, 2),
13980 doorbell_size);
13981 }
13982 if (!bp->doorbells) {
13983 dev_err(&bp->pdev->dev,
13984 "Cannot map doorbell space, aborting\n");
13985 rc = -ENOMEM;
13986 goto init_one_freemem;
13987 }
13988
13989 if (IS_VF(bp)) {
13990 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13991 if (rc)
13992 goto init_one_freemem;
13993
13994#ifdef CONFIG_BNX2X_SRIOV
		/* VF with an old hypervisor or old PF does not support filtering */
13996 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
13997 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13998 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13999 }
14000#endif
14001 }
14002
	/* Enable SRIOV if capability found in configuration space */
14004 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
14005 if (rc)
14006 goto init_one_freemem;
14007
14008
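	/* calc qm_cid_count */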
14009 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
14010 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
14011
14012
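	/* disable FCoE L2 queue for E1x chips */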
14013 if (CHIP_IS_E1x(bp))
14014 bp->flags |= NO_FCOE_FLAG;
14015
14016
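	/* Set bp->num_queues for MSI-X mode */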
14017 bnx2x_set_num_queues(bp);
14018
14019
14020
14021
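	/* Configure interrupt mode: try to enable MSI-X/MSI if needed */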
14022 rc = bnx2x_set_int_mode(bp);
14023 if (rc) {
14024 dev_err(&pdev->dev, "Cannot set interrupts\n");
14025 goto init_one_freemem;
14026 }
14027 BNX2X_DEV_INFO("set interrupts successfully\n");
14028
14029
14030 rc = register_netdev(dev);
14031 if (rc) {
14032 dev_err(&pdev->dev, "Cannot register net device\n");
14033 goto init_one_freemem;
14034 }
14035 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
14036
14037 if (!NO_FCOE(bp)) {
14038
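		/* Add storage MAC address */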
14039 rtnl_lock();
14040 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14041 rtnl_unlock();
14042 }
14043 BNX2X_DEV_INFO(
14044 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
14045 board_info[ent->driver_data].name,
14046 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
14047 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14048 pcie_print_link_status(bp->pdev);
14049
14050 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14051 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14052
14053 return 0;
14054
14055init_one_freemem:
14056 bnx2x_free_mem_bp(bp);
14057
14058init_one_exit:
14059 bnx2x_disable_pcie_error_reporting(bp);
14060
14061 if (bp->regview)
14062 iounmap(bp->regview);
14063
14064 if (IS_PF(bp) && bp->doorbells)
14065 iounmap(bp->doorbells);
14066
14067 free_netdev(dev);
14068
14069 if (atomic_read(&pdev->enable_cnt) == 1)
14070 pci_release_regions(pdev);
14071
14072 pci_disable_device(pdev);
14073
14074 return rc;
14075}
14076
14077static void __bnx2x_remove(struct pci_dev *pdev,
14078 struct net_device *dev,
14079 struct bnx2x *bp,
14080 bool remove_netdev)
14081{
	/* Delete storage MAC address */
14083 if (!NO_FCOE(bp)) {
14084 rtnl_lock();
14085 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14086 rtnl_unlock();
14087 }
14088
14089#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
14091 bnx2x_dcbnl_update_applist(bp, true);
14092#endif
14093
14094 if (IS_PF(bp) &&
14095 !BP_NOMCP(bp) &&
14096 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14097 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14098
	/* Close the interface - either directly or implicitly */
14100 if (remove_netdev) {
14101 unregister_netdev(dev);
14102 } else {
14103 rtnl_lock();
14104 dev_close(dev);
14105 rtnl_unlock();
14106 }
14107
14108 bnx2x_iov_remove_one(bp);
14109
	/* Power on: we can't let PCI layer write to us while we are in D3 */
14111 if (IS_PF(bp)) {
14112 bnx2x_set_power_state(bp, PCI_D0);
14113 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14114
		/* Set endianity registers to reset values in case the next
		 * driver boots in a different endianity environment.
		 */
14118 bnx2x_reset_endianity(bp);
14119 }
14120
	/* Disable MSI/MSI-X */
14122 bnx2x_disable_msi(bp);
14123
	/* Power off */
14125 if (IS_PF(bp))
14126 bnx2x_set_power_state(bp, PCI_D3hot);
14127
	/* Make sure RESET task is not scheduled before continuing */
14129 cancel_delayed_work_sync(&bp->sp_rtnl_task);
14130
	/* send message via vfpf channel to release the resources of this vf */
14132 if (IS_VF(bp))
14133 bnx2x_vfpf_release(bp);
14134
14135
14136 if (system_state == SYSTEM_POWER_OFF) {
14137 pci_wake_from_d3(pdev, bp->wol);
14138 pci_set_power_state(pdev, PCI_D3hot);
14139 }
14140
14141 bnx2x_disable_pcie_error_reporting(bp);
14142 if (remove_netdev) {
14143 if (bp->regview)
14144 iounmap(bp->regview);
14145
		/* For VFs the doorbells are part of the regview and were
		 * unmapped along with it; FW is only loaded by the PF.
		 */
14149 if (IS_PF(bp)) {
14150 if (bp->doorbells)
14151 iounmap(bp->doorbells);
14152
14153 bnx2x_release_firmware(bp);
14154 } else {
14155 bnx2x_vf_pci_dealloc(bp);
14156 }
14157 bnx2x_free_mem_bp(bp);
14158
14159 free_netdev(dev);
14160
14161 if (atomic_read(&pdev->enable_cnt) == 1)
14162 pci_release_regions(pdev);
14163
14164 pci_disable_device(pdev);
14165 }
14166}
14167
14168static void bnx2x_remove_one(struct pci_dev *pdev)
14169{
14170 struct net_device *dev = pci_get_drvdata(pdev);
14171 struct bnx2x *bp;
14172
14173 if (!dev) {
14174 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14175 return;
14176 }
14177 bp = netdev_priv(dev);
14178
14179 __bnx2x_remove(pdev, dev, bp, true);
14180}
14181
14182static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14183{
14184 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14185
14186 bp->rx_mode = BNX2X_RX_MODE_NONE;
14187
14188 if (CNIC_LOADED(bp))
14189 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14190
	/* Stop Tx */
14192 bnx2x_tx_disable(bp);
14193
14194 bnx2x_del_all_napi(bp);
14195 if (CNIC_LOADED(bp))
14196 bnx2x_del_all_napi_cnic(bp);
14197 netdev_reset_tc(bp->dev);
14198
14199 del_timer_sync(&bp->timer);
14200 cancel_delayed_work_sync(&bp->sp_task);
14201 cancel_delayed_work_sync(&bp->period_task);
14202
14203 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14204 bp->stats_state = STATS_STATE_DISABLED;
14205 up(&bp->stats_lock);
14206 }
14207
14208 bnx2x_save_statistics(bp);
14209
14210 netif_carrier_off(bp->dev);
14211
14212 return 0;
14213}
14214
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
14223static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
14224 pci_channel_state_t state)
14225{
14226 struct net_device *dev = pci_get_drvdata(pdev);
14227 struct bnx2x *bp = netdev_priv(dev);
14228
14229 rtnl_lock();
14230
14231 BNX2X_ERR("IO error detected\n");
14232
14233 netif_device_detach(dev);
14234
14235 if (state == pci_channel_io_perm_failure) {
14236 rtnl_unlock();
14237 return PCI_ERS_RESULT_DISCONNECT;
14238 }
14239
14240 if (netif_running(dev))
14241 bnx2x_eeh_nic_unload(bp);
14242
14243 bnx2x_prev_path_mark_eeh(bp);
14244
14245 pci_disable_device(pdev);
14246
14247 rtnl_unlock();
14248
	/* Request a slot reset */
14250 return PCI_ERS_RESULT_NEED_RESET;
14251}
14252
/**
 * bnx2x_io_slot_reset - called after the pci bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
14259static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14260{
14261 struct net_device *dev = pci_get_drvdata(pdev);
14262 struct bnx2x *bp = netdev_priv(dev);
14263 int i;
14264
14265 rtnl_lock();
14266 BNX2X_ERR("IO slot reset initializing...\n");
14267 if (pci_enable_device(pdev)) {
14268 dev_err(&pdev->dev,
14269 "Cannot re-enable PCI device after reset\n");
14270 rtnl_unlock();
14271 return PCI_ERS_RESULT_DISCONNECT;
14272 }
14273
14274 pci_set_master(pdev);
14275 pci_restore_state(pdev);
14276 pci_save_state(pdev);
14277
14278 if (netif_running(dev))
14279 bnx2x_set_power_state(bp, PCI_D0);
14280
14281 if (netif_running(dev)) {
14282 BNX2X_ERR("IO slot reset --> driver unload\n");
14283
		/* MCP should have been reset; need to wait for validity */
14285 if (bnx2x_init_shmem(bp)) {
14286 rtnl_unlock();
14287 return PCI_ERS_RESULT_DISCONNECT;
14288 }
14289
14290 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14291 u32 v;
14292
14293 v = SHMEM2_RD(bp,
14294 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14295 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14296 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14297 }
14298 bnx2x_drain_tx_queues(bp);
14299 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14300 bnx2x_netif_stop(bp, 1);
14301 bnx2x_free_irq(bp);
14302
		/* Report UNLOAD_DONE to MCP */
14304 bnx2x_send_unload_done(bp, true);
14305
14306 bp->sp_state = 0;
14307 bp->port.pmf = 0;
14308
14309 bnx2x_prev_unload(bp);
14310
		/* We should have reset the engine, so it's fair to assume
		 * that the FW will no longer write to the bnx2x driver.
		 */
14314 bnx2x_squeeze_objects(bp);
14315 bnx2x_free_skbs(bp);
14316 for_each_rx_queue(bp, i)
14317 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14318 bnx2x_free_fp_mem(bp);
14319 bnx2x_free_mem(bp);
14320
14321 bp->state = BNX2X_STATE_CLOSED;
14322 }
14323
14324 rtnl_unlock();
14325
14326 return PCI_ERS_RESULT_RECOVERED;
14327}
14328
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
14336static void bnx2x_io_resume(struct pci_dev *pdev)
14337{
14338 struct net_device *dev = pci_get_drvdata(pdev);
14339 struct bnx2x *bp = netdev_priv(dev);
14340
14341 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14342 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14343 return;
14344 }
14345
14346 rtnl_lock();
14347
14348 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14349 DRV_MSG_SEQ_NUMBER_MASK;
14350
14351 if (netif_running(dev))
14352 bnx2x_nic_load(bp, LOAD_NORMAL);
14353
14354 netif_device_attach(dev);
14355
14356 rtnl_unlock();
14357}
14358
14359static const struct pci_error_handlers bnx2x_err_handler = {
14360 .error_detected = bnx2x_io_error_detected,
14361 .slot_reset = bnx2x_io_slot_reset,
14362 .resume = bnx2x_io_resume,
14363};
14364
14365static void bnx2x_shutdown(struct pci_dev *pdev)
14366{
14367 struct net_device *dev = pci_get_drvdata(pdev);
14368 struct bnx2x *bp;
14369
14370 if (!dev)
14371 return;
14372
14373 bp = netdev_priv(dev);
14374 if (!bp)
14375 return;
14376
14377 rtnl_lock();
14378 netif_device_detach(dev);
14379 rtnl_unlock();
14380
	/* Don't remove the netdevice, as there are scenarios which will cause
	 * the kernel to hang, e.g., when trying to remove bnx2i while the
	 * rootfs is mounted from SAN.
	 */
14385 __bnx2x_remove(pdev, dev, bp, false);
14386}
14387
14388static struct pci_driver bnx2x_pci_driver = {
14389 .name = DRV_MODULE_NAME,
14390 .id_table = bnx2x_pci_tbl,
14391 .probe = bnx2x_init_one,
14392 .remove = bnx2x_remove_one,
14393 .driver.pm = &bnx2x_pm_ops,
14394 .err_handler = &bnx2x_err_handler,
14395#ifdef CONFIG_BNX2X_SRIOV
14396 .sriov_configure = bnx2x_sriov_configure,
14397#endif
14398 .shutdown = bnx2x_shutdown,
14399};
14400
14401static int __init bnx2x_init(void)
14402{
14403 int ret;
14404
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (!bnx2x_wq) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}
14410 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14411 if (!bnx2x_iov_wq) {
14412 pr_err("Cannot create iov workqueue\n");
14413 destroy_workqueue(bnx2x_wq);
14414 return -ENOMEM;
14415 }
14416
14417 ret = pci_register_driver(&bnx2x_pci_driver);
14418 if (ret) {
14419 pr_err("Cannot register driver\n");
14420 destroy_workqueue(bnx2x_wq);
14421 destroy_workqueue(bnx2x_iov_wq);
14422 }
14423 return ret;
14424}
14425
14426static void __exit bnx2x_cleanup(void)
14427{
14428 struct list_head *pos, *q;
14429
14430 pci_unregister_driver(&bnx2x_pci_driver);
14431
14432 destroy_workqueue(bnx2x_wq);
14433 destroy_workqueue(bnx2x_iov_wq);
14434
	/* Free globally allocated resources */
14436 list_for_each_safe(pos, q, &bnx2x_prev_list) {
14437 struct bnx2x_prev_path_list *tmp =
14438 list_entry(pos, struct bnx2x_prev_path_list, list);
14439 list_del(pos);
14440 kfree(tmp);
14441 }
14442}
14443
14444void bnx2x_notify_link_changed(struct bnx2x *bp)
14445{
14446 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14447}
14448
14449module_init(bnx2x_init);
14450module_exit(bnx2x_cleanup);
14451
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
14459static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14460{
14461 unsigned long ramrod_flags = 0;
14462
14463 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
14464 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14465 &bp->iscsi_l2_mac_obj, true,
14466 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
14467}
14468
14469
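/* Drain the CNIC kernel work queue onto the slow-path queue; 'count' is the
 * number of SPQ entries that have just completed and are being returned.
 */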
14470static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14471{
14472 struct eth_spe *spe;
14473 int cxt_index, cxt_offset;
14474
14475#ifdef BNX2X_STOP_ON_ERROR
14476 if (unlikely(bp->panic))
14477 return;
14478#endif
14479
14480 spin_lock_bh(&bp->spq_lock);
14481 BUG_ON(bp->cnic_spq_pending < count);
14482 bp->cnic_spq_pending -= count;
14483
14484 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14485 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14486 & SPE_HDR_CONN_TYPE) >>
14487 SPE_HDR_CONN_TYPE_SHIFT;
14488 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14489 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14490
14491
		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod.
		 */
14494 if (type == ETH_CONNECTION_TYPE) {
14495 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14496 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14497 ILT_PAGE_CIDS;
14498 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14499 (cxt_index * ILT_PAGE_CIDS);
14500 bnx2x_set_ctx_validation(bp,
14501 &bp->context[cxt_index].
14502 vcxt[cxt_offset].eth,
14503 BNX2X_ISCSI_ETH_CID(bp));
14504 }
14505 }
14506
		/* There may be not more than 8 L2, not more than 8 L5 SPEs
		 * in the air. We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
14513 if (type == ETH_CONNECTION_TYPE) {
14514 if (!atomic_read(&bp->cq_spq_left))
14515 break;
14516 else
14517 atomic_dec(&bp->cq_spq_left);
14518 } else if (type == NONE_CONNECTION_TYPE) {
14519 if (!atomic_read(&bp->eq_spq_left))
14520 break;
14521 else
14522 atomic_dec(&bp->eq_spq_left);
14523 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14524 (type == FCOE_CONNECTION_TYPE)) {
14525 if (bp->cnic_spq_pending >=
14526 bp->cnic_eth_dev.max_kwqe_pending)
14527 break;
14528 else
14529 bp->cnic_spq_pending++;
14530 } else {
14531 BNX2X_ERR("Unknown SPE type: %d\n", type);
14532 bnx2x_panic();
14533 break;
14534 }
14535
14536 spe = bnx2x_sp_get_next(bp);
14537 *spe = *bp->cnic_kwq_cons;
14538
14539 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14540 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14541
14542 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14543 bp->cnic_kwq_cons = bp->cnic_kwq;
14544 else
14545 bp->cnic_kwq_cons++;
14546 }
14547 bnx2x_sp_prod_update(bp);
14548 spin_unlock_bh(&bp->spq_lock);
14549}
14550
14551static int bnx2x_cnic_sp_queue(struct net_device *dev,
14552 struct kwqe_16 *kwqes[], u32 count)
14553{
14554 struct bnx2x *bp = netdev_priv(dev);
14555 int i;
14556
14557#ifdef BNX2X_STOP_ON_ERROR
14558 if (unlikely(bp->panic)) {
14559 BNX2X_ERR("Can't post to SP queue while panic\n");
14560 return -EIO;
14561 }
14562#endif
14563
14564 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14565 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14566 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14567 return -EAGAIN;
14568 }
14569
14570 spin_lock_bh(&bp->spq_lock);
14571
14572 for (i = 0; i < count; i++) {
14573 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14574
14575 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14576 break;
14577
14578 *bp->cnic_kwq_prod = *spe;
14579
14580 bp->cnic_kwq_pending++;
14581
14582 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14583 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14584 spe->data.update_data_addr.hi,
14585 spe->data.update_data_addr.lo,
14586 bp->cnic_kwq_pending);
14587
14588 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14589 bp->cnic_kwq_prod = bp->cnic_kwq;
14590 else
14591 bp->cnic_kwq_prod++;
14592 }
14593
14594 spin_unlock_bh(&bp->spq_lock);
14595
14596 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14597 bnx2x_cnic_sp_post(bp, 0);
14598
14599 return i;
14600}
14601
14602static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14603{
14604 struct cnic_ops *c_ops;
14605 int rc = 0;
14606
14607 mutex_lock(&bp->cnic_mutex);
14608 c_ops = rcu_dereference_protected(bp->cnic_ops,
14609 lockdep_is_held(&bp->cnic_mutex));
14610 if (c_ops)
14611 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14612 mutex_unlock(&bp->cnic_mutex);
14613
14614 return rc;
14615}
14616
14617static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14618{
14619 struct cnic_ops *c_ops;
14620 int rc = 0;
14621
14622 rcu_read_lock();
14623 c_ops = rcu_dereference(bp->cnic_ops);
14624 if (c_ops)
14625 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14626 rcu_read_unlock();
14627
14628 return rc;
14629}
14630
/* for commands that have no data */
14634int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14635{
14636 struct cnic_ctl_info ctl = {0};
14637
14638 ctl.cmd = cmd;
14639
14640 return bnx2x_cnic_ctl_send(bp, &ctl);
14641}
14642
14643static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14644{
14645 struct cnic_ctl_info ctl = {0};
14646
	/* first tell CNIC and only then count this as a completion */
14648 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14649 ctl.data.comp.cid = cid;
14650 ctl.data.comp.error = err;
14651
14652 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14653 bnx2x_cnic_sp_post(bp, 0);
14654}
14655
/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
14661static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14662{
14663 unsigned long accept_flags = 0, ramrod_flags = 0;
14664 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14665 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14666
14667 if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for the UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets - leading in our
		 * case).
		 */
14674 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14675 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14676 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14677 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14678
		/* Clear STOP_PENDING bit if START is requested */
14680 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14681
14682 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
	} else {
		/* Clear START_PENDING bit if STOP is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
	}
14686
14687 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14688 set_bit(sched_state, &bp->sp_state);
14689 else {
14690 __set_bit(RAMROD_RX, &ramrod_flags);
14691 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14692 ramrod_flags);
14693 }
14694}
14695
14696static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14697{
14698 struct bnx2x *bp = netdev_priv(dev);
14699 int rc = 0;
14700
14701 switch (ctl->cmd) {
14702 case DRV_CTL_CTXTBL_WR_CMD: {
14703 u32 index = ctl->data.io.offset;
14704 dma_addr_t addr = ctl->data.io.dma_addr;
14705
14706 bnx2x_ilt_wr(bp, index, addr);
14707 break;
14708 }
14709
14710 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14711 int count = ctl->data.credit.credit_count;
14712
14713 bnx2x_cnic_sp_post(bp, count);
14714 break;
14715 }
14716
	/* rtnl_lock is held */
14718 case DRV_CTL_START_L2_CMD: {
14719 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14720 unsigned long sp_bits = 0;
14721
		/* Configure the iSCSI classification object */
14723 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14724 cp->iscsi_l2_client_id,
14725 cp->iscsi_l2_cid, BP_FUNC(bp),
14726 bnx2x_sp(bp, mac_rdata),
14727 bnx2x_sp_mapping(bp, mac_rdata),
14728 BNX2X_FILTER_MAC_PENDING,
14729 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14730 &bp->macs_pool);
14731
		/* Set iSCSI MAC address */
14733 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14734 if (rc)
14735 break;
14736
14737 barrier();
14738
		/* Start accepting on iSCSI L2 ring */
14741 netif_addr_lock_bh(dev);
14742 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14743 netif_addr_unlock_bh(dev);
14744
		/* bits to wait on */
14746 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14747 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14748
14749 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14750 BNX2X_ERR("rx_mode completion timed out!\n");
14751
14752 break;
14753 }
14754
	/* rtnl_lock is held */
14756 case DRV_CTL_STOP_L2_CMD: {
14757 unsigned long sp_bits = 0;
14758
		/* Stop accepting on iSCSI L2 ring */
14760 netif_addr_lock_bh(dev);
14761 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14762 netif_addr_unlock_bh(dev);
14763
		/* bits to wait on */
14765 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14766 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14767
14768 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14769 BNX2X_ERR("rx_mode completion timed out!\n");
14770
14771 barrier();
14772
		/* Unset iSCSI L2 MAC */
14774 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14775 BNX2X_ISCSI_ETH_MAC, true);
14776 break;
14777 }
14778 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14779 int count = ctl->data.credit.credit_count;
14780
14781 smp_mb__before_atomic();
14782 atomic_add(count, &bp->cq_spq_left);
14783 smp_mb__after_atomic();
14784 break;
14785 }
14786 case DRV_CTL_ULP_REGISTER_CMD: {
14787 int ulp_type = ctl->data.register_data.ulp_type;
14788
14789 if (CHIP_IS_E3(bp)) {
14790 int idx = BP_FW_MB_IDX(bp);
14791 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14792 int path = BP_PATH(bp);
14793 int port = BP_PORT(bp);
14794 int i;
14795 u32 scratch_offset;
14796 u32 *host_addr;
14797
			/* first write capability to shmem2 */
14799 if (ulp_type == CNIC_ULP_ISCSI)
14800 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14801 else if (ulp_type == CNIC_ULP_FCOE)
14802 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14803 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14804
14805 if ((ulp_type != CNIC_ULP_FCOE) ||
14806 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14807 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14808 break;
14809
			/* if reached here - should write fcoe capabilities */
14811 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14812 if (!scratch_offset)
14813 break;
14814 scratch_offset += offsetof(struct glob_ncsi_oem_data,
14815 fcoe_features[path][port]);
14816 host_addr = (u32 *) &(ctl->data.register_data.
14817 fcoe_features);
14818 for (i = 0; i < sizeof(struct fcoe_capabilities);
14819 i += 4)
14820 REG_WR(bp, scratch_offset + i,
14821 *(host_addr + i/4));
14822 }
14823 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14824 break;
14825 }
14826
14827 case DRV_CTL_ULP_UNREGISTER_CMD: {
14828 int ulp_type = ctl->data.ulp_type;
14829
14830 if (CHIP_IS_E3(bp)) {
14831 int idx = BP_FW_MB_IDX(bp);
14832 u32 cap;
14833
14834 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14835 if (ulp_type == CNIC_ULP_ISCSI)
14836 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14837 else if (ulp_type == CNIC_ULP_FCOE)
14838 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14839 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14840 }
14841 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14842 break;
14843 }
14844
14845 default:
14846 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14847 rc = -EINVAL;
14848 }
14849
	/* For storage-only interfaces, propagate cnic's driver state to the MFW */
14851 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14852 switch (ctl->drv_state) {
14853 case DRV_NOP:
14854 break;
14855 case DRV_ACTIVE:
14856 bnx2x_set_os_driver_state(bp,
14857 OS_DRIVER_STATE_ACTIVE);
14858 break;
14859 case DRV_INACTIVE:
14860 bnx2x_set_os_driver_state(bp,
14861 OS_DRIVER_STATE_DISABLED);
14862 break;
14863 case DRV_UNLOADED:
14864 bnx2x_set_os_driver_state(bp,
14865 OS_DRIVER_STATE_NOT_LOADED);
14866 break;
14867 default:
14868 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14869 }
14870 }
14871
14872 return rc;
14873}
14874
14875static int bnx2x_get_fc_npiv(struct net_device *dev,
14876 struct cnic_fc_npiv_tbl *cnic_tbl)
14877{
14878 struct bnx2x *bp = netdev_priv(dev);
14879 struct bdn_fc_npiv_tbl *tbl = NULL;
14880 u32 offset, entries;
14881 int rc = -EINVAL;
14882 int i;
14883
14884 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14885 goto out;
14886
14887 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14888
14889 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14890 if (!tbl) {
14891 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14892 goto out;
14893 }
14894
14895 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14896 if (!offset) {
14897 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14898 goto out;
14899 }
14900 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
14901
	/* Read the table contents from nvram */
14903 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
14904 BNX2X_ERR("Failed to read FC-NPIV table\n");
14905 goto out;
14906 }
14907
	/* Since bnx2x_nvram_read() returns the table in big-endian format,
	 * convert the number of entries back to CPU endianness.
	 */
14911 entries = tbl->fc_npiv_cfg.num_of_npiv;
14912 entries = (__force u32)be32_to_cpu((__force __be32)entries);
14913 tbl->fc_npiv_cfg.num_of_npiv = entries;
14914
14915 if (!tbl->fc_npiv_cfg.num_of_npiv) {
14916 DP(BNX2X_MSG_MCP,
14917 "No FC-NPIV table [valid, simply not present]\n");
14918 goto out;
14919 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
14920 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
14921 tbl->fc_npiv_cfg.num_of_npiv);
14922 goto out;
14923 } else {
14924 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
14925 tbl->fc_npiv_cfg.num_of_npiv);
14926 }
14927
	/* Copy the data into the cnic-provided struct */
	cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
	for (i = 0; i < cnic_tbl->count; i++) {
		memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
		memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
	}

	rc = 0;
out:
	kfree(tbl);
	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (!CHIP_IS_E1x(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
	   BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
	   cp->iscsi_l2_cid);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	int rc;

	DP(NETIF_MSG_IFUP, "Register_cnic called\n");

	if (ops == NULL) {
		BNX2X_ERR("NULL ops received\n");
		return -EINVAL;
	}

	if (!CNIC_SUPPORT(bp)) {
		BNX2X_ERR("Can't register CNIC when not supported\n");
		return -EOPNOTSUPP;
	}

	if (!CNIC_LOADED(bp)) {
		rc = bnx2x_load_cnic(bp);
		if (rc) {
			BNX2X_ERR("CNIC-related load failed\n");
			return rc;
		}
	}

	bp->cnic_enabled = true;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state |= CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

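	/* Schedule driver to read CNIC driver versions */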
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	bp->cnic_enabled = false;
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

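	/* If both iSCSI and FCoE are disabled - return NULL in order to
	 * indicate to CNIC that it should not try to work with this device.
	 */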
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_client_id =
		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	BNX2X_DEV_INFO(
		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
		cp->ctx_blk_size,
		cp->ctx_tbl_offset,
		cp->ctx_tbl_len,
		cp->starting_cid);
	return cp;
}

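/* Return the offset, within the device BAR, of the USTORM Rx producers for
 * the given fastpath queue; the layout is chip- and function-dependent.
 */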
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	u32 offset = BAR_USTRORM_INTMEM;

	if (IS_VF(bp))
		return bnx2x_vf_ustorm_prods_offset(bp, fp);
	else if (!CHIP_IS_E1x(bp))
		offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

	return offset;
}

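/* Called only on E1H or E2.
 * When pretending to be a PF, the pretend value is the function number 0..7;
 * when pretending to be a VF, it is the PF-num:VF-valid:ABS-VFID combination.
 */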
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
		return -1;

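	/* get my own pretend register */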
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}

static void bnx2x_ptp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
	int port = BP_PORT(bp);
	u32 val_seq;
	u64 timestamp, ns;
	struct skb_shared_hwtstamps shhwtstamps;
	bool bail = true;
	int i;

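	/* FW may take a while to complete timestamping; try a bit and if it's
	 * still not complete, it may indicate an error state - bail out then.
	 */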
	for (i = 0; i < 10; i++) {
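		/* Read Tx timestamp registers */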
		val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
				 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
		if (val_seq & 0x10000) {
			bail = false;
			break;
		}
		msleep(1 << i);
	}

	if (!bail) {
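		/* There is a valid timestamp value */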
		timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
				   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
		timestamp <<= 32;
		timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
				    NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
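		/* Reset timestamp register to allow new timestamp */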
		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
		       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
		ns = timecounter_cyc2time(&bp->timecounter, timestamp);

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);

		DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
	} else {
		DP(BNX2X_MSG_PTP,
		   "Tx timestamp is not recorded (register read=%u)\n",
		   val_seq);
		bp->eth_stats.ptp_skip_tx_ts++;
	}

	dev_kfree_skb_any(bp->ptp_tx_skb);
	bp->ptp_tx_skb = NULL;
}

void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
{
	int port = BP_PORT(bp);
	u64 timestamp, ns;

	timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
			   NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
	timestamp <<= 32;
	timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
			   NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);

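	/* Reset timestamp register to allow new timestamp */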
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);

	ns = timecounter_cyc2time(&bp->timecounter, timestamp);

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);

	DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
	   timestamp, ns);
}

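/* Read the PHC */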
static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
{
	struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
	int port = BP_PORT(bp);
	u32 wb_data[2];
	u64 phc_cycles;

	REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
		    NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
	phc_cycles = wb_data[1];
	phc_cycles = (phc_cycles << 32) + wb_data[0];

	DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

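/* With mult = 1 and shift = 0 the timecounter conversion
 * ns = (cycles * mult) >> shift is the identity, i.e. each PHC cycle is
 * treated as one nanosecond.
 */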
static void bnx2x_init_cyclecounter(struct bnx2x *bp)
{
	memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
	bp->cyclecounter.read = bnx2x_cyclecounter_read;
	bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
	bp->cyclecounter.shift = 0;
	bp->cyclecounter.mult = 1;
}

static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_set_timesync_params *set_timesync_params =
		&func_params.params.set_timesync;

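	/* Prepare parameters for function state transitions */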
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;

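	/* Function parameters */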
	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;

	return bnx2x_func_state_change(bp, &func_params);
}

static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
{
	struct bnx2x_queue_state_params q_params;
	int rc, i;

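	/* send queue update ramrod to enable PTP packets */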
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
		  &q_params.params.update.update_flags);
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
		  &q_params.params.update.update_flags);

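	/* send the ramrod on all the queues of the PF */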
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

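		/* Set the appropriate Queue object */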
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

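		/* Update the Queue state */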
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to enable PTP packets\n");
			return rc;
		}
	}

	return 0;
}

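/* Masks for the NIG PTP {param, rule} detection registers written below.
 * bnx2x_configure_ptp() resets both to all-ones (0x7FF / 0x3FFF), so
 * presumably cleared bits enable the corresponding PTP detection rules.
 */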
#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)

int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 param, rule;
	int rc;

	if (!bp->hwtstamp_ioctl_called)
		return 0;

	param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
		NIG_REG_P0_TLLH_PTP_PARAM_MASK;
	rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
		NIG_REG_P0_TLLH_PTP_RULE_MASK;
	switch (bp->tx_type) {
	case HWTSTAMP_TX_ON:
		bp->flags |= TX_TIMESTAMPING_EN;
		REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
	case HWTSTAMP_TX_ONESTEP_P2P:
		BNX2X_ERR("One-step timestamping is not supported\n");
		return -ERANGE;
	}

	param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		NIG_REG_P0_LLH_PTP_PARAM_MASK;
	rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		NIG_REG_P0_LLH_PTP_RULE_MASK;
	switch (bp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		bp->rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
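		/* Initialize PTP detection for UDP/IPv4 events */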
		REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
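		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */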
		REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
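		/* Initialize PTP detection for L2 events */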
		REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);

		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
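		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */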
		REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
		break;
	}

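	/* Indicate to FW that this queue contains PTP packets */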
	rc = bnx2x_enable_ptp_packets(bp);
	if (rc)
		return rc;

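	/* Configure HW to forward detected PTP packets to the host buffers */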
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);

	return 0;
}

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int rc;

	DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
	   config.tx_type, config.rx_filter);

	if (config.flags) {
		BNX2X_ERR("config.flags is reserved for future use\n");
		return -EINVAL;
	}

	bp->hwtstamp_ioctl_called = true;
	bp->tx_type = config.tx_type;
	bp->rx_filter = config.rx_filter;

	rc = bnx2x_configure_ptp_filters(bp);
	if (rc)
		return rc;

	config.rx_filter = bp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

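/* Configures HW for PTP */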
static int bnx2x_configure_ptp(struct bnx2x *bp)
{
	int rc, port = BP_PORT(bp);
	u32 wb_data[2];

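	/* Reset PTP event detection rules - will be configured in the IOCTL */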
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);

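	/* Disable PTP packets to host - will be configured in the IOCTL */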
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);

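	/* Enable the PTP feature */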
	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
	       NIG_REG_P0_PTP_EN, 0x3F);

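	/* Enable the free-running counter */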
	wb_data[0] = 0;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);

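	/* Reset drift register (offset register is not affected) */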
	rc = bnx2x_send_reset_timesync_ramrod(bp);
	if (rc) {
		BNX2X_ERR("Failed to reset PHC drift register\n");
		return -EFAULT;
	}

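	/* Reset possibly old timestamps */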
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
	       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);

	return 0;
}

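/* Called during load, to initialize PTP-related stuff */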
void bnx2x_init_ptp(struct bnx2x *bp)
{
	int rc;

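	/* Configure PTP in HW */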
	rc = bnx2x_configure_ptp(bp);
	if (rc) {
		BNX2X_ERR("Stopping PTP initialization\n");
		return;
	}

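	/* Init work queue for Tx timestamping */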
	INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);

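	/* Init cyclecounter and timecounter only on the first load, since a
	 * running PTP application would otherwise fail across an unload /
	 * load cycle (e.g. an MTU change).
	 */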
	if (!bp->timecounter_init_done) {
		bnx2x_init_cyclecounter(bp);
		timecounter_init(&bp->timecounter, &bp->cyclecounter,
				 ktime_to_ns(ktime_get_real()));
		bp->timecounter_init_done = true;
	}

	DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
}