#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
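/*
 * Firmware file names encode the chip family (E1/E1H/E2 and newer) and the
 * firmware version this driver was built against; MODULE_FIRMWARE() below
 * advertises the same names so firmware/initramfs tooling can pick them up.
 */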

#define TX_TIMEOUT		(5*HZ)

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");
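/*
 * Illustrative usage only (example values, not a recommendation):
 *	modprobe bnx2x num_queues=4 int_mode=2 dropless_fc=1
 * i.e. four RSS queues, MSI interrupt mode, and pause-on-exhausted-ring
 * flow control enabled.
 */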

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

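/*
 * Shadow copies of MAC block registers (XMAC/EMAC/UMAC/BMAC): the register
 * addresses and the values found there, saved so the originals can be
 * restored later (e.g. while cleaning up after a previously loaded driver).
 */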
struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr[2];
	u32 umac_val[2];
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

static struct {
	char *name;
} board_info[] = {
	[BCM57710]	= { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711]	= { "QLogic BCM57711 10 Gigabit PCIe" },
	[BCM57711E]	= { "QLogic BCM57711E 10 Gigabit PCIe" },
	[BCM57712]	= { "QLogic BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF]	= { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF]	= { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800]	= { "QLogic BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF]	= { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF]	= { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810]	= { "QLogic BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF]	= { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF]	= { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10]	= { "QLogic BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20]	= { "QLogic BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811]	= { "QLogic BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF]	= { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O]	= { "QLogic BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF]	= { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

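/*
 * One GO register per DMAE command cell; bnx2x_post_dmae() copies a command
 * into DMAE command memory and then kicks the matching GO register.
 */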
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

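/* Write a 64-bit DMA address into internal memory as two 32-bit words. */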
static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

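/*
 * Indirect register access: program the GRC address window in PCI config
 * space, access the data register, then park the window back on the
 * vendor-ID offset.
 */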
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

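/*
 * Copy a DMAE command into the device's command memory at slot @idx and
 * trigger it by writing the corresponding GO register.
 */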
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}
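/*
 * Example (illustrative): a PCI -> GRC copy that raises a PCI-side
 * completion, as used by bnx2x_prep_dmae_with_comp() below:
 *
 *	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 */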

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

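/*
 * Post a DMAE command and busy-wait (under bp->dmae_lock) for the PCI
 * completion word pointed to by @comp; returns 0, DMAE_TIMEOUT or
 * DMAE_PCI_ERROR.
 */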
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	spin_lock_bh(&bp->dmae_lock);

	*comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:

	spin_unlock_bh(&bp->dmae_lock);

	return rc;
}

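/*
 * DMA len32 dwords from host memory at @dma_addr to GRC address @dst_addr.
 * Falls back to indirect/string register writes of the wb_data scratch
 * buffer while DMAE is not yet ready (e.g. very early during init).
 */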
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

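/*
 * Counterpart of bnx2x_write_dmae(): copy len32 dwords from GRC address
 * @src_addr into the wb_data scratch buffer, reading registers one by one
 * when DMAE is not yet usable.
 */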
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
	XSTORM,
	TSTORM,
	CSTORM,
	USTORM,
	MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
					      enum storms storm,
					      int entry)
{
	switch (storm) {
	case XSTORM:
		return XSTORM_ASSERT_LIST_OFFSET(entry);
	case TSTORM:
		return TSTORM_ASSERT_LIST_OFFSET(entry);
	case CSTORM:
		return CSTORM_ASSERT_LIST_OFFSET(entry);
	case USTORM:
		return USTORM_ASSERT_LIST_OFFSET(entry);
	case MAX_STORMS:
	default:
		BNX2X_ERR("unknown storm\n");
	}
	return -EINVAL;
}

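/*
 * Scan the assert lists of all four storm processors and print every entry
 * recorded by the firmware; returns the number of asserts found.
 */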
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	enum storms storm;
	u32 regs[REGS_IN_ENTRY];
	u32 bar_storm_intmem[STORMS_NUM] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};
	u32 storm_assert_list_index[STORMS_NUM] = {
		XSTORM_ASSERT_LIST_INDEX_OFFSET,
		TSTORM_ASSERT_LIST_INDEX_OFFSET,
		CSTORM_ASSERT_LIST_INDEX_OFFSET,
		USTORM_ASSERT_LIST_INDEX_OFFSET
	};
	char *storms_string[STORMS_NUM] = {
		"XSTORM",
		"TSTORM",
		"CSTORM",
		"USTORM"
	};

	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
				   storm_assert_list_index[storm]);
		if (last_idx)
			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
				  storms_string[storm], last_idx);

		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

			for (j = 0; j < REGS_IN_ENTRY; j++)
				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
					  bnx2x_get_assert_list_entry(bp,
								      storm,
								      i) +
					  sizeof(u32) * j);

			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storms_string[storm], i, regs[3],
					  regs[2], regs[1], regs[0]);
				rc++;
			} else {
				break;
			}
		}
	}

	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
		  CHIP_IS_E1(bp) ? "everest1" :
		  CHIP_IS_E1H(bp) ? "everest1h" :
		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
		  BCM_5710_FW_MAJOR_VERSION,
		  BCM_5710_FW_MINOR_VERSION,
		  BCM_5710_FW_REVISION_VERSION);

	return rc;
}
756
757#define MCPR_TRACE_BUFFER_SIZE (0x800)
758#define SCRATCH_BUFFER_SIZE(bp) \
759 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
760
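/*
 * Dump the management firmware (MFW) trace buffer, which lives just below
 * the shared memory in the MCP scratchpad, at the given printk level.
 */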
761void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
762{
763 u32 addr, val;
764 u32 mark, offset;
765 __be32 data[9];
766 int word;
767 u32 trace_shmem_base;
768 if (BP_NOMCP(bp)) {
769 BNX2X_ERR("NO MCP - can not dump\n");
770 return;
771 }
772 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
773 (bp->common.bc_ver & 0xff0000) >> 16,
774 (bp->common.bc_ver & 0xff00) >> 8,
775 (bp->common.bc_ver & 0xff));
776
777 if (pci_channel_offline(bp->pdev)) {
778 BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
779 return;
780 }
781
782 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
783 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
784 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
785
786 if (BP_PATH(bp) == 0)
787 trace_shmem_base = bp->common.shmem_base;
788 else
789 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
790
791
792 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
793 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
794 SCRATCH_BUFFER_SIZE(bp)) {
795 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
796 trace_shmem_base);
797 return;
798 }
799
800 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
801
802
803 mark = REG_RD(bp, addr);
804 if (mark != MFW_TRACE_SIGNATURE) {
805 BNX2X_ERR("Trace buffer signature is missing.");
806 return ;
807 }
808
809
810 addr += 4;
811 mark = REG_RD(bp, addr);
812 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
813 if (mark >= trace_shmem_base || mark < addr + 4) {
814 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
815 return;
816 }
817 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
818
819 printk("%s", lvl);
820
821
822 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
823 for (word = 0; word < 8; word++)
824 data[word] = htonl(REG_RD(bp, offset + 4*word));
825 data[8] = 0x0;
826 pr_cont("%s", (char *)data);
827 }
828
829
830 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
831 for (word = 0; word < 8; word++)
832 data[word] = htonl(REG_RD(bp, offset + 4*word));
833 data[8] = 0x0;
834 pr_cont("%s", (char *)data);
835 }
836 printk("%s" "end of fw dump\n", lvl);
837}
838
839static void bnx2x_fw_dump(struct bnx2x *bp)
840{
841 bnx2x_fw_dump_lvl(bp, KERN_ERR);
842}
843
844static void bnx2x_hc_int_disable(struct bnx2x *bp)
845{
846 int port = BP_PORT(bp);
847 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
848 u32 val = REG_RD(bp, addr);
849
850
851
852
853
854 if (CHIP_IS_E1(bp)) {
855
856
857
858
859 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
860
861 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
862 HC_CONFIG_0_REG_INT_LINE_EN_0 |
863 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
864 } else
865 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
866 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
867 HC_CONFIG_0_REG_INT_LINE_EN_0 |
868 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
869
870 DP(NETIF_MSG_IFDOWN,
871 "write %x to HC %d (addr 0x%x)\n",
872 val, port, addr);
873
874 REG_WR(bp, addr, val);
875 if (REG_RD(bp, addr) != val)
876 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
877}
878
879static void bnx2x_igu_int_disable(struct bnx2x *bp)
880{
881 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
882
883 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
884 IGU_PF_CONF_INT_LINE_EN |
885 IGU_PF_CONF_ATTN_BIT_EN);
886
887 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
888
889 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
890 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
891 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
892}
893
894static void bnx2x_int_disable(struct bnx2x *bp)
895{
896 if (bp->common.int_block == INT_BLOCK_HC)
897 bnx2x_hc_int_disable(bp);
898 else
899 bnx2x_igu_int_disable(bp);
900}
901
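/*
 * Collect a crash dump: slow-path and per-queue status block state, RX/TX
 * ring contents (when BNX2X_STOP_ON_ERROR is set), the MCP trace and the
 * firmware assert lists.
 */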
902void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
903{
904 int i;
905 u16 j;
906 struct hc_sp_status_block_data sp_sb_data;
907 int func = BP_FUNC(bp);
908#ifdef BNX2X_STOP_ON_ERROR
909 u16 start = 0, end = 0;
910 u8 cos;
911#endif
912 if (IS_PF(bp) && disable_int)
913 bnx2x_int_disable(bp);
914
915 bp->stats_state = STATS_STATE_DISABLED;
916 bp->eth_stats.unrecoverable_error++;
917 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
918
919 BNX2X_ERR("begin crash dump -----------------\n");
920
921
922
923 if (IS_PF(bp)) {
924 struct host_sp_status_block *def_sb = bp->def_status_blk;
925 int data_size, cstorm_offset;
926
927 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
928 bp->def_idx, bp->def_att_idx, bp->attn_state,
929 bp->spq_prod_idx, bp->stats_counter);
930 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
931 def_sb->atten_status_block.attn_bits,
932 def_sb->atten_status_block.attn_bits_ack,
933 def_sb->atten_status_block.status_block_id,
934 def_sb->atten_status_block.attn_bits_index);
935 BNX2X_ERR(" def (");
936 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
937 pr_cont("0x%x%s",
938 def_sb->sp_sb.index_values[i],
939 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
940
941 data_size = sizeof(struct hc_sp_status_block_data) /
942 sizeof(u32);
943 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
944 for (i = 0; i < data_size; i++)
945 *((u32 *)&sp_sb_data + i) =
946 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
947 i * sizeof(u32));
948
949 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
950 sp_sb_data.igu_sb_id,
951 sp_sb_data.igu_seg_id,
952 sp_sb_data.p_func.pf_id,
953 sp_sb_data.p_func.vnic_id,
954 sp_sb_data.p_func.vf_id,
955 sp_sb_data.p_func.vf_valid,
956 sp_sb_data.state);
957 }
958
959 for_each_eth_queue(bp, i) {
960 struct bnx2x_fastpath *fp = &bp->fp[i];
961 int loop;
962 struct hc_status_block_data_e2 sb_data_e2;
963 struct hc_status_block_data_e1x sb_data_e1x;
964 struct hc_status_block_sm *hc_sm_p =
965 CHIP_IS_E1x(bp) ?
966 sb_data_e1x.common.state_machine :
967 sb_data_e2.common.state_machine;
968 struct hc_index_data *hc_index_p =
969 CHIP_IS_E1x(bp) ?
970 sb_data_e1x.index_data :
971 sb_data_e2.index_data;
972 u8 data_size, cos;
973 u32 *sb_data_p;
974 struct bnx2x_fp_txdata txdata;
975
976 if (!bp->fp)
977 break;
978
979 if (!fp->rx_cons_sb)
980 continue;
981
982
983 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
984 i, fp->rx_bd_prod, fp->rx_bd_cons,
985 fp->rx_comp_prod,
986 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
987 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
988 fp->rx_sge_prod, fp->last_max_sge,
989 le16_to_cpu(fp->fp_hc_idx));
990
991
992 for_each_cos_in_tx_queue(fp, cos)
993 {
994 if (!fp->txdata_ptr[cos])
995 break;
996
997 txdata = *fp->txdata_ptr[cos];
998
999 if (!txdata.tx_cons_sb)
1000 continue;
1001
1002 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
1003 i, txdata.tx_pkt_prod,
1004 txdata.tx_pkt_cons, txdata.tx_bd_prod,
1005 txdata.tx_bd_cons,
1006 le16_to_cpu(*txdata.tx_cons_sb));
1007 }
1008
1009 loop = CHIP_IS_E1x(bp) ?
1010 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
1011
1012
1013
1014 if (IS_FCOE_FP(fp))
1015 continue;
1016
1017 BNX2X_ERR(" run indexes (");
1018 for (j = 0; j < HC_SB_MAX_SM; j++)
1019 pr_cont("0x%x%s",
1020 fp->sb_running_index[j],
1021 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
1022
1023 BNX2X_ERR(" indexes (");
1024 for (j = 0; j < loop; j++)
1025 pr_cont("0x%x%s",
1026 fp->sb_index_values[j],
1027 (j == loop - 1) ? ")" : " ");
1028
1029
1030 if (IS_VF(bp))
1031 continue;
1032
1033
1034 data_size = CHIP_IS_E1x(bp) ?
1035 sizeof(struct hc_status_block_data_e1x) :
1036 sizeof(struct hc_status_block_data_e2);
1037 data_size /= sizeof(u32);
1038 sb_data_p = CHIP_IS_E1x(bp) ?
1039 (u32 *)&sb_data_e1x :
1040 (u32 *)&sb_data_e2;
1041
1042 for (j = 0; j < data_size; j++)
1043 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1044 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
1045 j * sizeof(u32));
1046
1047 if (!CHIP_IS_E1x(bp)) {
1048 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1049 sb_data_e2.common.p_func.pf_id,
1050 sb_data_e2.common.p_func.vf_id,
1051 sb_data_e2.common.p_func.vf_valid,
1052 sb_data_e2.common.p_func.vnic_id,
1053 sb_data_e2.common.same_igu_sb_1b,
1054 sb_data_e2.common.state);
1055 } else {
1056 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1057 sb_data_e1x.common.p_func.pf_id,
1058 sb_data_e1x.common.p_func.vf_id,
1059 sb_data_e1x.common.p_func.vf_valid,
1060 sb_data_e1x.common.p_func.vnic_id,
1061 sb_data_e1x.common.same_igu_sb_1b,
1062 sb_data_e1x.common.state);
1063 }
1064
1065
1066 for (j = 0; j < HC_SB_MAX_SM; j++) {
1067 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1068 j, hc_sm_p[j].__flags,
1069 hc_sm_p[j].igu_sb_id,
1070 hc_sm_p[j].igu_seg_id,
1071 hc_sm_p[j].time_to_expire,
1072 hc_sm_p[j].timer_value);
1073 }
1074
1075
1076 for (j = 0; j < loop; j++) {
1077 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1078 hc_index_p[j].flags,
1079 hc_index_p[j].timeout);
1080 }
1081 }
1082
1083#ifdef BNX2X_STOP_ON_ERROR
1084 if (IS_PF(bp)) {
1085
1086 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1087 for (i = 0; i < NUM_EQ_DESC; i++) {
1088 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1089
1090 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1091 i, bp->eq_ring[i].message.opcode,
1092 bp->eq_ring[i].message.error);
1093 BNX2X_ERR("data: %x %x %x\n",
1094 data[0], data[1], data[2]);
1095 }
1096 }
1097
1098
1099
1100 for_each_valid_rx_queue(bp, i) {
1101 struct bnx2x_fastpath *fp = &bp->fp[i];
1102
1103 if (!bp->fp)
1104 break;
1105
1106 if (!fp->rx_cons_sb)
1107 continue;
1108
1109 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1110 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1111 for (j = start; j != end; j = RX_BD(j + 1)) {
1112 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1113 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1114
1115 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1116 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
1117 }
1118
1119 start = RX_SGE(fp->rx_sge_prod);
1120 end = RX_SGE(fp->last_max_sge);
1121 for (j = start; j != end; j = RX_SGE(j + 1)) {
1122 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1123 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1124
1125 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1126 i, j, rx_sge[1], rx_sge[0], sw_page->page);
1127 }
1128
1129 start = RCQ_BD(fp->rx_comp_cons - 10);
1130 end = RCQ_BD(fp->rx_comp_cons + 503);
1131 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1132 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1133
1134 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1135 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1136 }
1137 }
1138
1139
1140 for_each_valid_tx_queue(bp, i) {
1141 struct bnx2x_fastpath *fp = &bp->fp[i];
1142
1143 if (!bp->fp)
1144 break;
1145
1146 for_each_cos_in_tx_queue(fp, cos) {
1147 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1148
1149 if (!fp->txdata_ptr[cos])
1150 break;
1151
1152 if (!txdata->tx_cons_sb)
1153 continue;
1154
1155 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1156 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1157 for (j = start; j != end; j = TX_BD(j + 1)) {
1158 struct sw_tx_bd *sw_bd =
1159 &txdata->tx_buf_ring[j];
1160
1161 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
1162 i, cos, j, sw_bd->skb,
1163 sw_bd->first_bd);
1164 }
1165
1166 start = TX_BD(txdata->tx_bd_cons - 10);
1167 end = TX_BD(txdata->tx_bd_cons + 254);
1168 for (j = start; j != end; j = TX_BD(j + 1)) {
1169 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
1170
1171 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
1172 i, cos, j, tx_bd[0], tx_bd[1],
1173 tx_bd[2], tx_bd[3]);
1174 }
1175 }
1176 }
1177#endif
1178 if (IS_PF(bp)) {
1179 int tmp_msg_en = bp->msg_enable;
1180
1181 bnx2x_fw_dump(bp);
1182 bp->msg_enable |= NETIF_MSG_HW;
1183 BNX2X_ERR("Idle check (1st round) ----------\n");
1184 bnx2x_idle_chk(bp);
1185 BNX2X_ERR("Idle check (2nd round) ----------\n");
1186 bnx2x_idle_chk(bp);
1187 bp->msg_enable = tmp_msg_en;
1188 bnx2x_mc_assert(bp);
1189 }
1190
1191 BNX2X_ERR("end crash dump -----------------\n");
1192}
1193
1194
1195
1196
1197
1198
1199
1200#define FLR_WAIT_USEC 10000
1201#define FLR_WAIT_INTERVAL 50
1202#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL)
1203
1204struct pbf_pN_buf_regs {
1205 int pN;
1206 u32 init_crd;
1207 u32 crd;
1208 u32 crd_freed;
1209};
1210
1211struct pbf_pN_cmd_regs {
1212 int pN;
1213 u32 lines_occup;
1214 u32 lines_freed;
1215};
1216
1217static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1218 struct pbf_pN_buf_regs *regs,
1219 u32 poll_count)
1220{
1221 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1222 u32 cur_cnt = poll_count;
1223
1224 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1225 crd = crd_start = REG_RD(bp, regs->crd);
1226 init_crd = REG_RD(bp, regs->init_crd);
1227
1228 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1229 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
1230 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1231
1232 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1233 (init_crd - crd_start))) {
1234 if (cur_cnt--) {
1235 udelay(FLR_WAIT_INTERVAL);
1236 crd = REG_RD(bp, regs->crd);
1237 crd_freed = REG_RD(bp, regs->crd_freed);
1238 } else {
1239 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1240 regs->pN);
1241 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
1242 regs->pN, crd);
1243 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1244 regs->pN, crd_freed);
1245 break;
1246 }
1247 }
1248 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1249 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1250}
1251
1252static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1253 struct pbf_pN_cmd_regs *regs,
1254 u32 poll_count)
1255{
1256 u32 occup, to_free, freed, freed_start;
1257 u32 cur_cnt = poll_count;
1258
1259 occup = to_free = REG_RD(bp, regs->lines_occup);
1260 freed = freed_start = REG_RD(bp, regs->lines_freed);
1261
1262 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
1263 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1264
1265 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1266 if (cur_cnt--) {
1267 udelay(FLR_WAIT_INTERVAL);
1268 occup = REG_RD(bp, regs->lines_occup);
1269 freed = REG_RD(bp, regs->lines_freed);
1270 } else {
1271 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1272 regs->pN);
1273 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
1274 regs->pN, occup);
1275 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1276 regs->pN, freed);
1277 break;
1278 }
1279 }
1280 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1281 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1282}
1283
1284static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1285 u32 expected, u32 poll_count)
1286{
1287 u32 cur_cnt = poll_count;
1288 u32 val;
1289
1290 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1291 udelay(FLR_WAIT_INTERVAL);
1292
1293 return val;
1294}
1295
1296int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1297 char *msg, u32 poll_cnt)
1298{
1299 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1300 if (val != 0) {
1301 BNX2X_ERR("%s usage count=%d\n", msg, val);
1302 return 1;
1303 }
1304 return 0;
1305}
1306
1307
1308u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1309{
1310
1311 if (CHIP_REV_IS_EMUL(bp))
1312 return FLR_POLL_CNT * 2000;
1313
1314 if (CHIP_REV_IS_FPGA(bp))
1315 return FLR_POLL_CNT * 120;
1316
1317 return FLR_POLL_CNT;
1318}
1319
1320void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1321{
1322 struct pbf_pN_cmd_regs cmd_regs[] = {
1323 {0, (CHIP_IS_E3B0(bp)) ?
1324 PBF_REG_TQ_OCCUPANCY_Q0 :
1325 PBF_REG_P0_TQ_OCCUPANCY,
1326 (CHIP_IS_E3B0(bp)) ?
1327 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1328 PBF_REG_P0_TQ_LINES_FREED_CNT},
1329 {1, (CHIP_IS_E3B0(bp)) ?
1330 PBF_REG_TQ_OCCUPANCY_Q1 :
1331 PBF_REG_P1_TQ_OCCUPANCY,
1332 (CHIP_IS_E3B0(bp)) ?
1333 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1334 PBF_REG_P1_TQ_LINES_FREED_CNT},
1335 {4, (CHIP_IS_E3B0(bp)) ?
1336 PBF_REG_TQ_OCCUPANCY_LB_Q :
1337 PBF_REG_P4_TQ_OCCUPANCY,
1338 (CHIP_IS_E3B0(bp)) ?
1339 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1340 PBF_REG_P4_TQ_LINES_FREED_CNT}
1341 };
1342
1343 struct pbf_pN_buf_regs buf_regs[] = {
1344 {0, (CHIP_IS_E3B0(bp)) ?
1345 PBF_REG_INIT_CRD_Q0 :
1346 PBF_REG_P0_INIT_CRD ,
1347 (CHIP_IS_E3B0(bp)) ?
1348 PBF_REG_CREDIT_Q0 :
1349 PBF_REG_P0_CREDIT,
1350 (CHIP_IS_E3B0(bp)) ?
1351 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1352 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1353 {1, (CHIP_IS_E3B0(bp)) ?
1354 PBF_REG_INIT_CRD_Q1 :
1355 PBF_REG_P1_INIT_CRD,
1356 (CHIP_IS_E3B0(bp)) ?
1357 PBF_REG_CREDIT_Q1 :
1358 PBF_REG_P1_CREDIT,
1359 (CHIP_IS_E3B0(bp)) ?
1360 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1361 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1362 {4, (CHIP_IS_E3B0(bp)) ?
1363 PBF_REG_INIT_CRD_LB_Q :
1364 PBF_REG_P4_INIT_CRD,
1365 (CHIP_IS_E3B0(bp)) ?
1366 PBF_REG_CREDIT_LB_Q :
1367 PBF_REG_P4_CREDIT,
1368 (CHIP_IS_E3B0(bp)) ?
1369 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1370 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1371 };
1372
1373 int i;
1374
1375
1376 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1377 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1378
1379
1380 for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1381 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1382}
1383
1384#define OP_GEN_PARAM(param) \
1385 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1386
1387#define OP_GEN_TYPE(type) \
1388 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1389
1390#define OP_GEN_AGG_VECT(index) \
1391 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1392
1393int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1394{
1395 u32 op_gen_command = 0;
1396 u32 comp_addr = BAR_CSTRORM_INTMEM +
1397 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1398
1399 if (REG_RD(bp, comp_addr)) {
1400 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1401 return 1;
1402 }
1403
1404 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1405 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1406 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1407 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1408
1409 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1410 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1411
1412 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1413 BNX2X_ERR("FW final cleanup did not succeed\n");
1414 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1415 (REG_RD(bp, comp_addr)));
1416 bnx2x_panic();
1417 return 1;
1418 }
1419
1420 REG_WR(bp, comp_addr, 0);
1421
1422 return 0;
1423}
1424
1425u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1426{
1427 u16 status;
1428
1429 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1430 return status & PCI_EXP_DEVSTA_TRPND;
1431}
1432
1433
1434
1435static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1436{
1437
1438 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1439 CFC_REG_NUM_LCIDS_INSIDE_PF,
1440 "CFC PF usage counter timed out",
1441 poll_cnt))
1442 return 1;
1443
1444
1445 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1446 DORQ_REG_PF_USAGE_CNT,
1447 "DQ PF usage counter timed out",
1448 poll_cnt))
1449 return 1;
1450
1451
1452 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1453 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1454 "QM PF usage counter timed out",
1455 poll_cnt))
1456 return 1;
1457
1458
1459 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1460 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1461 "Timers VNIC usage counter timed out",
1462 poll_cnt))
1463 return 1;
1464 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1465 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1466 "Timers NUM_SCANS usage counter timed out",
1467 poll_cnt))
1468 return 1;
1469
1470
1471 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1472 dmae_reg_go_c[INIT_DMAE_C(bp)],
1473 "DMAE command register timed out",
1474 poll_cnt))
1475 return 1;
1476
1477 return 0;
1478}
1479
1480static void bnx2x_hw_enable_status(struct bnx2x *bp)
1481{
1482 u32 val;
1483
1484 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1485 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1486
1487 val = REG_RD(bp, PBF_REG_DISABLE_PF);
1488 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1489
1490 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1491 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1492
1493 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1494 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1495
1496 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1497 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1498
1499 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1500 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1501
1502 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1503 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1504
1505 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1506 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1507 val);
1508}
1509
1510static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1511{
1512 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1513
1514 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1515
1516
1517 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1518
1519
1520 DP(BNX2X_MSG_SP, "Polling usage counters\n");
1521 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1522 return -EBUSY;
1523
1524
1525
1526
1527 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1528 return -EBUSY;
1529
1530
1531
1532
1533 bnx2x_tx_hw_flushed(bp, poll_cnt);
1534
1535
1536 msleep(100);
1537
1538
1539 if (bnx2x_is_pcie_pending(bp->pdev))
1540 BNX2X_ERR("PCIE Transactions still pending\n");
1541
1542
1543 bnx2x_hw_enable_status(bp);
1544
1545
1546
1547
1548
1549 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1550
1551 return 0;
1552}
1553
1554static void bnx2x_hc_int_enable(struct bnx2x *bp)
1555{
1556 int port = BP_PORT(bp);
1557 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1558 u32 val = REG_RD(bp, addr);
1559 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1560 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1561 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1562
1563 if (msix) {
1564 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1565 HC_CONFIG_0_REG_INT_LINE_EN_0);
1566 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1567 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1568 if (single_msix)
1569 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1570 } else if (msi) {
1571 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1572 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1573 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1574 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1575 } else {
1576 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1577 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1578 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1579 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1580
1581 if (!CHIP_IS_E1(bp)) {
1582 DP(NETIF_MSG_IFUP,
1583 "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1584
1585 REG_WR(bp, addr, val);
1586
1587 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1588 }
1589 }
1590
1591 if (CHIP_IS_E1(bp))
1592 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1593
1594 DP(NETIF_MSG_IFUP,
1595 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1596 (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1597
1598 REG_WR(bp, addr, val);
1599
1600
1601
1602 barrier();
1603
1604 if (!CHIP_IS_E1(bp)) {
1605
1606 if (IS_MF(bp)) {
1607 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1608 if (bp->port.pmf)
1609
1610 val |= 0x1100;
1611 } else
1612 val = 0xffff;
1613
1614 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1615 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1616 }
1617}
1618
1619static void bnx2x_igu_int_enable(struct bnx2x *bp)
1620{
1621 u32 val;
1622 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1623 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1624 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1625
1626 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1627
1628 if (msix) {
1629 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1630 IGU_PF_CONF_SINGLE_ISR_EN);
1631 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1632 IGU_PF_CONF_ATTN_BIT_EN);
1633
1634 if (single_msix)
1635 val |= IGU_PF_CONF_SINGLE_ISR_EN;
1636 } else if (msi) {
1637 val &= ~IGU_PF_CONF_INT_LINE_EN;
1638 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1639 IGU_PF_CONF_ATTN_BIT_EN |
1640 IGU_PF_CONF_SINGLE_ISR_EN);
1641 } else {
1642 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1643 val |= (IGU_PF_CONF_INT_LINE_EN |
1644 IGU_PF_CONF_ATTN_BIT_EN |
1645 IGU_PF_CONF_SINGLE_ISR_EN);
1646 }
1647
1648
1649 if ((!msix) || single_msix) {
1650 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1651 bnx2x_ack_int(bp);
1652 }
1653
1654 val |= IGU_PF_CONF_FUNC_EN;
1655
1656 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
1657 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1658
1659 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1660
1661 if (val & IGU_PF_CONF_INT_LINE_EN)
1662 pci_intx(bp->pdev, true);
1663
1664 barrier();
1665
1666
1667 if (IS_MF(bp)) {
1668 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1669 if (bp->port.pmf)
1670
1671 val |= 0x1100;
1672 } else
1673 val = 0xffff;
1674
1675 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1676 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1677}
1678
1679void bnx2x_int_enable(struct bnx2x *bp)
1680{
1681 if (bp->common.int_block == INT_BLOCK_HC)
1682 bnx2x_hc_int_enable(bp);
1683 else
1684 bnx2x_igu_int_enable(bp);
1685}
1686
1687void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1688{
1689 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1690 int i, offset;
1691
1692 if (disable_hw)
1693
1694 bnx2x_int_disable(bp);
1695
1696
1697 if (msix) {
1698 synchronize_irq(bp->msix_table[0].vector);
1699 offset = 1;
1700 if (CNIC_SUPPORT(bp))
1701 offset++;
1702 for_each_eth_queue(bp, i)
1703 synchronize_irq(bp->msix_table[offset++].vector);
1704 } else
1705 synchronize_irq(bp->pdev->irq);
1706
1707
1708 cancel_delayed_work(&bp->sp_task);
1709 cancel_delayed_work(&bp->period_task);
1710 flush_workqueue(bnx2x_wq);
1711}
1712
1713
1714
1715
1716
1717
1718
1719
1720static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1721{
1722 u32 lock_status;
1723 u32 resource_bit = (1 << resource);
1724 int func = BP_FUNC(bp);
1725 u32 hw_lock_control_reg;
1726
1727 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1728 "Trying to take a lock on resource %d\n", resource);
1729
1730
1731 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1732 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1733 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1734 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1735 return false;
1736 }
1737
1738 if (func <= 5)
1739 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1740 else
1741 hw_lock_control_reg =
1742 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1743
1744
1745 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1746 lock_status = REG_RD(bp, hw_lock_control_reg);
1747 if (lock_status & resource_bit)
1748 return true;
1749
1750 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1751 "Failed to get a lock on resource %d\n", resource);
1752 return false;
1753}
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1764{
1765 if (BP_PATH(bp))
1766 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1767 else
1768 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1769}
1770
1771
1772
1773
1774
1775
1776
1777
1778static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1779{
1780 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1781}
1782
1783static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1784
1785
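/*
 * Mark that an interrupt has been seen and kick the slow-path task on
 * bnx2x_wq; the write to interrupt_occurred must be visible before the
 * work runs, hence the smp_wmb() below.
 */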
1786static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1787{
1788
1789
1790
1791
1792 atomic_set(&bp->interrupt_occurred, 1);
1793
1794
1795
1796
1797
1798 smp_wmb();
1799
1800
1801 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1802}
1803
1804void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1805{
1806 struct bnx2x *bp = fp->bp;
1807 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1808 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1809 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1810 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1811
1812 DP(BNX2X_MSG_SP,
1813 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1814 fp->index, cid, command, bp->state,
1815 rr_cqe->ramrod_cqe.ramrod_type);
1816
1817
1818
1819
1820 if (cid >= BNX2X_FIRST_VF_CID &&
1821 cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1822 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1823
1824 switch (command) {
1825 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1826 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1827 drv_cmd = BNX2X_Q_CMD_UPDATE;
1828 break;
1829
1830 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1831 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1832 drv_cmd = BNX2X_Q_CMD_SETUP;
1833 break;
1834
1835 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1836 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1837 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1838 break;
1839
1840 case (RAMROD_CMD_ID_ETH_HALT):
1841 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1842 drv_cmd = BNX2X_Q_CMD_HALT;
1843 break;
1844
1845 case (RAMROD_CMD_ID_ETH_TERMINATE):
1846 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1847 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1848 break;
1849
1850 case (RAMROD_CMD_ID_ETH_EMPTY):
1851 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1852 drv_cmd = BNX2X_Q_CMD_EMPTY;
1853 break;
1854
1855 case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1856 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1857 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1858 break;
1859
1860 default:
1861 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1862 command, fp->index);
1863 return;
1864 }
1865
1866 if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1867 q_obj->complete_cmd(bp, q_obj, drv_cmd))
1868
1869
1870
1871
1872
1873
1874
1875#ifdef BNX2X_STOP_ON_ERROR
1876 bnx2x_panic();
1877#else
1878 return;
1879#endif
1880
1881 smp_mb__before_atomic();
1882 atomic_inc(&bp->cq_spq_left);
1883
1884 smp_mb__after_atomic();
1885
1886 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1887
1888 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1889 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899 smp_mb__before_atomic();
1900 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1901 wmb();
1902 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1903 smp_mb__after_atomic();
1904
1905
1906 bnx2x_schedule_sp_task(bp);
1907 }
1908
1909 return;
1910}
1911
1912irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1913{
1914 struct bnx2x *bp = netdev_priv(dev_instance);
1915 u16 status = bnx2x_ack_int(bp);
1916 u16 mask;
1917 int i;
1918 u8 cos;
1919
1920
1921 if (unlikely(status == 0)) {
1922 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1923 return IRQ_NONE;
1924 }
1925 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1926
1927#ifdef BNX2X_STOP_ON_ERROR
1928 if (unlikely(bp->panic))
1929 return IRQ_HANDLED;
1930#endif
1931
1932 for_each_eth_queue(bp, i) {
1933 struct bnx2x_fastpath *fp = &bp->fp[i];
1934
1935 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1936 if (status & mask) {
1937
1938 for_each_cos_in_tx_queue(fp, cos)
1939 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1940 prefetch(&fp->sb_running_index[SM_RX_ID]);
1941 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1942 status &= ~mask;
1943 }
1944 }
1945
1946 if (CNIC_SUPPORT(bp)) {
1947 mask = 0x2;
1948 if (status & (mask | 0x1)) {
1949 struct cnic_ops *c_ops = NULL;
1950
1951 rcu_read_lock();
1952 c_ops = rcu_dereference(bp->cnic_ops);
1953 if (c_ops && (bp->cnic_eth_dev.drv_state &
1954 CNIC_DRV_STATE_HANDLES_IRQ))
1955 c_ops->cnic_handler(bp->cnic_data, NULL);
1956 rcu_read_unlock();
1957
1958 status &= ~mask;
1959 }
1960 }
1961
1962 if (unlikely(status & 0x1)) {
1963
1964
1965
1966
1967 bnx2x_schedule_sp_task(bp);
1968
1969 status &= ~0x1;
1970 if (!status)
1971 return IRQ_HANDLED;
1972 }
1973
1974 if (unlikely(status))
1975 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1976 status);
1977
1978 return IRQ_HANDLED;
1979}
1980
1981
1982
1983
1984
1985
1986
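/*
 * Acquire a hardware resource lock: set the request bit in the per-function
 * MISC driver-control register and poll (up to 1000 iterations of 5-10 ms)
 * until the lock is granted.
 */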
1987int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1988{
1989 u32 lock_status;
1990 u32 resource_bit = (1 << resource);
1991 int func = BP_FUNC(bp);
1992 u32 hw_lock_control_reg;
1993 int cnt;
1994
1995
1996 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1997 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1998 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1999 return -EINVAL;
2000 }
2001
2002 if (func <= 5) {
2003 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2004 } else {
2005 hw_lock_control_reg =
2006 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2007 }
2008
2009
2010 lock_status = REG_RD(bp, hw_lock_control_reg);
2011 if (lock_status & resource_bit) {
2012 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
2013 lock_status, resource_bit);
2014 return -EEXIST;
2015 }
2016
2017
2018 for (cnt = 0; cnt < 1000; cnt++) {
2019
2020 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2021 lock_status = REG_RD(bp, hw_lock_control_reg);
2022 if (lock_status & resource_bit)
2023 return 0;
2024
2025 usleep_range(5000, 10000);
2026 }
2027 BNX2X_ERR("Timeout\n");
2028 return -EAGAIN;
2029}
2030
2031int bnx2x_release_leader_lock(struct bnx2x *bp)
2032{
2033 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2034}
2035
2036int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2037{
2038 u32 lock_status;
2039 u32 resource_bit = (1 << resource);
2040 int func = BP_FUNC(bp);
2041 u32 hw_lock_control_reg;
2042
2043
2044 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2045 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2046 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2047 return -EINVAL;
2048 }
2049
2050 if (func <= 5) {
2051 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2052 } else {
2053 hw_lock_control_reg =
2054 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2055 }
2056
2057
2058 lock_status = REG_RD(bp, hw_lock_control_reg);
2059 if (!(lock_status & resource_bit)) {
2060 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2061 lock_status, resource_bit);
2062 return -EFAULT;
2063 }
2064
2065 REG_WR(bp, hw_lock_control_reg, resource_bit);
2066 return 0;
2067}
2068
2069int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2070{
2071
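	/* The GPIO should be swapped if swap register is set and active */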
2072 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2073 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2074 int gpio_shift = gpio_num +
2075 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2076 u32 gpio_mask = (1 << gpio_shift);
2077 u32 gpio_reg;
2078 int value;
2079
2080 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2081 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2082 return -EINVAL;
2083 }
2084
2085
2086 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2087
2088
2089 if ((gpio_reg & gpio_mask) == gpio_mask)
2090 value = 1;
2091 else
2092 value = 0;
2093
2094 return value;
2095}
2096
2097int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2098{
2099
2100 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2101 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2102 int gpio_shift = gpio_num +
2103 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2104 u32 gpio_mask = (1 << gpio_shift);
2105 u32 gpio_reg;
2106
2107 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2108 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2109 return -EINVAL;
2110 }
2111
2112 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2113
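	/* read GPIO and mask except the float bits */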
2114 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2115
2116 switch (mode) {
2117 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2118 DP(NETIF_MSG_LINK,
2119 "Set GPIO %d (shift %d) -> output low\n",
2120 gpio_num, gpio_shift);
2121
2122 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2123 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2124 break;
2125
2126 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2127 DP(NETIF_MSG_LINK,
2128 "Set GPIO %d (shift %d) -> output high\n",
2129 gpio_num, gpio_shift);
2130
2131 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2132 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2133 break;
2134
2135 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2136 DP(NETIF_MSG_LINK,
2137 "Set GPIO %d (shift %d) -> input\n",
2138 gpio_num, gpio_shift);
2139
2140 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2141 break;
2142
2143 default:
2144 break;
2145 }
2146
2147 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2148 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2149
2150 return 0;
2151}
2152
2153int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2154{
2155 u32 gpio_reg = 0;
2156 int rc = 0;
2157
2158
2159
2160 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2161
2162 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2163 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2164 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2165 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2166
2167 switch (mode) {
2168 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2169 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2170
2171 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2172 break;
2173
2174 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2175 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2176
2177 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2178 break;
2179
2180 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2181 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2182
2183 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2184 break;
2185
2186 default:
2187 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2188 rc = -EINVAL;
2189 break;
2190 }
2191
2192 if (rc == 0)
2193 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2194
2195 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2196
2197 return rc;
2198}
2199
2200int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2201{
2202
2203 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2204 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2205 int gpio_shift = gpio_num +
2206 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2207 u32 gpio_mask = (1 << gpio_shift);
2208 u32 gpio_reg;
2209
2210 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2211 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2212 return -EINVAL;
2213 }
2214
2215 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2216
2217 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2218
2219 switch (mode) {
2220 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2221 DP(NETIF_MSG_LINK,
2222 "Clear GPIO INT %d (shift %d) -> output low\n",
2223 gpio_num, gpio_shift);
2224
2225 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2226 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2227 break;
2228
2229 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2230 DP(NETIF_MSG_LINK,
2231 "Set GPIO INT %d (shift %d) -> output high\n",
2232 gpio_num, gpio_shift);
2233
2234 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2235 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2236 break;
2237
2238 default:
2239 break;
2240 }
2241
2242 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2243 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2244
2245 return 0;
2246}
2247
2248static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2249{
2250 u32 spio_reg;
2251
2252
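	/* Only 2 SPIOs are configurable */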
2253 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2254 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2255 return -EINVAL;
2256 }
2257
2258 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2259
2260 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2261
2262 switch (mode) {
2263 case MISC_SPIO_OUTPUT_LOW:
2264 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2265
2266 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2267 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2268 break;
2269
2270 case MISC_SPIO_OUTPUT_HIGH:
2271 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2272
2273 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2274 spio_reg |= (spio << MISC_SPIO_SET_POS);
2275 break;
2276
2277 case MISC_SPIO_INPUT_HI_Z:
2278 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2279
2280 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2281 break;
2282
2283 default:
2284 break;
2285 }
2286
2287 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2288 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2289
2290 return 0;
2291}
2292
2293void bnx2x_calc_fc_adv(struct bnx2x *bp)
2294{
2295 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2296
2297 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2298 ADVERTISED_Pause);
2299 switch (bp->link_vars.ieee_fc &
2300 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2301 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2302 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2303 ADVERTISED_Pause);
2304 break;
2305
2306 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2307 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2308 break;
2309
2310 default:
2311 break;
2312 }
2313}
2314
2315static void bnx2x_set_requested_fc(struct bnx2x *bp)
2316{
2317
2318
2319
2320
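	/* It is recommended to turn off RX flow control for jumbo frames
	 * for better performance
	 */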
2321 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2322 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2323 else
2324 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2325}
2326
2327static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2328{
2329 u32 pause_enabled = 0;
2330
2331 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2332 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2333 pause_enabled = 1;
2334
2335 REG_WR(bp, BAR_USTRORM_INTMEM +
2336 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2337 pause_enabled);
2338 }
2339
2340 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2341 pause_enabled ? "enabled" : "disabled");
2342}
2343
2344int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2345{
2346 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2347 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2348
2349 if (!BP_NOMCP(bp)) {
2350 bnx2x_set_requested_fc(bp);
2351 bnx2x_acquire_phy_lock(bp);
2352
2353 if (load_mode == LOAD_DIAG) {
2354 struct link_params *lp = &bp->link_params;
2355 lp->loopback_mode = LOOPBACK_XGXS;
2356
2357 if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2358 if (lp->speed_cap_mask[cfx_idx] &
2359 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2360 lp->req_line_speed[cfx_idx] =
2361 SPEED_20000;
2362 else if (lp->speed_cap_mask[cfx_idx] &
2363 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2364 lp->req_line_speed[cfx_idx] =
2365 SPEED_10000;
2366 else
2367 lp->req_line_speed[cfx_idx] =
2368 SPEED_1000;
2369 }
2370 }
2371
2372 if (load_mode == LOAD_LOOPBACK_EXT) {
2373 struct link_params *lp = &bp->link_params;
2374 lp->loopback_mode = LOOPBACK_EXT;
2375 }
2376
2377 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2378
2379 bnx2x_release_phy_lock(bp);
2380
2381 bnx2x_init_dropless_fc(bp);
2382
2383 bnx2x_calc_fc_adv(bp);
2384
2385 if (bp->link_vars.link_up) {
2386 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2387 bnx2x_link_report(bp);
2388 }
2389 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2390 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2391 return rc;
2392 }
2393 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2394 return -EINVAL;
2395}
2396
2397void bnx2x_link_set(struct bnx2x *bp)
2398{
2399 if (!BP_NOMCP(bp)) {
2400 bnx2x_acquire_phy_lock(bp);
2401 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2402 bnx2x_release_phy_lock(bp);
2403
2404 bnx2x_init_dropless_fc(bp);
2405
2406 bnx2x_calc_fc_adv(bp);
2407 } else
2408 BNX2X_ERR("Bootcode is missing - can not set link\n");
2409}
2410
2411static void bnx2x__link_reset(struct bnx2x *bp)
2412{
2413 if (!BP_NOMCP(bp)) {
2414 bnx2x_acquire_phy_lock(bp);
2415 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2416 bnx2x_release_phy_lock(bp);
2417 } else
2418 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2419}
2420
2421void bnx2x_force_link_reset(struct bnx2x *bp)
2422{
2423 bnx2x_acquire_phy_lock(bp);
2424 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2425 bnx2x_release_phy_lock(bp);
2426}
2427
2428u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2429{
2430 u8 rc = 0;
2431
2432 if (!BP_NOMCP(bp)) {
2433 bnx2x_acquire_phy_lock(bp);
2434 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2435 is_serdes);
2436 bnx2x_release_phy_lock(bp);
2437 } else
2438 BNX2X_ERR("Bootcode is missing - can not test link\n");
2439
2440 return rc;
2441}
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
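/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set
     to the default minimum.
 */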
2452static void bnx2x_calc_vn_min(struct bnx2x *bp,
2453 struct cmng_init_input *input)
2454{
2455 int all_zero = 1;
2456 int vn;
2457
2458 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2459 u32 vn_cfg = bp->mf_config[vn];
2460 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2461 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2462
2463
2464 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2465 vn_min_rate = 0;
2466
2467 else if (!vn_min_rate)
2468 vn_min_rate = DEF_MIN_RATE;
2469 else
2470 all_zero = 0;
2471
2472 input->vnic_min_rate[vn] = vn_min_rate;
2473 }
2474
2475
2476 if (BNX2X_IS_ETS_ENABLED(bp)) {
2477 input->flags.cmng_enables &=
2478 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2479 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2480 } else if (all_zero) {
2481 input->flags.cmng_enables &=
2482 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2483 DP(NETIF_MSG_IFUP,
2484 "All MIN values are zeroes fairness will be disabled\n");
2485 } else
2486 input->flags.cmng_enables |=
2487 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2488}
2489
2490static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2491 struct cmng_init_input *input)
2492{
2493 u16 vn_max_rate;
2494 u32 vn_cfg = bp->mf_config[vn];
2495
2496 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2497 vn_max_rate = 0;
2498 else {
2499 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2500
2501 if (IS_MF_PERCENT_BW(bp)) {
2502
2503 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2504 } else
2505
2506 vn_max_rate = maxCfg * 100;
2507 }
2508
2509 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2510
2511 input->vnic_max_rate[vn] = vn_max_rate;
2512}
2513
2514static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2515{
2516 if (CHIP_REV_IS_SLOW(bp))
2517 return CMNG_FNS_NONE;
2518 if (IS_MF(bp))
2519 return CMNG_FNS_MINMAX;
2520
2521 return CMNG_FNS_NONE;
2522}
2523
2524void bnx2x_read_mf_cfg(struct bnx2x *bp)
2525{
2526 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2527
2528 if (BP_NOMCP(bp))
2529 return;
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
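	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */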
2542 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2543 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2544
2545 if (func >= E1H_FUNC_MAX)
2546 break;
2547
2548 bp->mf_config[vn] =
2549 MF_CFG_RD(bp, func_mf_config[func].config);
2550 }
2551 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2552 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2553 bp->flags |= MF_FUNC_DIS;
2554 } else {
2555 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2556 bp->flags &= ~MF_FUNC_DIS;
2557 }
2558}
2559
2560static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2561{
2562 struct cmng_init_input input;
2563 memset(&input, 0, sizeof(struct cmng_init_input));
2564
2565 input.port_rate = bp->link_vars.line_speed;
2566
2567 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2568 int vn;
2569
2570
2571 if (read_cfg)
2572 bnx2x_read_mf_cfg(bp);
2573
2574
2575 bnx2x_calc_vn_min(bp, &input);
2576
2577
2578 if (bp->port.pmf)
2579 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2580 bnx2x_calc_vn_max(bp, vn, &input);
2581
2582
2583 input.flags.cmng_enables |=
2584 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2585
2586 bnx2x_init_cmng(&input, &bp->cmng);
2587 return;
2588 }
2589
2590
2591 DP(NETIF_MSG_IFUP,
2592 "rate shaping and fairness are disabled\n");
2593}
2594
2595static void storm_memset_cmng(struct bnx2x *bp,
2596 struct cmng_init *cmng,
2597 u8 port)
2598{
2599 int vn;
2600 size_t size = sizeof(struct cmng_struct_per_port);
2601
2602 u32 addr = BAR_XSTRORM_INTMEM +
2603 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2604
2605 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2606
2607 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2608 int func = func_by_vn(bp, vn);
2609
2610 addr = BAR_XSTRORM_INTMEM +
2611 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2612 size = sizeof(struct rate_shaping_vars_per_vn);
2613 __storm_memset_struct(bp, addr, size,
2614 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2615
2616 addr = BAR_XSTRORM_INTMEM +
2617 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2618 size = sizeof(struct fairness_vars_per_vn);
2619 __storm_memset_struct(bp, addr, size,
2620 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2621 }
2622}
2623
2624
2625void bnx2x_set_local_cmng(struct bnx2x *bp)
2626{
2627 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2628
2629 if (cmng_fns != CMNG_FNS_NONE) {
2630 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2631 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2632 } else {
2633
2634 DP(NETIF_MSG_IFUP,
2635 "single function mode without fairness\n");
2636 }
2637}
2638
2639
2640static void bnx2x_link_attn(struct bnx2x *bp)
2641{
2642
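	/* Make sure that we are synced with the current statistics */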
2643 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2644
2645 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2646
2647 bnx2x_init_dropless_fc(bp);
2648
2649 if (bp->link_vars.link_up) {
2650
2651 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2652 struct host_port_stats *pstats;
2653
2654 pstats = bnx2x_sp(bp, port_stats);
2655
2656 memset(&(pstats->mac_stx[0]), 0,
2657 sizeof(struct mac_stx));
2658 }
2659 if (bp->state == BNX2X_STATE_OPEN)
2660 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2661 }
2662
2663 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2664 bnx2x_set_local_cmng(bp);
2665
2666 __bnx2x_link_report(bp);
2667
2668 if (IS_MF(bp))
2669 bnx2x_link_sync_notify(bp);
2670}
2671
2672void bnx2x__link_status_update(struct bnx2x *bp)
2673{
2674 if (bp->state != BNX2X_STATE_OPEN)
2675 return;
2676
2677
2678 if (IS_PF(bp)) {
2679 bnx2x_dcbx_pmf_update(bp);
2680 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2681 if (bp->link_vars.link_up)
2682 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2683 else
2684 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2685
2686 bnx2x_link_report(bp);
2687
2688 } else {
2689 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2690 SUPPORTED_10baseT_Full |
2691 SUPPORTED_100baseT_Half |
2692 SUPPORTED_100baseT_Full |
2693 SUPPORTED_1000baseT_Full |
2694 SUPPORTED_2500baseX_Full |
2695 SUPPORTED_10000baseT_Full |
2696 SUPPORTED_TP |
2697 SUPPORTED_FIBRE |
2698 SUPPORTED_Autoneg |
2699 SUPPORTED_Pause |
2700 SUPPORTED_Asym_Pause);
2701 bp->port.advertising[0] = bp->port.supported[0];
2702
2703 bp->link_params.bp = bp;
2704 bp->link_params.port = BP_PORT(bp);
2705 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2706 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2707 bp->link_params.req_line_speed[0] = SPEED_10000;
2708 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2709 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2710 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2711 bp->link_vars.line_speed = SPEED_10000;
2712 bp->link_vars.link_status =
2713 (LINK_STATUS_LINK_UP |
2714 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2715 bp->link_vars.link_up = 1;
2716 bp->link_vars.duplex = DUPLEX_FULL;
2717 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2718 __bnx2x_link_report(bp);
2719
2720 bnx2x_sample_bulletin(bp);
2721
2722
2723
2724
2725
2726
2727 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2728 }
2729}
2730
2731static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2732 u16 vlan_val, u8 allowed_prio)
2733{
2734 struct bnx2x_func_state_params func_params = {NULL};
2735 struct bnx2x_func_afex_update_params *f_update_params =
2736 &func_params.params.afex_update;
2737
2738 func_params.f_obj = &bp->func_obj;
2739 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2740
2741
2742
2743
2744
2745 f_update_params->vif_id = vifid;
2746 f_update_params->afex_default_vlan = vlan_val;
2747 f_update_params->allowed_priorities = allowed_prio;
2748
2749
2750 if (bnx2x_func_state_change(bp, &func_params) < 0)
2751 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2752
2753 return 0;
2754}
2755
2756static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2757 u16 vif_index, u8 func_bit_map)
2758{
2759 struct bnx2x_func_state_params func_params = {NULL};
2760 struct bnx2x_func_afex_viflists_params *update_params =
2761 &func_params.params.afex_viflists;
2762 int rc;
2763 u32 drv_msg_code;
2764
2765
2766 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2767 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2768 cmd_type);
2769
2770 func_params.f_obj = &bp->func_obj;
2771 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2772
2773
2774 update_params->afex_vif_list_command = cmd_type;
2775 update_params->vif_list_index = vif_index;
2776 update_params->func_bit_map =
2777 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2778 update_params->func_to_clear = 0;
2779 drv_msg_code =
2780 (cmd_type == VIF_LIST_RULE_GET) ?
2781 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2782 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2783
2784
2785
2786
2787 rc = bnx2x_func_state_change(bp, &func_params);
2788 if (rc < 0)
2789 bnx2x_fw_command(bp, drv_msg_code, 0);
2790
2791 return 0;
2792}
2793
2794static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2795{
2796 struct afex_stats afex_stats;
2797 u32 func = BP_ABS_FUNC(bp);
2798 u32 mf_config;
2799 u16 vlan_val;
2800 u32 vlan_prio;
2801 u16 vif_id;
2802 u8 allowed_prio;
2803 u8 vlan_mode;
2804 u32 addr_to_write, vifid, addrs, stats_type, i;
2805
2806 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2807 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2808 DP(BNX2X_MSG_MCP,
2809 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2810 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2811 }
2812
2813 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2814 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2815 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2816 DP(BNX2X_MSG_MCP,
2817 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2818 vifid, addrs);
2819 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2820 addrs);
2821 }
2822
2823 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2824 addr_to_write = SHMEM2_RD(bp,
2825 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2826 stats_type = SHMEM2_RD(bp,
2827 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2828
2829 DP(BNX2X_MSG_MCP,
2830 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2831 addr_to_write);
2832
2833 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2834
2835
2836 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2837 REG_WR(bp, addr_to_write + i*sizeof(u32),
2838 *(((u32 *)(&afex_stats))+i));
2839
2840
2841 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2842 }
2843
2844 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2845 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2846 bp->mf_config[BP_VN(bp)] = mf_config;
2847 DP(BNX2X_MSG_MCP,
2848 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2849 mf_config);
2850
2851
2852 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2853
2854 struct cmng_init_input cmng_input;
2855 struct rate_shaping_vars_per_vn m_rs_vn;
2856 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2857 u32 addr = BAR_XSTRORM_INTMEM +
2858 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2859
2860 bp->mf_config[BP_VN(bp)] = mf_config;
2861
2862 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2863 m_rs_vn.vn_counter.rate =
2864 cmng_input.vnic_max_rate[BP_VN(bp)];
2865 m_rs_vn.vn_counter.quota =
2866 (m_rs_vn.vn_counter.rate *
2867 RS_PERIODIC_TIMEOUT_USEC) / 8;
2868
2869 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2870
2871
2872 vif_id =
2873 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2874 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2875 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2876 vlan_val =
2877 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2878 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2879 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2880 vlan_prio = (mf_config &
2881 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2882 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2883 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2884 vlan_mode =
2885 (MF_CFG_RD(bp,
2886 func_mf_config[func].afex_config) &
2887 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2888 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2889 allowed_prio =
2890 (MF_CFG_RD(bp,
2891 func_mf_config[func].afex_config) &
2892 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2893 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2894
2895
2896 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2897 allowed_prio))
2898 return;
2899
2900 bp->afex_def_vlan_tag = vlan_val;
2901 bp->afex_vlan_mode = vlan_mode;
2902 } else {
2903
2904 bnx2x_link_report(bp);
2905
2906
2907 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2908
2909
2910 bp->afex_def_vlan_tag = -1;
2911 }
2912 }
2913}
2914
2915static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2916{
2917 struct bnx2x_func_switch_update_params *switch_update_params;
2918 struct bnx2x_func_state_params func_params;
2919
2920 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2921 switch_update_params = &func_params.params.switch_update;
2922 func_params.f_obj = &bp->func_obj;
2923 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2924
2925
2926 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2927 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2928
2929 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2930 int func = BP_ABS_FUNC(bp);
2931 u32 val;
2932
2933
2934 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2935 FUNC_MF_CFG_E1HOV_TAG_MASK;
2936 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2937 bp->mf_ov = val;
2938 } else {
2939 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2940 goto fail;
2941 }
2942
2943
2944 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2945 bp->mf_ov);
2946
2947
2948 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2949 &switch_update_params->changes);
2950 switch_update_params->vlan = bp->mf_ov;
2951
2952 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2953 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2954 bp->mf_ov);
2955 goto fail;
2956 } else {
2957 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2958 bp->mf_ov);
2959 }
2960 } else {
2961 goto fail;
2962 }
2963
2964 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2965 return;
2966fail:
2967 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2968}
2969
2970static void bnx2x_pmf_update(struct bnx2x *bp)
2971{
2972 int port = BP_PORT(bp);
2973 u32 val;
2974
2975 bp->port.pmf = 1;
2976 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2977
2978
2979
2980
2981
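	/* We need the barrier to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
	 */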
2982 smp_mb();
2983
2984
2985 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2986
2987 bnx2x_dcbx_pmf_update(bp);
2988
2989
2990 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2991 if (bp->common.int_block == INT_BLOCK_HC) {
2992 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2993 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2994 } else if (!CHIP_IS_E1x(bp)) {
2995 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2996 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2997 }
2998
2999 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3000}
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
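/* send the MCP a request, block until there is a reply */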
3011u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3012{
3013 int mb_idx = BP_FW_MB_IDX(bp);
3014 u32 seq;
3015 u32 rc = 0;
3016 u32 cnt = 1;
3017 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3018
3019 mutex_lock(&bp->fw_mb_mutex);
3020 seq = ++bp->fw_seq;
3021 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3022 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3023
3024 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3025 (command | seq), param);
3026
3027 do {
3028
3029 msleep(delay);
3030
3031 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3032
3033
3034 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3035
3036 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3037 cnt*delay, rc, seq);
3038
3039
3040 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3041 rc &= FW_MSG_CODE_MASK;
3042 else {
3043
3044 BNX2X_ERR("FW failed to respond!\n");
3045 bnx2x_fw_dump(bp);
3046 rc = 0;
3047 }
3048 mutex_unlock(&bp->fw_mb_mutex);
3049
3050 return rc;
3051}
3052
3053static void storm_memset_func_cfg(struct bnx2x *bp,
3054 struct tstorm_eth_function_common_config *tcfg,
3055 u16 abs_fid)
3056{
3057 size_t size = sizeof(struct tstorm_eth_function_common_config);
3058
3059 u32 addr = BAR_TSTRORM_INTMEM +
3060 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3061
3062 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3063}
3064
3065void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3066{
3067 if (CHIP_IS_E1x(bp)) {
3068 struct tstorm_eth_function_common_config tcfg = {0};
3069
3070 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3071 }
3072
3073
3074 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3075 storm_memset_func_en(bp, p->func_id, 1);
3076
3077
3078 if (p->spq_active) {
3079 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3080 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3081 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3082 }
3083}
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
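/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */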
3094static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3095 struct bnx2x_fastpath *fp,
3096 bool zero_stats)
3097{
3098 unsigned long flags = 0;
3099
3100
3101 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3102
3103
3104
3105
3106
3107
3108 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3109 if (zero_stats)
3110 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3111
3112 if (bp->flags & TX_SWITCHING)
3113 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3114
3115 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3116 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3117
3118#ifdef BNX2X_STOP_ON_ERROR
3119 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3120#endif
3121
3122 return flags;
3123}
3124
3125static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3126 struct bnx2x_fastpath *fp,
3127 bool leading)
3128{
3129 unsigned long flags = 0;
3130
3131
3132 if (IS_MF_SD(bp))
3133 __set_bit(BNX2X_Q_FLG_OV, &flags);
3134
3135 if (IS_FCOE_FP(fp)) {
3136 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3137
3138 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3139 }
3140
3141 if (fp->mode != TPA_MODE_DISABLED) {
3142 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3143 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3144 if (fp->mode == TPA_MODE_GRO)
3145 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3146 }
3147
3148 if (leading) {
3149 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3150 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3151 }
3152
3153
3154 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3155
3156
3157 if (IS_MF_AFEX(bp))
3158 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3159
3160 return flags | bnx2x_get_common_flags(bp, fp, true);
3161}
3162
3163static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3164 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3165 u8 cos)
3166{
3167 gen_init->stat_id = bnx2x_stats_id(fp);
3168 gen_init->spcl_id = fp->cl_id;
3169
3170
3171 if (IS_FCOE_FP(fp))
3172 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3173 else
3174 gen_init->mtu = bp->dev->mtu;
3175
3176 gen_init->cos = cos;
3177
3178 gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3179}
3180
3181static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3182 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3183 struct bnx2x_rxq_setup_params *rxq_init)
3184{
3185 u8 max_sge = 0;
3186 u16 sge_sz = 0;
3187 u16 tpa_agg_size = 0;
3188
3189 if (fp->mode != TPA_MODE_DISABLED) {
3190 pause->sge_th_lo = SGE_TH_LO(bp);
3191 pause->sge_th_hi = SGE_TH_HI(bp);
3192
3193
3194 WARN_ON(bp->dropless_fc &&
3195 pause->sge_th_hi + FW_PREFETCH_CNT >
3196 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3197
3198 tpa_agg_size = TPA_AGG_SIZE;
3199 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3200 SGE_PAGE_SHIFT;
3201 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3202 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3203 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3204 }
3205
3206
3207 if (!CHIP_IS_E1(bp)) {
3208 pause->bd_th_lo = BD_TH_LO(bp);
3209 pause->bd_th_hi = BD_TH_HI(bp);
3210
3211 pause->rcq_th_lo = RCQ_TH_LO(bp);
3212 pause->rcq_th_hi = RCQ_TH_HI(bp);
3213
3214
3215
3216
3217 WARN_ON(bp->dropless_fc &&
3218 pause->bd_th_hi + FW_PREFETCH_CNT >
3219 bp->rx_ring_size);
3220 WARN_ON(bp->dropless_fc &&
3221 pause->rcq_th_hi + FW_PREFETCH_CNT >
3222 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3223
3224 pause->pri_map = 1;
3225 }
3226
3227
3228 rxq_init->dscr_map = fp->rx_desc_mapping;
3229 rxq_init->sge_map = fp->rx_sge_mapping;
3230 rxq_init->rcq_map = fp->rx_comp_mapping;
3231 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3232
3233
3234
3235
3236 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3237 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3238
3239 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3240 rxq_init->tpa_agg_sz = tpa_agg_size;
3241 rxq_init->sge_buf_sz = sge_sz;
3242 rxq_init->max_sges_pkt = max_sge;
3243 rxq_init->rss_engine_id = BP_FUNC(bp);
3244 rxq_init->mcast_engine_id = BP_FUNC(bp);
3245
3246
3247
3248
3249
3250
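	/* Maximum number of simultaneous TPA aggregations for this queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */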
3251 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3252
3253 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3254 rxq_init->fw_sb_id = fp->fw_sb_id;
3255
3256 if (IS_FCOE_FP(fp))
3257 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3258 else
3259 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3260
3261
3262
3263 if (IS_MF_AFEX(bp)) {
3264 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3265 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3266 }
3267}
3268
3269static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3270 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3271 u8 cos)
3272{
3273 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3274 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3275 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3276 txq_init->fw_sb_id = fp->fw_sb_id;
3277
3278
3279
3280
3281
3282 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3283
3284 if (IS_FCOE_FP(fp)) {
3285 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3286 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3287 }
3288}
3289
3290static void bnx2x_pf_init(struct bnx2x *bp)
3291{
3292 struct bnx2x_func_init_params func_init = {0};
3293 struct event_ring_data eq_data = { {0} };
3294
3295 if (!CHIP_IS_E1x(bp)) {
3296
3297
3298 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3299 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3300 (CHIP_MODE_IS_4_PORT(bp) ?
3301 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3302
3303 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3304 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3305 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3306 (CHIP_MODE_IS_4_PORT(bp) ?
3307 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3308 }
3309
3310 func_init.spq_active = true;
3311 func_init.pf_id = BP_FUNC(bp);
3312 func_init.func_id = BP_FUNC(bp);
3313 func_init.spq_map = bp->spq_mapping;
3314 func_init.spq_prod = bp->spq_prod_idx;
3315
3316 bnx2x_func_init(bp, &func_init);
3317
3318 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3319
3320
3321
3322
3323
3324
3325
3326 bp->link_vars.line_speed = SPEED_10000;
3327 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3328
3329
3330 if (bp->port.pmf)
3331 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3332
3333
3334 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3335 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3336 eq_data.producer = bp->eq_prod;
3337 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3338 eq_data.sb_id = DEF_SB_ID;
3339 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3340}
3341
3342static void bnx2x_e1h_disable(struct bnx2x *bp)
3343{
3344 int port = BP_PORT(bp);
3345
3346 bnx2x_tx_disable(bp);
3347
3348 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3349}
3350
3351static void bnx2x_e1h_enable(struct bnx2x *bp)
3352{
3353 int port = BP_PORT(bp);
3354
3355 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3356 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3357
3358
3359 netif_tx_wake_all_queues(bp->dev);
3360
3361
3362
3363
3364
3365}
3366
3367#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3368
3369static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3370{
3371 struct eth_stats_info *ether_stat =
3372 &bp->slowpath->drv_info_to_mcp.ether_stat;
3373 struct bnx2x_vlan_mac_obj *mac_obj =
3374 &bp->sp_objs->mac_obj;
3375 int i;
3376
3377 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3378 ETH_STAT_INFO_VERSION_LEN);
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3389 memset(ether_stat->mac_local + i, 0,
3390 sizeof(ether_stat->mac_local[0]));
3391 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3392 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3393 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3394 ETH_ALEN);
3395 ether_stat->mtu_size = bp->dev->mtu;
3396 if (bp->dev->features & NETIF_F_RXCSUM)
3397 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3398 if (bp->dev->features & NETIF_F_TSO)
3399 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3400 ether_stat->feature_flags |= bp->common.boot_mode;
3401
3402 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3403
3404 ether_stat->txq_size = bp->tx_ring_size;
3405 ether_stat->rxq_size = bp->rx_ring_size;
3406
3407#ifdef CONFIG_BNX2X_SRIOV
3408 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3409#endif
3410}
3411
3412static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3413{
3414 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3415 struct fcoe_stats_info *fcoe_stat =
3416 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3417
3418 if (!CNIC_LOADED(bp))
3419 return;
3420
3421 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3422
3423 fcoe_stat->qos_priority =
3424 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3425
3426
3427 if (!NO_FCOE(bp)) {
3428 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3429 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3430 tstorm_queue_statistics;
3431
3432 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3433 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3434 xstorm_queue_statistics;
3435
3436 struct fcoe_statistics_params *fw_fcoe_stat =
3437 &bp->fw_stats_data->fcoe;
3438
3439 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3440 fcoe_stat->rx_bytes_lo,
3441 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3442
3443 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3444 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3445 fcoe_stat->rx_bytes_lo,
3446 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3447
3448 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3449 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3450 fcoe_stat->rx_bytes_lo,
3451 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3452
3453 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3454 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3455 fcoe_stat->rx_bytes_lo,
3456 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3457
3458 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3459 fcoe_stat->rx_frames_lo,
3460 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3461
3462 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3463 fcoe_stat->rx_frames_lo,
3464 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3465
3466 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3467 fcoe_stat->rx_frames_lo,
3468 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3469
3470 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3471 fcoe_stat->rx_frames_lo,
3472 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3473
3474 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3475 fcoe_stat->tx_bytes_lo,
3476 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3477
3478 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3479 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3480 fcoe_stat->tx_bytes_lo,
3481 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3482
3483 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3484 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3485 fcoe_stat->tx_bytes_lo,
3486 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3487
3488 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3489 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3490 fcoe_stat->tx_bytes_lo,
3491 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3492
3493 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3494 fcoe_stat->tx_frames_lo,
3495 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3496
3497 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3498 fcoe_stat->tx_frames_lo,
3499 fcoe_q_xstorm_stats->ucast_pkts_sent);
3500
3501 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3502 fcoe_stat->tx_frames_lo,
3503 fcoe_q_xstorm_stats->bcast_pkts_sent);
3504
3505 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3506 fcoe_stat->tx_frames_lo,
3507 fcoe_q_xstorm_stats->mcast_pkts_sent);
3508 }
3509
3510
3511 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3512}
3513
3514static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3515{
3516 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3517 struct iscsi_stats_info *iscsi_stat =
3518 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3519
3520 if (!CNIC_LOADED(bp))
3521 return;
3522
3523 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3524 ETH_ALEN);
3525
3526 iscsi_stat->qos_priority =
3527 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3528
3529
3530 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3531}
3532
3533
3534
3535
3536
3537
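/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify other functions about the change
 */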
3538static void bnx2x_config_mf_bw(struct bnx2x *bp)
3539{
3540
3541
3542
3543
3544 if (!IS_MF(bp)) {
3545 DP(BNX2X_MSG_MCP,
3546 "Ignoring MF BW config in single function mode\n");
3547 return;
3548 }
3549
3550 if (bp->link_vars.link_up) {
3551 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3552 bnx2x_link_sync_notify(bp);
3553 }
3554 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3555}
3556
3557static void bnx2x_set_mf_bw(struct bnx2x *bp)
3558{
3559 bnx2x_config_mf_bw(bp);
3560 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3561}
3562
3563static void bnx2x_handle_eee_event(struct bnx2x *bp)
3564{
3565 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3566 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3567}
3568
3569#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3570#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3571
3572static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3573{
3574 enum drv_info_opcode op_code;
3575 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3576 bool release = false;
3577 int wait;
3578
3579
3580 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3581 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3582 return;
3583 }
3584
3585 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3586 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3587
3588
3589 mutex_lock(&bp->drv_info_mutex);
3590
3591 memset(&bp->slowpath->drv_info_to_mcp, 0,
3592 sizeof(union drv_info_to_mcp));
3593
3594 switch (op_code) {
3595 case ETH_STATS_OPCODE:
3596 bnx2x_drv_info_ether_stat(bp);
3597 break;
3598 case FCOE_STATS_OPCODE:
3599 bnx2x_drv_info_fcoe_stat(bp);
3600 break;
3601 case ISCSI_STATS_OPCODE:
3602 bnx2x_drv_info_iscsi_stat(bp);
3603 break;
3604 default:
3605
3606 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3607 goto out;
3608 }
3609
3610
3611
3612
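	/* If we got a drv_info attention from MFW then these fields are
	 * defined in shmem2
	 */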
3613 SHMEM2_WR(bp, drv_info_host_addr_lo,
3614 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3615 SHMEM2_WR(bp, drv_info_host_addr_hi,
3616 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3617
3618 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3619
3620
3621
3622
3623
3624 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3625 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3626 } else if (!bp->drv_info_mng_owner) {
3627 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3628
3629 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3630 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3631
3632
3633 if (indication & bit) {
3634 SHMEM2_WR(bp, mfw_drv_indication,
3635 indication & ~bit);
3636 release = true;
3637 break;
3638 }
3639
3640 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3641 }
3642 }
3643 if (!release) {
3644 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3645 bp->drv_info_mng_owner = true;
3646 }
3647
3648out:
3649 mutex_unlock(&bp->drv_info_mutex);
3650}
3651
3652static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3653{
3654 u8 vals[4];
3655 int i = 0;
3656
3657 if (bnx2x_format) {
3658 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3659 &vals[0], &vals[1], &vals[2], &vals[3]);
3660 if (i > 0)
3661 vals[0] -= '0';
3662 } else {
3663 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3664 &vals[0], &vals[1], &vals[2], &vals[3]);
3665 }
3666
3667 while (i < 4)
3668 vals[i++] = 0;
3669
3670 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3671}
3672
3673void bnx2x_update_mng_version(struct bnx2x *bp)
3674{
3675 u32 iscsiver = DRV_VER_NOT_LOADED;
3676 u32 fcoever = DRV_VER_NOT_LOADED;
3677 u32 ethver = DRV_VER_NOT_LOADED;
3678 int idx = BP_FW_MB_IDX(bp);
3679 u8 *version;
3680
3681 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3682 return;
3683
3684 mutex_lock(&bp->drv_info_mutex);
3685
3686 if (bp->drv_info_mng_owner)
3687 goto out;
3688
3689 if (bp->state != BNX2X_STATE_OPEN)
3690 goto out;
3691
3692
3693 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3694 if (!CNIC_LOADED(bp))
3695 goto out;
3696
3697
3698 memset(&bp->slowpath->drv_info_to_mcp, 0,
3699 sizeof(union drv_info_to_mcp));
3700 bnx2x_drv_info_iscsi_stat(bp);
3701 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3702 iscsiver = bnx2x_update_mng_version_utility(version, false);
3703
3704 memset(&bp->slowpath->drv_info_to_mcp, 0,
3705 sizeof(union drv_info_to_mcp));
3706 bnx2x_drv_info_fcoe_stat(bp);
3707 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3708 fcoever = bnx2x_update_mng_version_utility(version, false);
3709
3710out:
3711 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3712 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3713 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3714
3715 mutex_unlock(&bp->drv_info_mutex);
3716
3717 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3718 ethver, iscsiver, fcoever);
3719}
3720
3721void bnx2x_update_mfw_dump(struct bnx2x *bp)
3722{
3723 u32 drv_ver;
3724 u32 valid_dump;
3725
3726 if (!SHMEM2_HAS(bp, drv_info))
3727 return;
3728
3729
3730 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3731
3732 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3733 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3734
3735 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3736
3737
3738 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3739
3740 if (valid_dump & FIRST_DUMP_VALID)
3741 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3742
3743 if (valid_dump & SECOND_DUMP_VALID)
3744 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3745}
3746
3747static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3748{
3749 u32 cmd_ok, cmd_fail;
3750
3751
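	/* sanity - DCC and OEM events are mutually exclusive */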
3752 if (event & DRV_STATUS_DCC_EVENT_MASK &&
3753 event & DRV_STATUS_OEM_EVENT_MASK) {
3754 BNX2X_ERR("Received simultaneous events %08x\n", event);
3755 return;
3756 }
3757
3758 if (event & DRV_STATUS_DCC_EVENT_MASK) {
3759 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3760 cmd_ok = DRV_MSG_CODE_DCC_OK;
3761 } else {
3762 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3763 cmd_ok = DRV_MSG_CODE_OEM_OK;
3764 }
3765
3766 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3767
3768 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3769 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3770
3771
3772
3773
3774 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3775 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3776 bp->flags |= MF_FUNC_DIS;
3777
3778 bnx2x_e1h_disable(bp);
3779 } else {
3780 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3781 bp->flags &= ~MF_FUNC_DIS;
3782
3783 bnx2x_e1h_enable(bp);
3784 }
3785 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3786 DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3787 }
3788
3789 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3790 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3791 bnx2x_config_mf_bw(bp);
3792 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3793 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3794 }
3795
3796
3797 if (event)
3798 bnx2x_fw_command(bp, cmd_fail, 0);
3799 else
3800 bnx2x_fw_command(bp, cmd_ok, 0);
3801}
3802
3803
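/* must be called under the spq lock */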
3804static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3805{
3806 struct eth_spe *next_spe = bp->spq_prod_bd;
3807
3808 if (bp->spq_prod_bd == bp->spq_last_bd) {
3809 bp->spq_prod_bd = bp->spq;
3810 bp->spq_prod_idx = 0;
3811 DP(BNX2X_MSG_SP, "end of spq\n");
3812 } else {
3813 bp->spq_prod_bd++;
3814 bp->spq_prod_idx++;
3815 }
3816 return next_spe;
3817}
3818
3819
3820static void bnx2x_sp_prod_update(struct bnx2x *bp)
3821{
3822 int func = BP_FUNC(bp);
3823
3824
3825
3826
3827
3828
3829 mb();
3830
3831 REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3832 bp->spq_prod_idx);
3833}
3834
3835
3836
3837
3838
3839
3840
3841static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3842{
3843 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3844 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3845 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3846 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3847 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3848 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3849 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3850 return true;
3851 else
3852 return false;
3853}
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
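/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */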
3869int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3870 u32 data_hi, u32 data_lo, int cmd_type)
3871{
3872 struct eth_spe *spe;
3873 u16 type;
3874 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3875
3876#ifdef BNX2X_STOP_ON_ERROR
3877 if (unlikely(bp->panic)) {
3878 BNX2X_ERR("Can't post SP when there is panic\n");
3879 return -EIO;
3880 }
3881#endif
3882
3883 spin_lock_bh(&bp->spq_lock);
3884
3885 if (common) {
3886 if (!atomic_read(&bp->eq_spq_left)) {
3887 BNX2X_ERR("BUG! EQ ring full!\n");
3888 spin_unlock_bh(&bp->spq_lock);
3889 bnx2x_panic();
3890 return -EBUSY;
3891 }
3892 } else if (!atomic_read(&bp->cq_spq_left)) {
3893 BNX2X_ERR("BUG! SPQ ring full!\n");
3894 spin_unlock_bh(&bp->spq_lock);
3895 bnx2x_panic();
3896 return -EBUSY;
3897 }
3898
3899 spe = bnx2x_sp_get_next(bp);
3900
3901
3902 spe->hdr.conn_and_cmd_data =
3903 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3904 HW_CID(bp, cid));
3905
3906
3907
3908
3909
3910 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3911 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3912 SPE_HDR_CONN_TYPE;
3913 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3914 SPE_HDR_FUNCTION_ID);
3915 } else {
3916 type = cmd_type;
3917 }
3918
3919 spe->hdr.type = cpu_to_le16(type);
3920
3921 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3922 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3923
3924
3925
3926
3927
3928
3929 if (common)
3930 atomic_dec(&bp->eq_spq_left);
3931 else
3932 atomic_dec(&bp->cq_spq_left);
3933
3934 DP(BNX2X_MSG_SP,
3935 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3936 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3937 (u32)(U64_LO(bp->spq_mapping) +
3938 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3939 HW_CID(bp, cid), data_hi, data_lo, type,
3940 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3941
3942 bnx2x_sp_prod_update(bp);
3943 spin_unlock_bh(&bp->spq_lock);
3944 return 0;
3945}
3946
3947
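/* acquire split MCP access lock register */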
3948static int bnx2x_acquire_alr(struct bnx2x *bp)
3949{
3950 u32 j, val;
3951 int rc = 0;
3952
3953 might_sleep();
3954 for (j = 0; j < 1000; j++) {
3955 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3956 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3957 if (val & MCPR_ACCESS_LOCK_LOCK)
3958 break;
3959
3960 usleep_range(5000, 10000);
3961 }
3962 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3963 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3964 rc = -EBUSY;
3965 }
3966
3967 return rc;
3968}
3969
3970
3971static void bnx2x_release_alr(struct bnx2x *bp)
3972{
3973 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3974}
3975
3976#define BNX2X_DEF_SB_ATT_IDX 0x0001
3977#define BNX2X_DEF_SB_IDX 0x0002
3978
3979static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3980{
3981 struct host_sp_status_block *def_sb = bp->def_status_blk;
3982 u16 rc = 0;
3983
3984 barrier();
3985 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3986 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3987 rc |= BNX2X_DEF_SB_ATT_IDX;
3988 }
3989
3990 if (bp->def_idx != def_sb->sp_sb.running_index) {
3991 bp->def_idx = def_sb->sp_sb.running_index;
3992 rc |= BNX2X_DEF_SB_IDX;
3993 }
3994
3995
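	/* Do not reorder: indices reading should complete before handling */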
3996 barrier();
3997 return rc;
3998}
3999
4000
4001
4002
4003
4004static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4005{
4006 int port = BP_PORT(bp);
4007 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4008 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4009 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4010 NIG_REG_MASK_INTERRUPT_PORT0;
4011 u32 aeu_mask;
4012 u32 nig_mask = 0;
4013 u32 reg_addr;
4014
4015 if (bp->attn_state & asserted)
4016 BNX2X_ERR("IGU ERROR\n");
4017
4018 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4019 aeu_mask = REG_RD(bp, aeu_addr);
4020
4021 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4022 aeu_mask, asserted);
4023 aeu_mask &= ~(asserted & 0x3ff);
4024 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4025
4026 REG_WR(bp, aeu_addr, aeu_mask);
4027 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4028
4029 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4030 bp->attn_state |= asserted;
4031 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4032
4033 if (asserted & ATTN_HARD_WIRED_MASK) {
4034 if (asserted & ATTN_NIG_FOR_FUNC) {
4035
4036 bnx2x_acquire_phy_lock(bp);
4037
4038
4039 nig_mask = REG_RD(bp, nig_int_mask_addr);
4040
4041
4042
4043
4044 if (nig_mask) {
4045 REG_WR(bp, nig_int_mask_addr, 0);
4046
4047 bnx2x_link_attn(bp);
4048 }
4049
4050
4051 }
4052 if (asserted & ATTN_SW_TIMER_4_FUNC)
4053 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4054
4055 if (asserted & GPIO_2_FUNC)
4056 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4057
4058 if (asserted & GPIO_3_FUNC)
4059 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4060
4061 if (asserted & GPIO_4_FUNC)
4062 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4063
4064 if (port == 0) {
4065 if (asserted & ATTN_GENERAL_ATTN_1) {
4066 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4068 }
4069 if (asserted & ATTN_GENERAL_ATTN_2) {
4070 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4071 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4072 }
4073 if (asserted & ATTN_GENERAL_ATTN_3) {
4074 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4075 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4076 }
4077 } else {
4078 if (asserted & ATTN_GENERAL_ATTN_4) {
4079 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4080 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4081 }
4082 if (asserted & ATTN_GENERAL_ATTN_5) {
4083 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4084 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4085 }
4086 if (asserted & ATTN_GENERAL_ATTN_6) {
4087 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4088 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4089 }
4090 }
4091
4092 }
4093
4094 if (bp->common.int_block == INT_BLOCK_HC)
4095 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4096 COMMAND_REG_ATTN_BITS_SET);
4097 else
4098 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4099
4100 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4101 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4102 REG_WR(bp, reg_addr, asserted);
4103
4104
4105 if (asserted & ATTN_NIG_FOR_FUNC) {
4106
4107
4108
4109 if (bp->common.int_block != INT_BLOCK_HC) {
4110 u32 cnt = 0, igu_acked;
4111 do {
4112 igu_acked = REG_RD(bp,
4113 IGU_REG_ATTENTION_ACK_BITS);
4114 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4115 (++cnt < MAX_IGU_ATTN_ACK_TO));
4116 if (!igu_acked)
4117 DP(NETIF_MSG_HW,
4118 "Failed to verify IGU ack on time\n");
4119 barrier();
4120 }
4121 REG_WR(bp, nig_int_mask_addr, nig_mask);
4122 bnx2x_release_phy_lock(bp);
4123 }
4124}
4125
4126static void bnx2x_fan_failure(struct bnx2x *bp)
4127{
4128 int port = BP_PORT(bp);
4129 u32 ext_phy_config;
4130
4131 ext_phy_config =
4132 SHMEM_RD(bp,
4133 dev_info.port_hw_config[port].external_phy_config);
4134
4135 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4136 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4137 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4138 ext_phy_config);
4139
4140
4141 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4142 "Please contact OEM Support for assistance\n");
4143
4144
4145
4146
4147
4148 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4149}
4150
4151static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4152{
4153 int port = BP_PORT(bp);
4154 int reg_offset;
4155 u32 val;
4156
4157 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4158 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4159
4160 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4161
4162 val = REG_RD(bp, reg_offset);
4163 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4164 REG_WR(bp, reg_offset, val);
4165
4166 BNX2X_ERR("SPIO5 hw attention\n");
4167
4168
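		/* Fan failure attention */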
4169 bnx2x_hw_reset_phy(&bp->link_params);
4170 bnx2x_fan_failure(bp);
4171 }
4172
4173 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4174 bnx2x_acquire_phy_lock(bp);
4175 bnx2x_handle_module_detect_int(&bp->link_params);
4176 bnx2x_release_phy_lock(bp);
4177 }
4178
4179 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4180
4181 val = REG_RD(bp, reg_offset);
4182 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4183 REG_WR(bp, reg_offset, val);
4184
4185 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4186 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4187 bnx2x_panic();
4188 }
4189}
4190
4191static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4192{
4193 u32 val;
4194
4195 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4196
4197 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4198 BNX2X_ERR("DB hw attention 0x%x\n", val);
4199
4200 if (val & 0x2)
4201 BNX2X_ERR("FATAL error from DORQ\n");
4202 }
4203
4204 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4205
4206 int port = BP_PORT(bp);
4207 int reg_offset;
4208
4209 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4210 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4211
4212 val = REG_RD(bp, reg_offset);
4213 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4214 REG_WR(bp, reg_offset, val);
4215
4216 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4217 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4218 bnx2x_panic();
4219 }
4220}
4221
4222static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4223{
4224 u32 val;
4225
4226 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4227
4228 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4229 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4230
4231 if (val & 0x2)
4232 BNX2X_ERR("FATAL error from CFC\n");
4233 }
4234
4235 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4236 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4237 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4238
4239 if (val & 0x18000)
4240 BNX2X_ERR("FATAL error from PXP\n");
4241
4242 if (!CHIP_IS_E1x(bp)) {
4243 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4244 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4245 }
4246 }
4247
4248 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4249
4250 int port = BP_PORT(bp);
4251 int reg_offset;
4252
4253 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4254 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4255
4256 val = REG_RD(bp, reg_offset);
4257 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4258 REG_WR(bp, reg_offset, val);
4259
4260 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4261 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4262 bnx2x_panic();
4263 }
4264}
4265
4266static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4267{
4268 u32 val;
4269
4270 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4271
4272 if (attn & BNX2X_PMF_LINK_ASSERT) {
4273 int func = BP_FUNC(bp);
4274
4275 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4276 bnx2x_read_mf_cfg(bp);
4277 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4278 func_mf_config[BP_ABS_FUNC(bp)].config);
4279 val = SHMEM_RD(bp,
4280 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4281
4282 if (val & (DRV_STATUS_DCC_EVENT_MASK |
4283 DRV_STATUS_OEM_EVENT_MASK))
4284 bnx2x_oem_event(bp,
4285 (val & (DRV_STATUS_DCC_EVENT_MASK |
4286 DRV_STATUS_OEM_EVENT_MASK)));
4287
4288 if (val & DRV_STATUS_SET_MF_BW)
4289 bnx2x_set_mf_bw(bp);
4290
4291 if (val & DRV_STATUS_DRV_INFO_REQ)
4292 bnx2x_handle_drv_info_req(bp);
4293
4294 if (val & DRV_STATUS_VF_DISABLED)
4295 bnx2x_schedule_iov_task(bp,
4296 BNX2X_IOV_HANDLE_FLR);
4297
4298 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4299 bnx2x_pmf_update(bp);
4300
4301 if (bp->port.pmf &&
4302 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4303 bp->dcbx_enabled > 0)
4304				/* start the DCBX state machine */
4305 bnx2x_dcbx_set_params(bp,
4306 BNX2X_DCBX_STATE_NEG_RECEIVED);
4307 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4308 bnx2x_handle_afex_cmd(bp,
4309 val & DRV_STATUS_AFEX_EVENT_MASK);
4310 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4311 bnx2x_handle_eee_event(bp);
4312
4313 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4314 bnx2x_schedule_sp_rtnl(bp,
4315 BNX2X_SP_RTNL_UPDATE_SVID, 0);
4316
4317 if (bp->link_vars.periodic_flags &
4318 PERIODIC_FLAGS_LINK_EVENT) {
4319
4320 bnx2x_acquire_phy_lock(bp);
4321 bp->link_vars.periodic_flags &=
4322 ~PERIODIC_FLAGS_LINK_EVENT;
4323 bnx2x_release_phy_lock(bp);
4324 if (IS_MF(bp))
4325 bnx2x_link_sync_notify(bp);
4326 bnx2x_link_report(bp);
4327 }
4328
4329
4330			/* always refresh link status after handling the events above */
4331 bnx2x__link_status_update(bp);
4332 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4333
4334 BNX2X_ERR("MC assert!\n");
4335 bnx2x_mc_assert(bp);
4336 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4337 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4338 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4339 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4340 bnx2x_panic();
4341
4342 } else if (attn & BNX2X_MCP_ASSERT) {
4343
4344 BNX2X_ERR("MCP assert!\n");
4345 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4346 bnx2x_fw_dump(bp);
4347
4348 } else
4349 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4350 }
4351
4352 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4353 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4354 if (attn & BNX2X_GRC_TIMEOUT) {
4355 val = CHIP_IS_E1(bp) ? 0 :
4356 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4357 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4358 }
4359 if (attn & BNX2X_GRC_RSV) {
4360 val = CHIP_IS_E1(bp) ? 0 :
4361 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4362 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4363 }
4364 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4365 }
4366}
4367
4368/*
4369 * The recovery/load-status register (BNX2X_RECOVERY_GLOB_REG) is shared by
4370 * all functions.  Its layout, per the definitions below:
4371 *	bits 0-7   - per-PF "loaded" mask for path/engine 0
4372 *	bits 8-15  - per-PF "loaded" mask for path/engine 1
4373 *	bit  16    - RESET_IN_PROGRESS for path 0
4374 *	bit  17    - RESET_IN_PROGRESS for path 1
4375 *	bit  18    - GLOBAL_RESET
4376 */
4377
4382#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4383
4384#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4385#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4386#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4387#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4388#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4389#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4390#define BNX2X_GLOBAL_RESET_BIT 0x00040000
4391
4392
4393/*
4394 * bnx2x_set_reset_global - set the GLOBAL_RESET bit,
4395 * protected by the RECOVERY_REG HW lock.
4396 */
4397void bnx2x_set_reset_global(struct bnx2x *bp)
4398{
4399 u32 val;
4400 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4401 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4402 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4403 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4404}
4405
4406
4407
4408
4409
4410
4411static void bnx2x_clear_reset_global(struct bnx2x *bp)
4412{
4413 u32 val;
4414 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4415 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4416 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4417 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4418}
4419
4420
4421
4422
4423
4424
4425static bool bnx2x_reset_is_global(struct bnx2x *bp)
4426{
4427 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4428
4429 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4430 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4431}
4432
4433
4434
4435
4436
4437
4438static void bnx2x_set_reset_done(struct bnx2x *bp)
4439{
4440 u32 val;
4441 u32 bit = BP_PATH(bp) ?
4442 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4443 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4444 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4445
4446
4447 val &= ~bit;
4448 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4449
4450 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4451}
4452
4453
4454
4455
4456
4457
4458void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4459{
4460 u32 val;
4461 u32 bit = BP_PATH(bp) ?
4462 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4463 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4464 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4465
4466
4467 val |= bit;
4468 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4469 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4470}
4471
4472
4473
4474
4475
4476bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4477{
4478 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4479 u32 bit = engine ?
4480 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4481
4482
4483 return (val & bit) ? false : true;
4484}
4485
4486
4487/*
4488 * bnx2x_set_pf_load - mark this PF as loaded in the per-path
4489 * load mask of the recovery register.
4490 */
4491void bnx2x_set_pf_load(struct bnx2x *bp)
4492{
4493 u32 val1, val;
4494 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4495 BNX2X_PATH0_LOAD_CNT_MASK;
4496 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4497 BNX2X_PATH0_LOAD_CNT_SHIFT;
4498
4499 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4500 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4501
4502 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4503
4504
4505 val1 = (val & mask) >> shift;
4506
4507
4508 val1 |= (1 << bp->pf_num);
4509
4510
4511 val &= ~mask;
4512
4513
4514 val |= ((val1 << shift) & mask);
4515
4516 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4517 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4518}
4519
4520
4521/*
4522 * bnx2x_clear_pf_load - clear this PF's bit in the per-path load mask.
4523 *
4524 * Returns true if other functions on this path are still marked as
4525 * loaded, false if this was the last one.
4526 */
4529bool bnx2x_clear_pf_load(struct bnx2x *bp)
4530{
4531 u32 val1, val;
4532 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4533 BNX2X_PATH0_LOAD_CNT_MASK;
4534 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4535 BNX2X_PATH0_LOAD_CNT_SHIFT;
4536
4537 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4538 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4539 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4540
4541
4542 val1 = (val & mask) >> shift;
4543
4544
4545 val1 &= ~(1 << bp->pf_num);
4546
4547
4548 val &= ~mask;
4549
4550
4551 val |= ((val1 << shift) & mask);
4552
4553 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4554 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4555 return val1 != 0;
4556}
4557
4558
4559/*
4560 * bnx2x_get_load_status - return true if any PF is marked as loaded
4561 * on the given engine (path).
4562 */
4563static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4564{
4565 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4566 BNX2X_PATH0_LOAD_CNT_MASK);
4567 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4568 BNX2X_PATH0_LOAD_CNT_SHIFT);
4569 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4570
4571 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4572
4573 val = (val & mask) >> shift;
4574
4575 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4576 engine, val);
4577
4578 return val != 0;
4579}
4580
4581static void _print_parity(struct bnx2x *bp, u32 reg)
4582{
4583 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4584}
4585
4586static void _print_next_block(int idx, const char *blk)
4587{
4588 pr_cont("%s%s", idx ? ", " : "", blk);
4589}
4590
4591static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4592 int *par_num, bool print)
4593{
4594 u32 cur_bit;
4595 bool res;
4596 int i;
4597
4598 res = false;
4599
4600 for (i = 0; sig; i++) {
4601 cur_bit = (0x1UL << i);
4602 if (sig & cur_bit) {
4603 res |= true;
4604
4605 if (print) {
4606 switch (cur_bit) {
4607 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4608 _print_next_block((*par_num)++, "BRB");
4609 _print_parity(bp,
4610 BRB1_REG_BRB1_PRTY_STS);
4611 break;
4612 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4613 _print_next_block((*par_num)++,
4614 "PARSER");
4615 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4616 break;
4617 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4618 _print_next_block((*par_num)++, "TSDM");
4619 _print_parity(bp,
4620 TSDM_REG_TSDM_PRTY_STS);
4621 break;
4622 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4623 _print_next_block((*par_num)++,
4624 "SEARCHER");
4625 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4626 break;
4627 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4628 _print_next_block((*par_num)++, "TCM");
4629 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4630 break;
4631 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4632 _print_next_block((*par_num)++,
4633 "TSEMI");
4634 _print_parity(bp,
4635 TSEM_REG_TSEM_PRTY_STS_0);
4636 _print_parity(bp,
4637 TSEM_REG_TSEM_PRTY_STS_1);
4638 break;
4639 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4640 _print_next_block((*par_num)++, "XPB");
4641 _print_parity(bp, GRCBASE_XPB +
4642 PB_REG_PB_PRTY_STS);
4643 break;
4644 }
4645 }
4646
4647
4648 sig &= ~cur_bit;
4649 }
4650 }
4651
4652 return res;
4653}
4654
4655static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4656 int *par_num, bool *global,
4657 bool print)
4658{
4659 u32 cur_bit;
4660 bool res;
4661 int i;
4662
4663 res = false;
4664
4665 for (i = 0; sig; i++) {
4666 cur_bit = (0x1UL << i);
4667 if (sig & cur_bit) {
4668 res |= true;
4669 switch (cur_bit) {
4670 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4671 if (print) {
4672 _print_next_block((*par_num)++, "PBF");
4673 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4674 }
4675 break;
4676 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4677 if (print) {
4678 _print_next_block((*par_num)++, "QM");
4679 _print_parity(bp, QM_REG_QM_PRTY_STS);
4680 }
4681 break;
4682 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4683 if (print) {
4684 _print_next_block((*par_num)++, "TM");
4685 _print_parity(bp, TM_REG_TM_PRTY_STS);
4686 }
4687 break;
4688 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4689 if (print) {
4690 _print_next_block((*par_num)++, "XSDM");
4691 _print_parity(bp,
4692 XSDM_REG_XSDM_PRTY_STS);
4693 }
4694 break;
4695 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4696 if (print) {
4697 _print_next_block((*par_num)++, "XCM");
4698 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4699 }
4700 break;
4701 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4702 if (print) {
4703 _print_next_block((*par_num)++,
4704 "XSEMI");
4705 _print_parity(bp,
4706 XSEM_REG_XSEM_PRTY_STS_0);
4707 _print_parity(bp,
4708 XSEM_REG_XSEM_PRTY_STS_1);
4709 }
4710 break;
4711 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4712 if (print) {
4713 _print_next_block((*par_num)++,
4714 "DOORBELLQ");
4715 _print_parity(bp,
4716 DORQ_REG_DORQ_PRTY_STS);
4717 }
4718 break;
4719 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4720 if (print) {
4721 _print_next_block((*par_num)++, "NIG");
4722 if (CHIP_IS_E1x(bp)) {
4723 _print_parity(bp,
4724 NIG_REG_NIG_PRTY_STS);
4725 } else {
4726 _print_parity(bp,
4727 NIG_REG_NIG_PRTY_STS_0);
4728 _print_parity(bp,
4729 NIG_REG_NIG_PRTY_STS_1);
4730 }
4731 }
4732 break;
4733 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4734 if (print)
4735 _print_next_block((*par_num)++,
4736 "VAUX PCI CORE");
4737 *global = true;
4738 break;
4739 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4740 if (print) {
4741 _print_next_block((*par_num)++,
4742 "DEBUG");
4743 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4744 }
4745 break;
4746 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4747 if (print) {
4748 _print_next_block((*par_num)++, "USDM");
4749 _print_parity(bp,
4750 USDM_REG_USDM_PRTY_STS);
4751 }
4752 break;
4753 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4754 if (print) {
4755 _print_next_block((*par_num)++, "UCM");
4756 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4757 }
4758 break;
4759 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4760 if (print) {
4761 _print_next_block((*par_num)++,
4762 "USEMI");
4763 _print_parity(bp,
4764 USEM_REG_USEM_PRTY_STS_0);
4765 _print_parity(bp,
4766 USEM_REG_USEM_PRTY_STS_1);
4767 }
4768 break;
4769 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4770 if (print) {
4771 _print_next_block((*par_num)++, "UPB");
4772 _print_parity(bp, GRCBASE_UPB +
4773 PB_REG_PB_PRTY_STS);
4774 }
4775 break;
4776 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4777 if (print) {
4778 _print_next_block((*par_num)++, "CSDM");
4779 _print_parity(bp,
4780 CSDM_REG_CSDM_PRTY_STS);
4781 }
4782 break;
4783 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4784 if (print) {
4785 _print_next_block((*par_num)++, "CCM");
4786 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4787 }
4788 break;
4789 }
4790
4791
4792 sig &= ~cur_bit;
4793 }
4794 }
4795
4796 return res;
4797}
4798
4799static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4800 int *par_num, bool print)
4801{
4802 u32 cur_bit;
4803 bool res;
4804 int i;
4805
4806 res = false;
4807
4808 for (i = 0; sig; i++) {
4809 cur_bit = (0x1UL << i);
4810 if (sig & cur_bit) {
4811 res = true;
4812 if (print) {
4813 switch (cur_bit) {
4814 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4815 _print_next_block((*par_num)++,
4816 "CSEMI");
4817 _print_parity(bp,
4818 CSEM_REG_CSEM_PRTY_STS_0);
4819 _print_parity(bp,
4820 CSEM_REG_CSEM_PRTY_STS_1);
4821 break;
4822 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4823 _print_next_block((*par_num)++, "PXP");
4824 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4825 _print_parity(bp,
4826 PXP2_REG_PXP2_PRTY_STS_0);
4827 _print_parity(bp,
4828 PXP2_REG_PXP2_PRTY_STS_1);
4829 break;
4830 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4831 _print_next_block((*par_num)++,
4832 "PXPPCICLOCKCLIENT");
4833 break;
4834 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4835 _print_next_block((*par_num)++, "CFC");
4836 _print_parity(bp,
4837 CFC_REG_CFC_PRTY_STS);
4838 break;
4839 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4840 _print_next_block((*par_num)++, "CDU");
4841 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4842 break;
4843 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4844 _print_next_block((*par_num)++, "DMAE");
4845 _print_parity(bp,
4846 DMAE_REG_DMAE_PRTY_STS);
4847 break;
4848 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4849 _print_next_block((*par_num)++, "IGU");
4850 if (CHIP_IS_E1x(bp))
4851 _print_parity(bp,
4852 HC_REG_HC_PRTY_STS);
4853 else
4854 _print_parity(bp,
4855 IGU_REG_IGU_PRTY_STS);
4856 break;
4857 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4858 _print_next_block((*par_num)++, "MISC");
4859 _print_parity(bp,
4860 MISC_REG_MISC_PRTY_STS);
4861 break;
4862 }
4863 }
4864
4865
4866 sig &= ~cur_bit;
4867 }
4868 }
4869
4870 return res;
4871}
4872
4873static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4874 int *par_num, bool *global,
4875 bool print)
4876{
4877 bool res = false;
4878 u32 cur_bit;
4879 int i;
4880
4881 for (i = 0; sig; i++) {
4882 cur_bit = (0x1UL << i);
4883 if (sig & cur_bit) {
4884 switch (cur_bit) {
4885 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4886 if (print)
4887 _print_next_block((*par_num)++,
4888 "MCP ROM");
4889 *global = true;
4890 res = true;
4891 break;
4892 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4893 if (print)
4894 _print_next_block((*par_num)++,
4895 "MCP UMP RX");
4896 *global = true;
4897 res = true;
4898 break;
4899 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4900 if (print)
4901 _print_next_block((*par_num)++,
4902 "MCP UMP TX");
4903 *global = true;
4904 res = true;
4905 break;
4906 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4907 (*par_num)++;
4908
4909 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4910 1UL << 10);
4911 break;
4912 }
4913
4914
4915 sig &= ~cur_bit;
4916 }
4917 }
4918
4919 return res;
4920}
4921
4922static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4923 int *par_num, bool print)
4924{
4925 u32 cur_bit;
4926 bool res;
4927 int i;
4928
4929 res = false;
4930
4931 for (i = 0; sig; i++) {
4932 cur_bit = (0x1UL << i);
4933 if (sig & cur_bit) {
4934 res = true;
4935 if (print) {
4936 switch (cur_bit) {
4937 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4938 _print_next_block((*par_num)++,
4939 "PGLUE_B");
4940 _print_parity(bp,
4941 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4942 break;
4943 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4944 _print_next_block((*par_num)++, "ATC");
4945 _print_parity(bp,
4946 ATC_REG_ATC_PRTY_STS);
4947 break;
4948 }
4949 }
4950
4951 sig &= ~cur_bit;
4952 }
4953 }
4954
4955 return res;
4956}
4957
4958static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4959 u32 *sig)
4960{
4961 bool res = false;
4962
4963 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4964 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4965 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4966 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4967 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4968 int par_num = 0;
4969
4970 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4971 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4972 sig[0] & HW_PRTY_ASSERT_SET_0,
4973 sig[1] & HW_PRTY_ASSERT_SET_1,
4974 sig[2] & HW_PRTY_ASSERT_SET_2,
4975 sig[3] & HW_PRTY_ASSERT_SET_3,
4976 sig[4] & HW_PRTY_ASSERT_SET_4);
4977 if (print) {
4978 if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4979 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4980 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4981 (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4982 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4983 netdev_err(bp->dev,
4984 "Parity errors detected in blocks: ");
4985 } else {
4986 print = false;
4987 }
4988 }
4989 res |= bnx2x_check_blocks_with_parity0(bp,
4990 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4991 res |= bnx2x_check_blocks_with_parity1(bp,
4992 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4993 res |= bnx2x_check_blocks_with_parity2(bp,
4994 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4995 res |= bnx2x_check_blocks_with_parity3(bp,
4996 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4997 res |= bnx2x_check_blocks_with_parity4(bp,
4998 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4999
5000 if (print)
5001 pr_cont("\n");
5002 }
5003
5004 return res;
5005}
5006
5007
5008
5009
5010
5011
5012
5013
5014bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5015{
5016 struct attn_route attn = { {0} };
5017 int port = BP_PORT(bp);
5018
5019 attn.sig[0] = REG_RD(bp,
5020 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5021 port*4);
5022 attn.sig[1] = REG_RD(bp,
5023 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5024 port*4);
5025 attn.sig[2] = REG_RD(bp,
5026 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5027 port*4);
5028 attn.sig[3] = REG_RD(bp,
5029 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5030 port*4);
5031
5032
5033
5034 attn.sig[3] &= ((REG_RD(bp,
5035 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5036 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5037 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5038 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5039
5040 if (!CHIP_IS_E1x(bp))
5041 attn.sig[4] = REG_RD(bp,
5042 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5043 port*4);
5044
5045 return bnx2x_parity_attn(bp, global, print, attn.sig);
5046}
5047
5048static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5049{
5050 u32 val;
5051 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5052
5053 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5054 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5055 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5056 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5057 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5058 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5059 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5060 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5061 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5062 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5063 if (val &
5064 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5065 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5066 if (val &
5067 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5068 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5069 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5070 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5071 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5072 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5073 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5074 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5075 }
5076 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5077 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5078 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5079 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5080 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5081 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5082 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5083 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5084 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5085 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5086 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5087 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5088 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5089 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5090 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5091 }
5092
5093 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5094 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5095 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5096 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5097 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5098 }
5099}
5100
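/* Handle all newly deasserted attention bits: check for parity errors first
 * (and kick off recovery if any are found), then dispatch each attention
 * group to its per-register handler and finally unmask the handled bits.
 */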
5101static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5102{
5103 struct attn_route attn, *group_mask;
5104 int port = BP_PORT(bp);
5105 int index;
5106 u32 reg_addr;
5107 u32 val;
5108 u32 aeu_mask;
5109 bool global = false;
5110
5111	/* Need to take the attention lock (ALR): the MCP or the other
5112	 * port might try to handle this event at the same time. */
5113 bnx2x_acquire_alr(bp);
5114
5115 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5116#ifndef BNX2X_STOP_ON_ERROR
5117 bp->recovery_state = BNX2X_RECOVERY_INIT;
5118 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5119			/* Disable HW interrupts */
5120			bnx2x_int_disable(bp);
5121
5122			/* In case of parity errors don't handle attentions so
5123			 * that other functions can also observe the error. */
5124#else
5125 bnx2x_panic();
5126#endif
5127 bnx2x_release_alr(bp);
5128 return;
5129 }
5130
5131 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5132 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5133 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5134 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5135 if (!CHIP_IS_E1x(bp))
5136 attn.sig[4] =
5137 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5138 else
5139 attn.sig[4] = 0;
5140
5141 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5142 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5143
5144 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5145 if (deasserted & (1 << index)) {
5146 group_mask = &bp->attn_group[index];
5147
5148 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5149 index,
5150 group_mask->sig[0], group_mask->sig[1],
5151 group_mask->sig[2], group_mask->sig[3],
5152 group_mask->sig[4]);
5153
5154 bnx2x_attn_int_deasserted4(bp,
5155 attn.sig[4] & group_mask->sig[4]);
5156 bnx2x_attn_int_deasserted3(bp,
5157 attn.sig[3] & group_mask->sig[3]);
5158 bnx2x_attn_int_deasserted1(bp,
5159 attn.sig[1] & group_mask->sig[1]);
5160 bnx2x_attn_int_deasserted2(bp,
5161 attn.sig[2] & group_mask->sig[2]);
5162 bnx2x_attn_int_deasserted0(bp,
5163 attn.sig[0] & group_mask->sig[0]);
5164 }
5165 }
5166
5167 bnx2x_release_alr(bp);
5168
5169 if (bp->common.int_block == INT_BLOCK_HC)
5170 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5171 COMMAND_REG_ATTN_BITS_CLR);
5172 else
5173 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5174
5175 val = ~deasserted;
5176 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5177 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5178 REG_WR(bp, reg_addr, val);
5179
5180 if (~bp->attn_state & deasserted)
5181 BNX2X_ERR("IGU ERROR\n");
5182
5183 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5184 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5185
5186 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5187 aeu_mask = REG_RD(bp, reg_addr);
5188
5189 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
5190 aeu_mask, deasserted);
5191 aeu_mask |= (deasserted & 0x3ff);
5192 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5193
5194 REG_WR(bp, reg_addr, aeu_mask);
5195 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5196
5197 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5198 bp->attn_state &= ~deasserted;
5199 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5200}
5201
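/* Compare the attention bits reported in the default status block with the
 * driver's attention state and handle newly asserted/deasserted bits.
 */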
5202static void bnx2x_attn_int(struct bnx2x *bp)
5203{
5204
5205 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5206 attn_bits);
5207 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5208 attn_bits_ack);
5209 u32 attn_state = bp->attn_state;
5210
5211	/* look for changed bits */
5212 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
5213 u32 deasserted = ~attn_bits & attn_ack & attn_state;
5214
5215 DP(NETIF_MSG_HW,
5216 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
5217 attn_bits, attn_ack, asserted, deasserted);
5218
5219 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5220 BNX2X_ERR("BAD attention state\n");
5221
5222
5223 if (asserted)
5224 bnx2x_attn_int_asserted(bp, asserted);
5225
5226 if (deasserted)
5227 bnx2x_attn_int_deasserted(bp, deasserted);
5228}
5229
5230void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5231 u16 index, u8 op, u8 update)
5232{
5233 u32 igu_addr = bp->igu_base_addr;
5234 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5235 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5236 igu_addr);
5237}
5238
5239static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5240{
5241
5242 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5243}
5244
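/* Returns 0 if the CFC delete completion belongs to a CNIC CID and was
 * consumed here, 1 if the caller should handle it as a regular ETH CID.
 */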
5245static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5246 union event_ring_elem *elem)
5247{
5248 u8 err = elem->message.error;
5249
5250 if (!bp->cnic_eth_dev.starting_cid ||
5251 (cid < bp->cnic_eth_dev.starting_cid &&
5252 cid != bp->cnic_eth_dev.iscsi_l2_cid))
5253 return 1;
5254
5255 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5256
5257 if (unlikely(err)) {
5258
5259 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5260 cid);
5261 bnx2x_panic_dump(bp, false);
5262 }
5263 bnx2x_cnic_cfc_comp(bp, cid, err);
5264 return 0;
5265}
5266
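/* Complete the pending multicast ramrod and, if more multicast commands
 * are queued, send the next one.
 */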
5267static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5268{
5269 struct bnx2x_mcast_ramrod_params rparam;
5270 int rc;
5271
5272 memset(&rparam, 0, sizeof(rparam));
5273
5274 rparam.mcast_obj = &bp->mcast_obj;
5275
5276 netif_addr_lock_bh(bp->dev);
5277
5278
5279 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5280
5281
5282 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5283 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5284 if (rc < 0)
5285 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5286 rc);
5287 }
5288
5289 netif_addr_unlock_bh(bp->dev);
5290}
5291
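/* Complete a classification (MAC/VLAN/multicast) ramrod: pick the vlan_mac
 * object that matches the echo field and let it schedule any follow-up
 * commands that are still pending.
 */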
5292static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5293 union event_ring_elem *elem)
5294{
5295 unsigned long ramrod_flags = 0;
5296 int rc = 0;
5297 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5298 u32 cid = echo & BNX2X_SWCID_MASK;
5299 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5300
5301
5302 __set_bit(RAMROD_CONT, &ramrod_flags);
5303
5304 switch (echo >> BNX2X_SWCID_SHIFT) {
5305 case BNX2X_FILTER_MAC_PENDING:
5306 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5307 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5308 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5309 else
5310 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5311
5312 break;
5313 case BNX2X_FILTER_VLAN_PENDING:
5314 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5315 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5316 break;
5317 case BNX2X_FILTER_MCAST_PENDING:
5318 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5319
5320
5321
5322 bnx2x_handle_mcast_eqe(bp);
5323 return;
5324 default:
5325 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5326 return;
5327 }
5328
5329 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5330
5331 if (rc < 0)
5332 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5333 else if (rc > 0)
5334 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5335}
5336
5337static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5338
5339static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5340{
5341 netif_addr_lock_bh(bp->dev);
5342
5343 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5344
5345
5346 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5347 bnx2x_set_storm_rx_mode(bp);
5348 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5349 &bp->sp_state))
5350 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5351 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5352 &bp->sp_state))
5353 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5354
5355 netif_addr_unlock_bh(bp->dev);
5356}
5357
5358static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5359 union event_ring_elem *elem)
5360{
5361 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5362 DP(BNX2X_MSG_SP,
5363 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5364 elem->message.data.vif_list_event.func_bit_map);
5365 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5366 elem->message.data.vif_list_event.func_bit_map);
5367 } else if (elem->message.data.vif_list_event.echo ==
5368 VIF_LIST_RULE_SET) {
5369 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5370 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5371 }
5372}
5373
5374
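/* AFEX: after a function update ramrod completes, reconfigure silent VLAN
 * removal on all ETH queues (and on the FCoE queue when CNIC is enabled)
 * according to the current AFEX VLAN mode and default VLAN tag.
 */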
5375static void bnx2x_after_function_update(struct bnx2x *bp)
5376{
5377 int q, rc;
5378 struct bnx2x_fastpath *fp;
5379 struct bnx2x_queue_state_params queue_params = {NULL};
5380 struct bnx2x_queue_update_params *q_update_params =
5381 &queue_params.params.update;
5382
5383
5384 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5385
5386
5387 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5388 &q_update_params->update_flags);
5389 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5390 &q_update_params->update_flags);
5391 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5392
5393
5394 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5395 q_update_params->silent_removal_value = 0;
5396 q_update_params->silent_removal_mask = 0;
5397 } else {
5398 q_update_params->silent_removal_value =
5399 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5400 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5401 }
5402
5403 for_each_eth_queue(bp, q) {
5404
5405 fp = &bp->fp[q];
5406 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5407
5408
5409 rc = bnx2x_queue_state_change(bp, &queue_params);
5410 if (rc < 0)
5411 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5412 q);
5413 }
5414
5415 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5416 fp = &bp->fp[FCOE_IDX(bp)];
5417 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5418
5419
5420 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5421
5422
5423 smp_mb__before_atomic();
5424 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5425 smp_mb__after_atomic();
5426
5427
5428 rc = bnx2x_queue_state_change(bp, &queue_params);
5429 if (rc < 0)
5430 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5431 q);
5432 } else {
5433
5434 bnx2x_link_report(bp);
5435 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5436 }
5437}
5438
5439static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5440 struct bnx2x *bp, u32 cid)
5441{
5442 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5443
5444 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5445 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5446 else
5447 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5448}
5449
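/* Process the slow-path event queue (EQ): complete ramrods according to
 * their opcode and the current driver state, then advance the consumer and
 * producer and return the processed entries to the EQ credit count.
 */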
5450static void bnx2x_eq_int(struct bnx2x *bp)
5451{
5452 u16 hw_cons, sw_cons, sw_prod;
5453 union event_ring_elem *elem;
5454 u8 echo;
5455 u32 cid;
5456 u8 opcode;
5457 int rc, spqe_cnt = 0;
5458 struct bnx2x_queue_sp_obj *q_obj;
5459 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5460 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5461
5462 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5463
5464
5465	/* The last element of each EQ page is a next-page pointer that the
5466	 * hardware consumer counts as well; skip over it so that hw_cons
5467	 * stays aligned with the software consumer.
5468	 */
5469	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5470		hw_cons++;
5471
5472	/* This function is never run in parallel with itself for a given
5473	 * bp, so no paired read barrier is needed before reading the
5474	 * software indices below.
5475	 */
5476 sw_cons = bp->eq_cons;
5477 sw_prod = bp->eq_prod;
5478
5479 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5480 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5481
5482 for (; sw_cons != hw_cons;
5483 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5484
5485 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5486
5487 rc = bnx2x_iov_eq_sp_event(bp, elem);
5488 if (!rc) {
5489 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5490 rc);
5491 goto next_spqe;
5492 }
5493
5494 opcode = elem->message.opcode;
5495
5496
5497 switch (opcode) {
5498 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5499 bnx2x_vf_mbx_schedule(bp,
5500 &elem->message.data.vf_pf_event);
5501 continue;
5502
5503 case EVENT_RING_OPCODE_STAT_QUERY:
5504 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5505 "got statistics comp event %d\n",
5506 bp->stats_comp++);
5507
5508 goto next_spqe;
5509
5510 case EVENT_RING_OPCODE_CFC_DEL:
5511
5512			/* Connection (CFC) delete completion: dispatch by CID -
5513			 * CNIC CIDs are handled by bnx2x_cnic_handle_cfc_del(),
5514			 * ETH CIDs complete the per-queue state machine.
5515			 */
5518 cid = SW_CID(elem->message.data.cfc_del_event.cid);
5519
5520 DP(BNX2X_MSG_SP,
5521 "got delete ramrod for MULTI[%d]\n", cid);
5522
5523 if (CNIC_LOADED(bp) &&
5524 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5525 goto next_spqe;
5526
5527 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5528
5529 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5530 break;
5531
5532 goto next_spqe;
5533
5534 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5535 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5536 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5537 if (f_obj->complete_cmd(bp, f_obj,
5538 BNX2X_F_CMD_TX_STOP))
5539 break;
5540 goto next_spqe;
5541
5542 case EVENT_RING_OPCODE_START_TRAFFIC:
5543 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5544 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5545 if (f_obj->complete_cmd(bp, f_obj,
5546 BNX2X_F_CMD_TX_START))
5547 break;
5548 goto next_spqe;
5549
5550 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5551 echo = elem->message.data.function_update_event.echo;
5552 if (echo == SWITCH_UPDATE) {
5553 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5554 "got FUNC_SWITCH_UPDATE ramrod\n");
5555 if (f_obj->complete_cmd(
5556 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5557 break;
5558
5559 } else {
5560 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5561
5562 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5563 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5564 f_obj->complete_cmd(bp, f_obj,
5565 BNX2X_F_CMD_AFEX_UPDATE);
5566
5567
5568
5569
5570
5571 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5572 }
5573
5574 goto next_spqe;
5575
5576 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5577 f_obj->complete_cmd(bp, f_obj,
5578 BNX2X_F_CMD_AFEX_VIFLISTS);
5579 bnx2x_after_afex_vif_lists(bp, elem);
5580 goto next_spqe;
5581 case EVENT_RING_OPCODE_FUNCTION_START:
5582 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5583 "got FUNC_START ramrod\n");
5584 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5585 break;
5586
5587 goto next_spqe;
5588
5589 case EVENT_RING_OPCODE_FUNCTION_STOP:
5590 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5591 "got FUNC_STOP ramrod\n");
5592 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5593 break;
5594
5595 goto next_spqe;
5596
5597 case EVENT_RING_OPCODE_SET_TIMESYNC:
5598 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5599 "got set_timesync ramrod completion\n");
5600 if (f_obj->complete_cmd(bp, f_obj,
5601 BNX2X_F_CMD_SET_TIMESYNC))
5602 break;
5603 goto next_spqe;
5604 }
5605
5606 switch (opcode | bp->state) {
5607 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5608 BNX2X_STATE_OPEN):
5609 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5610 BNX2X_STATE_OPENING_WAIT4_PORT):
5611 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5612 BNX2X_STATE_CLOSING_WAIT4_HALT):
5613 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5614 SW_CID(elem->message.data.eth_event.echo));
5615 rss_raw->clear_pending(rss_raw);
5616 break;
5617
5618 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5619 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5620 case (EVENT_RING_OPCODE_SET_MAC |
5621 BNX2X_STATE_CLOSING_WAIT4_HALT):
5622 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5623 BNX2X_STATE_OPEN):
5624 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5625 BNX2X_STATE_DIAG):
5626 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5627 BNX2X_STATE_CLOSING_WAIT4_HALT):
5628 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5629 bnx2x_handle_classification_eqe(bp, elem);
5630 break;
5631
5632 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5633 BNX2X_STATE_OPEN):
5634 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5635 BNX2X_STATE_DIAG):
5636 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5637 BNX2X_STATE_CLOSING_WAIT4_HALT):
5638 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5639 bnx2x_handle_mcast_eqe(bp);
5640 break;
5641
5642 case (EVENT_RING_OPCODE_FILTERS_RULES |
5643 BNX2X_STATE_OPEN):
5644 case (EVENT_RING_OPCODE_FILTERS_RULES |
5645 BNX2X_STATE_DIAG):
5646 case (EVENT_RING_OPCODE_FILTERS_RULES |
5647 BNX2X_STATE_CLOSING_WAIT4_HALT):
5648 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5649 bnx2x_handle_rx_mode_eqe(bp);
5650 break;
5651 default:
5652
5653 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5654 elem->message.opcode, bp->state);
5655 }
5656next_spqe:
5657 spqe_cnt++;
5658 }
5659
5660 smp_mb__before_atomic();
5661 atomic_add(spqe_cnt, &bp->eq_spq_left);
5662
5663 bp->eq_cons = sw_cons;
5664 bp->eq_prod = sw_prod;
5665
5666 smp_wmb();
5667
5668
5669 bnx2x_update_eq_prod(bp, bp->eq_prod);
5670}
5671
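/* Slow-path task, run from the bnx2x workqueue: handles attention bits and
 * slow-path events signalled through the default status block.
 */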
5672static void bnx2x_sp_task(struct work_struct *work)
5673{
5674 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5675
5676 DP(BNX2X_MSG_SP, "sp task invoked\n");
5677
5678	/* make sure interrupt_occurred, set by the ISR, is visible here */
5679 smp_rmb();
5680 if (atomic_read(&bp->interrupt_occurred)) {
5681
5682
5683 u16 status = bnx2x_update_dsb_idx(bp);
5684
5685 DP(BNX2X_MSG_SP, "status %x\n", status);
5686 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5687 atomic_set(&bp->interrupt_occurred, 0);
5688
5689		/* HW attentions */
5690 if (status & BNX2X_DEF_SB_ATT_IDX) {
5691 bnx2x_attn_int(bp);
5692 status &= ~BNX2X_DEF_SB_ATT_IDX;
5693 }
5694
5695		/* slow-path events: EQ completions and FCoE queue work */
5696 if (status & BNX2X_DEF_SB_IDX) {
5697 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5698
5699 if (FCOE_INIT(bp) &&
5700 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5701
5702				/* Prevent local bottom-halves from running while
5703				 * the FCoE NAPI instance is being scheduled. */
5704 local_bh_disable();
5705 napi_schedule(&bnx2x_fcoe(bp, napi));
5706 local_bh_enable();
5707 }
5708
5709			/* handle EQ completions */
5710 bnx2x_eq_int(bp);
5711 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5712 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5713
5714 status &= ~BNX2X_DEF_SB_IDX;
5715 }
5716
5717
5718 if (unlikely(status))
5719 DP(BNX2X_MSG_SP,
5720 "got an unknown interrupt! (status 0x%x)\n", status);
5721
5722
5723 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5724 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5725 }
5726
5727
5728 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5729 &bp->sp_state)) {
5730 bnx2x_link_report(bp);
5731 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5732 }
5733}
5734
5735irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5736{
5737 struct net_device *dev = dev_instance;
5738 struct bnx2x *bp = netdev_priv(dev);
5739
5740 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5741 IGU_INT_DISABLE, 0);
5742
5743#ifdef BNX2X_STOP_ON_ERROR
5744 if (unlikely(bp->panic))
5745 return IRQ_HANDLED;
5746#endif
5747
5748 if (CNIC_LOADED(bp)) {
5749 struct cnic_ops *c_ops;
5750
5751 rcu_read_lock();
5752 c_ops = rcu_dereference(bp->cnic_ops);
5753 if (c_ops)
5754 c_ops->cnic_handler(bp->cnic_data, NULL);
5755 rcu_read_unlock();
5756 }
5757
5758
5759	/* Schedule the SP task to do the default status block work and
5760	 * to ack/re-enable the attention interrupt. */
5761 bnx2x_schedule_sp_task(bp);
5762
5763 return IRQ_HANDLED;
5764}
5765
5766
5767
5768void bnx2x_drv_pulse(struct bnx2x *bp)
5769{
5770 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5771 bp->fw_drv_pulse_wr_seq);
5772}
5773
5774static void bnx2x_timer(struct timer_list *t)
5775{
5776 struct bnx2x *bp = from_timer(bp, t, timer);
5777
5778 if (!netif_running(bp->dev))
5779 return;
5780
5781 if (IS_PF(bp) &&
5782 !BP_NOMCP(bp)) {
5783 int mb_idx = BP_FW_MB_IDX(bp);
5784 u16 drv_pulse;
5785 u16 mcp_pulse;
5786
5787 ++bp->fw_drv_pulse_wr_seq;
5788 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5789 drv_pulse = bp->fw_drv_pulse_wr_seq;
5790 bnx2x_drv_pulse(bp);
5791
5792 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5793 MCP_PULSE_SEQ_MASK);
5794
5795
5796		/* The delta between the driver pulse and the MCP response
5797		 * should normally be 1 (before the MCP responds) or 0 (after
5798		 * it responds); a larger gap indicates the MFW may be stuck. */
5799 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5800 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5801 drv_pulse, mcp_pulse);
5802 }
5803
5804 if (bp->state == BNX2X_STATE_OPEN)
5805 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5806
5807
5808 if (IS_VF(bp))
5809 bnx2x_timer_sriov(bp);
5810
5811 mod_timer(&bp->timer, jiffies + bp->current_interval);
5812}
5813
5814
5815
5816
5817
5818
5819
5820
5821
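/* Fill a BAR region with a constant value, using 32-bit writes when both
 * the address and the length are dword aligned and byte writes otherwise.
 */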
5822static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5823{
5824 u32 i;
5825 if (!(len%4) && !(addr%4))
5826 for (i = 0; i < len; i += 4)
5827 REG_WR(bp, addr + i, fill);
5828 else
5829 for (i = 0; i < len; i++)
5830 REG_WR8(bp, addr + i, fill);
5831}
5832
5833
5834static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5835 int fw_sb_id,
5836 u32 *sb_data_p,
5837 u32 data_size)
5838{
5839 int index;
5840 for (index = 0; index < data_size; index++)
5841 REG_WR(bp, BAR_CSTRORM_INTMEM +
5842 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5843 sizeof(u32)*index,
5844 *(sb_data_p + index));
5845}
5846
5847static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5848{
5849 u32 *sb_data_p;
5850 u32 data_size = 0;
5851 struct hc_status_block_data_e2 sb_data_e2;
5852 struct hc_status_block_data_e1x sb_data_e1x;
5853
5854
5855 if (!CHIP_IS_E1x(bp)) {
5856 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5857 sb_data_e2.common.state = SB_DISABLED;
5858 sb_data_e2.common.p_func.vf_valid = false;
5859 sb_data_p = (u32 *)&sb_data_e2;
5860 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5861 } else {
5862 memset(&sb_data_e1x, 0,
5863 sizeof(struct hc_status_block_data_e1x));
5864 sb_data_e1x.common.state = SB_DISABLED;
5865 sb_data_e1x.common.p_func.vf_valid = false;
5866 sb_data_p = (u32 *)&sb_data_e1x;
5867 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5868 }
5869 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5870
5871 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5872 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5873 CSTORM_STATUS_BLOCK_SIZE);
5874 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5875 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5876 CSTORM_SYNC_BLOCK_SIZE);
5877}
5878
5879
5880static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5881 struct hc_sp_status_block_data *sp_sb_data)
5882{
5883 int func = BP_FUNC(bp);
5884 int i;
5885 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5886 REG_WR(bp, BAR_CSTRORM_INTMEM +
5887 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5888 i*sizeof(u32),
5889 *((u32 *)sp_sb_data + i));
5890}
5891
5892static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5893{
5894 int func = BP_FUNC(bp);
5895 struct hc_sp_status_block_data sp_sb_data;
5896 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5897
5898 sp_sb_data.state = SB_DISABLED;
5899 sp_sb_data.p_func.vf_valid = false;
5900
5901 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5902
5903 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5904 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5905 CSTORM_SP_STATUS_BLOCK_SIZE);
5906 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5907 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5908 CSTORM_SP_SYNC_BLOCK_SIZE);
5909}
5910
5911static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5912 int igu_sb_id, int igu_seg_id)
5913{
5914 hc_sm->igu_sb_id = igu_sb_id;
5915 hc_sm->igu_seg_id = igu_seg_id;
5916 hc_sm->timer_value = 0xFF;
5917 hc_sm->time_to_expire = 0xFFFFFFFF;
5918}
5919
5920
5921static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5922{
5923
5924
5925 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5926
5927
5928 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5929 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5930 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5931 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5932
5933
5934
5935 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5936 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5937
5938
5939 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5940 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5941 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5942 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5943 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5944 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5945 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5946 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5947}
5948
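/* Initialize a non-default (fastpath) status block: zero it first, then
 * write the host SB address, state-machine setup and per-index mapping
 * into CSTORM internal memory.
 */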
5949void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5950 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5951{
5952 int igu_seg_id;
5953
5954 struct hc_status_block_data_e2 sb_data_e2;
5955 struct hc_status_block_data_e1x sb_data_e1x;
5956 struct hc_status_block_sm *hc_sm_p;
5957 int data_size;
5958 u32 *sb_data_p;
5959
5960 if (CHIP_INT_MODE_IS_BC(bp))
5961 igu_seg_id = HC_SEG_ACCESS_NORM;
5962 else
5963 igu_seg_id = IGU_SEG_ACCESS_NORM;
5964
5965 bnx2x_zero_fp_sb(bp, fw_sb_id);
5966
5967 if (!CHIP_IS_E1x(bp)) {
5968 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5969 sb_data_e2.common.state = SB_ENABLED;
5970 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5971 sb_data_e2.common.p_func.vf_id = vfid;
5972 sb_data_e2.common.p_func.vf_valid = vf_valid;
5973 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5974 sb_data_e2.common.same_igu_sb_1b = true;
5975 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5976 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5977 hc_sm_p = sb_data_e2.common.state_machine;
5978 sb_data_p = (u32 *)&sb_data_e2;
5979 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5980 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5981 } else {
5982 memset(&sb_data_e1x, 0,
5983 sizeof(struct hc_status_block_data_e1x));
5984 sb_data_e1x.common.state = SB_ENABLED;
5985 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5986 sb_data_e1x.common.p_func.vf_id = 0xff;
5987 sb_data_e1x.common.p_func.vf_valid = false;
5988 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5989 sb_data_e1x.common.same_igu_sb_1b = true;
5990 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5991 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5992 hc_sm_p = sb_data_e1x.common.state_machine;
5993 sb_data_p = (u32 *)&sb_data_e1x;
5994 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5995 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5996 }
5997
5998 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5999 igu_sb_id, igu_seg_id);
6000 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
6001 igu_sb_id, igu_seg_id);
6002
6003 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
6004
6005
6006 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
6007}
6008
6009static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
6010 u16 tx_usec, u16 rx_usec)
6011{
6012 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6013 false, rx_usec);
6014 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6015 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
6016 tx_usec);
6017 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6018 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6019 tx_usec);
6020 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6021 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6022 tx_usec);
6023}
6024
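/* Initialize the default (slow-path) status block: set up the attention
 * groups from the AEU enable registers, program the attention message
 * address and write the SP status block data.
 */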
6025static void bnx2x_init_def_sb(struct bnx2x *bp)
6026{
6027 struct host_sp_status_block *def_sb = bp->def_status_blk;
6028 dma_addr_t mapping = bp->def_status_blk_mapping;
6029 int igu_sp_sb_index;
6030 int igu_seg_id;
6031 int port = BP_PORT(bp);
6032 int func = BP_FUNC(bp);
6033 int reg_offset, reg_offset_en5;
6034 u64 section;
6035 int index;
6036 struct hc_sp_status_block_data sp_sb_data;
6037 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6038
6039 if (CHIP_INT_MODE_IS_BC(bp)) {
6040 igu_sp_sb_index = DEF_SB_IGU_ID;
6041 igu_seg_id = HC_SEG_ACCESS_DEF;
6042 } else {
6043 igu_sp_sb_index = bp->igu_dsb_id;
6044 igu_seg_id = IGU_SEG_ACCESS_DEF;
6045 }
6046
6047
6048 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6049 atten_status_block);
6050 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6051
6052 bp->attn_state = 0;
6053
6054 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6055 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6056 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6057 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6058 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6059 int sindex;
6060
6061 for (sindex = 0; sindex < 4; sindex++)
6062 bp->attn_group[index].sig[sindex] =
6063 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6064
6065 if (!CHIP_IS_E1x(bp))
6066
6067
6068
6069
6070
6071 bp->attn_group[index].sig[4] = REG_RD(bp,
6072 reg_offset_en5 + 0x4*index);
6073 else
6074 bp->attn_group[index].sig[4] = 0;
6075 }
6076
6077 if (bp->common.int_block == INT_BLOCK_HC) {
6078 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6079 HC_REG_ATTN_MSG0_ADDR_L);
6080
6081 REG_WR(bp, reg_offset, U64_LO(section));
6082 REG_WR(bp, reg_offset + 4, U64_HI(section));
6083 } else if (!CHIP_IS_E1x(bp)) {
6084 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6085 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6086 }
6087
6088 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6089 sp_sb);
6090
6091 bnx2x_zero_sp_sb(bp);
6092
6093
6094 sp_sb_data.state = SB_ENABLED;
6095 sp_sb_data.host_sb_addr.lo = U64_LO(section);
6096 sp_sb_data.host_sb_addr.hi = U64_HI(section);
6097 sp_sb_data.igu_sb_id = igu_sp_sb_index;
6098 sp_sb_data.igu_seg_id = igu_seg_id;
6099 sp_sb_data.p_func.pf_id = func;
6100 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6101 sp_sb_data.p_func.vf_id = 0xff;
6102
6103 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6104
6105 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6106}
6107
6108void bnx2x_update_coalesce(struct bnx2x *bp)
6109{
6110 int i;
6111
6112 for_each_eth_queue(bp, i)
6113 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6114 bp->tx_ticks, bp->rx_ticks);
6115}
6116
6117static void bnx2x_init_sp_ring(struct bnx2x *bp)
6118{
6119 spin_lock_init(&bp->spq_lock);
6120 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6121
6122 bp->spq_prod_idx = 0;
6123 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6124 bp->spq_prod_bd = bp->spq;
6125 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6126}
6127
6128static void bnx2x_init_eq_ring(struct bnx2x *bp)
6129{
6130 int i;
6131 for (i = 1; i <= NUM_EQ_PAGES; i++) {
6132 union event_ring_elem *elem =
6133 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6134
6135 elem->next_page.addr.hi =
6136 cpu_to_le32(U64_HI(bp->eq_mapping +
6137 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6138 elem->next_page.addr.lo =
6139 cpu_to_le32(U64_LO(bp->eq_mapping +
6140 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6141 }
6142 bp->eq_cons = 0;
6143 bp->eq_prod = NUM_EQ_DESC;
6144 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6145
6146 atomic_set(&bp->eq_spq_left,
6147 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6148}
6149
6150
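/* Send an rx-mode ramrod for the given client with the supplied accept
 * flags; returns 0 on success or a negative error code.
 */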
6151static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6152 unsigned long rx_mode_flags,
6153 unsigned long rx_accept_flags,
6154 unsigned long tx_accept_flags,
6155 unsigned long ramrod_flags)
6156{
6157 struct bnx2x_rx_mode_ramrod_params ramrod_param;
6158 int rc;
6159
6160 memset(&ramrod_param, 0, sizeof(ramrod_param));
6161
6162
6163 ramrod_param.cid = 0;
6164 ramrod_param.cl_id = cl_id;
6165 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6166 ramrod_param.func_id = BP_FUNC(bp);
6167
6168 ramrod_param.pstate = &bp->sp_state;
6169 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6170
6171 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6172 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6173
6174 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6175
6176 ramrod_param.ramrod_flags = ramrod_flags;
6177 ramrod_param.rx_mode_flags = rx_mode_flags;
6178
6179 ramrod_param.rx_accept_flags = rx_accept_flags;
6180 ramrod_param.tx_accept_flags = tx_accept_flags;
6181
6182 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6183 if (rc < 0) {
6184 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6185 return rc;
6186 }
6187
6188 return 0;
6189}
6190
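/* Translate a BNX2X_RX_MODE_* value into RX and TX accept-flag bitmaps. */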
6191static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6192 unsigned long *rx_accept_flags,
6193 unsigned long *tx_accept_flags)
6194{
6195
6196 *rx_accept_flags = 0;
6197 *tx_accept_flags = 0;
6198
6199 switch (rx_mode) {
6200 case BNX2X_RX_MODE_NONE:
6201
6202
6203
6204
6205 break;
6206 case BNX2X_RX_MODE_NORMAL:
6207 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6208 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6209 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6210
6211
6212 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6213 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6214 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6215
6216 if (bp->accept_any_vlan) {
6217 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6218 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6219 }
6220
6221 break;
6222 case BNX2X_RX_MODE_ALLMULTI:
6223 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6224 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6225 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6226
6227
6228 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6229 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6230 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6231
6232 if (bp->accept_any_vlan) {
6233 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6234 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6235 }
6236
6237 break;
6238 case BNX2X_RX_MODE_PROMISC:
6239
6240
6241
6242
6243 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6244 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6245 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6246 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6247
6248
6249 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6250 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6251
6252 if (IS_MF_SI(bp))
6253 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6254 else
6255 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6256
6257 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6258 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6259
6260 break;
6261 default:
6262 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6263 return -EINVAL;
6264 }
6265
6266 return 0;
6267}
6268
6269
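/* Push the current bp->rx_mode setting to the device via a rx_mode ramrod */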
6270static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6271{
6272 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6273 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6274 int rc;
6275
	if (!NO_FCOE(bp))
		/* Configure rx_mode of FCoE Queue */
		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6279
6280 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6281 &tx_accept_flags);
6282 if (rc)
6283 return rc;
6284
6285 __set_bit(RAMROD_RX, &ramrod_flags);
6286 __set_bit(RAMROD_TX, &ramrod_flags);
6287
6288 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6289 rx_accept_flags, tx_accept_flags,
6290 ramrod_flags);
6291}
6292
6293static void bnx2x_init_internal_common(struct bnx2x *bp)
6294{
6295 int i;
6296
	/* Zero the USTORM aggregation data area manually */
6299 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6300 REG_WR(bp, BAR_USTRORM_INTMEM +
6301 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6302 if (!CHIP_IS_E1x(bp)) {
6303 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6304 CHIP_INT_MODE_IS_BC(bp) ?
6305 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6306 }
6307}
6308
6309static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6310{
6311 switch (load_code) {
6312 case FW_MSG_CODE_DRV_LOAD_COMMON:
6313 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6314 bnx2x_init_internal_common(bp);
6315 fallthrough;
6316
6317 case FW_MSG_CODE_DRV_LOAD_PORT:
6318
6319 fallthrough;
6320
6321 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6322
6323
6324 break;
6325
6326 default:
6327 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6328 break;
6329 }
6330}
6331
6332static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6333{
6334 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6335}
6336
6337static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6338{
6339 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6340}
6341
6342static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6343{
6344 if (CHIP_IS_E1x(fp->bp))
6345 return BP_L_ID(fp->bp) + fp->index;
6346 else
6347 return bnx2x_fp_igu_sb_id(fp);
6348}
6349
6350static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6351{
6352 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6353 u8 cos;
6354 unsigned long q_type = 0;
6355 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6356 fp->rx_queue = fp_idx;
6357 fp->cid = fp_idx;
6358 fp->cl_id = bnx2x_fp_cl_id(fp);
6359 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6360 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6361
6362 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
6363
6364
6365 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6366
6367
6368 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6369
6370
6371 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6372 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6373
6374 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6375
6376
6377 for_each_cos_in_tx_queue(fp, cos) {
6378 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6379 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6380 FP_COS_TO_TXQ(fp, cos, bp),
6381 BNX2X_TX_SB_INDEX_BASE + cos, fp);
6382 cids[cos] = fp->txdata_ptr[cos]->cid;
6383 }
6384
6385
6386 if (IS_VF(bp))
6387 return;
6388
6389 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6390 fp->fw_sb_id, fp->igu_sb_id);
6391 bnx2x_update_fpsb_idx(fp);
6392 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6393 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6394 bnx2x_sp_mapping(bp, q_rdata), q_type);
6395
	/* configure the MAC/VLAN classification objects for this queue */
6399 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6400
6401 DP(NETIF_MSG_IFUP,
6402 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6403 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6404 fp->igu_sb_id);
6405}
6406
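/* Initialize one TX BD ring: chain the next-BD pointers of all pages and
 * reset the producer/consumer indices and the doorbell data.
 */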
6407static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6408{
6409 int i;
6410
6411 for (i = 1; i <= NUM_TX_RINGS; i++) {
6412 struct eth_tx_next_bd *tx_next_bd =
6413 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6414
6415 tx_next_bd->addr_hi =
6416 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6417 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6418 tx_next_bd->addr_lo =
6419 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6420 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6421 }
6422
6423 *txdata->tx_cons_sb = cpu_to_le16(0);
6424
6425 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6426 txdata->tx_db.data.zero_fill1 = 0;
6427 txdata->tx_db.data.prod = 0;
6428
6429 txdata->tx_pkt_prod = 0;
6430 txdata->tx_pkt_cons = 0;
6431 txdata->tx_bd_prod = 0;
6432 txdata->tx_bd_cons = 0;
6433 txdata->tx_pkt = 0;
6434}
6435
6436static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6437{
6438 int i;
6439
6440 for_each_tx_queue_cnic(bp, i)
6441 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6442}
6443
6444static void bnx2x_init_tx_rings(struct bnx2x *bp)
6445{
6446 int i;
6447 u8 cos;
6448
6449 for_each_eth_queue(bp, i)
6450 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6451 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6452}
6453
6454static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6455{
6456 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6457 unsigned long q_type = 0;
6458
6459 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6460 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6461 BNX2X_FCOE_ETH_CL_ID_IDX);
6462 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6463 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6464 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6465 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6466 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6467 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6468 fp);
6469
6470 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6471
6472
6473 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6474
6475 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6476 bnx2x_rx_ustorm_prods_offset(fp);
6477
6478
6479 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6480 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6481
6482
6483 BUG_ON(fp->max_cos != 1);
6484
6485 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6486 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6487 bnx2x_sp_mapping(bp, q_rdata), q_type);
6488
6489 DP(NETIF_MSG_IFUP,
6490 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6491 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6492 fp->igu_sb_id);
6493}
6494
6495void bnx2x_nic_init_cnic(struct bnx2x *bp)
6496{
6497 if (!NO_FCOE(bp))
6498 bnx2x_init_fcoe_fp(bp);
6499
6500 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6501 BNX2X_VF_ID_INVALID, false,
6502 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6503
6504
6505 rmb();
6506 bnx2x_init_rx_rings_cnic(bp);
6507 bnx2x_init_tx_rings_cnic(bp);
6508
6509
6510 mb();
6511}
6512
6513void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6514{
6515 int i;
6516
6517
6518 for_each_eth_queue(bp, i)
6519 bnx2x_init_eth_fp(bp, i);
6520
6521
6522 rmb();
6523 bnx2x_init_rx_rings(bp);
6524 bnx2x_init_tx_rings(bp);
6525
6526 if (IS_PF(bp)) {
6527
6528 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6529 bp->common.shmem_base,
6530 bp->common.shmem2_base, BP_PORT(bp));
6531
6532
6533 bnx2x_init_def_sb(bp);
6534 bnx2x_update_dsb_idx(bp);
6535 bnx2x_init_sp_ring(bp);
6536 } else {
6537 bnx2x_memset_stats(bp);
6538 }
6539}
6540
6541void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6542{
6543 bnx2x_init_eq_ring(bp);
6544 bnx2x_init_internal(bp, load_code);
6545 bnx2x_pf_init(bp);
6546 bnx2x_stats_init(bp);
6547
6548
6549 mb();
6550
6551 bnx2x_int_enable(bp);
6552
6553
6554 bnx2x_attn_int_deasserted0(bp,
6555 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6556 AEU_INPUTS_ATTN_BITS_SPIO5);
6557}
6558
6559
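/* Allocate the DMA buffer and zlib stream used for firmware decompression */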
6560static int bnx2x_gunzip_init(struct bnx2x *bp)
6561{
6562 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6563 &bp->gunzip_mapping, GFP_KERNEL);
6564 if (bp->gunzip_buf == NULL)
6565 goto gunzip_nomem1;
6566
6567 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6568 if (bp->strm == NULL)
6569 goto gunzip_nomem2;
6570
6571 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6572 if (bp->strm->workspace == NULL)
6573 goto gunzip_nomem3;
6574
6575 return 0;
6576
6577gunzip_nomem3:
6578 kfree(bp->strm);
6579 bp->strm = NULL;
6580
6581gunzip_nomem2:
6582 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6583 bp->gunzip_mapping);
6584 bp->gunzip_buf = NULL;
6585
6586gunzip_nomem1:
	BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
6588 return -ENOMEM;
6589}
6590
6591static void bnx2x_gunzip_end(struct bnx2x *bp)
6592{
6593 if (bp->strm) {
6594 vfree(bp->strm->workspace);
6595 kfree(bp->strm);
6596 bp->strm = NULL;
6597 }
6598
6599 if (bp->gunzip_buf) {
6600 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6601 bp->gunzip_mapping);
6602 bp->gunzip_buf = NULL;
6603 }
6604}
6605
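/* Inflate a gzip-compressed firmware blob into bp->gunzip_buf */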
6606static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6607{
6608 int n, rc;
6609
6610
6611 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6612 BNX2X_ERR("Bad gzip header\n");
6613 return -EINVAL;
6614 }
6615
6616 n = 10;
6617
6618#define FNAME 0x8
6619
6620 if (zbuf[3] & FNAME)
6621 while ((zbuf[n++] != 0) && (n < len));
6622
6623 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6624 bp->strm->avail_in = len - n;
6625 bp->strm->next_out = bp->gunzip_buf;
6626 bp->strm->avail_out = FW_BUF_SIZE;
6627
6628 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6629 if (rc != Z_OK)
6630 return rc;
6631
6632 rc = zlib_inflate(bp->strm, Z_FINISH);
6633 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6634 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6635 bp->strm->msg);
6636
6637 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6638 if (bp->gunzip_outlen & 0x3)
6639 netdev_err(bp->dev,
6640 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6641 bp->gunzip_outlen);
6642 bp->gunzip_outlen >>= 2;
6643
6644 zlib_inflateEnd(bp->strm);
6645
6646 if (rc == Z_STREAM_END)
6647 return 0;
6648
6649 return rc;
6650}
6651
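/* Send a NIG loopback debug packet */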
6659static void bnx2x_lb_pckt(struct bnx2x *bp)
6660{
6661 u32 wb_write[3];
6662
6663
6664 wb_write[0] = 0x55555555;
6665 wb_write[1] = 0x55555555;
6666 wb_write[2] = 0x20;
6667 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6668
6669
6670 wb_write[0] = 0x09000000;
6671 wb_write[1] = 0x55555555;
6672 wb_write[2] = 0x10;
6673 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6674}
6675
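/* Some of the internal memories are not directly readable from the driver;
 * to test them we send debug packets through the NIG loopback path.
 */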
6680static int bnx2x_int_mem_test(struct bnx2x *bp)
6681{
6682 int factor;
6683 int count, i;
6684 u32 val = 0;
6685
6686 if (CHIP_REV_IS_FPGA(bp))
6687 factor = 120;
6688 else if (CHIP_REV_IS_EMUL(bp))
6689 factor = 200;
6690 else
6691 factor = 1;
6692
6693
6694 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6695 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6696 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6697 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6698
6699
6700 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6701
6702
6703 bnx2x_lb_pckt(bp);
6704
	/* Wait until the NIG reports one packet of size 0x10 */
6707 count = 1000 * factor;
6708 while (count) {
6709
6710 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6711 val = *bnx2x_sp(bp, wb_data[0]);
6712 if (val == 0x10)
6713 break;
6714
6715 usleep_range(10000, 20000);
6716 count--;
6717 }
6718 if (val != 0x10) {
6719 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6720 return -1;
6721 }
6722
	/* Wait until the PRS reports one packet */
6724 count = 1000 * factor;
6725 while (count) {
6726 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6727 if (val == 1)
6728 break;
6729
6730 usleep_range(10000, 20000);
6731 count--;
6732 }
6733 if (val != 0x1) {
6734 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6735 return -2;
6736 }
6737
	/* Reset and init BRB, PRS */
6739 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6740 msleep(50);
6741 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6742 msleep(50);
6743 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6744 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6745
6746 DP(NETIF_MSG_HW, "part2\n");
6747
6748
6749 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6750 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6751 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6752 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6753
6754
6755 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6756
6757
6758 for (i = 0; i < 10; i++)
6759 bnx2x_lb_pckt(bp);
6760
	/* Wait until the NIG reports 10 + 1 packets (0xb0 bytes in total) */
6763 count = 1000 * factor;
6764 while (count) {
6765
6766 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6767 val = *bnx2x_sp(bp, wb_data[0]);
6768 if (val == 0xb0)
6769 break;
6770
6771 usleep_range(10000, 20000);
6772 count--;
6773 }
6774 if (val != 0xb0) {
6775 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6776 return -3;
6777 }
6778
6779
6780 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6781 if (val != 2)
6782 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6783
6784
6785 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6786
6787
6788 msleep(10 * factor);
6789
6790 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6791 if (val != 3)
6792 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6793
6794
6795 for (i = 0; i < 11; i++)
6796 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6797 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6798 if (val != 1) {
6799 BNX2X_ERR("clear of NIG failed\n");
6800 return -4;
6801 }
6802
6803
6804 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6805 msleep(50);
6806 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6807 msleep(50);
6808 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6809 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);
6813
6814
6815 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6816 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6817 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6818 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6819
6820 DP(NETIF_MSG_HW, "done\n");
6821
6822 return 0;
6823}
6824
6825static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6826{
6827 u32 val;
6828
6829 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6830 if (!CHIP_IS_E1x(bp))
6831 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6832 else
6833 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6834 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6835 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6836
6842 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6843 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6844 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6845 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6846 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6847 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6848
6849
6850 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6851 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6852 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6853
6854
6855 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6856 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6857 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6858 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6859
6860
6861
6862 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6863 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6864 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6865 if (!CHIP_IS_E1x(bp))
6866 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6867 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6868 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6869
6870 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6871 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6872 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6873
6874
	if (!CHIP_IS_E1x(bp))
		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6878
6879 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6880 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6881
6882 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);
6883}
6884
6885static void bnx2x_reset_common(struct bnx2x *bp)
6886{
6887 u32 val = 0x1400;
6888
6889
6890 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6891 0xd3ffff7f);
6892
6893 if (CHIP_IS_E3(bp)) {
6894 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6895 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6896 }
6897
6898 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6899}
6900
6901static void bnx2x_setup_dmae(struct bnx2x *bp)
6902{
6903 bp->dmae_ready = 0;
6904 spin_lock_init(&bp->dmae_lock);
6905}
6906
6907static void bnx2x_init_pxp(struct bnx2x *bp)
6908{
6909 u16 devctl;
6910 int r_order, w_order;
6911
6912 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6913 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6914 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6915 if (bp->mrrs == -1)
6916 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6917 else {
6918 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6919 r_order = bp->mrrs;
6920 }
6921
6922 bnx2x_init_pxp_arb(bp, r_order, w_order);
6923}
6924
6925static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6926{
6927 int is_required;
6928 u32 val;
6929 int port;
6930
6931 if (BP_NOMCP(bp))
6932 return;
6933
6934 is_required = 0;
6935 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6936 SHARED_HW_CFG_FAN_FAILURE_MASK;
6937
6938 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6939 is_required = 1;
6946 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6947 for (port = PORT_0; port < PORT_MAX; port++) {
6948 is_required |=
6949 bnx2x_fan_failure_det_req(
6950 bp,
6951 bp->common.shmem_base,
6952 bp->common.shmem2_base,
6953 port);
6954 }
6955
6956 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6957
6958 if (is_required == 0)
6959 return;
6960
6961
6962 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6963
6964
6965 val = REG_RD(bp, MISC_REG_SPIO_INT);
6966 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6967 REG_WR(bp, MISC_REG_SPIO_INT, val);
6968
6969
6970 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6971 val |= MISC_SPIO_SPIO5;
6972 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6973}
6974
6975void bnx2x_pf_disable(struct bnx2x *bp)
6976{
6977 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6978 val &= ~IGU_PF_CONF_FUNC_EN;
6979
6980 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6981 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6982 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6983}
6984
6985static void bnx2x__common_init_phy(struct bnx2x *bp)
6986{
6987 u32 shmem_base[2], shmem2_base[2];
6988
6989 if (SHMEM2_RD(bp, size) >
6990 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6991 return;
6992 shmem_base[0] = bp->common.shmem_base;
6993 shmem2_base[0] = bp->common.shmem2_base;
6994 if (!CHIP_IS_E1x(bp)) {
6995 shmem_base[1] =
6996 SHMEM2_RD(bp, other_shmem_base_addr);
6997 shmem2_base[1] =
6998 SHMEM2_RD(bp, other_shmem2_base_addr);
6999 }
7000 bnx2x_acquire_phy_lock(bp);
7001 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
7002 bp->common.chip_id);
7003 bnx2x_release_phy_lock(bp);
7004}
7005
7006static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
7007{
7008 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
7009 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7010 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7011 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7012 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7013
7014
7015 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7016
7017 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7018 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7019 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7020 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7021}
7022
7023static void bnx2x_set_endianity(struct bnx2x *bp)
7024{
7025#ifdef __BIG_ENDIAN
7026 bnx2x_config_endianity(bp, 1);
7027#else
7028 bnx2x_config_endianity(bp, 0);
7029#endif
7030}
7031
7032static void bnx2x_reset_endianity(struct bnx2x *bp)
7033{
7034 bnx2x_config_endianity(bp, 0);
7035}
7036
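/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */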
7042static int bnx2x_init_hw_common(struct bnx2x *bp)
7043{
7044 u32 val;
7045
7046 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
7047
	/* take the hardware RESET lock around the common reset sequence */
7052 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7053
7054 bnx2x_reset_common(bp);
7055 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7056
7057 val = 0xfffc;
7058 if (CHIP_IS_E3(bp)) {
7059 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7060 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7061 }
7062 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7063
7064 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7065
7066 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7067
7068 if (!CHIP_IS_E1x(bp)) {
7069 u8 abs_func_id;
7070
		/* Turn off the PGLUE master-enable for every function on this
		 * path (0,2,4,6 on path 0; 1,3,5,7 on path 1), then turn it
		 * back on only for ourselves.
		 */
7078 for (abs_func_id = BP_PATH(bp);
7079 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7080 if (abs_func_id == BP_ABS_FUNC(bp)) {
7081 REG_WR(bp,
7082 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7083 1);
7084 continue;
7085 }
7086
7087 bnx2x_pretend_func(bp, abs_func_id);
7088
7089 bnx2x_pf_disable(bp);
7090 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7091 }
7092 }
7093
7094 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7095 if (CHIP_IS_E1(bp)) {
7096
7097
7098 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7099 }
7100
7101 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7102 bnx2x_init_pxp(bp);
7103 bnx2x_set_endianity(bp);
7104 bnx2x_ilt_init_page_size(bp, INITOP_SET);
7105
7106 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7107 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7108
7109
7110 msleep(100);
7111
7112 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7113 if (val != 1) {
7114 BNX2X_ERR("PXP2 CFG failed\n");
7115 return -EBUSY;
7116 }
7117 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7118 if (val != 1) {
7119 BNX2X_ERR("PXP2 RD_INIT failed\n");
7120 return -EBUSY;
7121 }
7122
7128 if (!CHIP_IS_E1x(bp)) {
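		/* Clear the entire ILT range of the TM (timers) client while
		 * pretending to be function (path + 6), so that no stale
		 * timer ILT entries remain from a previous driver instance,
		 * then restore the real function and program the DRAM
		 * alignment.
		 */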
7191 struct ilt_client_info ilt_cli;
7192 struct bnx2x_ilt ilt;
7193 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7194 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7195
7196
7197 ilt_cli.start = 0;
7198 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7199 ilt_cli.client_num = ILT_CLIENT_TM;
7200
7212 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7213 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7214 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7215
7216 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7217 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7218 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7219 }
7220
7221 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7222 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7223
7224 if (!CHIP_IS_E1x(bp)) {
7225 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7226 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7227 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7228
7229 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7230
7231
7232 do {
7233 msleep(200);
7234 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7235 } while (factor-- && (val != 1));
7236
7237 if (val != 1) {
7238 BNX2X_ERR("ATC_INIT failed\n");
7239 return -EBUSY;
7240 }
7241 }
7242
7243 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7244
7245 bnx2x_iov_init_dmae(bp);
7246
7247
7248 bp->dmae_ready = 1;
7249 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7250
7251 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7252
7253 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7254
7255 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7256
7257 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7258
7259 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7260 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7261 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7262 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7263
7264 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7265
7266
7267 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7268
7269
7270 REG_WR(bp, QM_REG_SOFT_RESET, 1);
7271 REG_WR(bp, QM_REG_SOFT_RESET, 0);
7272
7273 if (CNIC_SUPPORT(bp))
7274 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7275
7276 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7277
	if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7281
7282 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7283
7284 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7285 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7286
7287 if (!CHIP_IS_E1(bp))
7288 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7289
7290 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7291 if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * received in AFEX mode
			 */
7295 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7296 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7297 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7298 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7299 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7300 } else {
			/* Bit-map indicating which L2 headers may appear
			 * after the basic Ethernet header
			 */
7304 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7305 bp->path_has_ovlan ? 7 : 6);
7306 }
7307 }
7308
7309 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7310 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7311 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7312 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7313
7314 if (!CHIP_IS_E1x(bp)) {
7315
7316 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7317 VFC_MEMORIES_RST_REG_CAM_RST |
7318 VFC_MEMORIES_RST_REG_RAM_RST);
7319 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7320 VFC_MEMORIES_RST_REG_CAM_RST |
7321 VFC_MEMORIES_RST_REG_RAM_RST);
7322
7323 msleep(20);
7324 }
7325
7326 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7327 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7328 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7329 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7330
7331
7332 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7333 0x80000000);
7334 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7335 0x80000000);
7336
7337 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7338 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7339 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7340
7341 if (!CHIP_IS_E1x(bp)) {
7342 if (IS_MF_AFEX(bp)) {
7343
7344
7345
7346 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7347 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7348 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7349 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7350 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7351 } else {
7352 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7353 bp->path_has_ovlan ? 7 : 6);
7354 }
7355 }
7356
7357 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7358
7359 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7360
7361 if (CNIC_SUPPORT(bp)) {
7362 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7363 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7364 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7365 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7366 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7367 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7368 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7369 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7370 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7371 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7372 }
7373 REG_WR(bp, SRC_REG_SOFT_RST, 0);
7374
	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev,
			  "please adjust the size of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));
7380
7381 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7382 val = (4 << 24) + (0 << 12) + 1024;
7383 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7384
7385 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7386 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7387
7388 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7389
7390
7391 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7392
7393 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7394
7395 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7396 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7397
7398 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7399 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7400
	/* Reset PCIe block errors (debug registers) */
7402 REG_WR(bp, 0x2814, 0xffffffff);
7403 REG_WR(bp, 0x3820, 0xffffffff);
7404
7405 if (!CHIP_IS_E1x(bp)) {
7406 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7407 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7408 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7409 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7410 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7411 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7412 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7413 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7414 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7415 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7416 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7417 }
7418
7419 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7420 if (!CHIP_IS_E1(bp)) {
7421
7422 if (!CHIP_IS_E3(bp))
7423 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7424 }
	if (CHIP_IS_E1H(bp))
		/* E1H only: set LLH outer-VLAN mode for switch-dependent MF */
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7428
7429 if (CHIP_REV_IS_SLOW(bp))
7430 msleep(200);
7431
7432
7433 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7434 if (val != 1) {
7435 BNX2X_ERR("CFC LL_INIT failed\n");
7436 return -EBUSY;
7437 }
7438 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7439 if (val != 1) {
7440 BNX2X_ERR("CFC AC_INIT failed\n");
7441 return -EBUSY;
7442 }
7443 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7444 if (val != 1) {
7445 BNX2X_ERR("CFC CAM_INIT failed\n");
7446 return -EBUSY;
7447 }
7448 REG_WR(bp, CFC_REG_DEBUG0, 0);
7449
7450 if (CHIP_IS_E1(bp)) {
7451
7452
7453 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7454 val = *bnx2x_sp(bp, wb_data[0]);
7455
7456
7457 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7458 BNX2X_ERR("internal mem self test failed\n");
7459 return -EBUSY;
7460 }
7461 }
7462
7463 bnx2x_setup_fan_failure_detection(bp);
7464
7465
7466 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7467
7468 bnx2x_enable_blocks_attention(bp);
7469 bnx2x_enable_blocks_parity(bp);
7470
7471 if (!BP_NOMCP(bp)) {
7472 if (CHIP_IS_E1x(bp))
7473 bnx2x__common_init_phy(bp);
7474 } else
7475 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7476
7477 if (SHMEM2_HAS(bp, netproc_fw_ver))
7478 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7479
7480 return 0;
7481}
7482
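/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */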
7488static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7489{
7490 int rc = bnx2x_init_hw_common(bp);
7491
7492 if (rc)
7493 return rc;
7494
7495
7496 if (!BP_NOMCP(bp))
7497 bnx2x__common_init_phy(bp);
7498
7499 return 0;
7500}
7501
7502static int bnx2x_init_hw_port(struct bnx2x *bp)
7503{
7504 int port = BP_PORT(bp);
7505 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7506 u32 low, high;
7507 u32 val, reg;
7508
7509 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7510
7511 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7512
7513 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7514 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7515 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7516
	/* Re-enable the PGLUE internal PFID master bit before any DMAE
	 * access is attempted in the port phase.
	 */
7522 if (!CHIP_IS_E1x(bp))
7523 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7524
7525 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7526 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7527 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7528 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7529
7530 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7531 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7532 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7533 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7534
7535
7536 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7537
7538 if (CNIC_SUPPORT(bp)) {
7539 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7540 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7541 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7542 }
7543
7544 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7545
7546 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7547
7548 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7549
7550 if (IS_MF(bp))
7551 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7552 else if (bp->dev->mtu > 4096) {
7553 if (bp->flags & ONE_PORT_FLAG)
7554 low = 160;
7555 else {
7556 val = bp->dev->mtu;
7557
7558 low = 96 + (val/64) +
7559 ((val % 64) ? 1 : 0);
7560 }
7561 } else
7562 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7563 high = low + 56;
7564 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7565 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7566 }
7567
7568 if (CHIP_MODE_IS_4_PORT(bp))
7569 REG_WR(bp, (BP_PORT(bp) ?
7570 BRB1_REG_MAC_GUARANTIED_1 :
7571 BRB1_REG_MAC_GUARANTIED_0), 40);
7572
7573 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7574 if (CHIP_IS_E3B0(bp)) {
7575 if (IS_MF_AFEX(bp)) {
7576
7577 REG_WR(bp, BP_PORT(bp) ?
7578 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7579 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7580 REG_WR(bp, BP_PORT(bp) ?
7581 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7582 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7583 REG_WR(bp, BP_PORT(bp) ?
7584 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7585 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7586 } else {
7587
7588
7589
7590
7591 REG_WR(bp, BP_PORT(bp) ?
7592 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7593 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7594 (bp->path_has_ovlan ? 7 : 6));
7595 }
7596 }
7597
7598 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7599 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7600 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7601 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7602
7603 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7604 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7605 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7606 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7607
7608 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7609 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7610
7611 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7612
7613 if (CHIP_IS_E1x(bp)) {
7614
7615 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7616
7617
7618 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7619
7620 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7621
7622
7623 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7624 udelay(50);
7625 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7626 }
7627
7628 if (CNIC_SUPPORT(bp))
7629 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7630
7631 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7632 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7633
7634 if (CHIP_IS_E1(bp)) {
7635 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7636 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7637 }
7638 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7639
7640 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7641
7642 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7643
7647 val = IS_MF(bp) ? 0xF7 : 0x7;
7648
7649 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7650 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7651
7652
7653 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7654 REG_WR(bp, reg,
7655 REG_RD(bp, reg) &
7656 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7657
7658 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7659 REG_WR(bp, reg,
7660 REG_RD(bp, reg) &
7661 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7662
7663 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7664
7665 if (!CHIP_IS_E1x(bp)) {
7669 if (IS_MF_AFEX(bp))
7670 REG_WR(bp, BP_PORT(bp) ?
7671 NIG_REG_P1_HDRS_AFTER_BASIC :
7672 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7673 else
7674 REG_WR(bp, BP_PORT(bp) ?
7675 NIG_REG_P1_HDRS_AFTER_BASIC :
7676 NIG_REG_P0_HDRS_AFTER_BASIC,
7677 IS_MF_SD(bp) ? 7 : 6);
7678
7679 if (CHIP_IS_E3(bp))
7680 REG_WR(bp, BP_PORT(bp) ?
7681 NIG_REG_LLH1_MF_MODE :
7682 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7683 }
7684 if (!CHIP_IS_E3(bp))
7685 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7686
7687 if (!CHIP_IS_E1(bp)) {
7688
7689 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7690 (IS_MF_SD(bp) ? 0x1 : 0x2));
7691
7692 if (!CHIP_IS_E1x(bp)) {
7693 val = 0;
7694 switch (bp->mf_mode) {
7695 case MULTI_FUNCTION_SD:
7696 val = 1;
7697 break;
7698 case MULTI_FUNCTION_SI:
7699 case MULTI_FUNCTION_AFEX:
7700 val = 2;
7701 break;
7702 }
7703
7704 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7705 NIG_REG_LLH0_CLS_TYPE), val);
7706 }
7707 {
7708 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7709 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7710 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7711 }
7712 }
7713
	/* If SPIO5 is set (fan failure detection), enable the SPIO5 attention
	 * for this port
	 */
7715 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7716 if (val & MISC_SPIO_SPIO5) {
7717 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7718 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7719 val = REG_RD(bp, reg_addr);
7720 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7721 REG_WR(bp, reg_addr, val);
7722 }
7723
7724 if (CHIP_IS_E3B0(bp))
7725 bp->flags |= PTP_SUPPORTED;
7726
7727 return 0;
7728}
7729
7730static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7731{
7732 int reg;
7733 u32 wb_write[2];
7734
7735 if (CHIP_IS_E1(bp))
7736 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7737 else
7738 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7739
7740 wb_write[0] = ONCHIP_ADDR1(addr);
7741 wb_write[1] = ONCHIP_ADDR2(addr);
7742 REG_WR_DMAE(bp, reg, wb_write, 2);
7743}
7744
7745void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7746{
7747 u32 data, ctl, cnt = 100;
7748 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7749 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7750 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7751 u32 sb_bit = 1 << (idu_sb_id%32);
7752 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7753 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7754
	/* Not supported in BC mode */
7756 if (CHIP_INT_MODE_IS_BC(bp))
7757 return;
7758
7759 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7760 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7761 IGU_REGULAR_CLEANUP_SET |
7762 IGU_REGULAR_BCLEANUP;
7763
7764 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7765 func_encode << IGU_CTRL_REG_FID_SHIFT |
7766 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7767
7768 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7769 data, igu_addr_data);
7770 REG_WR(bp, igu_addr_data, data);
7771 barrier();
7772 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7773 ctl, igu_addr_ctl);
7774 REG_WR(bp, igu_addr_ctl, ctl);
7775 barrier();
7776
7777
7778 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7779 msleep(20);
7780
7781 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7782 DP(NETIF_MSG_HW,
7783 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7784 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7785 }
7786}
7787
7788static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7789{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true);
7791}
7792
7793static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7794{
7795 u32 i, base = FUNC_ILT_BASE(func);
7796 for (i = base; i < base + ILT_PER_FUNC; i++)
7797 bnx2x_ilt_wr(bp, i, 0);
7798}
7799
7800static void bnx2x_init_searcher(struct bnx2x *bp)
7801{
7802 int port = BP_PORT(bp);
7803 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7804
7805 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7806}
7807
7808static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7809{
7810 int rc;
7811 struct bnx2x_func_state_params func_params = {NULL};
7812 struct bnx2x_func_switch_update_params *switch_update_params =
7813 &func_params.params.switch_update;
7814
7815
7816 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7817 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7818
7819 func_params.f_obj = &bp->func_obj;
7820 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7821
7822
7823 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7824 &switch_update_params->changes);
7825 if (suspend)
7826 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7827 &switch_update_params->changes);
7828
7829 rc = bnx2x_func_state_change(bp, &func_params);
7830
7831 return rc;
7832}
7833
7834static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7835{
7836 int rc, i, port = BP_PORT(bp);
7837 int vlan_en = 0, mac_en[NUM_MACS];
7838
7839
7840 if (bp->mf_mode == SINGLE_FUNCTION) {
7841 bnx2x_set_rx_filter(&bp->link_params, 0);
7842 } else {
7843 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7844 NIG_REG_LLH0_FUNC_EN);
7845 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7846 NIG_REG_LLH0_FUNC_EN, 0);
7847 for (i = 0; i < NUM_MACS; i++) {
7848 mac_en[i] = REG_RD(bp, port ?
7849 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7850 4 * i) :
7851 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7852 4 * i));
7853 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7854 4 * i) :
7855 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7856 }
7857 }
7858
7859
7860 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7861 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7862
7868 rc = bnx2x_func_switch_update(bp, 1);
7869 if (rc) {
7870 BNX2X_ERR("Can't suspend tx-switching!\n");
7871 return rc;
7872 }
7873
7874
7875 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7876
7877
7878 if (bp->mf_mode == SINGLE_FUNCTION) {
7879 bnx2x_set_rx_filter(&bp->link_params, 1);
7880 } else {
7881 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7882 NIG_REG_LLH0_FUNC_EN, vlan_en);
7883 for (i = 0; i < NUM_MACS; i++) {
7884 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7885 4 * i) :
7886 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7887 mac_en[i]);
7888 }
7889 }
7890
7891
7892 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7893 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7894
7895
7896 rc = bnx2x_func_switch_update(bp, 0);
7897 if (rc) {
7898 BNX2X_ERR("Can't resume tx-switching!\n");
7899 return rc;
7900 }
7901
7902 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7903 return 0;
7904}
7905
7906int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7907{
7908 int rc;
7909
7910 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7911
7912 if (CONFIGURE_NIC_MODE(bp)) {
7913
7914 bnx2x_init_searcher(bp);
7915
7916
7917 rc = bnx2x_reset_nic_mode(bp);
7918 if (rc)
7919 BNX2X_ERR("Can't change NIC mode!\n");
7920 return rc;
7921 }
7922
7923 return 0;
7924}
7925
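/* Clear any stale PGLUE "was error" indication left for this PF by a
 * previous driver instance (e.g. across a kdump boot), so that hw-to-host
 * PCIe transactions are not blocked.
 */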
7933static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7934{
7935 if (!CHIP_IS_E1x(bp))
7936 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7937 1 << BP_ABS_FUNC(bp));
7938}
7939
7940static int bnx2x_init_hw_func(struct bnx2x *bp)
7941{
7942 int port = BP_PORT(bp);
7943 int func = BP_FUNC(bp);
7944 int init_phase = PHASE_PF0 + func;
7945 struct bnx2x_ilt *ilt = BP_ILT(bp);
7946 u16 cdu_ilt_start;
7947 u32 addr, val;
7948 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7949 int i, main_mem_width, rc;
7950
7951 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7952
	/* FLR cleanup */
7954 if (!CHIP_IS_E1x(bp)) {
7955 rc = bnx2x_pf_flr_clnup(bp);
7956 if (rc) {
7957 bnx2x_fw_dump(bp);
7958 return rc;
7959 }
7960 }
7961
7962
7963 if (bp->common.int_block == INT_BLOCK_HC) {
7964 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7965 val = REG_RD(bp, addr);
7966 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7967 REG_WR(bp, addr, val);
7968 }
7969
7970 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7971 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7972
7973 ilt = BP_ILT(bp);
7974 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7975
7976 if (IS_SRIOV(bp))
7977 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7978 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7979
7983 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7984 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7985 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7986 ilt->lines[cdu_ilt_start + i].page_mapping =
7987 bp->context[i].cxt_mapping;
7988 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7989 }
7990
7991 bnx2x_ilt_init_op(bp, INITOP_SET);
7992
7993 if (!CONFIGURE_NIC_MODE(bp)) {
7994 bnx2x_init_searcher(bp);
7995 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7996 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7997 } else {
7998
7999 REG_WR(bp, PRS_REG_NIC_MODE, 1);
8000 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
8001 }
8002
8003 if (!CHIP_IS_E1x(bp)) {
8004 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
8005
8009 if (!(bp->flags & USING_MSIX_FLAG))
8010 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
8011
8017 msleep(20);
8018
8023 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8024
8025 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8026 }
8027
8028 bp->dmae_ready = 1;
8029
8030 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8031
8032 bnx2x_clean_pglue_errors(bp);
8033
8034 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8035 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8036 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8037 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8038 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8039 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8040 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8041 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8042 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8043 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8044 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8045 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8046 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8047
8048 if (!CHIP_IS_E1x(bp))
8049 REG_WR(bp, QM_REG_PF_EN, 1);
8050
8051 if (!CHIP_IS_E1x(bp)) {
8052 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8053 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8054 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8055 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8056 }
8057 bnx2x_init_block(bp, BLOCK_QM, init_phase);
8058
8059 bnx2x_init_block(bp, BLOCK_TM, init_phase);
8060 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8061 REG_WR(bp, DORQ_REG_MODE_ACT, 1);
8062
8063 bnx2x_iov_init_dq(bp);
8064
8065 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8066 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8067 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8068 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8069 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8070 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8071 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8072 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8073 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8074 if (!CHIP_IS_E1x(bp))
8075 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8076
8077 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8078
8079 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8080
8081 if (!CHIP_IS_E1x(bp))
8082 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8083
8084 if (IS_MF(bp)) {
8085 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8086 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8087 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8088 bp->mf_ov);
8089 }
8090 }
8091
8092 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8093
8094
8095 if (bp->common.int_block == INT_BLOCK_HC) {
8096 if (CHIP_IS_E1H(bp)) {
8097 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8098
8099 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8100 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8101 }
8102 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8103
8104 } else {
8105 int num_segs, sb_idx, prod_offset;
8106
8107 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8108
8109 if (!CHIP_IS_E1x(bp)) {
8110 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8111 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8112 }
8113
8114 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8115
8116 if (!CHIP_IS_E1x(bp)) {
8117 int dsb_idx = 0;
8118
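			/* Clean the IGU producer/consumer memory and issue a
			 * cleanup command for every non-default status block
			 * of this PF, then do the same for the default SB.
			 */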
8139 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8140 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8141 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8142 prod_offset = (bp->igu_base_sb + sb_idx) *
8143 num_segs;
8144
8145 for (i = 0; i < num_segs; i++) {
8146 addr = IGU_REG_PROD_CONS_MEMORY +
8147 (prod_offset + i) * 4;
8148 REG_WR(bp, addr, 0);
8149 }
8150
8151 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8152 USTORM_ID, 0, IGU_INT_NOP, 1);
8153 bnx2x_igu_clear_sb(bp,
8154 bp->igu_base_sb + sb_idx);
8155 }
8156
8157
8158 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8159 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8160
8161 if (CHIP_MODE_IS_4_PORT(bp))
8162 dsb_idx = BP_FUNC(bp);
8163 else
8164 dsb_idx = BP_VN(bp);
8165
8166 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8167 IGU_BC_BASE_DSB_PROD + dsb_idx :
8168 IGU_NORM_BASE_DSB_PROD + dsb_idx);
8169
8174 for (i = 0; i < (num_segs * E1HVN_MAX);
8175 i += E1HVN_MAX) {
8176 addr = IGU_REG_PROD_CONS_MEMORY +
8177 (prod_offset + i)*4;
8178 REG_WR(bp, addr, 0);
8179 }
8180
8181 if (CHIP_INT_MODE_IS_BC(bp)) {
8182 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8183 USTORM_ID, 0, IGU_INT_NOP, 1);
8184 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8185 CSTORM_ID, 0, IGU_INT_NOP, 1);
8186 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8187 XSTORM_ID, 0, IGU_INT_NOP, 1);
8188 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8189 TSTORM_ID, 0, IGU_INT_NOP, 1);
8190 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8191 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8192 } else {
8193 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8194 USTORM_ID, 0, IGU_INT_NOP, 1);
8195 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8196 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8197 }
8198 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8199
8202 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8203 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8204 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8205 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8206 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8207 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8208 }
8209 }
8210
	/* Reset PCIe block errors (debug registers) */
8212 REG_WR(bp, 0x2114, 0xffffffff);
8213 REG_WR(bp, 0x2120, 0xffffffff);
8214
8215 if (CHIP_IS_E1x(bp)) {
8216 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
8217 main_mem_base = HC_REG_MAIN_MEMORY +
8218 BP_PORT(bp) * (main_mem_size * 4);
8219 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8220 main_mem_width = 8;
8221
8222 val = REG_RD(bp, main_mem_prty_clr);
8223 if (val)
8224 DP(NETIF_MSG_HW,
8225 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8226 val);
8227
8228
8229 for (i = main_mem_base;
8230 i < main_mem_base + main_mem_size * 4;
8231 i += main_mem_width) {
8232 bnx2x_read_dmae(bp, i, main_mem_width / 4);
8233 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8234 i, main_mem_width / 4);
8235 }
8236
8237 REG_RD(bp, main_mem_prty_clr);
8238 }
8239
8240#ifdef BNX2X_STOP_ON_ERROR
8241
8242 REG_WR8(bp, BAR_USTRORM_INTMEM +
8243 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8244 REG_WR8(bp, BAR_TSTRORM_INTMEM +
8245 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8246 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8247 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8248 REG_WR8(bp, BAR_XSTRORM_INTMEM +
8249 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8250#endif
8251
8252 bnx2x_phy_probe(&bp->link_params);
8253
8254 return 0;
8255}
8256
8257void bnx2x_free_mem_cnic(struct bnx2x *bp)
8258{
8259 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8260
8261 if (!CHIP_IS_E1x(bp))
8262 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8263 sizeof(struct host_hc_status_block_e2));
8264 else
8265 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8266 sizeof(struct host_hc_status_block_e1x));
8267
8268 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8269}
8270
8271void bnx2x_free_mem(struct bnx2x *bp)
8272{
8273 int i;
8274
8275 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8276 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8277
8278 if (IS_VF(bp))
8279 return;
8280
8281 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8282 sizeof(struct host_sp_status_block));
8283
8284 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8285 sizeof(struct bnx2x_slowpath));
8286
8287 for (i = 0; i < L2_ILT_LINES(bp); i++)
8288 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8289 bp->context[i].size);
8290 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8291
8292 BNX2X_FREE(bp->ilt->lines);
8293
8294 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8295
8296 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8297 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8298
8299 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8300
8301 bnx2x_iov_free_mem(bp);
8302}
8303
8304int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8305{
8306 if (!CHIP_IS_E1x(bp)) {
8307
8308 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8309 sizeof(struct host_hc_status_block_e2));
8310 if (!bp->cnic_sb.e2_sb)
8311 goto alloc_mem_err;
8312 } else {
8313 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8314 sizeof(struct host_hc_status_block_e1x));
8315 if (!bp->cnic_sb.e1x_sb)
8316 goto alloc_mem_err;
8317 }
8318
8319 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8320
8321 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8322 if (!bp->t2)
8323 goto alloc_mem_err;
8324 }
8325
8326
8327 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8328 &bp->slowpath->drv_info_to_mcp;
8329
8330 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8331 goto alloc_mem_err;
8332
8333 return 0;
8334
8335alloc_mem_err:
8336 bnx2x_free_mem_cnic(bp);
8337 BNX2X_ERR("Can't allocate memory\n");
8338 return -ENOMEM;
8339}
8340
8341int bnx2x_alloc_mem(struct bnx2x *bp)
8342{
8343 int i, allocated, context_size;
8344
8345 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8346
8347 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8348 if (!bp->t2)
8349 goto alloc_mem_err;
8350 }
8351
8352 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8353 sizeof(struct host_sp_status_block));
8354 if (!bp->def_status_blk)
8355 goto alloc_mem_err;
8356
8357 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8358 sizeof(struct bnx2x_slowpath));
8359 if (!bp->slowpath)
8360 goto alloc_mem_err;
8361
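	/* Allocate memory for the CDU contexts of all L2 connections, split
	 * into chunks of at most one CDU ILT page; the last chunk may be
	 * smaller.
	 */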
8375 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8376
8377 for (i = 0, allocated = 0; allocated < context_size; i++) {
8378 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8379 (context_size - allocated));
8380 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8381 bp->context[i].size);
8382 if (!bp->context[i].vcxt)
8383 goto alloc_mem_err;
8384 allocated += bp->context[i].size;
8385 }
8386 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8387 GFP_KERNEL);
8388 if (!bp->ilt->lines)
8389 goto alloc_mem_err;
8390
8391 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8392 goto alloc_mem_err;
8393
8394 if (bnx2x_iov_alloc_mem(bp))
8395 goto alloc_mem_err;
8396
8397
8398 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8399 if (!bp->spq)
8400 goto alloc_mem_err;
8401
8402
8403 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8404 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8405 if (!bp->eq_ring)
8406 goto alloc_mem_err;
8407
8408 return 0;
8409
8410alloc_mem_err:
8411 bnx2x_free_mem(bp);
8412 BNX2X_ERR("Can't allocate memory\n");
8413 return -ENOMEM;
8414}
8415
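/* Add or remove a single MAC in the given classification object via a
 * vlan_mac ramrod.
 */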
8420int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8421 struct bnx2x_vlan_mac_obj *obj, bool set,
8422 int mac_type, unsigned long *ramrod_flags)
8423{
8424 int rc;
8425 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8426
8427 memset(&ramrod_param, 0, sizeof(ramrod_param));
8428
8429
8430 ramrod_param.vlan_mac_obj = obj;
8431 ramrod_param.ramrod_flags = *ramrod_flags;
8432
8433
8434 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8435 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8436
8437 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8438
8439
8440 if (set)
8441 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8442 else
8443 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8444 }
8445
8446 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8447
8448 if (rc == -EEXIST) {
8449 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8450
8451 rc = 0;
8452 } else if (rc < 0)
8453 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8454
8455 return rc;
8456}
8457
8458int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8459 struct bnx2x_vlan_mac_obj *obj, bool set,
8460 unsigned long *ramrod_flags)
8461{
8462 int rc;
8463 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8464
8465 memset(&ramrod_param, 0, sizeof(ramrod_param));
8466
8467
8468 ramrod_param.vlan_mac_obj = obj;
8469 ramrod_param.ramrod_flags = *ramrod_flags;
8470
8471
8472 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8473 ramrod_param.user_req.u.vlan.vlan = vlan;
8474 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
8475
8476 if (set)
8477 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8478 else
8479 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8480 }
8481
8482 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8483
8484 if (rc == -EEXIST) {
8485
8486 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8487 rc = 0;
8488 } else if (rc < 0) {
8489 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8490 }
8491
8492 return rc;
8493}
8494
8495void bnx2x_clear_vlan_info(struct bnx2x *bp)
8496{
8497 struct bnx2x_vlan_entry *vlan;
8498
8499
8500 list_for_each_entry(vlan, &bp->vlan_reg, link)
8501 vlan->hw = false;
8502
8503 bp->vlan_cnt = 0;
8504}
8505
8506static int bnx2x_del_all_vlans(struct bnx2x *bp)
8507{
8508 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8509 unsigned long ramrod_flags = 0, vlan_flags = 0;
8510 int rc;
8511
8512 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8513 __set_bit(BNX2X_VLAN, &vlan_flags);
8514 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
8515 if (rc)
8516 return rc;
8517
8518 bnx2x_clear_vlan_info(bp);
8519
8520 return 0;
8521}
8522
8523int bnx2x_del_all_macs(struct bnx2x *bp,
8524 struct bnx2x_vlan_mac_obj *mac_obj,
8525 int mac_type, bool wait_for_comp)
8526{
8527 int rc;
8528 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8529
8530
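	/* Optionally wait for the ramrods to complete */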
8531 if (wait_for_comp)
8532 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8533
8534
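	/* Select the type of MAC addresses to delete */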
8535 __set_bit(mac_type, &vlan_mac_flags);
8536
8537 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8538 if (rc < 0)
8539 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8540
8541 return rc;
8542}
8543
8544int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8545{
8546 if (IS_PF(bp)) {
8547 unsigned long ramrod_flags = 0;
8548
8549 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8550 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8551 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8552 &bp->sp_objs->mac_obj, set,
8553 BNX2X_ETH_MAC, &ramrod_flags);
8554 } else {
8555 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8556 bp->fp->index, set);
8557 }
8558}
8559
8560int bnx2x_setup_leading(struct bnx2x *bp)
8561{
8562 if (IS_PF(bp))
8563 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8564 else
8565 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8566}

/* bnx2x_set_int_mode - configure the interrupt mode according to the
 * int_mode module parameter, falling back from MSI-X to MSI to INTx.
 */
8575int bnx2x_set_int_mode(struct bnx2x *bp)
8576{
8577 int rc = 0;
8578
8579 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
		BNX2X_ERR("VF not loaded since interrupt mode is not MSI-X\n");
8581 return -EINVAL;
8582 }
8583
8584 switch (int_mode) {
8585 case BNX2X_INT_MODE_MSIX:
8586
8587 rc = bnx2x_enable_msix(bp);
8588
8589
8590 if (!rc)
8591 return 0;
8592
8593
8594 if (rc && IS_VF(bp))
8595 return rc;
8596
8597
8598 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8599 bp->num_queues,
8600 1 + bp->num_cnic_queues);
8601
8602 fallthrough;
8603 case BNX2X_INT_MODE_MSI:
8604 bnx2x_enable_msi(bp);
8605
8606 fallthrough;
8607 case BNX2X_INT_MODE_INTX:
8608 bp->num_ethernet_queues = 1;
8609 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8610 BNX2X_DEV_INFO("set number of queues to 1\n");
8611 break;
8612 default:
8613 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8614 return -EINVAL;
8615 }
8616 return 0;
8617}
8618
8619
8620static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8621{
8622 if (IS_SRIOV(bp))
8623 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8624 return L2_ILT_LINES(bp);
8625}
8626
8627void bnx2x_ilt_set_info(struct bnx2x *bp)
8628{
8629 struct ilt_client_info *ilt_client;
8630 struct bnx2x_ilt *ilt = BP_ILT(bp);
8631 u16 line = 0;
8632
8633 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8634 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8635
8636
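	/* CDU client: connection contexts */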
8637 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8638 ilt_client->client_num = ILT_CLIENT_CDU;
8639 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8640 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8641 ilt_client->start = line;
8642 line += bnx2x_cid_ilt_lines(bp);
8643
8644 if (CNIC_SUPPORT(bp))
8645 line += CNIC_ILT_LINES;
8646 ilt_client->end = line - 1;
8647
8648 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8649 ilt_client->start,
8650 ilt_client->end,
8651 ilt_client->page_size,
8652 ilt_client->flags,
8653 ilog2(ilt_client->page_size >> 12));
8654
8655
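	/* QM client: queue-manager pages, only when QM init is required */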
8656 if (QM_INIT(bp->qm_cid_count)) {
8657 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8658 ilt_client->client_num = ILT_CLIENT_QM;
8659 ilt_client->page_size = QM_ILT_PAGE_SZ;
8660 ilt_client->flags = 0;
8661 ilt_client->start = line;
8662
8663
8664 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8665 QM_ILT_PAGE_SZ);
8666
8667 ilt_client->end = line - 1;
8668
8669 DP(NETIF_MSG_IFUP,
8670 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8671 ilt_client->start,
8672 ilt_client->end,
8673 ilt_client->page_size,
8674 ilt_client->flags,
8675 ilog2(ilt_client->page_size >> 12));
8676 }
8677
8678 if (CNIC_SUPPORT(bp)) {
8679
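		/* SRC client (CNIC only) */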
8680 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8681 ilt_client->client_num = ILT_CLIENT_SRC;
8682 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8683 ilt_client->flags = 0;
8684 ilt_client->start = line;
8685 line += SRC_ILT_LINES;
8686 ilt_client->end = line - 1;
8687
8688 DP(NETIF_MSG_IFUP,
8689 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8690 ilt_client->start,
8691 ilt_client->end,
8692 ilt_client->page_size,
8693 ilt_client->flags,
8694 ilog2(ilt_client->page_size >> 12));
8695
8696
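		/* TM client (CNIC only) */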
8697 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8698 ilt_client->client_num = ILT_CLIENT_TM;
8699 ilt_client->page_size = TM_ILT_PAGE_SZ;
8700 ilt_client->flags = 0;
8701 ilt_client->start = line;
8702 line += TM_ILT_LINES;
8703 ilt_client->end = line - 1;
8704
8705 DP(NETIF_MSG_IFUP,
8706 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8707 ilt_client->start,
8708 ilt_client->end,
8709 ilt_client->page_size,
8710 ilt_client->flags,
8711 ilog2(ilt_client->page_size >> 12));
8712 }
8713
8714 BUG_ON(line > ILT_MAX_LINES);
8715}
8716
/* bnx2x_pf_q_prep_init - prepare the INIT-transition parameters for a PF
 * queue: host-coalescing flags and rates, status block indices and the
 * per-CoS context pointers.
 */
8728static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8729 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8730{
8731 u8 cos;
8732 int cxt_index, cxt_offset;
8733
8734
8735 if (!IS_FCOE_FP(fp)) {
8736 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8737 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8738
8739
8740
8741
8742 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8743 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8744
8745
8746 init_params->rx.hc_rate = bp->rx_ticks ?
8747 (1000000 / bp->rx_ticks) : 0;
8748 init_params->tx.hc_rate = bp->tx_ticks ?
8749 (1000000 / bp->tx_ticks) : 0;
8750
8751
8752 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8753 fp->fw_sb_id;
8754
8755
8756
8757
8758
8759 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8760 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8761 }
8762
8763
8764 init_params->max_cos = fp->max_cos;
8765
8766 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8767 fp->index, init_params->max_cos);
8768
8769
8770 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8771 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8772 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8773 ILT_PAGE_CIDS);
8774 init_params->cxts[cos] =
8775 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8776 }
8777}
8778
8779static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8780 struct bnx2x_queue_state_params *q_params,
8781 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8782 int tx_index, bool leading)
8783{
8784 memset(tx_only_params, 0, sizeof(*tx_only_params));
8785
8786
8787 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8788
8789
8790 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8791
8792
8793 tx_only_params->cid_index = tx_index;
8794
8795
8796 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8797
8798
8799 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8800
8801 DP(NETIF_MSG_IFUP,
8802 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8803 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8804 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8805 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8806
8807
8808 return bnx2x_queue_state_change(bp, q_params);
8809}
8810
/* bnx2x_setup_queue - bring an ethernet queue from RESET to READY:
 * INIT, then SETUP for the leading CoS connection, then SETUP_TX_ONLY
 * for any additional CoS connections.
 */
8822int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8823 bool leading)
8824{
8825 struct bnx2x_queue_state_params q_params = {NULL};
8826 struct bnx2x_queue_setup_params *setup_params =
8827 &q_params.params.setup;
8828 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8829 &q_params.params.tx_only;
8830 int rc;
8831 u8 tx_index;
8832
8833 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8834
8835
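	/* Enable the status-block interrupt (skipped for the FCoE queue) */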
8836 if (!IS_FCOE_FP(fp))
8837 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8838 IGU_INT_ENABLE, 0);
8839
8840 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8841
8842 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8843
8844
8845 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8846
8847
8848 q_params.cmd = BNX2X_Q_CMD_INIT;
8849
8850
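	/* Transition the queue to the INIT state */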
8851 rc = bnx2x_queue_state_change(bp, &q_params);
8852 if (rc) {
8853 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8854 return rc;
8855 }
8856
8857 DP(NETIF_MSG_IFUP, "init complete\n");
8858
8859
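	/* Now move the queue to the SETUP state */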
8860 memset(setup_params, 0, sizeof(*setup_params));
8861
8862
8863 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8864
8865
8866 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8867 FIRST_TX_COS_INDEX);
8868
8869 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8870 &setup_params->rxq_params);
8871
8872 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8873 FIRST_TX_COS_INDEX);
8874
8875
8876 q_params.cmd = BNX2X_Q_CMD_SETUP;
8877
8878 if (IS_FCOE_FP(fp))
8879 bp->fcoe_init = true;
8880
8881
8882 rc = bnx2x_queue_state_change(bp, &q_params);
8883 if (rc) {
8884 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8885 return rc;
8886 }
8887
8888
8889 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8890 tx_index < fp->max_cos;
8891 tx_index++) {
8892
8893
8894 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8895 tx_only_params, tx_index, leading);
8896 if (rc) {
8897 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8898 fp->index, tx_index);
8899 return rc;
8900 }
8901 }
8902
8903 return rc;
8904}
8905
8906static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8907{
8908 struct bnx2x_fastpath *fp = &bp->fp[index];
8909 struct bnx2x_fp_txdata *txdata;
8910 struct bnx2x_queue_state_params q_params = {NULL};
8911 int rc, tx_index;
8912
8913 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8914
8915 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8916
8917 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8918
8919
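	/* Close the tx-only connections first */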
8920 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8921 tx_index < fp->max_cos;
	     tx_index++) {
8923
8924
8925 txdata = fp->txdata_ptr[tx_index];
8926
8927 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8928 txdata->txq_index);
8929
8930
8931 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8932 memset(&q_params.params.terminate, 0,
8933 sizeof(q_params.params.terminate));
8934 q_params.params.terminate.cid_index = tx_index;
8935
8936 rc = bnx2x_queue_state_change(bp, &q_params);
8937 if (rc)
8938 return rc;
8939
8940
8941 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8942 memset(&q_params.params.cfc_del, 0,
8943 sizeof(q_params.params.cfc_del));
8944 q_params.params.cfc_del.cid_index = tx_index;
8945 rc = bnx2x_queue_state_change(bp, &q_params);
8946 if (rc)
8947 return rc;
8948 }
8949
8950
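	/* Halt the primary connection */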
8951 q_params.cmd = BNX2X_Q_CMD_HALT;
8952 rc = bnx2x_queue_state_change(bp, &q_params);
8953 if (rc)
8954 return rc;
8955
8956
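	/* Terminate the primary connection */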
8957 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8958 memset(&q_params.params.terminate, 0,
8959 sizeof(q_params.params.terminate));
8960 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8961 rc = bnx2x_queue_state_change(bp, &q_params);
8962 if (rc)
8963 return rc;
8964
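	/* Delete the CFC entry of the primary connection */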
8965 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8966 memset(&q_params.params.cfc_del, 0,
8967 sizeof(q_params.params.cfc_del));
8968 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8969 return bnx2x_queue_state_change(bp, &q_params);
8970}
8971
8972static void bnx2x_reset_func(struct bnx2x *bp)
8973{
8974 int port = BP_PORT(bp);
8975 int func = BP_FUNC(bp);
8976 int i;
8977
8978
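	/* Disable the function in all STORM processors */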
8979 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8980 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8981 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8982 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8983
8984
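	/* Mark the fastpath status blocks as disabled */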
8985 for_each_eth_queue(bp, i) {
8986 struct bnx2x_fastpath *fp = &bp->fp[i];
8987 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8988 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8989 SB_DISABLED);
8990 }
8991
8992 if (CNIC_LOADED(bp))
8993
8994 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8995 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8996 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8997
8998
8999 REG_WR8(bp, BAR_CSTRORM_INTMEM +
9000 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
9001 SB_DISABLED);
9002
9003 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
9004 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
9005 0);
9006
9007
9008 if (bp->common.int_block == INT_BLOCK_HC) {
9009 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
9010 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
9011 } else {
9012 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
9013 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
9014 }
9015
9016 if (CNIC_LOADED(bp)) {
9017
9018 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9019
9020
9021
9022
9023 for (i = 0; i < 200; i++) {
9024 usleep_range(10000, 20000);
9025 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
9026 break;
9027 }
9028 }
9029
9030 bnx2x_clear_func_ilt(bp, func);
9031
9032
9033
9034
9035 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9036 struct ilt_client_info ilt_cli;
9037
9038 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
9039 ilt_cli.start = 0;
9040 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9041 ilt_cli.client_num = ILT_CLIENT_TM;
9042
9043 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9044 }
9045
9046
9047 if (!CHIP_IS_E1x(bp))
9048 bnx2x_pf_disable(bp);
9049
9050 bp->dmae_ready = 0;
9051}
9052
9053static void bnx2x_reset_port(struct bnx2x *bp)
9054{
9055 int port = BP_PORT(bp);
9056 u32 val;
9057
9058
9059 bnx2x__link_reset(bp);
9060
9061 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9062
9063
9064 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9065
9066 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9067 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9068
9069
9070 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9071
9072 msleep(100);
9073
9074 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9075 if (val)
9076 DP(NETIF_MSG_IFDOWN,
9077 "BRB1 is not empty %d blocks are occupied\n", val);
9078
9079
9080}
9081
9082static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9083{
9084 struct bnx2x_func_state_params func_params = {NULL};
9085
9086
9087 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9088
9089 func_params.f_obj = &bp->func_obj;
9090 func_params.cmd = BNX2X_F_CMD_HW_RESET;
9091
9092 func_params.params.hw_init.load_phase = load_code;
9093
9094 return bnx2x_func_state_change(bp, &func_params);
9095}
9096
9097static int bnx2x_func_stop(struct bnx2x *bp)
9098{
9099 struct bnx2x_func_state_params func_params = {NULL};
9100 int rc;
9101
9102
9103 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9104 func_params.f_obj = &bp->func_obj;
9105 func_params.cmd = BNX2X_F_CMD_STOP;

	/* Try to stop the function cleanly; if the FUNC_STOP ramrod fails and
	 * we are not in debug mode, fall back to a driver-only ("dry")
	 * transaction so that a later HW_RESET transition remains possible.
	 */
9113 rc = bnx2x_func_state_change(bp, &func_params);
9114 if (rc) {
9115#ifdef BNX2X_STOP_ON_ERROR
9116 return rc;
9117#else
9118 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9119 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9120 return bnx2x_func_state_change(bp, &func_params);
9121#endif
9122 }
9123
9124 return 0;
9125}
9126
/* bnx2x_send_unload_req - request an unload mode from the MCP.
 * Returns the reset code the MCP replied with (or one computed locally
 * when there is no MCP).
 */
9135u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9136{
9137 u32 reset_code = 0;
9138 int port = BP_PORT(bp);
9139
9140
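	/* Select the reset code to request from the MCP */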
9141 if (unload_mode == UNLOAD_NORMAL)
9142 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9143
9144 else if (bp->flags & NO_WOL_FLAG)
9145 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9146
9147 else if (bp->wol) {
9148 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9149 u8 *mac_addr = bp->dev->dev_addr;
9150 struct pci_dev *pdev = bp->pdev;
9151 u32 val;
9152 u16 pmc;

		/* Each NIG MAC-match entry is 8 bytes; entry 0 is left for
		 * the PMF, so this function programs entry (VN + 1).
		 */
9157 u8 entry = (BP_VN(bp) + 1)*8;
9158
9159 val = (mac_addr[0] << 8) | mac_addr[1];
9160 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9161
9162 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9163 (mac_addr[4] << 8) | mac_addr[5];
9164 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9165
9166
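		/* Enable PME and clear its status in the PM control register */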
9167 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9168 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9169 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9170
9171 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9172
9173 } else
9174 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9175
9176
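	/* Send the request to the MCP, or derive the reset code locally */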
9177 if (!BP_NOMCP(bp))
9178 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9179 else {
9180 int path = BP_PATH(bp);
9181
9182 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
9183 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9184 bnx2x_load_count[path][2]);
9185 bnx2x_load_count[path][0]--;
9186 bnx2x_load_count[path][1 + port]--;
9187 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
9188 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9189 bnx2x_load_count[path][2]);
9190 if (bnx2x_load_count[path][0] == 0)
9191 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9192 else if (bnx2x_load_count[path][1 + port] == 0)
9193 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9194 else
9195 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9196 }
9197
9198 return reset_code;
9199}
9200
/* bnx2x_send_unload_done - report UNLOAD_DONE to the MCP.
 * @keep_link: true iff the MCP should skip the link reset.
 */
9207void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9208{
9209 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9210
9211
9212 if (!BP_NOMCP(bp))
9213 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9214}
9215
9216static int bnx2x_func_wait_started(struct bnx2x *bp)
9217{
9218 int tout = 50;
9219 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9220
9221 if (!bp->port.pmf)
9222 return 0;
9223
	/* Make sure any pending slow-path work has run before sampling the
	 * function state: quiesce the default status-block interrupt, flush
	 * the driver workqueues, then poll for the STARTED state.
	 */
9239 if (msix)
9240 synchronize_irq(bp->msix_table[0].vector);
9241 else
9242 synchronize_irq(bp->pdev->irq);
9243
9244 flush_workqueue(bnx2x_wq);
9245 flush_workqueue(bnx2x_iov_wq);
9246
9247 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9248 BNX2X_F_STATE_STARTED && tout--)
9249 msleep(20);
9250
9251 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9252 BNX2X_F_STATE_STARTED) {
9253#ifdef BNX2X_STOP_ON_ERROR
9254 BNX2X_ERR("Wrong function state\n");
9255 return -EBUSY;
9256#else
9257
9258
9259
9260
9261 struct bnx2x_func_state_params func_params = {NULL};
9262
9263 DP(NETIF_MSG_IFDOWN,
9264 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9265
9266 func_params.f_obj = &bp->func_obj;
9267 __set_bit(RAMROD_DRV_CLR_ONLY,
9268 &func_params.ramrod_flags);
9269
9270
9271 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9272 bnx2x_func_state_change(bp, &func_params);
9273
9274
9275 func_params.cmd = BNX2X_F_CMD_TX_START;
9276 return bnx2x_func_state_change(bp, &func_params);
9277#endif
9278 }
9279
9280 return 0;
9281}
9282
9283static void bnx2x_disable_ptp(struct bnx2x *bp)
9284{
9285 int port = BP_PORT(bp);
9286
9287
9288 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9289 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9290
9291
9292 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9293 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9294 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9295 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9296 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9297 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9298 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9299 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9300
9301
9302 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9303 NIG_REG_P0_PTP_EN, 0x0);
9304}
9305
9306
9307static void bnx2x_stop_ptp(struct bnx2x *bp)
9308{
9309
9310
9311
9312 cancel_work_sync(&bp->ptp_task);
9313
9314 if (bp->ptp_tx_skb) {
9315 dev_kfree_skb_any(bp->ptp_tx_skb);
9316 bp->ptp_tx_skb = NULL;
9317 }
9318
9319
9320 bnx2x_disable_ptp(bp);
9321
9322 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9323}
9324
9325void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9326{
9327 int port = BP_PORT(bp);
9328 int i, rc = 0;
9329 u8 cos;
9330 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9331 u32 reset_code;
9332
9333
9334 for_each_tx_queue(bp, i) {
9335 struct bnx2x_fastpath *fp = &bp->fp[i];
9336
9337 for_each_cos_in_tx_queue(fp, cos)
9338 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9339#ifdef BNX2X_STOP_ON_ERROR
9340 if (rc)
9341 return;
9342#endif
9343 }
9344
9345
9346 usleep_range(1000, 2000);
9347
9348
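	/* Delete all configured ETH MACs */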
9349 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9350 false);
9351 if (rc < 0)
9352 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9353
9354
9355 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9356 true);
9357 if (rc < 0)
9358 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9359 rc);
9360
9361
9362
9363
9364
9365 if (!CHIP_IS_E1x(bp)) {
9366
9367 rc = bnx2x_del_all_vlans(bp);
9368 if (rc < 0)
9369 BNX2X_ERR("Failed to delete all VLANs\n");
9370 }
9371
9372
9373 if (!CHIP_IS_E1(bp))
9374 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9375
9376
9377
9378
9379
9380 netif_addr_lock_bh(bp->dev);
9381
9382 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9383 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9384 else if (bp->slowpath)
9385 bnx2x_set_storm_rx_mode(bp);
9386
9387
9388 rparam.mcast_obj = &bp->mcast_obj;
9389 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9390 if (rc < 0)
9391 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9392
9393 netif_addr_unlock_bh(bp->dev);
9394
9395 bnx2x_iov_chip_cleanup(bp);
9396
9397
9398
9399
9400
9401
9402 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9403
9404
9405
9406
9407
9408 rc = bnx2x_func_wait_started(bp);
9409 if (rc) {
9410 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9411#ifdef BNX2X_STOP_ON_ERROR
9412 return;
9413#endif
9414 }
9415
9416
9417
9418
9419 for_each_eth_queue(bp, i)
9420 if (bnx2x_stop_queue(bp, i))
9421#ifdef BNX2X_STOP_ON_ERROR
9422 return;
9423#else
9424 goto unload_error;
9425#endif
9426
9427 if (CNIC_LOADED(bp)) {
9428 for_each_cnic_queue(bp, i)
9429 if (bnx2x_stop_queue(bp, i))
9430#ifdef BNX2X_STOP_ON_ERROR
9431 return;
9432#else
9433 goto unload_error;
9434#endif
9435 }
9436
9437
9438
9439
9440 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9441 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9442
9443#ifndef BNX2X_STOP_ON_ERROR
9444unload_error:
9445#endif
9446 rc = bnx2x_func_stop(bp);
9447 if (rc) {
9448 BNX2X_ERR("Function stop failed!\n");
9449#ifdef BNX2X_STOP_ON_ERROR
9450 return;
9451#endif
9452 }
9453
9454
9455
9456
9457
9458
9459 if (bp->flags & PTP_SUPPORTED) {
9460 bnx2x_stop_ptp(bp);
9461 if (bp->ptp_clock) {
9462 ptp_clock_unregister(bp->ptp_clock);
9463 bp->ptp_clock = NULL;
9464 }
9465 }
9466
9467
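	/* Disable HW interrupts and NAPI */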
9468 bnx2x_netif_stop(bp, 1);
9469
9470 bnx2x_del_all_napi(bp);
9471 if (CNIC_LOADED(bp))
9472 bnx2x_del_all_napi_cnic(bp);
9473
9474
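	/* Release the IRQs */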
9475 bnx2x_free_irq(bp);
9476
9477
9478
9479
9480
9481
9482 if (!pci_channel_offline(bp->pdev)) {
9483 rc = bnx2x_reset_hw(bp, reset_code);
9484 if (rc)
9485 BNX2X_ERR("HW_RESET failed\n");
9486 }
9487
9488
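	/* Report UNLOAD_DONE to the MCP */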
9489 bnx2x_send_unload_done(bp, keep_link);
9490}
9491
9492void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9493{
9494 u32 val;
9495
9496 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9497
9498 if (CHIP_IS_E1(bp)) {
9499 int port = BP_PORT(bp);
9500 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9501 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9502
9503 val = REG_RD(bp, addr);
9504 val &= ~(0x300);
9505 REG_WR(bp, addr, val);
9506 } else {
9507 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9508 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9509 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9510 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9511 }
9512}
9513
9514
9515static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9516{
9517 u32 val;
9518
9519
9520 if (!CHIP_IS_E1(bp)) {
9521
9522 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9523
9524 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9525 }
9526
9527
9528 if (CHIP_IS_E1x(bp)) {
9529
9530 val = REG_RD(bp, HC_REG_CONFIG_1);
9531 REG_WR(bp, HC_REG_CONFIG_1,
9532 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9533 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9534
9535 val = REG_RD(bp, HC_REG_CONFIG_0);
9536 REG_WR(bp, HC_REG_CONFIG_0,
9537 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9538 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9539 } else {
9540
9541 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9542
9543 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9544 (!close) ?
9545 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9546 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9547 }
9548
9549 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9550 close ? "closing" : "opening");
9551}
9552
9553#define SHARED_MF_CLP_MAGIC 0x80000000
9554
9555static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9556{
9557
9558 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9559 *magic_val = val & SHARED_MF_CLP_MAGIC;
9560 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9561}
9562
9563
/* Restore the CLP "magic" bit saved by bnx2x_clp_reset_prep(). */
9569static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9570{
9571
9572 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9573 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9574 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9575}
9576
/* Prepare for an MCP reset: save/force the CLP magic bit (non-E1 chips)
 * and clear this port's validity map in shared memory.
 */
9585static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9586{
9587 u32 shmem;
9588 u32 validity_offset;
9589
9590 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9591
9592
9593 if (!CHIP_IS_E1(bp))
9594 bnx2x_clp_reset_prep(bp, magic_val);
9595
9596
9597 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9598 validity_offset =
9599 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9600
9601
9602 if (shmem > 0)
9603 REG_WR(bp, shmem + validity_offset, 0);
9604}
9605
9606#define MCP_TIMEOUT 5000
9607#define MCP_ONE_TIMEOUT 100
9608
/* Wait one MCP_ONE_TIMEOUT interval (ten times longer on slow emulation
 * chip revisions).
 */
9614static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9615{
9616
9617
9618 if (CHIP_REV_IS_SLOW(bp))
9619 msleep(MCP_ONE_TIMEOUT*10);
9620 else
9621 msleep(MCP_ONE_TIMEOUT);
9622}
9623
/* Poll shared memory until the MCP validity signature appears or
 * MCP_TIMEOUT expires.
 */
9627static int bnx2x_init_shmem(struct bnx2x *bp)
9628{
9629 int cnt = 0;
9630 u32 val = 0;
9631
9632 do {
9633 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9634
9635
9636
9637
9638 if (bp->common.shmem_base == 0xFFFFFFFF) {
9639 bp->flags |= NO_MCP_FLAG;
9640 return -ENODEV;
9641 }
9642
9643 if (bp->common.shmem_base) {
9644 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9645 if (val & SHR_MEM_VALIDITY_MB)
9646 return 0;
9647 }
9648
9649 bnx2x_mcp_wait_one(bp);
9650
9651 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9652
9653 BNX2X_ERR("BAD MCP validity signature\n");
9654
9655 return -ENODEV;
9656}
9657
9658static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9659{
9660 int rc = bnx2x_init_shmem(bp);
9661
9662
9663 if (!CHIP_IS_E1(bp))
9664 bnx2x_clp_reset_done(bp, magic_val);
9665
9666 return rc;
9667}
9668
9669static void bnx2x_pxp_prep(struct bnx2x *bp)
9670{
9671 if (!CHIP_IS_E1(bp)) {
9672 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9673 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9674 }
9675}
9676
/* Assert the chip reset registers for a "process kill" recovery.
 * @global selects whether common (path-shared) blocks are reset as well;
 * per-port MACs and a few infrastructure blocks are kept out of reset.
 */
9687static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9688{
9689 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9690 u32 global_bits2, stay_reset2;
9691
9692
9693
9694
9695
9696 global_bits2 =
9697 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9698 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9699
9700
9701
9702
9703
9704
9705 not_reset_mask1 =
9706 MISC_REGISTERS_RESET_REG_1_RST_HC |
9707 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9708 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9709
9710 not_reset_mask2 =
9711 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9712 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9713 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9714 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9715 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9716 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9717 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9718 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9719 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9720 MISC_REGISTERS_RESET_REG_2_PGLC |
9721 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9722 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9723 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9724 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9725 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9726 MISC_REGISTERS_RESET_REG_2_UMAC1;
9727
9728
9729
9730
9731
9732 stay_reset2 =
9733 MISC_REGISTERS_RESET_REG_2_XMAC |
9734 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9735
9736
9737 reset_mask1 = 0xffffffff;
9738
9739 if (CHIP_IS_E1(bp))
9740 reset_mask2 = 0xffff;
9741 else if (CHIP_IS_E1H(bp))
9742 reset_mask2 = 0x1ffff;
9743 else if (CHIP_IS_E2(bp))
9744 reset_mask2 = 0xfffff;
9745 else
9746 reset_mask2 = 0x3ffffff;
9747
9748
9749 if (!global)
9750 reset_mask2 &= ~global_bits2;
9751
	/* Program the reset registers in a strict order: clear reg2, clear
	 * reg1, then set reg2 and set reg1, with barriers in between.
	 */
9766 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9767 reset_mask2 & (~not_reset_mask2));
9768
9769 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9770 reset_mask1 & (~not_reset_mask1));
9771
9772 barrier();
9773
9774 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9775 reset_mask2 & (~stay_reset2));
9776
9777 barrier();
9778
9779 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9780}
9781
/* Poll the IGU pending-bits status until it clears; returns -EBUSY on
 * timeout.
 */
9791static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9792{
9793 u32 cnt = 1000;
9794 u32 pend_bits = 0;
9795
9796 do {
9797 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9798
9799 if (pend_bits == 0)
9800 break;
9801
9802 usleep_range(1000, 2000);
9803 } while (cnt-- > 0);
9804
9805 if (cnt <= 0) {
9806 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9807 pend_bits);
9808 return -EBUSY;
9809 }
9810
9811 return 0;
9812}
9813
9814static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9815{
9816 int cnt = 1000;
9817 u32 val = 0;
9818 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9819 u32 tags_63_32 = 0;
9820
9821
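	/* Wait for the "Tetris" buffer to empty and for outstanding read
	 * requests to complete (polled for up to ~1-2 seconds).
	 */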
9822 do {
9823 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9824 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9825 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9826 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9827 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9828 if (CHIP_IS_E3(bp))
9829 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9830
9831 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9832 ((port_is_idle_0 & 0x1) == 0x1) &&
9833 ((port_is_idle_1 & 0x1) == 0x1) &&
9834 (pgl_exp_rom2 == 0xffffffff) &&
9835 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9836 break;
9837 usleep_range(1000, 2000);
9838 } while (cnt-- > 0);
9839
9840 if (cnt <= 0) {
9841 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9842 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9843 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9844 pgl_exp_rom2);
9845 return -EAGAIN;
9846 }
9847
9848 barrier();
9849
9850
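	/* Close gates #2, #3 and #4 */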
9851 bnx2x_set_234_gates(bp, true);
9852
9853
9854 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9855 return -EAGAIN;
9856
9857
9858
9859
9860 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9861 barrier();
9862
9863
9864
9865
9866 usleep_range(1000, 2000);
9867
9868
9869
9870 if (global)
9871 bnx2x_reset_mcp_prep(bp, &val);
9872
9873
9874 bnx2x_pxp_prep(bp);
9875 barrier();
9876
9877
9878 bnx2x_process_kill_chip_reset(bp, global);
9879 barrier();
9880
9881
9882 if (!CHIP_IS_E1x(bp))
9883 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9884
9885
9886
9887 if (global && bnx2x_reset_mcp_comp(bp, val))
9888 return -EAGAIN;
9889
9890
9891
9892
9893 bnx2x_set_234_gates(bp, false);
9894
9895
9896
9897
9898 return 0;
9899}
9900
9901static int bnx2x_leader_reset(struct bnx2x *bp)
9902{
9903 int rc = 0;
9904 bool global = bnx2x_reset_is_global(bp);
9905 u32 load_code;
9906
9907
9908
9909
9910 if (!global && !BP_NOMCP(bp)) {
9911 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9912 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9913 if (!load_code) {
9914 BNX2X_ERR("MCP response failure, aborting\n");
9915 rc = -EAGAIN;
9916 goto exit_leader_reset;
9917 }
9918 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9919 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9920 BNX2X_ERR("MCP unexpected resp, aborting\n");
9921 rc = -EAGAIN;
9922 goto exit_leader_reset2;
9923 }
9924 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9925 if (!load_code) {
9926 BNX2X_ERR("MCP response failure, aborting\n");
9927 rc = -EAGAIN;
9928 goto exit_leader_reset2;
9929 }
9930 }
9931
9932
9933 if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad has happened on engine %d! Aii!\n",
			  BP_PATH(bp));
9936 rc = -EAGAIN;
9937 goto exit_leader_reset2;
9938 }
9939
9940
9941
9942
9943
9944 bnx2x_set_reset_done(bp);
9945 if (global)
9946 bnx2x_clear_reset_global(bp);
9947
9948exit_leader_reset2:
9949
9950 if (!global && !BP_NOMCP(bp)) {
9951 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9952 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9953 }
9954exit_leader_reset:
9955 bp->is_leader = 0;
9956 bnx2x_release_leader_lock(bp);
9957 smp_mb();
9958 return rc;
9959}
9960
9961static void bnx2x_recovery_failed(struct bnx2x *bp)
9962{
9963 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9964
9965
9966 netif_device_detach(bp->dev);
9967
9968
9969
9970
9971
9972 bnx2x_set_reset_in_progress(bp);
9973
9974
9975 bnx2x_set_power_state(bp, PCI_D3hot);
9976
9977 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9978
9979 smp_mb();
9980}
9981
/* Parity recovery state machine: walks bp->recovery_state until the
 * recovery completes, fails, or must be retried later.
 */
9987static void bnx2x_parity_recover(struct bnx2x *bp)
9988{
9989 u32 error_recovered, error_unrecovered;
9990 bool is_parity, global = false;
9991#ifdef CONFIG_BNX2X_SRIOV
9992 int vf_idx;
9993
9994 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
9995 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
9996
9997 if (vf)
9998 vf->state = VF_LOST;
9999 }
10000#endif
10001 DP(NETIF_MSG_HW, "Handling parity\n");
10002 while (1) {
10003 switch (bp->recovery_state) {
10004 case BNX2X_RECOVERY_INIT:
10005 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
10006 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
10007 WARN_ON(!is_parity);
10008
10009
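			/* Try to take the leader lock for this recovery */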
10010 if (bnx2x_trylock_leader_lock(bp)) {
10011 bnx2x_set_reset_in_progress(bp);
10012
10013
10014
10015
10016
10017
10018 if (global)
10019 bnx2x_set_reset_global(bp);
10020
10021 bp->is_leader = 1;
10022 }
10023
10024
10025
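			/* Unload the driver in recovery mode */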
10026 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
10027 return;
10028
10029 bp->recovery_state = BNX2X_RECOVERY_WAIT;
10030
10031
10032
10033
10034
10035 smp_mb();
10036 break;
10037
10038 case BNX2X_RECOVERY_WAIT:
10039 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
10040 if (bp->is_leader) {
10041 int other_engine = BP_PATH(bp) ? 0 : 1;
10042 bool other_load_status =
10043 bnx2x_get_load_status(bp, other_engine);
10044 bool load_status =
10045 bnx2x_get_load_status(bp, BP_PATH(bp));
10046 global = bnx2x_reset_is_global(bp);
10047
10048
10049
10050
10051
10052
10053
10054
10055
10056 if (load_status ||
10057 (global && other_load_status)) {
10058
10059
10060
10061 schedule_delayed_work(&bp->sp_rtnl_task,
10062 HZ/10);
10063 return;
10064 } else {
10065
10066
10067
10068
10069
10070 if (bnx2x_leader_reset(bp)) {
10071 bnx2x_recovery_failed(bp);
10072 return;
10073 }
10074
10075
10076
10077
10078
10079
10080 break;
10081 }
10082 } else {
10083 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10084
10085
10086
10087
10088
10089
10090 if (bnx2x_trylock_leader_lock(bp)) {
10091
10092
10093
10094 bp->is_leader = 1;
10095 break;
10096 }
10097
10098 schedule_delayed_work(&bp->sp_rtnl_task,
10099 HZ/10);
10100 return;
10101
10102 } else {
10103
10104
10105
10106
10107 if (bnx2x_reset_is_global(bp)) {
10108 schedule_delayed_work(
10109 &bp->sp_rtnl_task,
10110 HZ/10);
10111 return;
10112 }
10113
10114 error_recovered =
10115 bp->eth_stats.recoverable_error;
10116 error_unrecovered =
10117 bp->eth_stats.unrecoverable_error;
10118 bp->recovery_state =
10119 BNX2X_RECOVERY_NIC_LOADING;
10120 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10121 error_unrecovered++;
10122 netdev_err(bp->dev,
10123 "Recovery failed. Power cycle needed\n");
10124
10125 netif_device_detach(bp->dev);
10126
10127 bnx2x_set_power_state(
10128 bp, PCI_D3hot);
10129 smp_mb();
10130 } else {
10131 bp->recovery_state =
10132 BNX2X_RECOVERY_DONE;
10133 error_recovered++;
10134 smp_mb();
10135 }
10136 bp->eth_stats.recoverable_error =
10137 error_recovered;
10138 bp->eth_stats.unrecoverable_error =
10139 error_unrecovered;
10140
10141 return;
10142 }
10143 }
10144 default:
10145 return;
10146 }
10147 }
10148}
10149
10150static int bnx2x_udp_port_update(struct bnx2x *bp)
10151{
10152 struct bnx2x_func_switch_update_params *switch_update_params;
10153 struct bnx2x_func_state_params func_params = {NULL};
10154 u16 vxlan_port = 0, geneve_port = 0;
10155 int rc;
10156
10157 switch_update_params = &func_params.params.switch_update;
10158
10159
10160 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10161 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10162
10163 func_params.f_obj = &bp->func_obj;
10164 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10165
10166
10167 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10168 &switch_update_params->changes);
10169
10170 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
10171 geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10172 switch_update_params->geneve_dst_port = geneve_port;
10173 }
10174
10175 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
10176 vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10177 switch_update_params->vxlan_dst_port = vxlan_port;
10178 }
10179
10180
10181 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10182 &switch_update_params->changes);
10183
10184 rc = bnx2x_func_state_change(bp, &func_params);
10185 if (rc)
10186 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10187 vxlan_port, geneve_port, rc);
10188 else
10189 DP(BNX2X_MSG_SP,
10190 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10191 vxlan_port, geneve_port);
10192
10193 return rc;
10194}
10195
10196static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
10197{
10198 struct bnx2x *bp = netdev_priv(netdev);
10199 struct udp_tunnel_info ti;
10200
10201 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
10202 bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port);
10203
10204 return bnx2x_udp_port_update(bp);
10205}
10206
10207static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
10208 .sync_table = bnx2x_udp_tunnel_sync,
10209 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
10210 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
10211 .tables = {
10212 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
10213 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
10214 },
10215};
10216
10217static int bnx2x_close(struct net_device *dev);
10218
/* Slow-path work that must run under the RTNL lock. */
10222static void bnx2x_sp_rtnl_task(struct work_struct *work)
10223{
10224 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10225
10226 rtnl_lock();
10227
10228 if (!netif_running(bp->dev)) {
10229 rtnl_unlock();
10230 return;
10231 }
10232
10233 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10234#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("recovery flow called but STOP_ON_ERROR is defined, so the reset was skipped to allow a debug dump;\n"
			  "you will need to reboot when done\n");
10237 goto sp_rtnl_not_reset;
10238#endif
10239
10240
10241
10242
10243 bp->sp_rtnl_state = 0;
10244 smp_mb();
10245
10246 bnx2x_parity_recover(bp);
10247
10248 rtnl_unlock();
10249 return;
10250 }
10251
10252 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10253#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("recovery flow called but STOP_ON_ERROR is defined, so the reset was skipped to allow a debug dump;\n"
			  "you will need to reboot when done\n");
10256 goto sp_rtnl_not_reset;
10257#endif
10258
10259
10260
10261
10262
10263 bp->sp_rtnl_state = 0;
10264 smp_mb();
10265
10266
10267 bp->link_vars.link_up = 0;
10268 bp->force_link_down = true;
10269 netif_carrier_off(bp->dev);
10270 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10271
10272 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10273
10274
10275
10276
10277 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
10278 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10279 if (bnx2x_nic_load(bp, LOAD_NORMAL))
				BNX2X_ERR("Opening the NIC failed again!\n");
10281 }
10282 rtnl_unlock();
10283 return;
10284 }
10285#ifdef BNX2X_STOP_ON_ERROR
10286sp_rtnl_not_reset:
10287#endif
10288 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10289 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10290 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10291 bnx2x_after_function_update(bp);
10292
10293
10294
10295
10296
10297 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10298 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10299 netif_device_detach(bp->dev);
10300 bnx2x_close(bp->dev);
10301 rtnl_unlock();
10302 return;
10303 }
10304
10305 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10306 DP(BNX2X_MSG_SP,
10307 "sending set mcast vf pf channel message from rtnl sp-task\n");
10308 bnx2x_vfpf_set_mcast(bp->dev);
10309 }
10310 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10311 &bp->sp_rtnl_state)){
10312 if (netif_carrier_ok(bp->dev)) {
10313 bnx2x_tx_disable(bp);
			BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10315 }
10316 }
10317
10318 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10319 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10320 bnx2x_set_rx_mode_inner(bp);
10321 }
10322
10323 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10324 &bp->sp_rtnl_state))
10325 bnx2x_pf_set_vfs_vlan(bp);
10326
10327 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10328 bnx2x_dcbx_stop_hw_tx(bp);
10329 bnx2x_dcbx_resume_hw_tx(bp);
10330 }
10331
10332 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10333 &bp->sp_rtnl_state))
10334 bnx2x_update_mng_version(bp);
10335
10336 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10337 bnx2x_handle_update_svid_cmd(bp);
10338
10339
10340
10341
10342 rtnl_unlock();
10343
10344
10345 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10346 &bp->sp_rtnl_state)) {
10347 bnx2x_disable_sriov(bp);
10348 bnx2x_enable_sriov(bp);
10349 }
10350}
10351
10352static void bnx2x_period_task(struct work_struct *work)
10353{
10354 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10355
10356 if (!netif_running(bp->dev))
10357 goto period_task_exit;
10358
10359 if (CHIP_REV_IS_SLOW(bp)) {
10360 BNX2X_ERR("period task called on emulation, ignoring\n");
10361 goto period_task_exit;
10362 }
10363
10364 bnx2x_acquire_phy_lock(bp);
10365
10366
10367
10368
10369
10370 smp_mb();
10371 if (bp->port.pmf) {
10372 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10373
10374
10375 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10376 }
10377
10378 bnx2x_release_phy_lock(bp);
10379period_task_exit:
10380 return;
10381}
10382
/* Return the PGL pretend-function register address for this (absolute)
 * function.
 */
10387static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10388{
10389 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10390 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10391 return base + (BP_ABS_FUNC(bp)) * stride;
10392}
10393
10394static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10395 u8 port, u32 reset_reg,
10396 struct bnx2x_mac_vals *vals)
10397{
10398 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10399 u32 base_addr;
10400
10401 if (!(mask & reset_reg))
10402 return false;
10403
10404 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10405 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10406 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10407 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10408 REG_WR(bp, vals->umac_addr[port], 0);
10409
10410 return true;
10411}
10412
10413static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10414 struct bnx2x_mac_vals *vals)
10415{
10416 u32 val, base_addr, offset, mask, reset_reg;
10417 bool mac_stopped = false;
10418 u8 port = BP_PORT(bp);
10419
10420
10421 memset(vals, 0, sizeof(*vals));
10422
10423 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10424
10425 if (!CHIP_IS_E3(bp)) {
10426 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10427 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10428 if ((mask & reset_reg) && val) {
10429 u32 wb_data[2];
10430 BNX2X_DEV_INFO("Disable bmac Rx\n");
10431 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10432 : NIG_REG_INGRESS_BMAC0_MEM;
10433 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10434 : BIGMAC_REGISTER_BMAC_CONTROL;
10435
			/* Save the 64-bit BMAC control value, then clear its
			 * RX_ENABLE bit using plain register accesses.
			 */
10442 wb_data[0] = REG_RD(bp, base_addr + offset);
10443 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10444 vals->bmac_addr = base_addr + offset;
10445 vals->bmac_val[0] = wb_data[0];
10446 vals->bmac_val[1] = wb_data[1];
10447 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10448 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10449 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10450 }
10451 BNX2X_DEV_INFO("Disable emac Rx\n");
10452 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10453 vals->emac_val = REG_RD(bp, vals->emac_addr);
10454 REG_WR(bp, vals->emac_addr, 0);
10455 mac_stopped = true;
10456 } else {
10457 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10458 BNX2X_DEV_INFO("Disable xmac Rx\n");
10459 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10460 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10461 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10462 val & ~(1 << 1));
10463 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10464 val | (1 << 1));
10465 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10466 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10467 REG_WR(bp, vals->xmac_addr, 0);
10468 mac_stopped = true;
10469 }
10470
10471 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10472 reset_reg, vals);
10473 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10474 reset_reg, vals);
10475 }
10476
10477 if (mac_stopped)
10478 msleep(20);
10479}
10480
10481#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10482#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10483 0x1848 + ((f) << 4))
10484#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10485#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10486#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
10487
10488#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10489#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10490#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10491
10492static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10493{
	/* UNDI marks its presence via DORQ: the block is out of reset and
	 * the normal-bell CID offset is initialized to 0x7.
	 */
10497 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10498 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10499 return false;
10500
10501 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10502 BNX2X_DEV_INFO("UNDI previously loaded\n");
10503 return true;
10504 }
10505
10506 return false;
10507}
10508
10509static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10510{
10511 u16 rcq, bd;
10512 u32 addr, tmp_reg;
10513
10514 if (BP_FUNC(bp) < 2)
10515 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10516 else
10517 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10518
10519 tmp_reg = REG_RD(bp, addr);
10520 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10521 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10522
10523 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10524 REG_WR(bp, addr, tmp_reg);
10525
10526 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10527 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10528}
10529
10530static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10531{
10532 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10533 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10534 if (!rc) {
10535 BNX2X_ERR("MCP response failure, aborting\n");
10536 return -EBUSY;
10537 }
10538
10539 return 0;
10540}
10541
10542static struct bnx2x_prev_path_list *
10543 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10544{
10545 struct bnx2x_prev_path_list *tmp_list;
10546
10547 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10548 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10549 bp->pdev->bus->number == tmp_list->bus &&
10550 BP_PATH(bp) == tmp_list->path)
10551 return tmp_list;
10552
10553 return NULL;
10554}
10555
10556static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10557{
10558 struct bnx2x_prev_path_list *tmp_list;
10559 int rc;
10560
10561 rc = down_interruptible(&bnx2x_prev_sem);
10562 if (rc) {
10563 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10564 return rc;
10565 }
10566
10567 tmp_list = bnx2x_prev_path_get_entry(bp);
10568 if (tmp_list) {
10569 tmp_list->aer = 1;
10570 rc = 0;
10571 } else {
		BNX2X_ERR("path %d: Entry does not exist for eeh; flow occurs before initial insmod is over?\n",
			  BP_PATH(bp));
10574 }
10575
10576 up(&bnx2x_prev_sem);
10577
10578 return rc;
10579}
10580
10581static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10582{
10583 struct bnx2x_prev_path_list *tmp_list;
10584 bool rc = false;
10585
10586 if (down_trylock(&bnx2x_prev_sem))
10587 return false;
10588
10589 tmp_list = bnx2x_prev_path_get_entry(bp);
10590 if (tmp_list) {
10591 if (tmp_list->aer) {
10592 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10593 BP_PATH(bp));
10594 } else {
10595 rc = true;
10596 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10597 BP_PATH(bp));
10598 }
10599 }
10600
10601 up(&bnx2x_prev_sem);
10602
10603 return rc;
10604}
10605
10606bool bnx2x_port_after_undi(struct bnx2x *bp)
10607{
10608 struct bnx2x_prev_path_list *entry;
10609 bool val;
10610
10611 down(&bnx2x_prev_sem);
10612
10613 entry = bnx2x_prev_path_get_entry(bp);
10614 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10615
10616 up(&bnx2x_prev_sem);
10617
10618 return val;
10619}
10620
10621static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10622{
10623 struct bnx2x_prev_path_list *tmp_list;
10624 int rc;
10625
10626 rc = down_interruptible(&bnx2x_prev_sem);
10627 if (rc) {
10628 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10629 return rc;
10630 }
10631
10632
10633 tmp_list = bnx2x_prev_path_get_entry(bp);
10634 if (tmp_list) {
10635 if (!tmp_list->aer) {
10636 BNX2X_ERR("Re-Marking the path.\n");
10637 } else {
10638 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10639 BP_PATH(bp));
10640 tmp_list->aer = 0;
10641 }
10642 up(&bnx2x_prev_sem);
10643 return 0;
10644 }
10645 up(&bnx2x_prev_sem);
10646
10647
10648 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10649 if (!tmp_list) {
10650 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10651 return -ENOMEM;
10652 }
10653
10654 tmp_list->bus = bp->pdev->bus->number;
10655 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10656 tmp_list->path = BP_PATH(bp);
10657 tmp_list->aer = 0;
10658 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10659
10660 rc = down_interruptible(&bnx2x_prev_sem);
10661 if (rc) {
10662 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10663 kfree(tmp_list);
10664 } else {
10665 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10666 BP_PATH(bp));
10667 list_add(&tmp_list->list, &bnx2x_prev_list);
10668 up(&bnx2x_prev_sem);
10669 }
10670
10671 return rc;
10672}
10673
10674static int bnx2x_do_flr(struct bnx2x *bp)
10675{
10676 struct pci_dev *dev = bp->pdev;
10677
10678 if (CHIP_IS_E1x(bp)) {
10679 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10680 return -EINVAL;
10681 }
10682
10683
10684 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10685 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10686 bp->common.bc_ver);
10687 return -EINVAL;
10688 }
10689
10690 if (!pci_wait_for_pending_transaction(dev))
10691 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10692
10693 BNX2X_DEV_INFO("Initiating FLR\n");
10694 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10695
10696 return 0;
10697}
10698
10699static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10700{
10701 int rc;
10702
10703 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10704
10705
10706 if (bnx2x_prev_is_path_marked(bp))
10707 return bnx2x_prev_mcp_done(bp);
10708
10709 BNX2X_DEV_INFO("Path is unmarked\n");
10710
10711
10712 if (bnx2x_prev_is_after_undi(bp))
10713 goto out;
10714
10715
10716
10717
10718
10719 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10720
10721 if (!rc) {
10722
10723 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10724 rc = bnx2x_do_flr(bp);
10725 }
10726
10727 if (!rc) {
10728
10729 BNX2X_DEV_INFO("FLR successful\n");
10730 return 0;
10731 }
10732
10733 BNX2X_DEV_INFO("Could not FLR\n");
10734
10735out:
10736
10737 rc = bnx2x_prev_mcp_done(bp);
10738 if (!rc)
10739 rc = BNX2X_PREV_WAIT_NEEDED;
10740
10741 return rc;
10742}
10743
10744static int bnx2x_prev_unload_common(struct bnx2x *bp)
10745{
10746 u32 reset_reg, tmp_reg = 0, rc;
10747 bool prev_undi = false;
10748 struct bnx2x_mac_vals mac_vals;
10749
10750
10751
10752
10753
10754 BNX2X_DEV_INFO("Common unload Flow\n");
10755
10756 memset(&mac_vals, 0, sizeof(mac_vals));
10757
10758 if (bnx2x_prev_is_path_marked(bp))
10759 return bnx2x_prev_mcp_done(bp);
10760
10761 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10762
10763
10764 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10765 u32 timer_count = 1000;
10766
10767
10768 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10769
10770
10771 bnx2x_set_rx_filter(&bp->link_params, 0);
10772 bp->link_params.port ^= 1;
10773 bnx2x_set_rx_filter(&bp->link_params, 0);
10774 bp->link_params.port ^= 1;
10775
10776
10777 if (bnx2x_prev_is_after_undi(bp)) {
10778 prev_undi = true;
10779
10780 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10781
10782 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10783 }
10784 if (!CHIP_IS_E1x(bp))
10785
10786 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10787
10788
10789 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10790 while (timer_count) {
10791 u32 prev_brb = tmp_reg;
10792
10793 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10794 if (!tmp_reg)
10795 break;
10796
10797 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10798
10799
10800 if (prev_brb > tmp_reg)
10801 timer_count = 1000;
10802 else
10803 timer_count--;
10804
10805
10806 if (prev_undi)
10807 bnx2x_prev_unload_undi_inc(bp, 1);
10808
10809 udelay(10);
10810 }
10811
10812 if (!timer_count)
10813 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10814 }
10815
10816
10817 bnx2x_reset_common(bp);
10818
10819 if (mac_vals.xmac_addr)
10820 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10821 if (mac_vals.umac_addr[0])
10822 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10823 if (mac_vals.umac_addr[1])
10824 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10825 if (mac_vals.emac_addr)
10826 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10827 if (mac_vals.bmac_addr) {
10828 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10829 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10830 }
10831
10832 rc = bnx2x_prev_mark_path(bp, prev_undi);
10833 if (rc) {
10834 bnx2x_prev_mcp_done(bp);
10835 return rc;
10836 }
10837
10838 return bnx2x_prev_mcp_done(bp);
10839}
10840
10841static int bnx2x_prev_unload(struct bnx2x *bp)
10842{
10843 int time_counter = 10;
10844 u32 rc, fw, hw_lock_reg, hw_lock_val;
10845 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");

	/* Clear PGLUE errors left over from a previous driver instance */
10850 bnx2x_clean_pglue_errors(bp);
10851
10852
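	/* Release any HW resource locks held by a previous driver instance */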
10853 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10854 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10855 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10856
10857 hw_lock_val = REG_RD(bp, hw_lock_reg);
10858 if (hw_lock_val) {
10859 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10860 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10861 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10862 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10863 }
10864
10865 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10866 REG_WR(bp, hw_lock_reg, 0xffffffff);
10867 } else
10868 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10869
10870 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10871 BNX2X_DEV_INFO("Release previously held alr\n");
10872 bnx2x_release_alr(bp);
10873 }
10874
10875 do {
10876 int aer = 0;
10877
10878 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10879 if (!fw) {
10880 BNX2X_ERR("MCP response failure, aborting\n");
10881 rc = -EBUSY;
10882 break;
10883 }
10884
10885 rc = down_interruptible(&bnx2x_prev_sem);
10886 if (rc) {
10887 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10888 rc);
10889 } else {
10890 /* If Path is marked by EEH/AER, ignore the MCP unload status */
10891 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10892 bnx2x_prev_path_get_entry(bp)->aer);
10893 up(&bnx2x_prev_sem);
10894 }
10895
10896 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10897 rc = bnx2x_prev_unload_common(bp);
10898 break;
10899 }
10900
10901
10902 rc = bnx2x_prev_unload_uncommon(bp);
10903 if (rc != BNX2X_PREV_WAIT_NEEDED)
10904 break;
10905
10906 msleep(20);
10907 } while (--time_counter);
10908
10909 if (!time_counter || rc) {
10910 BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
10911 rc = -EPROBE_DEFER;
10912 }
10913
10914 /* Mark the function if its port was used to boot from SAN */
10915 if (bnx2x_port_after_undi(bp))
10916 bp->link_params.feature_config_flags |=
10917 FEATURE_CONFIG_BOOT_FROM_SAN;
10918
10919 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10920
10921 return rc;
10922}
10923
10924static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10925{
10926 u32 val, val2, val3, val4, id, boot_mode;
10927 u16 pmc;
10928
10929 /* Get the chip revision id and number. */
10930 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
10931 val = REG_RD(bp, MISC_REG_CHIP_NUM);
10932 id = ((val & 0xffff) << 16);
10933 val = REG_RD(bp, MISC_REG_CHIP_REV);
10934 id |= ((val & 0xf) << 12);
10935
10936
10937 /* Metal is read from PCI regs, but we can't access >=0x400 from
10938 * the configuration space (so we need to reg_rd) */
10939 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10940 id |= (((val >> 24) & 0xf) << 4);
10941 val = REG_RD(bp, MISC_REG_BOND_ID);
10942 id |= (val & 0xf);
10943 bp->common.chip_id = id;
10944
10945
10946 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10947 if (CHIP_IS_57810(bp))
10948 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10949 (bp->common.chip_id & 0x0000FFFF);
10950 else if (CHIP_IS_57810_MF(bp))
10951 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10952 (bp->common.chip_id & 0x0000FFFF);
10953 bp->common.chip_id |= 0x1;
10954 }
10955
10956
10957 bp->db_size = (1 << BNX2X_DB_SHIFT);
10958
10959 if (!CHIP_IS_E1x(bp)) {
10960 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10961 if ((val & 1) == 0)
10962 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10963 else
10964 val = (val >> 1) & 1;
10965 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10966 "2_PORT_MODE");
10967 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10968 CHIP_2_PORT_MODE;
10969
10970 if (CHIP_MODE_IS_4_PORT(bp))
10971 bp->pfid = (bp->pf_num >> 1);
10972 else
10973 bp->pfid = (bp->pf_num & 0x6);
10974 } else {
10975 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
10976 bp->pfid = bp->pf_num;
10977 }
10978
10979 BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
10980
10981 bp->link_params.chip_id = bp->common.chip_id;
10982 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10983
10984 val = (REG_RD(bp, 0x2874) & 0x55);
10985 if ((bp->common.chip_id & 0x1) ||
10986 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10987 bp->flags |= ONE_PORT_FLAG;
10988 BNX2X_DEV_INFO("single port device\n");
10989 }
10990
10991 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
10992 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
10993 (val & MCPR_NVM_CFG4_FLASH_SIZE));
10994 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10995 bp->common.flash_size, bp->common.flash_size);
10996
10997 bnx2x_init_shmem(bp);
10998
10999 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11000 MISC_REG_GENERIC_CR_1 :
11001 MISC_REG_GENERIC_CR_0));
11002
11003 bp->link_params.shmem_base = bp->common.shmem_base;
11004 bp->link_params.shmem2_base = bp->common.shmem2_base;
11005 if (SHMEM2_RD(bp, size) >
11006 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11007 bp->link_params.lfa_base =
11008 REG_RD(bp, bp->common.shmem2_base +
11009 (u32)offsetof(struct shmem2_region,
11010 lfa_host_addr[BP_PORT(bp)]));
11011 else
11012 bp->link_params.lfa_base = 0;
11013 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
11014 bp->common.shmem_base, bp->common.shmem2_base);
11015
11016 if (!bp->common.shmem_base) {
11017 BNX2X_DEV_INFO("MCP not active\n");
11018 bp->flags |= NO_MCP_FLAG;
11019 return;
11020 }
11021
11022 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11023 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11024
11025 bp->link_params.hw_led_mode = ((bp->common.hw_config &
11026 SHARED_HW_CFG_LED_MODE_MASK) >>
11027 SHARED_HW_CFG_LED_MODE_SHIFT);
11028
11029 bp->link_params.feature_config_flags = 0;
11030 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11031 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
11032 bp->link_params.feature_config_flags |=
11033 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11034 else
11035 bp->link_params.feature_config_flags &=
11036 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11037
11038 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11039 bp->common.bc_ver = val;
11040 BNX2X_DEV_INFO("bc_ver %X\n", val);
11041 if (val < BNX2X_BC_VER) {
11042
11043
11044 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11045 BNX2X_BC_VER, val);
11046 }
11047 bp->link_params.feature_config_flags |=
11048 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11049 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11050
11051 bp->link_params.feature_config_flags |=
11052 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11053 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11054 bp->link_params.feature_config_flags |=
11055 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11056 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11057 bp->link_params.feature_config_flags |=
11058 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11059 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11060
11061 bp->link_params.feature_config_flags |=
11062 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11063 FEATURE_CONFIG_MT_SUPPORT : 0;
11064
11065 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11066 BC_SUPPORTS_PFC_STATS : 0;
11067
11068 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11069 BC_SUPPORTS_FCOE_FEATURES : 0;
11070
11071 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11072 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11073
11074 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11075 BC_SUPPORTS_RMMOD_CMD : 0;
11076
11077 boot_mode = SHMEM_RD(bp,
11078 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11079 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11080 switch (boot_mode) {
11081 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11082 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11083 break;
11084 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11085 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11086 break;
11087 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11088 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11089 break;
11090 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11091 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11092 break;
11093 }
11094
11095 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11096 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11097
11098 BNX2X_DEV_INFO("%sWoL capable\n",
11099 (bp->flags & NO_WOL_FLAG) ? "not " : "");
11100
11101 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11102 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11103 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11104 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11105
11106 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11107 val, val2, val3, val4);
11108}
11109
11110#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11111#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11112
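/* Discover this function's status blocks. In backward-compatible interrupt
 * mode the layout is fixed per function/VN; otherwise the IGU CAM is scanned
 * for valid entries owned by this PF to find the default SB and the base and
 * count of the non-default (fast-path) SBs.
 */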
11113static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11114{
11115 int pfid = BP_FUNC(bp);
11116 int igu_sb_id;
11117 u32 val;
11118 u8 fid, igu_sb_cnt = 0;
11119
11120 bp->igu_base_sb = 0xff;
11121 if (CHIP_INT_MODE_IS_BC(bp)) {
11122 int vn = BP_VN(bp);
11123 igu_sb_cnt = bp->igu_sb_cnt;
11124 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11125 FP_SB_MAX_E1x;
11126
11127 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
11128 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11129
11130 return 0;
11131 }
11132
11133 /* IGU in normal mode - read CAM for this PF's status blocks */
11134 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11135 igu_sb_id++) {
11136 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11137 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11138 continue;
11139 fid = IGU_FID(val);
11140 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11141 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11142 continue;
11143 if (IGU_VEC(val) == 0)
11144 /* default status block */
11145 bp->igu_dsb_id = igu_sb_id;
11146 else {
11147 if (bp->igu_base_sb == 0xff)
11148 bp->igu_base_sb = igu_sb_id;
11149 igu_sb_cnt++;
11150 }
11151 }
11152 }
11153
11154#ifdef CONFIG_PCI_MSI
11155 /* Due to new PF resource allocation by MFW T7.4 and above, it's
11156 * optional that number of CAM entries will not be equal to the value
11157 * advertised in PCI.
11158 * Driver should use the minimal value of both as the actual status
11159 * block count
11160 */
11161 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11162#endif
11163
11164 if (igu_sb_cnt == 0) {
11165 BNX2X_ERR("CAM configuration error\n");
11166 return -EINVAL;
11167 }
11168
11169 return 0;
11170}
11171
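/* Build the per-configuration masks of link modes supported by the PHY(s) on
 * this port, then trim them according to the NVRAM speed capability masks.
 */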
11172static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11173{
11174 int cfg_size = 0, idx, port = BP_PORT(bp);
11175
11176
11177 bp->port.supported[0] = 0;
11178 bp->port.supported[1] = 0;
11179 switch (bp->link_params.num_phys) {
11180 case 1:
11181 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11182 cfg_size = 1;
11183 break;
11184 case 2:
11185 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11186 cfg_size = 1;
11187 break;
11188 case 3:
11189 if (bp->link_params.multi_phy_config &
11190 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11191 bp->port.supported[1] =
11192 bp->link_params.phy[EXT_PHY1].supported;
11193 bp->port.supported[0] =
11194 bp->link_params.phy[EXT_PHY2].supported;
11195 } else {
11196 bp->port.supported[0] =
11197 bp->link_params.phy[EXT_PHY1].supported;
11198 bp->port.supported[1] =
11199 bp->link_params.phy[EXT_PHY2].supported;
11200 }
11201 cfg_size = 2;
11202 break;
11203 }
11204
11205 if (!(bp->port.supported[0] || bp->port.supported[1])) {
11206 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11207 SHMEM_RD(bp,
11208 dev_info.port_hw_config[port].external_phy_config),
11209 SHMEM_RD(bp,
11210 dev_info.port_hw_config[port].external_phy_config2));
11211 return;
11212 }
11213
11214 if (CHIP_IS_E3(bp))
11215 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11216 else {
11217 switch (switch_cfg) {
11218 case SWITCH_CFG_1G:
11219 bp->port.phy_addr = REG_RD(
11220 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11221 break;
11222 case SWITCH_CFG_10G:
11223 bp->port.phy_addr = REG_RD(
11224 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11225 break;
11226 default:
11227 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11228 bp->port.link_config[0]);
11229 return;
11230 }
11231 }
11232 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11233
11234 for (idx = 0; idx < cfg_size; idx++) {
11235 if (!(bp->link_params.speed_cap_mask[idx] &
11236 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11237 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11238
11239 if (!(bp->link_params.speed_cap_mask[idx] &
11240 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11241 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11242
11243 if (!(bp->link_params.speed_cap_mask[idx] &
11244 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11245 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11246
11247 if (!(bp->link_params.speed_cap_mask[idx] &
11248 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11249 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11250
11251 if (!(bp->link_params.speed_cap_mask[idx] &
11252 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11253 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11254 SUPPORTED_1000baseT_Full);
11255
11256 if (!(bp->link_params.speed_cap_mask[idx] &
11257 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11258 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11259
11260 if (!(bp->link_params.speed_cap_mask[idx] &
11261 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11262 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11263
11264 if (!(bp->link_params.speed_cap_mask[idx] &
11265 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11266 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11267 }
11268
11269 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11270 bp->port.supported[1]);
11271}
11272
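/* Translate the NVRAM link_config words into requested line speed, duplex,
 * flow control and advertised modes, falling back to autoneg on bad values.
 */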
11273static void bnx2x_link_settings_requested(struct bnx2x *bp)
11274{
11275 u32 link_config, idx, cfg_size = 0;
11276 bp->port.advertising[0] = 0;
11277 bp->port.advertising[1] = 0;
11278 switch (bp->link_params.num_phys) {
11279 case 1:
11280 case 2:
11281 cfg_size = 1;
11282 break;
11283 case 3:
11284 cfg_size = 2;
11285 break;
11286 }
11287 for (idx = 0; idx < cfg_size; idx++) {
11288 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11289 link_config = bp->port.link_config[idx];
11290 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11291 case PORT_FEATURE_LINK_SPEED_AUTO:
11292 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11293 bp->link_params.req_line_speed[idx] =
11294 SPEED_AUTO_NEG;
11295 bp->port.advertising[idx] |=
11296 bp->port.supported[idx];
11297 if (bp->link_params.phy[EXT_PHY1].type ==
11298 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11299 bp->port.advertising[idx] |=
11300 (SUPPORTED_100baseT_Half |
11301 SUPPORTED_100baseT_Full);
11302 } else {
11303 /* force 10G, no AN */
11304 bp->link_params.req_line_speed[idx] =
11305 SPEED_10000;
11306 bp->port.advertising[idx] |=
11307 (ADVERTISED_10000baseT_Full |
11308 ADVERTISED_FIBRE);
11309 continue;
11310 }
11311 break;
11312
11313 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11314 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11315 bp->link_params.req_line_speed[idx] =
11316 SPEED_10;
11317 bp->port.advertising[idx] |=
11318 (ADVERTISED_10baseT_Full |
11319 ADVERTISED_TP);
11320 } else {
11321 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11322 link_config,
11323 bp->link_params.speed_cap_mask[idx]);
11324 return;
11325 }
11326 break;
11327
11328 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11329 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11330 bp->link_params.req_line_speed[idx] =
11331 SPEED_10;
11332 bp->link_params.req_duplex[idx] =
11333 DUPLEX_HALF;
11334 bp->port.advertising[idx] |=
11335 (ADVERTISED_10baseT_Half |
11336 ADVERTISED_TP);
11337 } else {
11338 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11339 link_config,
11340 bp->link_params.speed_cap_mask[idx]);
11341 return;
11342 }
11343 break;
11344
11345 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11346 if (bp->port.supported[idx] &
11347 SUPPORTED_100baseT_Full) {
11348 bp->link_params.req_line_speed[idx] =
11349 SPEED_100;
11350 bp->port.advertising[idx] |=
11351 (ADVERTISED_100baseT_Full |
11352 ADVERTISED_TP);
11353 } else {
11354 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11355 link_config,
11356 bp->link_params.speed_cap_mask[idx]);
11357 return;
11358 }
11359 break;
11360
11361 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11362 if (bp->port.supported[idx] &
11363 SUPPORTED_100baseT_Half) {
11364 bp->link_params.req_line_speed[idx] =
11365 SPEED_100;
11366 bp->link_params.req_duplex[idx] =
11367 DUPLEX_HALF;
11368 bp->port.advertising[idx] |=
11369 (ADVERTISED_100baseT_Half |
11370 ADVERTISED_TP);
11371 } else {
11372 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11373 link_config,
11374 bp->link_params.speed_cap_mask[idx]);
11375 return;
11376 }
11377 break;
11378
11379 case PORT_FEATURE_LINK_SPEED_1G:
11380 if (bp->port.supported[idx] &
11381 SUPPORTED_1000baseT_Full) {
11382 bp->link_params.req_line_speed[idx] =
11383 SPEED_1000;
11384 bp->port.advertising[idx] |=
11385 (ADVERTISED_1000baseT_Full |
11386 ADVERTISED_TP);
11387 } else if (bp->port.supported[idx] &
11388 SUPPORTED_1000baseKX_Full) {
11389 bp->link_params.req_line_speed[idx] =
11390 SPEED_1000;
11391 bp->port.advertising[idx] |=
11392 ADVERTISED_1000baseKX_Full;
11393 } else {
11394 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11395 link_config,
11396 bp->link_params.speed_cap_mask[idx]);
11397 return;
11398 }
11399 break;
11400
11401 case PORT_FEATURE_LINK_SPEED_2_5G:
11402 if (bp->port.supported[idx] &
11403 SUPPORTED_2500baseX_Full) {
11404 bp->link_params.req_line_speed[idx] =
11405 SPEED_2500;
11406 bp->port.advertising[idx] |=
11407 (ADVERTISED_2500baseX_Full |
11408 ADVERTISED_TP);
11409 } else {
11410 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11411 link_config,
11412 bp->link_params.speed_cap_mask[idx]);
11413 return;
11414 }
11415 break;
11416
11417 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11418 if (bp->port.supported[idx] &
11419 SUPPORTED_10000baseT_Full) {
11420 bp->link_params.req_line_speed[idx] =
11421 SPEED_10000;
11422 bp->port.advertising[idx] |=
11423 (ADVERTISED_10000baseT_Full |
11424 ADVERTISED_FIBRE);
11425 } else if (bp->port.supported[idx] &
11426 SUPPORTED_10000baseKR_Full) {
11427 bp->link_params.req_line_speed[idx] =
11428 SPEED_10000;
11429 bp->port.advertising[idx] |=
11430 (ADVERTISED_10000baseKR_Full |
11431 ADVERTISED_FIBRE);
11432 } else {
11433 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11434 link_config,
11435 bp->link_params.speed_cap_mask[idx]);
11436 return;
11437 }
11438 break;
11439 case PORT_FEATURE_LINK_SPEED_20G:
11440 bp->link_params.req_line_speed[idx] = SPEED_20000;
11441
11442 break;
11443 default:
11444 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11445 link_config);
11446 bp->link_params.req_line_speed[idx] =
11447 SPEED_AUTO_NEG;
11448 bp->port.advertising[idx] =
11449 bp->port.supported[idx];
11450 break;
11451 }
11452
11453 bp->link_params.req_flow_ctrl[idx] = (link_config &
11454 PORT_FEATURE_FLOW_CONTROL_MASK);
11455 if (bp->link_params.req_flow_ctrl[idx] ==
11456 BNX2X_FLOW_CTRL_AUTO) {
11457 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11458 bp->link_params.req_flow_ctrl[idx] =
11459 BNX2X_FLOW_CTRL_NONE;
11460 else
11461 bnx2x_set_requested_fc(bp);
11462 }
11463
11464 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11465 bp->link_params.req_line_speed[idx],
11466 bp->link_params.req_duplex[idx],
11467 bp->link_params.req_flow_ctrl[idx],
11468 bp->port.advertising[idx]);
11469 }
11470}
11471
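/* Assemble a 6-byte MAC address (network byte order) from the hi/lo words as
 * they are stored in shmem.
 */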
11472static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11473{
11474 __be16 mac_hi_be = cpu_to_be16(mac_hi);
11475 __be32 mac_lo_be = cpu_to_be32(mac_lo);
11476 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11477 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11478}
11479
11480static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11481{
11482 int port = BP_PORT(bp);
11483 u32 config;
11484 u32 ext_phy_type, ext_phy_config, eee_mode;
11485
11486 bp->link_params.bp = bp;
11487 bp->link_params.port = port;
11488
11489 bp->link_params.lane_config =
11490 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11491
11492 bp->link_params.speed_cap_mask[0] =
11493 SHMEM_RD(bp,
11494 dev_info.port_hw_config[port].speed_capability_mask) &
11495 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11496 bp->link_params.speed_cap_mask[1] =
11497 SHMEM_RD(bp,
11498 dev_info.port_hw_config[port].speed_capability_mask2) &
11499 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11500 bp->port.link_config[0] =
11501 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11502
11503 bp->port.link_config[1] =
11504 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11505
11506 bp->link_params.multi_phy_config =
11507 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11508
11509
11510 /* If the device is capable of WoL, set the default state according to the HW */
11511 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11512 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11513 (config & PORT_FEATURE_WOL_ENABLED));
11514
11515 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11516 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11517 bp->flags |= NO_ISCSI_FLAG;
11518 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11519 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11520 bp->flags |= NO_FCOE_FLAG;
11521
11522 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
11523 bp->link_params.lane_config,
11524 bp->link_params.speed_cap_mask[0],
11525 bp->port.link_config[0]);
11526
11527 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11528 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11529 bnx2x_phy_probe(&bp->link_params);
11530 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11531
11532 bnx2x_link_settings_requested(bp);
11533
11534
11535
11536 /* If connected directly, work with the internal PHY, otherwise, work
11537 * with the external PHY */
11538 ext_phy_config =
11539 SHMEM_RD(bp,
11540 dev_info.port_hw_config[port].external_phy_config);
11541 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11542 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11543 bp->mdio.prtad = bp->port.phy_addr;
11544
11545 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11546 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11547 bp->mdio.prtad =
11548 XGXS_EXT_PHY_ADDR(ext_phy_config);
11549
11550
11551 eee_mode = (((SHMEM_RD(bp, dev_info.
11552 port_feature_config[port].eee_power_mode)) &
11553 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11554 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11555 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11556 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11557 EEE_MODE_ENABLE_LPI |
11558 EEE_MODE_OUTPUT_TIME;
11559 } else {
11560 bp->link_params.eee_mode = 0;
11561 }
11562}
11563
11564void bnx2x_get_iscsi_info(struct bnx2x *bp)
11565{
11566 u32 no_flags = NO_ISCSI_FLAG;
11567 int port = BP_PORT(bp);
11568 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11569 drv_lic_key[port].max_iscsi_conn);
11570
11571 if (!CNIC_SUPPORT(bp)) {
11572 bp->flags |= no_flags;
11573 return;
11574 }
11575
11576
11577 bp->cnic_eth_dev.max_iscsi_conn =
11578 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11579 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11580
11581 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11582 bp->cnic_eth_dev.max_iscsi_conn);
11583
11584
11585
11586 /* If maximum allowed number of connections is zero -
11587 * disable the feature. */
11588 if (!bp->cnic_eth_dev.max_iscsi_conn)
11589 bp->flags |= no_flags;
11590}
11591
11592static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11593{
11594
11595 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11596 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11597 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11598 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11599
11600
11601 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11602 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11603 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11604 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11605}
11606
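/* Count the functions on this path (MF) or port(s) (SF) that may run FCoE,
 * so that the FCoE exchange resources can be divided between them.
 */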
11607static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11608{
11609 u8 count = 0;
11610
11611 if (IS_MF(bp)) {
11612 u8 fid;
11613
11614
11615 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11616 if (IS_MF_SD(bp)) {
11617 u32 cfg = MF_CFG_RD(bp,
11618 func_mf_config[fid].config);
11619
11620 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11621 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11622 FUNC_MF_CFG_PROTOCOL_FCOE))
11623 count++;
11624 } else {
11625 u32 cfg = MF_CFG_RD(bp,
11626 func_ext_config[fid].
11627 func_cfg);
11628
11629 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11630 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11631 count++;
11632 }
11633 }
11634 } else {
11635 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11636
11637 for (port = 0; port < port_cnt; port++) {
11638 u32 lic = SHMEM_RD(bp,
11639 drv_lic_key[port].max_fcoe_conn) ^
11640 FW_ENCODE_32BIT_PATTERN;
11641 if (lic)
11642 count++;
11643 }
11644 }
11645
11646 return count;
11647}
11648
11649static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11650{
11651 int port = BP_PORT(bp);
11652 int func = BP_ABS_FUNC(bp);
11653 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11654 drv_lic_key[port].max_fcoe_conn);
11655 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11656
11657 if (!CNIC_SUPPORT(bp)) {
11658 bp->flags |= NO_FCOE_FLAG;
11659 return;
11660 }
11661
11662
11663 bp->cnic_eth_dev.max_fcoe_conn =
11664 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11665 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11666
11667
11668 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11669
11670
11671 if (num_fcoe_func)
11672 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11673
11674
11675 if (!IS_MF(bp)) {
11676
11677 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11678 SHMEM_RD(bp,
11679 dev_info.port_hw_config[port].
11680 fcoe_wwn_port_name_upper);
11681 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11682 SHMEM_RD(bp,
11683 dev_info.port_hw_config[port].
11684 fcoe_wwn_port_name_lower);
11685
11686
11687 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11688 SHMEM_RD(bp,
11689 dev_info.port_hw_config[port].
11690 fcoe_wwn_node_name_upper);
11691 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11692 SHMEM_RD(bp,
11693 dev_info.port_hw_config[port].
11694 fcoe_wwn_node_name_lower);
11695 } else if (!IS_MF_SD(bp)) {
11696
11697
11698
11699 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11700 bnx2x_get_ext_wwn_info(bp, func);
11701 } else {
11702 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11703 bnx2x_get_ext_wwn_info(bp, func);
11704 }
11705
11706 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11707
11708
11709
11710 /* If maximum allowed number of connections is zero -
11711 * disable the feature. */
11712 if (!bp->cnic_eth_dev.max_fcoe_conn) {
11713 bp->flags |= NO_FCOE_FLAG;
11714 eth_zero_addr(bp->fip_mac);
11715 }
11716}
11717
11718static void bnx2x_get_cnic_info(struct bnx2x *bp)
11719{
11720
11721 /* iSCSI may be dynamically disabled but reading
11722 * info here we will decrease memory usage by driver
11723 * if the feature is disabled for good
11724 */
11725 bnx2x_get_iscsi_info(bp);
11726 bnx2x_get_fcoe_info(bp);
11727}
11728
11729static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11730{
11731 u32 val, val2;
11732 int func = BP_ABS_FUNC(bp);
11733 int port = BP_PORT(bp);
11734 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11735 u8 *fip_mac = bp->fip_mac;
11736
11737 if (IS_MF(bp)) {
11738 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
11739 * FCoE MAC then the appropriate feature should be disabled.
11740 * In non SD mode features configuration comes from struct
11741 * func_ext_config.
11742 */
11743 if (!IS_MF_SD(bp)) {
11744 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11745 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11746 val2 = MF_CFG_RD(bp, func_ext_config[func].
11747 iscsi_mac_addr_upper);
11748 val = MF_CFG_RD(bp, func_ext_config[func].
11749 iscsi_mac_addr_lower);
11750 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11751 BNX2X_DEV_INFO
11752 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11753 } else {
11754 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11755 }
11756
11757 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11758 val2 = MF_CFG_RD(bp, func_ext_config[func].
11759 fcoe_mac_addr_upper);
11760 val = MF_CFG_RD(bp, func_ext_config[func].
11761 fcoe_mac_addr_lower);
11762 bnx2x_set_mac_buf(fip_mac, val, val2);
11763 BNX2X_DEV_INFO
11764 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11765 } else {
11766 bp->flags |= NO_FCOE_FLAG;
11767 }
11768
11769 bp->mf_ext_config = cfg;
11770
11771 } else {
11772 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11773
11774 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11775
11776 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11777 BNX2X_DEV_INFO
11778 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11779 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11780
11781 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11782 BNX2X_DEV_INFO("SD FCoE MODE\n");
11783 BNX2X_DEV_INFO
11784 ("Read FIP MAC: %pM\n", fip_mac);
11785 }
11786 }
11787
11788
11789
11790 /* If this is a storage-only interface (FCoE AFEX), use the FIP MAC
11791 * as the primary MAC. */
11792 if (IS_MF_FCOE_AFEX(bp))
11793 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11794 } else {
11795 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11796 iscsi_mac_upper);
11797 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11798 iscsi_mac_lower);
11799 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11800
11801 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11802 fcoe_fip_mac_upper);
11803 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11804 fcoe_fip_mac_lower);
11805 bnx2x_set_mac_buf(fip_mac, val, val2);
11806 }
11807
11808 /* Disable iSCSI OOO if the MAC configuration is invalid. */
11809 if (!is_valid_ether_addr(iscsi_mac)) {
11810 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11811 eth_zero_addr(iscsi_mac);
11812 }
11813
11814 /* Disable FCoE if the MAC configuration is invalid. */
11815 if (!is_valid_ether_addr(fip_mac)) {
11816 bp->flags |= NO_FCOE_FLAG;
11817 eth_zero_addr(bp->fip_mac);
11818 }
11819}
11820
11821static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11822{
11823 u32 val, val2;
11824 int func = BP_ABS_FUNC(bp);
11825 int port = BP_PORT(bp);
11826
11827
11828 eth_zero_addr(bp->dev->dev_addr);
11829
11830 if (BP_NOMCP(bp)) {
11831 BNX2X_ERROR("warning: random MAC workaround active\n");
11832 eth_hw_addr_random(bp->dev);
11833 } else if (IS_MF(bp)) {
11834 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11835 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11836 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11837 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11838 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11839
11840 if (CNIC_SUPPORT(bp))
11841 bnx2x_get_cnic_mac_hwinfo(bp);
11842 } else {
11843
11844 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11845 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11846 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11847
11848 if (CNIC_SUPPORT(bp))
11849 bnx2x_get_cnic_mac_hwinfo(bp);
11850 }
11851
11852 if (!BP_NOMCP(bp)) {
11853
11854 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11855 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11856 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11857 bp->flags |= HAS_PHYS_PORT_ID;
11858 }
11859
11860 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11861
11862 if (!is_valid_ether_addr(bp->dev->dev_addr))
11863 dev_err(&bp->pdev->dev,
11864 "bad Ethernet MAC address configuration: %pM\n"
11865 "change it manually before bringing up the appropriate network interface\n",
11866 bp->dev->dev_addr);
11867}
11868
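/* Return true if "pause on exhausted host ring" is enabled for this
 * function/port in the NVRAM configuration (never for VFs).
 */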
11869static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11870{
11871 int tmp;
11872 u32 cfg;
11873
11874 if (IS_VF(bp))
11875 return false;
11876
11877 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11878
11879 tmp = BP_ABS_FUNC(bp);
11880 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11881 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11882 } else {
11883
11884 tmp = BP_PORT(bp);
11885 cfg = SHMEM_RD(bp,
11886 dev_info.port_hw_config[tmp].generic_features);
11887 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11888 }
11889 return cfg;
11890}
11891
11892static void validate_set_si_mode(struct bnx2x *bp)
11893{
11894 u8 func = BP_ABS_FUNC(bp);
11895 u32 val;
11896
11897 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11898
11899 /* check for legal mac (upper bytes) */
11900 if (val != 0xffff) {
11901 bp->mf_mode = MULTI_FUNCTION_SI;
11902 bp->mf_config[BP_VN(bp)] =
11903 MF_CFG_RD(bp, func_mf_config[func].config);
11904 } else
11905 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11906}
11907
11908static int bnx2x_get_hwinfo(struct bnx2x *bp)
11909{
11910 int func = BP_ABS_FUNC(bp);
11911 int vn;
11912 u32 val = 0, val2 = 0;
11913 int rc = 0;
11914
11915
11916 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11917 dev_err(&bp->pdev->dev,
11918 "Chip read returns all Fs. Preventing probe from continuing\n");
11919 return -EINVAL;
11920 }
11921
11922 bnx2x_get_common_hwinfo(bp);
11923
11924
11925
11926 /* Initialize IGU parameters */
11927 if (CHIP_IS_E1x(bp)) {
11928 bp->common.int_block = INT_BLOCK_HC;
11929
11930 bp->igu_dsb_id = DEF_SB_IGU_ID;
11931 bp->igu_base_sb = 0;
11932 } else {
11933 bp->common.int_block = INT_BLOCK_IGU;
11934
11935 /* do not allow a device reset while the IGU info is being processed */
11936 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11937
11938 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11939
11940 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11941 int tout = 5000;
11942
11943 BNX2X_DEV_INFO("FORCING Normal Mode\n");
11944
11945 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11946 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11947 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11948
11949 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11950 tout--;
11951 usleep_range(1000, 2000);
11952 }
11953
11954 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11955 dev_err(&bp->pdev->dev,
11956 "FORCING Normal Mode failed!!!\n");
11957 bnx2x_release_hw_lock(bp,
11958 HW_LOCK_RESOURCE_RESET);
11959 return -EPERM;
11960 }
11961 }
11962
11963 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11964 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11965 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11966 } else
11967 BNX2X_DEV_INFO("IGU Normal Mode\n");
11968
11969 rc = bnx2x_get_igu_cam_info(bp);
11970 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11971 if (rc)
11972 return rc;
11973 }
11974
11975
11976
11977
11978
11979
11980 if (CHIP_IS_E1x(bp))
11981 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11982 else
11983
11984 /* On 57712 and newer we currently use one FW SB per IGU SB (Rx and
11985 * Tx of the same queue use the same IGU SB), so keep the FW and IGU
11986 * SB ids equal. */
11987 bp->base_fw_ndsb = bp->igu_base_sb;
11988
11989 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
11990 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11991 bp->igu_sb_cnt, bp->base_fw_ndsb);
11992
11993
11994
11995 /* Initialize MF configuration */
11996 bp->mf_ov = 0;
11997 bp->mf_mode = 0;
11998 bp->mf_sub_mode = 0;
11999 vn = BP_VN(bp);
12000
12001 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12002 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
12003 bp->common.shmem2_base, SHMEM2_RD(bp, size),
12004 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
12005
12006 if (SHMEM2_HAS(bp, mf_cfg_addr))
12007 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12008 else
12009 bp->common.mf_cfg_base = bp->common.shmem_base +
12010 offsetof(struct shmem_region, func_mb) +
12011 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
12012
12013
12014 /* Determine the MF (multi-function) mode:
12015 * 1. Existence of MF configuration
12016 * 2. MAC address must be legal (check only upper bytes)
12017 * for Switch-Independent mode;
12018 * OVLAN must be legal for Switch-Dependent mode
12019 * 3. SF_MODE determines data path for VFs */
12020 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12021
12022 val = SHMEM_RD(bp,
12023 dev_info.shared_feature_config.config);
12024 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
12025
12026 switch (val) {
12027 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
12028 validate_set_si_mode(bp);
12029 break;
12030 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
12031 if ((!CHIP_IS_E1x(bp)) &&
12032 (MF_CFG_RD(bp, func_mf_config[func].
12033 mac_upper) != 0xffff) &&
12034 (SHMEM2_HAS(bp,
12035 afex_driver_support))) {
12036 bp->mf_mode = MULTI_FUNCTION_AFEX;
12037 bp->mf_config[vn] = MF_CFG_RD(bp,
12038 func_mf_config[func].config);
12039 } else {
12040 BNX2X_DEV_INFO("can not configure afex mode\n");
12041 }
12042 break;
12043 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
12044
12045 val = MF_CFG_RD(bp,
12046 func_mf_config[FUNC_0].e1hov_tag);
12047 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12048
12049 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12050 bp->mf_mode = MULTI_FUNCTION_SD;
12051 bp->mf_config[vn] = MF_CFG_RD(bp,
12052 func_mf_config[func].config);
12053 } else
12054 BNX2X_DEV_INFO("illegal OV for SD\n");
12055 break;
12056 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12057 bp->mf_mode = MULTI_FUNCTION_SD;
12058 bp->mf_sub_mode = SUB_MF_MODE_BD;
12059 bp->mf_config[vn] =
12060 MF_CFG_RD(bp,
12061 func_mf_config[func].config);
12062
12063 if (SHMEM2_HAS(bp, mtu_size)) {
12064 int mtu_idx = BP_FW_MB_IDX(bp);
12065 u16 mtu_size;
12066 u32 mtu;
12067
12068 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12069 mtu_size = (u16)mtu;
12070 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12071 mtu_size, mtu);
12072
12073 /* if valid: update device mtu */
12074 if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
12075 (mtu_size <=
12076 ETH_MAX_JUMBO_PACKET_SIZE))
12077 bp->dev->mtu = mtu_size;
12078 }
12079 break;
12080 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12081 bp->mf_mode = MULTI_FUNCTION_SD;
12082 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12083 bp->mf_config[vn] =
12084 MF_CFG_RD(bp,
12085 func_mf_config[func].config);
12086 break;
12087 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12088 bp->mf_config[vn] = 0;
12089 break;
12090 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12091 val2 = SHMEM_RD(bp,
12092 dev_info.shared_hw_config.config_3);
12093 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12094 switch (val2) {
12095 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12096 validate_set_si_mode(bp);
12097 bp->mf_sub_mode =
12098 SUB_MF_MODE_NPAR1_DOT_5;
12099 break;
12100 default:
12101
12102 bp->mf_config[vn] = 0;
12103 BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
12104 val2);
12105 }
12106 break;
12107 default:
12108
12109 bp->mf_config[vn] = 0;
12110 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12111 }
12112 }
12113
12114 BNX2X_DEV_INFO("%s function mode\n",
12115 IS_MF(bp) ? "multi" : "single");
12116
12117 switch (bp->mf_mode) {
12118 case MULTI_FUNCTION_SD:
12119 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12120 FUNC_MF_CFG_E1HOV_TAG_MASK;
12121 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12122 bp->mf_ov = val;
12123 bp->path_has_ovlan = true;
12124
12125 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12126 func, bp->mf_ov, bp->mf_ov);
12127 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12128 (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12129 dev_err(&bp->pdev->dev,
12130 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12131 func);
12132 bp->path_has_ovlan = true;
12133 } else {
12134 dev_err(&bp->pdev->dev,
12135 "No valid MF OV for func %d, aborting\n",
12136 func);
12137 return -EPERM;
12138 }
12139 break;
12140 case MULTI_FUNCTION_AFEX:
12141 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12142 break;
12143 case MULTI_FUNCTION_SI:
12144 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12145 func);
12146 break;
12147 default:
12148 if (vn) {
12149 dev_err(&bp->pdev->dev,
12150 "VN %d is in a single function mode, aborting\n",
12151 vn);
12152 return -EPERM;
12153 }
12154 break;
12155 }
12156
12157
12158 /* Even when this function is in SF mode, in a 4-port device the
12159 * function on the other port of the same path may use an outer
12160 * VLAN; check its OV tag so path_has_ovlan reflects the whole
12161 * path. */
12162 if (CHIP_MODE_IS_4_PORT(bp) &&
12163 !bp->path_has_ovlan &&
12164 !IS_MF(bp) &&
12165 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12166 u8 other_port = !BP_PORT(bp);
12167 u8 other_func = BP_PATH(bp) + 2*other_port;
12168 val = MF_CFG_RD(bp,
12169 func_mf_config[other_func].e1hov_tag);
12170 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12171 bp->path_has_ovlan = true;
12172 }
12173 }
12174
12175
12176 if (CHIP_IS_E1H(bp) && IS_MF(bp))
12177 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12178
12179
12180 bnx2x_get_port_hwinfo(bp);
12181
12182
12183 bnx2x_get_mac_hwinfo(bp);
12184
12185 bnx2x_get_cnic_info(bp);
12186
12187 return rc;
12188}
12189
12190static void bnx2x_read_fwinfo(struct bnx2x *bp)
12191{
12192 char str_id[VENDOR_ID_LEN + 1];
12193 unsigned int vpd_len, kw_len;
12194 u8 *vpd_data;
12195 int rodi;
12196
12197 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12198
12199 vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len);
12200 if (IS_ERR(vpd_data))
12201 return;
12202
12203 rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
12204 PCI_VPD_RO_KEYWORD_MFR_ID, &kw_len);
12205 if (rodi < 0 || kw_len != VENDOR_ID_LEN)
12206 goto out_not_found;
12207
12208
12209 snprintf(str_id, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12210 if (!strncasecmp(str_id, &vpd_data[rodi], VENDOR_ID_LEN)) {
12211 rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
12212 PCI_VPD_RO_KEYWORD_VENDOR0,
12213 &kw_len);
12214 if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) {
12215 memcpy(bp->fw_ver, &vpd_data[rodi], kw_len);
12216 bp->fw_ver[kw_len] = ' ';
12217 }
12218 }
12219out_not_found:
12220 kfree(vpd_data);
12221}
12222
12223static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12224{
12225 u32 flags = 0;
12226
12227 if (CHIP_REV_IS_FPGA(bp))
12228 SET_FLAGS(flags, MODE_FPGA);
12229 else if (CHIP_REV_IS_EMUL(bp))
12230 SET_FLAGS(flags, MODE_EMUL);
12231 else
12232 SET_FLAGS(flags, MODE_ASIC);
12233
12234 if (CHIP_MODE_IS_4_PORT(bp))
12235 SET_FLAGS(flags, MODE_PORT4);
12236 else
12237 SET_FLAGS(flags, MODE_PORT2);
12238
12239 if (CHIP_IS_E2(bp))
12240 SET_FLAGS(flags, MODE_E2);
12241 else if (CHIP_IS_E3(bp)) {
12242 SET_FLAGS(flags, MODE_E3);
12243 if (CHIP_REV(bp) == CHIP_REV_Ax)
12244 SET_FLAGS(flags, MODE_E3_A0);
12245 else
12246 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12247 }
12248
12249 if (IS_MF(bp)) {
12250 SET_FLAGS(flags, MODE_MF);
12251 switch (bp->mf_mode) {
12252 case MULTI_FUNCTION_SD:
12253 SET_FLAGS(flags, MODE_MF_SD);
12254 break;
12255 case MULTI_FUNCTION_SI:
12256 SET_FLAGS(flags, MODE_MF_SI);
12257 break;
12258 case MULTI_FUNCTION_AFEX:
12259 SET_FLAGS(flags, MODE_MF_AFEX);
12260 break;
12261 }
12262 } else
12263 SET_FLAGS(flags, MODE_SF);
12264
12265#if defined(__LITTLE_ENDIAN)
12266 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12267#else
12268 SET_FLAGS(flags, MODE_BIG_ENDIAN);
12269#endif
12270 INIT_MODE_FLAGS(bp) = flags;
12271}
12272
12273static int bnx2x_init_bp(struct bnx2x *bp)
12274{
12275 int func;
12276 int rc;
12277
12278 mutex_init(&bp->port.phy_mutex);
12279 mutex_init(&bp->fw_mb_mutex);
12280 mutex_init(&bp->drv_info_mutex);
12281 sema_init(&bp->stats_lock, 1);
12282 bp->drv_info_mng_owner = false;
12283 INIT_LIST_HEAD(&bp->vlan_reg);
12284
12285 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12286 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12287 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12288 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12289 if (IS_PF(bp)) {
12290 rc = bnx2x_get_hwinfo(bp);
12291 if (rc)
12292 return rc;
12293 } else {
12294 eth_zero_addr(bp->dev->dev_addr);
12295 }
12296
12297 bnx2x_set_modes_bitmap(bp);
12298
12299 rc = bnx2x_alloc_mem_bp(bp);
12300 if (rc)
12301 return rc;
12302
12303 bnx2x_read_fwinfo(bp);
12304
12305 func = BP_FUNC(bp);
12306
12307 /* need to reset chip if undi was active */
12308 if (IS_PF(bp) && !BP_NOMCP(bp)) {
12309
12310 bp->fw_seq =
12311 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12312 DRV_MSG_SEQ_NUMBER_MASK;
12313 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12314
12315 rc = bnx2x_prev_unload(bp);
12316 if (rc) {
12317 bnx2x_free_mem_bp(bp);
12318 return rc;
12319 }
12320 }
12321
12322 if (CHIP_REV_IS_FPGA(bp))
12323 dev_err(&bp->pdev->dev, "FPGA detected\n");
12324
12325 if (BP_NOMCP(bp) && (func == 0))
12326 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12327
12328 bp->disable_tpa = disable_tpa;
12329 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
12330 /* Reduce memory usage in kdump environment by disabling TPA */
12331 bp->disable_tpa |= is_kdump_kernel();
12332
12333
12334 if (bp->disable_tpa) {
12335 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12336 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12337 }
12338
12339 if (CHIP_IS_E1(bp))
12340 bp->dropless_fc = false;
12341 else
12342 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12343
12344 bp->mrrs = mrrs;
12345
12346 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12347 if (IS_VF(bp))
12348 bp->rx_ring_size = MAX_RX_AVAIL;
12349
12350 /* make sure that the coalescing values are in the right granularity */
12351 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12352 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12353
12354 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12355
12356 timer_setup(&bp->timer, bnx2x_timer, 0);
12357 bp->timer.expires = jiffies + bp->current_interval;
12358
12359 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12360 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12361 SHMEM2_HAS(bp, dcbx_en) &&
12362 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12363 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12364 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12365 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12366 bnx2x_dcbx_init_params(bp);
12367 } else {
12368 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12369 }
12370
12371 if (CHIP_IS_E1x(bp))
12372 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12373 else
12374 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12375
12376
12377 if (IS_VF(bp))
12378 bp->max_cos = 1;
12379 else if (CHIP_IS_E1x(bp))
12380 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12381 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12382 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12383 else if (CHIP_IS_E3B0(bp))
12384 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12385 else
12386 BNX2X_ERR("unknown chip %x revision %x\n",
12387 CHIP_NUM(bp), CHIP_REV(bp));
12388 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12389
12390
12391 /* We need at least one default status block for slow-path events,
12392 * a second one for the L2 queue, and a third one for CNIC if it is
12393 * supported. */
12394 if (IS_VF(bp))
12395 bp->min_msix_vec_cnt = 1;
12396 else if (CNIC_SUPPORT(bp))
12397 bp->min_msix_vec_cnt = 3;
12398 else
12399 bp->min_msix_vec_cnt = 2;
12400 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
12401
12402 bp->dump_preset_idx = 1;
12403
12404 return rc;
12405}
12406
12407
12408
12409
12410
12411
12412
12413
12414
12415
12416static int bnx2x_open(struct net_device *dev)
12417{
12418 struct bnx2x *bp = netdev_priv(dev);
12419 int rc;
12420
12421 bp->stats_init = true;
12422
12423 netif_carrier_off(dev);
12424
12425 bnx2x_set_power_state(bp, PCI_D0);
12426
12427
12428 /* If parity occurred during the unload, then attentions and/or
12429 * RECOVERY_IN_PROGRESS may still be set. In this case we want the
12430 * first function loaded on the current engine to complete the
12431 * recovery.
12432 * Parity recovery is only relevant for the PF driver. */
12433 if (IS_PF(bp)) {
12434 int other_engine = BP_PATH(bp) ? 0 : 1;
12435 bool other_load_status, load_status;
12436 bool global = false;
12437
12438 other_load_status = bnx2x_get_load_status(bp, other_engine);
12439 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12440 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12441 bnx2x_chk_parity_attn(bp, &global, true)) {
12442 do {
12443 /* If there are attentions and they are in global blocks, set
12444 * the GLOBAL_RESET bit regardless of whether it will be this
12445 * function that will complete the recovery or not.
12446 */
12447
12448 if (global)
12449 bnx2x_set_reset_global(bp);
12450
12451
12452 /* Only the first function on the current engine should try
12453 * to recover in open. In case of attentions in global blocks
12454 * only the first in the chip should try to recover.
12455 */
12456 if ((!load_status &&
12457 (!global || !other_load_status)) &&
12458 bnx2x_trylock_leader_lock(bp) &&
12459 !bnx2x_leader_reset(bp)) {
12460 netdev_info(bp->dev,
12461 "Recovered in open\n");
12462 break;
12463 }
12464
12465 /* recovery has failed... */
12466 bnx2x_set_power_state(bp, PCI_D3hot);
12467 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12468
12469 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12470 "If you still see this message after a few retries then power cycle is required.\n");
12471
12472 return -EAGAIN;
12473 } while (0);
12474 }
12475 }
12476
12477 bp->recovery_state = BNX2X_RECOVERY_DONE;
12478 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12479 if (rc)
12480 return rc;
12481
12482 return 0;
12483}
12484
12485
12486static int bnx2x_close(struct net_device *dev)
12487{
12488 struct bnx2x *bp = netdev_priv(dev);
12489
12490
12491 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12492
12493 return 0;
12494}
12495
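/* Multicast list elements are kept in page-sized groups: each group is one
 * page whose flexible array holds as many elements as fit after the header
 * (see MCAST_ELEMS_PER_PG below).
 */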
12496struct bnx2x_mcast_list_elem_group
12497{
12498 struct list_head mcast_group_link;
12499 struct bnx2x_mcast_list_elem mcast_elems[];
12500};
12501
12502#define MCAST_ELEMS_PER_PG \
12503 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12504 sizeof(struct bnx2x_mcast_list_elem))
12505
12506static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
12507{
12508 struct bnx2x_mcast_list_elem_group *current_mcast_group;
12509
12510 while (!list_empty(mcast_group_list)) {
12511 current_mcast_group = list_first_entry(mcast_group_list,
12512 struct bnx2x_mcast_list_elem_group,
12513 mcast_group_link);
12514 list_del(&current_mcast_group->mcast_group_link);
12515 free_page((unsigned long)current_mcast_group);
12516 }
12517}
12518
12519static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12520 struct bnx2x_mcast_ramrod_params *p,
12521 struct list_head *mcast_group_list)
12522{
12523 struct bnx2x_mcast_list_elem *mc_mac;
12524 struct netdev_hw_addr *ha;
12525 struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
12526 int mc_count = netdev_mc_count(bp->dev);
12527 int offset = 0;
12528
12529 INIT_LIST_HEAD(&p->mcast_list);
12530 netdev_for_each_mc_addr(ha, bp->dev) {
12531 if (!offset) {
12532 current_mcast_group =
12533 (struct bnx2x_mcast_list_elem_group *)
12534 __get_free_page(GFP_ATOMIC);
12535 if (!current_mcast_group) {
12536 bnx2x_free_mcast_macs_list(mcast_group_list);
12537 BNX2X_ERR("Failed to allocate mc MAC list\n");
12538 return -ENOMEM;
12539 }
12540 list_add(&current_mcast_group->mcast_group_link,
12541 mcast_group_list);
12542 }
12543 mc_mac = &current_mcast_group->mcast_elems[offset];
12544 mc_mac->mac = bnx2x_mc_addr(ha);
12545 list_add_tail(&mc_mac->link, &p->mcast_list);
12546 offset++;
12547 if (offset == MCAST_ELEMS_PER_PG)
12548 offset = 0;
12549 }
12550 p->mcast_list_len = mc_count;
12551 return 0;
12552}
12553
12554
12555
12556
12557
12558
12559
12560
12561static int bnx2x_set_uc_list(struct bnx2x *bp)
12562{
12563 int rc;
12564 struct net_device *dev = bp->dev;
12565 struct netdev_hw_addr *ha;
12566 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12567 unsigned long ramrod_flags = 0;
12568
12569
12570 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12571 if (rc < 0) {
12572 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12573 return rc;
12574 }
12575
12576 netdev_for_each_uc_addr(ha, dev) {
12577 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12578 BNX2X_UC_LIST_MAC, &ramrod_flags);
12579 if (rc == -EEXIST) {
12580 DP(BNX2X_MSG_SP,
12581 "Failed to schedule ADD operations: %d\n", rc);
12582
12583 rc = 0;
12584
12585 } else if (rc < 0) {
12586
12587 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12588 rc);
12589 return rc;
12590 }
12591 }
12592
12593 /* Execute the pending commands */
12594 __set_bit(RAMROD_CONT, &ramrod_flags);
12595 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12596 BNX2X_UC_LIST_MAC, &ramrod_flags);
12597}
12598
12599static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
12600{
12601 LIST_HEAD(mcast_group_list);
12602 struct net_device *dev = bp->dev;
12603 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12604 int rc = 0;
12605
12606 rparam.mcast_obj = &bp->mcast_obj;
12607
12608
12609 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12610 if (rc < 0) {
12611 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12612 return rc;
12613 }
12614
12615 /* then, configure a new MACs list */
12616 if (netdev_mc_count(dev)) {
12617 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12618 if (rc)
12619 return rc;
12620
12621
12622 rc = bnx2x_config_mcast(bp, &rparam,
12623 BNX2X_MCAST_CMD_ADD);
12624 if (rc < 0)
12625 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12626 rc);
12627
12628 bnx2x_free_mcast_macs_list(&mcast_group_list);
12629 }
12630
12631 return rc;
12632}
12633
12634static int bnx2x_set_mc_list(struct bnx2x *bp)
12635{
12636 LIST_HEAD(mcast_group_list);
12637 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12638 struct net_device *dev = bp->dev;
12639 int rc = 0;
12640
12641 /* On older adapters, flush the old configuration and re-add the filters */
12642 if (CHIP_IS_E1x(bp))
12643 return bnx2x_set_mc_list_e1x(bp);
12644
12645 rparam.mcast_obj = &bp->mcast_obj;
12646
12647 if (netdev_mc_count(dev)) {
12648 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12649 if (rc)
12650 return rc;
12651
12652
12653 rc = bnx2x_config_mcast(bp, &rparam,
12654 BNX2X_MCAST_CMD_SET);
12655 if (rc < 0)
12656 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12657 rc);
12658
12659 bnx2x_free_mcast_macs_list(&mcast_group_list);
12660 } else {
12661 /* If no mc addresses are required, flush the configuration */
12662 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12663 if (rc < 0)
12664 BNX2X_ERR("Failed to clear multicast configuration %d\n",
12665 rc);
12666 }
12667
12668 return rc;
12669}
12670
12671
12672static void bnx2x_set_rx_mode(struct net_device *dev)
12673{
12674 struct bnx2x *bp = netdev_priv(dev);
12675
12676 if (bp->state != BNX2X_STATE_OPEN) {
12677 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12678 return;
12679 } else {
12680
12681 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12682 NETIF_MSG_IFUP);
12683 }
12684}
12685
12686void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12687{
12688 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12689
12690 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12691
12692 netif_addr_lock_bh(bp->dev);
12693
12694 if (bp->dev->flags & IFF_PROMISC) {
12695 rx_mode = BNX2X_RX_MODE_PROMISC;
12696 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12697 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12698 CHIP_IS_E1(bp))) {
12699 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12700 } else {
12701 if (IS_PF(bp)) {
12702
12703 if (bnx2x_set_mc_list(bp) < 0)
12704 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12705
12706 /* release bh lock, as bnx2x_set_uc_list might sleep */
12707 netif_addr_unlock_bh(bp->dev);
12708 if (bnx2x_set_uc_list(bp) < 0)
12709 rx_mode = BNX2X_RX_MODE_PROMISC;
12710 netif_addr_lock_bh(bp->dev);
12711 } else {
12712 /* configuring mcast to a vf involves sleeping (when we
12713 * wait for the pf's response).
12714 */
12715 bnx2x_schedule_sp_rtnl(bp,
12716 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12717 }
12718 }
12719
12720 bp->rx_mode = rx_mode;
12721
12722 if (IS_MF_ISCSI_ONLY(bp))
12723 bp->rx_mode = BNX2X_RX_MODE_NONE;
12724
12725
12726 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12727 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12728 netif_addr_unlock_bh(bp->dev);
12729 return;
12730 }
12731
12732 if (IS_PF(bp)) {
12733 bnx2x_set_storm_rx_mode(bp);
12734 netif_addr_unlock_bh(bp->dev);
12735 } else {
12736 /* VF will need to request the PF to make this change, and so
12737 * the VF needs to release the bottom-half lock prior to the
12738 * request (as it will likely sleep).
12739 */
12740 netif_addr_unlock_bh(bp->dev);
12741 bnx2x_vfpf_storm_rx_mode(bp);
12742 }
12743}
12744
12745
12746static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12747 int devad, u16 addr)
12748{
12749 struct bnx2x *bp = netdev_priv(netdev);
12750 u16 value;
12751 int rc;
12752
12753 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12754 prtad, devad, addr);
12755
12756
12757 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12758
12759 bnx2x_acquire_phy_lock(bp);
12760 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12761 bnx2x_release_phy_lock(bp);
12762 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12763
12764 if (!rc)
12765 rc = value;
12766 return rc;
12767}
12768
12769
12770static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12771 u16 addr, u16 value)
12772{
12773 struct bnx2x *bp = netdev_priv(netdev);
12774 int rc;
12775
12776 DP(NETIF_MSG_LINK,
12777 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12778 prtad, devad, addr, value);
12779
12780
12781 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12782
12783 bnx2x_acquire_phy_lock(bp);
12784 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12785 bnx2x_release_phy_lock(bp);
12786 return rc;
12787}
12788
12789
12790static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12791{
12792 struct bnx2x *bp = netdev_priv(dev);
12793 struct mii_ioctl_data *mdio = if_mii(ifr);
12794
12795 if (!netif_running(dev))
12796 return -EAGAIN;
12797
12798 switch (cmd) {
12799 case SIOCSHWTSTAMP:
12800 return bnx2x_hwtstamp_ioctl(bp, ifr);
12801 default:
12802 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12803 mdio->phy_id, mdio->reg_num, mdio->val_in);
12804 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12805 }
12806}
12807
12808static int bnx2x_validate_addr(struct net_device *dev)
12809{
12810 struct bnx2x *bp = netdev_priv(dev);
12811
12812
12813 if (IS_VF(bp))
12814 bnx2x_sample_bulletin(bp);
12815
12816 if (!is_valid_ether_addr(dev->dev_addr)) {
12817 BNX2X_ERR("Non-valid Ethernet address\n");
12818 return -EADDRNOTAVAIL;
12819 }
12820 return 0;
12821}
12822
12823static int bnx2x_get_phys_port_id(struct net_device *netdev,
12824 struct netdev_phys_item_id *ppid)
12825{
12826 struct bnx2x *bp = netdev_priv(netdev);
12827
12828 if (!(bp->flags & HAS_PHYS_PORT_ID))
12829 return -EOPNOTSUPP;
12830
12831 ppid->id_len = sizeof(bp->phys_port_id);
12832 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12833
12834 return 0;
12835}
12836
12837static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12838 struct net_device *dev,
12839 netdev_features_t features)
12840{
12841
12842
12843
12844
12845
12846
12847 /* An skb whose GSO segments (payload plus MAC/IP/TCP headers) would
12848 * exceed 9700 bytes can cause a firmware panic, so GSO is dropped for
12849 * it. For speed, segments with gso_size <= 9000 are assumed not to
12850 * carry 700 bytes of headers and are passed through unchecked; only
12851 * skbs with gso_size > 9000 are checked with skb_gso_validate_mac_len().
12852 */
12853
12854 if (unlikely(skb_is_gso(skb) &&
12855 (skb_shinfo(skb)->gso_size > 9000) &&
12856 !skb_gso_validate_mac_len(skb, 9700)))
12857 features &= ~NETIF_F_GSO_MASK;
12858
12859 features = vlan_features_check(skb, features);
12860 return vxlan_features_check(skb, features);
12861}
12862
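/* Add or remove a single VLAN id in HW: directly via the VLAN object for a
 * PF, or through a request to the PF for a VF.
 */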
12863static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12864{
12865 int rc;
12866
12867 if (IS_PF(bp)) {
12868 unsigned long ramrod_flags = 0;
12869
12870 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12871 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
12872 add, &ramrod_flags);
12873 } else {
12874 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
12875 }
12876
12877 return rc;
12878}
12879
12880static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
12881{
12882 struct bnx2x_vlan_entry *vlan;
12883 int rc = 0;
12884
	/* Push any VLANs not yet configured in HW, as long as credit allows */
12886 list_for_each_entry(vlan, &bp->vlan_reg, link) {
12887 if (vlan->hw)
12888 continue;
12889
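		/* Once the HW filtering credit is exhausted, bail out; the
		 * caller treats the error as "accept any VLAN" instead.
		 */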
12890 if (bp->vlan_cnt >= bp->vlan_credit)
12891 return -ENOBUFS;
12892
12893 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
12894 if (rc) {
12895 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
12896 return rc;
12897 }
12898
12899 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
12900 vlan->hw = true;
12901 bp->vlan_cnt++;
12902 }
12903
12904 return 0;
12905}
12906
12907static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
12908{
12909 bool need_accept_any_vlan;
12910
12911 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
12912
12913 if (bp->accept_any_vlan != need_accept_any_vlan) {
12914 bp->accept_any_vlan = need_accept_any_vlan;
12915 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
12916 bp->accept_any_vlan ? "raised" : "cleared");
12917 if (set_rx_mode) {
12918 if (IS_PF(bp))
12919 bnx2x_set_rx_mode_inner(bp);
12920 else
12921 bnx2x_vfpf_storm_rx_mode(bp);
12922 }
12923 }
12924}
12925
12926int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
12927{
	/* Don't set rx mode here; our caller will do it */
12929 bnx2x_vlan_configure(bp, false);
12930
12931 return 0;
12932}
12933
12934static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
12935{
12936 struct bnx2x *bp = netdev_priv(dev);
12937 struct bnx2x_vlan_entry *vlan;
12938
12939 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
12940
12941 vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
12942 if (!vlan)
12943 return -ENOMEM;
12944
12945 vlan->vid = vid;
12946 vlan->hw = false;
12947 list_add_tail(&vlan->link, &bp->vlan_reg);
12948
12949 if (netif_running(dev))
12950 bnx2x_vlan_configure(bp, true);
12951
12952 return 0;
12953}
12954
12955static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
12956{
12957 struct bnx2x *bp = netdev_priv(dev);
12958 struct bnx2x_vlan_entry *vlan;
12959 bool found = false;
12960 int rc = 0;
12961
12962 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
12963
12964 list_for_each_entry(vlan, &bp->vlan_reg, link)
12965 if (vlan->vid == vid) {
12966 found = true;
12967 break;
12968 }
12969
12970 if (!found) {
12971 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
12972 return -EINVAL;
12973 }
12974
12975 if (netif_running(dev) && vlan->hw) {
12976 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
12977 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
12978 bp->vlan_cnt--;
12979 }
12980
12981 list_del(&vlan->link);
12982 kfree(vlan);
12983
12984 if (netif_running(dev))
12985 bnx2x_vlan_configure(bp, true);
12986
12987 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
12988
12989 return rc;
12990}
12991
12992static const struct net_device_ops bnx2x_netdev_ops = {
12993 .ndo_open = bnx2x_open,
12994 .ndo_stop = bnx2x_close,
12995 .ndo_start_xmit = bnx2x_start_xmit,
12996 .ndo_select_queue = bnx2x_select_queue,
12997 .ndo_set_rx_mode = bnx2x_set_rx_mode,
12998 .ndo_set_mac_address = bnx2x_change_mac_addr,
12999 .ndo_validate_addr = bnx2x_validate_addr,
13000 .ndo_eth_ioctl = bnx2x_ioctl,
13001 .ndo_change_mtu = bnx2x_change_mtu,
13002 .ndo_fix_features = bnx2x_fix_features,
13003 .ndo_set_features = bnx2x_set_features,
13004 .ndo_tx_timeout = bnx2x_tx_timeout,
13005 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
13006 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
13007 .ndo_setup_tc = __bnx2x_setup_tc,
13008#ifdef CONFIG_BNX2X_SRIOV
13009 .ndo_set_vf_mac = bnx2x_set_vf_mac,
13010 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
13011 .ndo_get_vf_config = bnx2x_get_vf_config,
13012 .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk,
13013#endif
13014#ifdef NETDEV_FCOE_WWNN
13015 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
13016#endif
13017
13018 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
13019 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
13020 .ndo_features_check = bnx2x_features_check,
13021};
13022
13023static int bnx2x_set_coherency_mask(struct bnx2x *bp)
13024{
13025 struct device *dev = &bp->pdev->dev;
13026
13027 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
13028 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
13029 dev_err(dev, "System does not support DMA, aborting\n");
13030 return -EIO;
13031 }
13032
13033 return 0;
13034}
13035
13036static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13037{
13038 if (bp->flags & AER_ENABLED) {
13039 pci_disable_pcie_error_reporting(bp->pdev);
13040 bp->flags &= ~AER_ENABLED;
13041 }
13042}
13043
13044static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13045 struct net_device *dev, unsigned long board_type)
13046{
13047 int rc;
13048 u32 pci_cfg_dword;
13049 bool chip_is_e1x = (board_type == BCM57710 ||
13050 board_type == BCM57711 ||
13051 board_type == BCM57711E);
13052
13053 SET_NETDEV_DEV(dev, &pdev->dev);
13054
13055 bp->dev = dev;
13056 bp->pdev = pdev;
13057
13058 rc = pci_enable_device(pdev);
13059 if (rc) {
13060 dev_err(&bp->pdev->dev,
13061 "Cannot enable PCI device, aborting\n");
13062 goto err_out;
13063 }
13064
13065 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13066 dev_err(&bp->pdev->dev,
13067 "Cannot find PCI device base address, aborting\n");
13068 rc = -ENODEV;
13069 goto err_out_disable;
13070 }
13071
13072 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13073 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13074 rc = -ENODEV;
13075 goto err_out_disable;
13076 }
13077
13078 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13079 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13080 PCICFG_REVESION_ID_ERROR_VAL) {
13081 pr_err("PCI device error, probably due to fan failure, aborting\n");
13082 rc = -ENODEV;
13083 goto err_out_disable;
13084 }
13085
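	/* Request the PCI regions only on the first enable of the device */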
13086 if (atomic_read(&pdev->enable_cnt) == 1) {
13087 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13088 if (rc) {
13089 dev_err(&bp->pdev->dev,
13090 "Cannot obtain PCI resources, aborting\n");
13091 goto err_out_disable;
13092 }
13093
13094 pci_set_master(pdev);
13095 pci_save_state(pdev);
13096 }
13097
13098 if (IS_PF(bp)) {
13099 if (!pdev->pm_cap) {
13100 dev_err(&bp->pdev->dev,
13101 "Cannot find power management capability, aborting\n");
13102 rc = -EIO;
13103 goto err_out_release;
13104 }
13105 }
13106
13107 if (!pci_is_pcie(pdev)) {
13108 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13109 rc = -EIO;
13110 goto err_out_release;
13111 }
13112
13113 rc = bnx2x_set_coherency_mask(bp);
13114 if (rc)
13115 goto err_out_release;
13116
13117 dev->mem_start = pci_resource_start(pdev, 0);
13118 dev->base_addr = dev->mem_start;
13119 dev->mem_end = pci_resource_end(pdev, 0);
13120
13121 dev->irq = pdev->irq;
13122
13123 bp->regview = pci_ioremap_bar(pdev, 0);
13124 if (!bp->regview) {
13125 dev_err(&bp->pdev->dev,
13126 "Cannot map register space, aborting\n");
13127 rc = -ENOMEM;
13128 goto err_out_release;
13129 }
	/* On E1/E1H use the PCI function number given by the kernel.
	 * On E2/E3 read the physical function number from the ME register,
	 * since with Physical Device Assignment the BDF seen by the kernel
	 * may be arbitrary (depending on the hypervisor).
	 */
13136 if (chip_is_e1x) {
13137 bp->pf_num = PCI_FUNC(pdev->devfn);
13138 } else {
13139
13140 pci_read_config_dword(bp->pdev,
13141 PCICFG_ME_REGISTER, &pci_cfg_dword);
13142 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13143 ME_REG_ABS_PF_NUM_SHIFT);
13144 }
13145 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
13146
	/* clean indirect addresses */
13148 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13149 PCICFG_VENDOR_ID_OFFSET);
13150
	/* Set PCIe reset type to fundamental for EEH recovery */
13152 pdev->needs_freset = 1;
13153
	/* AER (Advanced Error Reporting) configuration */
13155 rc = pci_enable_pcie_error_reporting(pdev);
13156 if (!rc)
13157 bp->flags |= AER_ENABLED;
13158 else
13159 BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
13160
	/* Clean the following indirect addresses for all functions since
	 * they are not used by the driver.
	 */
13165 if (IS_PF(bp)) {
13166 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13167 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13168 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13169 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13170
13171 if (chip_is_e1x) {
13172 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13173 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13174 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13175 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13176 }
13177
		/* Enable internal target-read (in case we are probed after PF
		 * FLR). Must be done prior to any BAR read access. Only for
		 * 57712 and up.
		 */
13182 if (!chip_is_e1x)
13183 REG_WR(bp,
13184 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13185 }
13186
13187 dev->watchdog_timeo = TX_TIMEOUT;
13188
13189 dev->netdev_ops = &bnx2x_netdev_ops;
13190 bnx2x_set_ethtool_ops(bp, dev);
13191
13192 dev->priv_flags |= IFF_UNICAST_FLT;
13193
13194 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13195 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13196 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
13197 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
13198 if (!chip_is_e1x) {
13199 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13200 NETIF_F_GSO_IPXIP4 |
13201 NETIF_F_GSO_UDP_TUNNEL |
13202 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13203 NETIF_F_GSO_PARTIAL;
13204
13205 dev->hw_enc_features =
13206 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13207 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13208 NETIF_F_GSO_IPXIP4 |
13209 NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13210 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
13211 NETIF_F_GSO_PARTIAL;
13212
13213 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
13214 NETIF_F_GSO_UDP_TUNNEL_CSUM;
13215
13216 if (IS_PF(bp))
13217 dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels;
13218 }
13219
13220 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13221 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13222
13223 if (IS_PF(bp)) {
13224 if (chip_is_e1x)
13225 bp->accept_any_vlan = true;
13226 else
13227 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13228 }
13229
	/* For a VF, whether VLAN filtering is supported is only known after
	 * the ACQUIRE response from the PF (handled later in probe).
	 */
13233 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13234 dev->features |= NETIF_F_HIGHDMA;
13235 if (dev->features & NETIF_F_LRO)
13236 dev->features &= ~NETIF_F_GRO_HW;
13237
	/* Add Loopback capability to the device */
13239 dev->hw_features |= NETIF_F_LOOPBACK;
13240
13241#ifdef BCM_DCBNL
13242 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13243#endif
13244
	/* Supported MTU range */
13246 dev->min_mtu = ETH_MIN_PACKET_SIZE;
13247 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
13248
13249
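	/* MDIO structure consumed by the generic mdio_mii_ioctl() handler */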
13250 bp->mdio.prtad = MDIO_PRTAD_NONE;
13251 bp->mdio.mmds = 0;
13252 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13253 bp->mdio.dev = dev;
13254 bp->mdio.mdio_read = bnx2x_mdio_read;
13255 bp->mdio.mdio_write = bnx2x_mdio_write;
13256
13257 return 0;
13258
13259err_out_release:
13260 if (atomic_read(&pdev->enable_cnt) == 1)
13261 pci_release_regions(pdev);
13262
13263err_out_disable:
13264 pci_disable_device(pdev);
13265
13266err_out:
13267 return rc;
13268}
13269
13270static int bnx2x_check_firmware(struct bnx2x *bp)
13271{
13272 const struct firmware *firmware = bp->firmware;
13273 struct bnx2x_fw_file_hdr *fw_hdr;
13274 struct bnx2x_fw_file_section *sections;
13275 u32 offset, len, num_ops;
13276 __be16 *ops_offsets;
13277 int i;
13278 const u8 *fw_ver;
13279
13280 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
13281 BNX2X_ERR("Wrong FW size\n");
13282 return -EINVAL;
13283 }
13284
13285 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13286 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13287
	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data.
	 */
13290 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13291 offset = be32_to_cpu(sections[i].offset);
13292 len = be32_to_cpu(sections[i].len);
13293 if (offset + len > firmware->size) {
13294 BNX2X_ERR("Section %d length is out of bounds\n", i);
13295 return -EINVAL;
13296 }
13297 }
13298
	/* Likewise for the init_ops offsets */
13300 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13301 ops_offsets = (__force __be16 *)(firmware->data + offset);
13302 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13303
13304 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13305 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13306 BNX2X_ERR("Section offset %d is out of bounds\n", i);
13307 return -EINVAL;
13308 }
13309 }
13310
	/* Check FW version */
13312 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13313 fw_ver = firmware->data + offset;
13314 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13315 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13316 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13317 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13318 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13319 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
13320 BCM_5710_FW_MAJOR_VERSION,
13321 BCM_5710_FW_MINOR_VERSION,
13322 BCM_5710_FW_REVISION_VERSION,
13323 BCM_5710_FW_ENGINEERING_VERSION);
13324 return -EINVAL;
13325 }
13326
13327 return 0;
13328}
13329
13330static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13331{
13332 const __be32 *source = (const __be32 *)_source;
13333 u32 *target = (u32 *)_target;
13334 u32 i;
13335
13336 for (i = 0; i < n/4; i++)
13337 target[i] = be32_to_cpu(source[i]);
13338}
13339
/* Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), raw_data(32bit, big endian)}
 */
13344static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13345{
13346 const __be32 *source = (const __be32 *)_source;
13347 struct raw_op *target = (struct raw_op *)_target;
13348 u32 i, j, tmp;
13349
13350 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13351 tmp = be32_to_cpu(source[j]);
13352 target[i].op = (tmp >> 24) & 0xff;
13353 target[i].offset = tmp & 0xffffff;
13354 target[i].raw_data = be32_to_cpu(source[j + 1]);
13355 }
13356}
13357
/* IRO array is stored in the following format:
 * {base(32bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 */
13361static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13362{
13363 const __be32 *source = (const __be32 *)_source;
13364 struct iro *target = (struct iro *)_target;
13365 u32 i, j, tmp;
13366
13367 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13368 target[i].base = be32_to_cpu(source[j]);
13369 j++;
13370 tmp = be32_to_cpu(source[j]);
13371 target[i].m1 = (tmp >> 16) & 0xffff;
13372 target[i].m2 = tmp & 0xffff;
13373 j++;
13374 tmp = be32_to_cpu(source[j]);
13375 target[i].m3 = (tmp >> 16) & 0xffff;
13376 target[i].size = tmp & 0xffff;
13377 j++;
13378 }
13379}
13380
13381static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13382{
13383 const __be16 *source = (const __be16 *)_source;
13384 u16 *target = (u16 *)_target;
13385 u32 i;
13386
13387 for (i = 0; i < n/2; i++)
13388 target[i] = be16_to_cpu(source[i]);
13389}
13390
13391#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13392do { \
13393 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13394 bp->arr = kmalloc(len, GFP_KERNEL); \
13395 if (!bp->arr) \
13396 goto lbl; \
13397 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13398 (u8 *)bp->arr, len); \
13399} while (0)
13400
13401static int bnx2x_init_firmware(struct bnx2x *bp)
13402{
13403 const char *fw_file_name;
13404 struct bnx2x_fw_file_hdr *fw_hdr;
13405 int rc;
13406
13407 if (bp->firmware)
13408 return 0;
13409
13410 if (CHIP_IS_E1(bp))
13411 fw_file_name = FW_FILE_NAME_E1;
13412 else if (CHIP_IS_E1H(bp))
13413 fw_file_name = FW_FILE_NAME_E1H;
13414 else if (!CHIP_IS_E1x(bp))
13415 fw_file_name = FW_FILE_NAME_E2;
13416 else {
13417 BNX2X_ERR("Unsupported chip revision\n");
13418 return -EINVAL;
13419 }
13420 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13421
13422 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13423 if (rc) {
13424 BNX2X_ERR("Can't load firmware file %s\n",
13425 fw_file_name);
13426 goto request_firmware_exit;
13427 }
13428
13429 rc = bnx2x_check_firmware(bp);
13430 if (rc) {
13431 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13432 goto request_firmware_exit;
13433 }
13434
13435 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13436
	/* Initialize pointers to the firmware init arrays */
	/* Blob */
13439 rc = -ENOMEM;
13440 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13441
	/* Opcodes */
13443 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13444
	/* Offsets */
13446 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13447 be16_to_cpu_n);
13448
	/* STORMs firmware */
13450 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13451 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13452 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13453 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13454 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13455 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13456 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13457 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13458 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13459 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13460 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13461 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13462 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13463 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13464 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13465 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13466
13467 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13468
13469 return 0;
13470
13471iro_alloc_err:
13472 kfree(bp->init_ops_offsets);
13473init_offsets_alloc_err:
13474 kfree(bp->init_ops);
13475init_ops_alloc_err:
13476 kfree(bp->init_data);
13477request_firmware_exit:
13478 release_firmware(bp->firmware);
13479 bp->firmware = NULL;
13480
13481 return rc;
13482}
13483
13484static void bnx2x_release_firmware(struct bnx2x *bp)
13485{
13486 kfree(bp->init_ops_offsets);
13487 kfree(bp->init_ops);
13488 kfree(bp->init_data);
13489 release_firmware(bp->firmware);
13490 bp->firmware = NULL;
13491}
13492
13493static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13494 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13495 .init_hw_cmn = bnx2x_init_hw_common,
13496 .init_hw_port = bnx2x_init_hw_port,
13497 .init_hw_func = bnx2x_init_hw_func,
13498
13499 .reset_hw_cmn = bnx2x_reset_common,
13500 .reset_hw_port = bnx2x_reset_port,
13501 .reset_hw_func = bnx2x_reset_func,
13502
13503 .gunzip_init = bnx2x_gunzip_init,
13504 .gunzip_end = bnx2x_gunzip_end,
13505
13506 .init_fw = bnx2x_init_firmware,
13507 .release_fw = bnx2x_release_firmware,
13508};
13509
13510void bnx2x__init_func_obj(struct bnx2x *bp)
13511{
	/* Prepare DMAE related driver resources */
13513 bnx2x_setup_dmae(bp);
13514
13515 bnx2x_init_func_obj(bp, &bp->func_obj,
13516 bnx2x_sp(bp, func_rdata),
13517 bnx2x_sp_mapping(bp, func_rdata),
13518 bnx2x_sp(bp, func_afex_rdata),
13519 bnx2x_sp_mapping(bp, func_afex_rdata),
13520 &bnx2x_func_sp_drv);
13521}
13522
/* must be called after sriov-enable */
13524static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13525{
13526 int cid_count = BNX2X_L2_MAX_CID(bp);
13527
13528 if (IS_SRIOV(bp))
13529 cid_count += BNX2X_VF_CIDS;
13530
13531 if (CNIC_SUPPORT(bp))
13532 cid_count += CNIC_CID_MAX;
13533
13534 return roundup(cid_count, QM_CID_ROUND);
13535}
13536
13537
/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default status blocks
 * @pdev:	pci device
 * @cnic_cnt:	number of status blocks reserved for CNIC
 */
13543static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13544{
13545 int index;
13546 u16 control = 0;
13547
	/* If MSI-X is not supported - return the number of SBs needed to
	 * support one fast path queue: one FP queue + SB for CNIC.
	 */
13552 if (!pdev->msix_cap) {
13553 dev_info(&pdev->dev, "no msix capability found\n");
13554 return 1 + cnic_cnt;
13555 }
13556 dev_info(&pdev->dev, "msix capability found\n");
13557
	/* The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: the number of
	 * SBs without the default SB.
	 */
13565 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13566
13567 index = control & PCI_MSIX_FLAGS_QSIZE;
13568
13569 return index;
13570}
13571
13572static int set_max_cos_est(int chip_id)
13573{
13574 switch (chip_id) {
13575 case BCM57710:
13576 case BCM57711:
13577 case BCM57711E:
13578 return BNX2X_MULTI_TX_COS_E1X;
13579 case BCM57712:
13580 case BCM57712_MF:
13581 return BNX2X_MULTI_TX_COS_E2_E3A0;
13582 case BCM57800:
13583 case BCM57800_MF:
13584 case BCM57810:
13585 case BCM57810_MF:
13586 case BCM57840_4_10:
13587 case BCM57840_2_20:
13588 case BCM57840_O:
13589 case BCM57840_MFO:
13590 case BCM57840_MF:
13591 case BCM57811:
13592 case BCM57811_MF:
13593 return BNX2X_MULTI_TX_COS_E3B0;
13594 case BCM57712_VF:
13595 case BCM57800_VF:
13596 case BCM57810_VF:
13597 case BCM57840_VF:
13598 case BCM57811_VF:
13599 return 1;
13600 default:
13601 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13602 return -ENODEV;
13603 }
13604}
13605
13606static int set_is_vf(int chip_id)
13607{
13608 switch (chip_id) {
13609 case BCM57712_VF:
13610 case BCM57800_VF:
13611 case BCM57810_VF:
13612 case BCM57840_VF:
13613 case BCM57811_VF:
13614 return true;
13615 default:
13616 return false;
13617 }
13618}
13619
13620
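/* Timesync generator (tsgen) register offsets */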
13621#define tsgen_ctrl 0x0
13622#define tsgen_freecount 0x10
13623#define tsgen_synctime_t0 0x20
13624#define tsgen_offset_t0 0x28
13625#define tsgen_drift_t0 0x30
13626#define tsgen_synctime_t1 0x58
13627#define tsgen_offset_t1 0x60
13628#define tsgen_drift_t1 0x68
13629
13630
13631static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13632 int best_val, int best_period)
13633{
13634 struct bnx2x_func_state_params func_params = {NULL};
13635 struct bnx2x_func_set_timesync_params *set_timesync_params =
13636 &func_params.params.set_timesync;
13637
	/* Prepare parameters for function state transitions */
13639 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13640 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13641
13642 func_params.f_obj = &bp->func_obj;
13643 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13644
	/* Function parameters */
13646 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13647 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13648 set_timesync_params->add_sub_drift_adjust_value =
13649 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13650 set_timesync_params->drift_adjust_value = best_val;
13651 set_timesync_params->drift_adjust_period = best_period;
13652
13653 return bnx2x_func_state_change(bp, &func_params);
13654}
13655
13656static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13657{
13658 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13659 int rc;
13660 int drift_dir = 1;
13661 int val, period, period1, period2, dif, dif1, dif2;
13662 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13663
13664 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13665
13666 if (!netif_running(bp->dev)) {
13667 DP(BNX2X_MSG_PTP,
13668 "PTP adjfreq called while the interface is down\n");
13669 return -ENETDOWN;
13670 }
13671
13672 if (ppb < 0) {
13673 ppb = -ppb;
13674 drift_dir = 0;
13675 }
13676
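	/* The HW drift adjustment takes an absolute value plus an add/subtract
	 * direction; pick the (value, period) pair that best approximates the
	 * requested parts-per-billion drift.
	 */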
13677 if (ppb == 0) {
13678 best_val = 1;
13679 best_period = 0x1FFFFFF;
13680 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13681 best_val = 31;
13682 best_period = 1;
13683 } else {
		/* Exhaustive search for the closest (value, period) pair;
		 * values that are multiples of 8 are skipped, apparently
		 * because the HW does not support them.
		 */
13687 for (val = 0; val <= 31; val++) {
13688 if ((val & 0x7) == 0)
13689 continue;
13690 period1 = val * 1000000 / ppb;
13691 period2 = period1 + 1;
13692 if (period1 != 0)
13693 dif1 = ppb - (val * 1000000 / period1);
13694 else
13695 dif1 = BNX2X_MAX_PHC_DRIFT;
13696 if (dif1 < 0)
13697 dif1 = -dif1;
13698 dif2 = ppb - (val * 1000000 / period2);
13699 if (dif2 < 0)
13700 dif2 = -dif2;
13701 dif = (dif1 < dif2) ? dif1 : dif2;
13702 period = (dif1 < dif2) ? period1 : period2;
13703 if (dif < best_dif) {
13704 best_dif = dif;
13705 best_val = val;
13706 best_period = period;
13707 }
13708 }
13709 }
13710
13711 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13712 best_period);
13713 if (rc) {
13714 BNX2X_ERR("Failed to set drift\n");
13715 return -EFAULT;
13716 }
13717
13718 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13719 best_period);
13720
13721 return 0;
13722}
13723
13724static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13725{
13726 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13727
13728 if (!netif_running(bp->dev)) {
13729 DP(BNX2X_MSG_PTP,
13730 "PTP adjtime called while the interface is down\n");
13731 return -ENETDOWN;
13732 }
13733
13734 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13735
13736 timecounter_adjtime(&bp->timecounter, delta);
13737
13738 return 0;
13739}
13740
13741static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13742{
13743 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13744 u64 ns;
13745
13746 if (!netif_running(bp->dev)) {
13747 DP(BNX2X_MSG_PTP,
13748 "PTP gettime called while the interface is down\n");
13749 return -ENETDOWN;
13750 }
13751
13752 ns = timecounter_read(&bp->timecounter);
13753
13754 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13755
13756 *ts = ns_to_timespec64(ns);
13757
13758 return 0;
13759}
13760
13761static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13762 const struct timespec64 *ts)
13763{
13764 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13765 u64 ns;
13766
13767 if (!netif_running(bp->dev)) {
13768 DP(BNX2X_MSG_PTP,
13769 "PTP settime called while the interface is down\n");
13770 return -ENETDOWN;
13771 }
13772
13773 ns = timespec64_to_ns(ts);
13774
13775 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13776
13777
13778 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13779
13780 return 0;
13781}
13782
/* Enable (or disable) ancillary features of the PHC subsystem */
13784static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13785 struct ptp_clock_request *rq, int on)
13786{
13787 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13788
13789 BNX2X_ERR("PHC ancillary features are not supported\n");
13790 return -ENOTSUPP;
13791}
13792
13793void bnx2x_register_phc(struct bnx2x *bp)
13794{
	/* Fill the ptp_clock_info struct and register the PTP clock */
13796 bp->ptp_clock_info.owner = THIS_MODULE;
13797 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13798 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT;
13799 bp->ptp_clock_info.n_alarm = 0;
13800 bp->ptp_clock_info.n_ext_ts = 0;
13801 bp->ptp_clock_info.n_per_out = 0;
13802 bp->ptp_clock_info.pps = 0;
13803 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13804 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13805 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13806 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13807 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13808
13809 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13810 if (IS_ERR(bp->ptp_clock)) {
13811 bp->ptp_clock = NULL;
13812 BNX2X_ERR("PTP clock registration failed\n");
13813 }
13814}
13815
13816static int bnx2x_init_one(struct pci_dev *pdev,
13817 const struct pci_device_id *ent)
13818{
13819 struct net_device *dev = NULL;
13820 struct bnx2x *bp;
13821 int rc, max_non_def_sbs;
13822 int rx_count, tx_count, rss_count, doorbell_size;
13823 int max_cos_est;
13824 bool is_vf;
13825 int cnic_cnt;
13826
	/* In a kdump kernel, give the management FW a few seconds to forget
	 * the interfaces of the crashed kernel before re-probing, allowing a
	 * proper re-load.
	 */
13830 if (is_kdump_kernel()) {
13831 ktime_t now = ktime_get_boottime();
13832 ktime_t fw_ready_time = ktime_set(5, 0);
13833
13834 if (ktime_before(now, fw_ready_time))
13835 msleep(ktime_ms_delta(fw_ready_time, now));
13836 }
13837
	/* An estimated maximum supported CoS number according to the chip
	 * version; this is only used for sizing the Tx queues, the real
	 * number of CoSes is determined later during chip init.
	 */
13846 max_cos_est = set_max_cos_est(ent->driver_data);
13847 if (max_cos_est < 0)
13848 return max_cos_est;
13849 is_vf = set_is_vf(ent->driver_data);
13850 cnic_cnt = is_vf ? 0 : 1;
13851
13852 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13853
	/* add another SB for a VF, as it has no default SB */
13855 max_non_def_sbs += is_vf ? 1 : 0;
13856
	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
13858 rss_count = max_non_def_sbs - cnic_cnt;
13859
13860 if (rss_count < 1)
13861 return -EINVAL;
13862
	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
13864 rx_count = rss_count + cnic_cnt;
13865
	/* Maximum number of netdev Tx queues:
	 * maximum TSS queues * max_cos + FCoE L2
	 */
13869 tx_count = rss_count * max_cos_est + cnic_cnt;
13870
	/* dev is zeroed in alloc_etherdev_mqs() */
13872 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13873 if (!dev)
13874 return -ENOMEM;
13875
13876 bp = netdev_priv(dev);
13877
13878 bp->flags = 0;
13879 if (is_vf)
13880 bp->flags |= IS_VF_FLAG;
13881
13882 bp->igu_sb_cnt = max_non_def_sbs;
13883 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
13884 bp->msg_enable = debug;
13885 bp->cnic_support = cnic_cnt;
13886 bp->cnic_probe = bnx2x_cnic_probe;
13887
13888 pci_set_drvdata(pdev, dev);
13889
13890 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
13891 if (rc < 0) {
13892 free_netdev(dev);
13893 return rc;
13894 }
13895
13896 BNX2X_DEV_INFO("This is a %s function\n",
13897 IS_PF(bp) ? "physical" : "virtual");
13898 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
13899 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
13900 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
13901 tx_count, rx_count);
13902
13903 rc = bnx2x_init_bp(bp);
13904 if (rc)
13905 goto init_one_exit;
13906
	/* Map doorbells here, as we need the real value of bp->max_cos
	 * (initialized in bnx2x_init_bp()) to determine the number of
	 * L2 connections.
	 */
13911 if (IS_VF(bp)) {
13912 bp->doorbells = bnx2x_vf_doorbells(bp);
13913 rc = bnx2x_vf_pci_alloc(bp);
13914 if (rc)
13915 goto init_one_freemem;
13916 } else {
13917 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
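		/* One doorbell page of (1 << BNX2X_DB_SHIFT) bytes is needed
		 * per L2 CID, so BAR2 must be at least that large.
		 */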
13918 if (doorbell_size > pci_resource_len(pdev, 2)) {
13919 dev_err(&bp->pdev->dev,
13920 "Cannot map doorbells, bar size too small, aborting\n");
13921 rc = -ENOMEM;
13922 goto init_one_freemem;
13923 }
13924 bp->doorbells = ioremap(pci_resource_start(pdev, 2),
13925 doorbell_size);
13926 }
13927 if (!bp->doorbells) {
13928 dev_err(&bp->pdev->dev,
13929 "Cannot map doorbell space, aborting\n");
13930 rc = -ENOMEM;
13931 goto init_one_freemem;
13932 }
13933
13934 if (IS_VF(bp)) {
13935 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13936 if (rc)
13937 goto init_one_freemem;
13938
13939#ifdef CONFIG_BNX2X_SRIOV
		/* A VF behind an old hypervisor or an old PF does not support VLAN filtering */
13941 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
13942 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13943 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13944 }
13945#endif
13946 }
13947
	/* Enable SRIOV if a capability is found in the configuration space */
13949 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
13950 if (rc)
13951 goto init_one_freemem;
13952
13953
13954 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
13955 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
13956
13957
13958 if (CHIP_IS_E1x(bp))
13959 bp->flags |= NO_FCOE_FLAG;
13960
13961
13962 bnx2x_set_num_queues(bp);
13963

	/* Configure interrupt mode: try to enable MSI-X/MSI if needed */
13967 rc = bnx2x_set_int_mode(bp);
13968 if (rc) {
13969 dev_err(&pdev->dev, "Cannot set interrupts\n");
13970 goto init_one_freemem;
13971 }
13972 BNX2X_DEV_INFO("set interrupts successfully\n");
13973
13974
13975 rc = register_netdev(dev);
13976 if (rc) {
13977 dev_err(&pdev->dev, "Cannot register net device\n");
13978 goto init_one_freemem;
13979 }
13980 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
13981
13982 if (!NO_FCOE(bp)) {
13983
13984 rtnl_lock();
13985 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
13986 rtnl_unlock();
13987 }
13988 BNX2X_DEV_INFO(
13989 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
13990 board_info[ent->driver_data].name,
13991 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13992 dev->base_addr, bp->pdev->irq, dev->dev_addr);
13993 pcie_print_link_status(bp->pdev);
13994
13995 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
13996 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
13997
13998 return 0;
13999
14000init_one_freemem:
14001 bnx2x_free_mem_bp(bp);
14002
14003init_one_exit:
14004 bnx2x_disable_pcie_error_reporting(bp);
14005
14006 if (bp->regview)
14007 iounmap(bp->regview);
14008
14009 if (IS_PF(bp) && bp->doorbells)
14010 iounmap(bp->doorbells);
14011
14012 free_netdev(dev);
14013
14014 if (atomic_read(&pdev->enable_cnt) == 1)
14015 pci_release_regions(pdev);
14016
14017 pci_disable_device(pdev);
14018
14019 return rc;
14020}
14021
14022static void __bnx2x_remove(struct pci_dev *pdev,
14023 struct net_device *dev,
14024 struct bnx2x *bp,
14025 bool remove_netdev)
14026{
	/* Delete the storage MAC address */
14028 if (!NO_FCOE(bp)) {
14029 rtnl_lock();
14030 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14031 rtnl_unlock();
14032 }
14033
14034#ifdef BCM_DCBNL
14035
14036 bnx2x_dcbnl_update_applist(bp, true);
14037#endif
14038
14039 if (IS_PF(bp) &&
14040 !BP_NOMCP(bp) &&
14041 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14042 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14043
	/* Close the interface: either unregister it completely or just dev_close() it */
14045 if (remove_netdev) {
14046 unregister_netdev(dev);
14047 } else {
14048 rtnl_lock();
14049 dev_close(dev);
14050 rtnl_unlock();
14051 }
14052
14053 bnx2x_iov_remove_one(bp);
14054
	/* Bring the device back to D0 before touching its registers */
14056 if (IS_PF(bp)) {
14057 bnx2x_set_power_state(bp, PCI_D0);
14058 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14059
		/* Set endianness registers back to reset values, in case the
		 * next driver boots in a different endianness environment.
		 */
14063 bnx2x_reset_endianity(bp);
14064 }
14065
14066
14067 bnx2x_disable_msi(bp);
14068
14069
14070 if (IS_PF(bp))
14071 bnx2x_set_power_state(bp, PCI_D3hot);
14072
	/* Make sure the sp_rtnl (reset) task is not running before continuing */
14074 cancel_delayed_work_sync(&bp->sp_rtnl_task);
14075
14076
14077 if (IS_VF(bp))
14078 bnx2x_vfpf_release(bp);
14079
14080
14081 if (system_state == SYSTEM_POWER_OFF) {
14082 pci_wake_from_d3(pdev, bp->wol);
14083 pci_set_power_state(pdev, PCI_D3hot);
14084 }
14085
14086 bnx2x_disable_pcie_error_reporting(bp);
14087 if (remove_netdev) {
14088 if (bp->regview)
14089 iounmap(bp->regview);
14090
		/* For a VF the doorbells are part of the regview and were
		 * unmapped along with it above.
		 */
14094 if (IS_PF(bp)) {
14095 if (bp->doorbells)
14096 iounmap(bp->doorbells);
14097
14098 bnx2x_release_firmware(bp);
14099 } else {
14100 bnx2x_vf_pci_dealloc(bp);
14101 }
14102 bnx2x_free_mem_bp(bp);
14103
14104 free_netdev(dev);
14105
14106 if (atomic_read(&pdev->enable_cnt) == 1)
14107 pci_release_regions(pdev);
14108
14109 pci_disable_device(pdev);
14110 }
14111}
14112
14113static void bnx2x_remove_one(struct pci_dev *pdev)
14114{
14115 struct net_device *dev = pci_get_drvdata(pdev);
14116 struct bnx2x *bp;
14117
14118 if (!dev) {
14119 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14120 return;
14121 }
14122 bp = netdev_priv(dev);
14123
14124 __bnx2x_remove(pdev, dev, bp, true);
14125}
14126
14127static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14128{
14129 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14130
14131 bp->rx_mode = BNX2X_RX_MODE_NONE;
14132
14133 if (CNIC_LOADED(bp))
14134 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14135
	/* Stop Tx */
14137 bnx2x_tx_disable(bp);
14138
14139 bnx2x_del_all_napi(bp);
14140 if (CNIC_LOADED(bp))
14141 bnx2x_del_all_napi_cnic(bp);
14142 netdev_reset_tc(bp->dev);
14143
14144 del_timer_sync(&bp->timer);
14145 cancel_delayed_work_sync(&bp->sp_task);
14146 cancel_delayed_work_sync(&bp->period_task);
14147
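	/* Disable statistics; don't block for long if the stats semaphore
	 * is currently held.
	 */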
14148 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14149 bp->stats_state = STATS_STATE_DISABLED;
14150 up(&bp->stats_lock);
14151 }
14152
14153 bnx2x_save_statistics(bp);
14154
14155 netif_carrier_off(bp->dev);
14156
14157 return 0;
14158}
14159
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
14168static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
14169 pci_channel_state_t state)
14170{
14171 struct net_device *dev = pci_get_drvdata(pdev);
14172 struct bnx2x *bp = netdev_priv(dev);
14173
14174 rtnl_lock();
14175
14176 BNX2X_ERR("IO error detected\n");
14177
14178 netif_device_detach(dev);
14179
14180 if (state == pci_channel_io_perm_failure) {
14181 rtnl_unlock();
14182 return PCI_ERS_RESULT_DISCONNECT;
14183 }
14184
14185 if (netif_running(dev))
14186 bnx2x_eeh_nic_unload(bp);
14187
14188 bnx2x_prev_path_mark_eeh(bp);
14189
14190 pci_disable_device(pdev);
14191
14192 rtnl_unlock();
14193
14194
14195 return PCI_ERS_RESULT_NEED_RESET;
14196}
14197
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
14204static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14205{
14206 struct net_device *dev = pci_get_drvdata(pdev);
14207 struct bnx2x *bp = netdev_priv(dev);
14208 int i;
14209
14210 rtnl_lock();
14211 BNX2X_ERR("IO slot reset initializing...\n");
14212 if (pci_enable_device(pdev)) {
14213 dev_err(&pdev->dev,
14214 "Cannot re-enable PCI device after reset\n");
14215 rtnl_unlock();
14216 return PCI_ERS_RESULT_DISCONNECT;
14217 }
14218
14219 pci_set_master(pdev);
14220 pci_restore_state(pdev);
14221 pci_save_state(pdev);
14222
14223 if (netif_running(dev))
14224 bnx2x_set_power_state(bp, PCI_D0);
14225
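	/* If the interface was up, unload the driver state left over from
	 * before the error: clear the L2 'loaded' flag in SHMEM, drain Tx,
	 * release IRQs and free all fastpath memory so that a later open
	 * starts from a clean slate.
	 */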
14226 if (netif_running(dev)) {
14227 BNX2X_ERR("IO slot reset --> driver unload\n");

		/* MCP should have been reset; need to wait for shmem validity */
14230 if (bnx2x_init_shmem(bp)) {
14231 rtnl_unlock();
14232 return PCI_ERS_RESULT_DISCONNECT;
14233 }
14234
14235 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14236 u32 v;
14237
14238 v = SHMEM2_RD(bp,
14239 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14240 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14241 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14242 }
14243 bnx2x_drain_tx_queues(bp);
14244 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14245 bnx2x_netif_stop(bp, 1);
14246 bnx2x_free_irq(bp);
14247
14248
14249 bnx2x_send_unload_done(bp, true);
14250
14251 bp->sp_state = 0;
14252 bp->port.pmf = 0;
14253
14254 bnx2x_prev_unload(bp);
14255

		/* Free SKBs, SGEs, TPA pool and driver internals */
14259 bnx2x_squeeze_objects(bp);
14260 bnx2x_free_skbs(bp);
14261 for_each_rx_queue(bp, i)
14262 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14263 bnx2x_free_fp_mem(bp);
14264 bnx2x_free_mem(bp);
14265
14266 bp->state = BNX2X_STATE_CLOSED;
14267 }
14268
14269 rtnl_unlock();
14270
14271 return PCI_ERS_RESULT_RECOVERED;
14272}
14273
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
14281static void bnx2x_io_resume(struct pci_dev *pdev)
14282{
14283 struct net_device *dev = pci_get_drvdata(pdev);
14284 struct bnx2x *bp = netdev_priv(dev);
14285
14286 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14287 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14288 return;
14289 }
14290
14291 rtnl_lock();
14292
14293 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14294 DRV_MSG_SEQ_NUMBER_MASK;
14295
14296 if (netif_running(dev))
14297 bnx2x_nic_load(bp, LOAD_NORMAL);
14298
14299 netif_device_attach(dev);
14300
14301 rtnl_unlock();
14302}
14303
14304static const struct pci_error_handlers bnx2x_err_handler = {
14305 .error_detected = bnx2x_io_error_detected,
14306 .slot_reset = bnx2x_io_slot_reset,
14307 .resume = bnx2x_io_resume,
14308};
14309
14310static void bnx2x_shutdown(struct pci_dev *pdev)
14311{
14312 struct net_device *dev = pci_get_drvdata(pdev);
14313 struct bnx2x *bp;
14314
14315 if (!dev)
14316 return;
14317
14318 bp = netdev_priv(dev);
14319 if (!bp)
14320 return;
14321
14322 rtnl_lock();
14323 netif_device_detach(dev);
14324 rtnl_unlock();
14325
	/* Don't unregister the netdevice here; just detach it and run the
	 * common removal path, since fully removing the device at shutdown
	 * time is unnecessary and can trigger hangs in some scenarios.
	 */
14330 __bnx2x_remove(pdev, dev, bp, false);
14331}
14332
14333static struct pci_driver bnx2x_pci_driver = {
14334 .name = DRV_MODULE_NAME,
14335 .id_table = bnx2x_pci_tbl,
14336 .probe = bnx2x_init_one,
14337 .remove = bnx2x_remove_one,
14338 .driver.pm = &bnx2x_pm_ops,
14339 .err_handler = &bnx2x_err_handler,
14340#ifdef CONFIG_BNX2X_SRIOV
14341 .sriov_configure = bnx2x_sriov_configure,
14342#endif
14343 .shutdown = bnx2x_shutdown,
14344};
14345
14346static int __init bnx2x_init(void)
14347{
14348 int ret;
14349
14350 bnx2x_wq = create_singlethread_workqueue("bnx2x");
14351 if (bnx2x_wq == NULL) {
14352 pr_err("Cannot create workqueue\n");
14353 return -ENOMEM;
14354 }
14355 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14356 if (!bnx2x_iov_wq) {
14357 pr_err("Cannot create iov workqueue\n");
14358 destroy_workqueue(bnx2x_wq);
14359 return -ENOMEM;
14360 }
14361
14362 ret = pci_register_driver(&bnx2x_pci_driver);
14363 if (ret) {
14364 pr_err("Cannot register driver\n");
14365 destroy_workqueue(bnx2x_wq);
14366 destroy_workqueue(bnx2x_iov_wq);
14367 }
14368 return ret;
14369}
14370
14371static void __exit bnx2x_cleanup(void)
14372{
14373 struct list_head *pos, *q;
14374
14375 pci_unregister_driver(&bnx2x_pci_driver);
14376
14377 destroy_workqueue(bnx2x_wq);
14378 destroy_workqueue(bnx2x_iov_wq);
14379
14380
14381 list_for_each_safe(pos, q, &bnx2x_prev_list) {
14382 struct bnx2x_prev_path_list *tmp =
14383 list_entry(pos, struct bnx2x_prev_path_list, list);
14384 list_del(pos);
14385 kfree(tmp);
14386 }
14387}
14388
14389void bnx2x_notify_link_changed(struct bnx2x *bp)
14390{
14391 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14392}
14393
14394module_init(bnx2x_init);
14395module_exit(bnx2x_cleanup);
14396
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function waits until the ramrod completion returns.
 */
14404static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14405{
14406 unsigned long ramrod_flags = 0;
14407
14408 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
14409 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14410 &bp->iscsi_l2_mac_obj, true,
14411 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
14412}
14413
14414
14415static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14416{
14417 struct eth_spe *spe;
14418 int cxt_index, cxt_offset;
14419
14420#ifdef BNX2X_STOP_ON_ERROR
14421 if (unlikely(bp->panic))
14422 return;
14423#endif
14424
14425 spin_lock_bh(&bp->spq_lock);
14426 BUG_ON(bp->cnic_spq_pending < count);
14427 bp->cnic_spq_pending -= count;
14428
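	/* Move queued kernel work queue entries (KWQEs) to the slow path
	 * queue while SPQ/EQ credit is available.
	 */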
14429 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14430 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14431 & SPE_HDR_CONN_TYPE) >>
14432 SPE_HDR_CONN_TYPE_SHIFT;
14433 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14434 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14435
		/* Set validation for the iSCSI L2 client before sending the
		 * SETUP ramrod.
		 */
14439 if (type == ETH_CONNECTION_TYPE) {
14440 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14441 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14442 ILT_PAGE_CIDS;
14443 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14444 (cxt_index * ILT_PAGE_CIDS);
14445 bnx2x_set_ctx_validation(bp,
14446 &bp->context[cxt_index].
14447 vcxt[cxt_offset].eth,
14448 BNX2X_ISCSI_ETH_CID(bp));
14449 }
14450 }
14451

		/* There may be no more than 8 L2 and 8 L5 SPEs in the air.
		 * Also make sure the number of outstanding COMMON ramrods
		 * does not exceed what the EQ and SPQ can accommodate.
		 */
14458 if (type == ETH_CONNECTION_TYPE) {
14459 if (!atomic_read(&bp->cq_spq_left))
14460 break;
14461 else
14462 atomic_dec(&bp->cq_spq_left);
14463 } else if (type == NONE_CONNECTION_TYPE) {
14464 if (!atomic_read(&bp->eq_spq_left))
14465 break;
14466 else
14467 atomic_dec(&bp->eq_spq_left);
14468 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14469 (type == FCOE_CONNECTION_TYPE)) {
14470 if (bp->cnic_spq_pending >=
14471 bp->cnic_eth_dev.max_kwqe_pending)
14472 break;
14473 else
14474 bp->cnic_spq_pending++;
14475 } else {
14476 BNX2X_ERR("Unknown SPE type: %d\n", type);
14477 bnx2x_panic();
14478 break;
14479 }
14480
14481 spe = bnx2x_sp_get_next(bp);
14482 *spe = *bp->cnic_kwq_cons;
14483
14484 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14485 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14486
14487 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14488 bp->cnic_kwq_cons = bp->cnic_kwq;
14489 else
14490 bp->cnic_kwq_cons++;
14491 }
14492 bnx2x_sp_prod_update(bp);
14493 spin_unlock_bh(&bp->spq_lock);
14494}
14495
14496static int bnx2x_cnic_sp_queue(struct net_device *dev,
14497 struct kwqe_16 *kwqes[], u32 count)
14498{
14499 struct bnx2x *bp = netdev_priv(dev);
14500 int i;
14501
14502#ifdef BNX2X_STOP_ON_ERROR
14503 if (unlikely(bp->panic)) {
14504 BNX2X_ERR("Can't post to SP queue while panic\n");
14505 return -EIO;
14506 }
14507#endif
14508
14509 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14510 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14511 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14512 return -EAGAIN;
14513 }
14514
14515 spin_lock_bh(&bp->spq_lock);
14516
14517 for (i = 0; i < count; i++) {
14518 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14519
14520 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14521 break;
14522
14523 *bp->cnic_kwq_prod = *spe;
14524
14525 bp->cnic_kwq_pending++;
14526
14527 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14528 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14529 spe->data.update_data_addr.hi,
14530 spe->data.update_data_addr.lo,
14531 bp->cnic_kwq_pending);
14532
14533 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14534 bp->cnic_kwq_prod = bp->cnic_kwq;
14535 else
14536 bp->cnic_kwq_prod++;
14537 }
14538
14539 spin_unlock_bh(&bp->spq_lock);
14540
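	/* Kick slow path processing if there is room on the SPQ */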
14541 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14542 bnx2x_cnic_sp_post(bp, 0);
14543
14544 return i;
14545}
14546
14547static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14548{
14549 struct cnic_ops *c_ops;
14550 int rc = 0;
14551
14552 mutex_lock(&bp->cnic_mutex);
14553 c_ops = rcu_dereference_protected(bp->cnic_ops,
14554 lockdep_is_held(&bp->cnic_mutex));
14555 if (c_ops)
14556 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14557 mutex_unlock(&bp->cnic_mutex);
14558
14559 return rc;
14560}
14561
14562static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14563{
14564 struct cnic_ops *c_ops;
14565 int rc = 0;
14566
14567 rcu_read_lock();
14568 c_ops = rcu_dereference(bp->cnic_ops);
14569 if (c_ops)
14570 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14571 rcu_read_unlock();
14572
14573 return rc;
14574}
14575
14576
14577
14578
14579int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14580{
14581 struct cnic_ctl_info ctl = {0};
14582
14583 ctl.cmd = cmd;
14584
14585 return bnx2x_cnic_ctl_send(bp, &ctl);
14586}
14587
14588static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14589{
14590 struct cnic_ctl_info ctl = {0};
14591
	/* notify CNIC of the completion first, then update the SPQ accounting */
14593 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14594 ctl.data.comp.cid = cid;
14595 ctl.data.comp.error = err;
14596
14597 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14598 bnx2x_cnic_sp_post(bp, 0);
14599}
14600
/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
14606static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14607{
14608 unsigned long accept_flags = 0, ramrod_flags = 0;
14609 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14610 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14611
14612 if (start) {
		/* Start accepting on the iSCSI L2 ring: unicast, broadcast and
		 * all multicast frames are accepted, and VLAN tags are ignored
		 * for this client.
		 */
14619 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14620 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14621 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14622 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14623
14624
14625 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14626
14627 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14628 } else
14629
14630 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14631
14632 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14633 set_bit(sched_state, &bp->sp_state);
14634 else {
14635 __set_bit(RAMROD_RX, &ramrod_flags);
14636 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14637 ramrod_flags);
14638 }
14639}
14640
14641static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14642{
14643 struct bnx2x *bp = netdev_priv(dev);
14644 int rc = 0;
14645
14646 switch (ctl->cmd) {
14647 case DRV_CTL_CTXTBL_WR_CMD: {
14648 u32 index = ctl->data.io.offset;
14649 dma_addr_t addr = ctl->data.io.dma_addr;
14650
14651 bnx2x_ilt_wr(bp, index, addr);
14652 break;
14653 }
14654
14655 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14656 int count = ctl->data.credit.credit_count;
14657
14658 bnx2x_cnic_sp_post(bp, count);
14659 break;
14660 }
14661
14662
14663 case DRV_CTL_START_L2_CMD: {
14664 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14665 unsigned long sp_bits = 0;
14666
14667
14668 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14669 cp->iscsi_l2_client_id,
14670 cp->iscsi_l2_cid, BP_FUNC(bp),
14671 bnx2x_sp(bp, mac_rdata),
14672 bnx2x_sp_mapping(bp, mac_rdata),
14673 BNX2X_FILTER_MAC_PENDING,
14674 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14675 &bp->macs_pool);
14676
14677
14678 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14679 if (rc)
14680 break;
14681
14682 barrier();
14683
14684
14685
14686 netif_addr_lock_bh(dev);
14687 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14688 netif_addr_unlock_bh(dev);
14689
		/* bits to wait on */
14691 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14692 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14693
14694 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14695 BNX2X_ERR("rx_mode completion timed out!\n");
14696
14697 break;
14698 }
14699
14700
14701 case DRV_CTL_STOP_L2_CMD: {
14702 unsigned long sp_bits = 0;
14703
14704
14705 netif_addr_lock_bh(dev);
14706 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14707 netif_addr_unlock_bh(dev);
14708
14709
14710 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14711 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14712
14713 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14714 BNX2X_ERR("rx_mode completion timed out!\n");
14715
14716 barrier();
14717
14718
14719 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14720 BNX2X_ISCSI_ETH_MAC, true);
14721 break;
14722 }
14723 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14724 int count = ctl->data.credit.credit_count;
14725
14726 smp_mb__before_atomic();
14727 atomic_add(count, &bp->cq_spq_left);
14728 smp_mb__after_atomic();
14729 break;
14730 }
14731 case DRV_CTL_ULP_REGISTER_CMD: {
14732 int ulp_type = ctl->data.register_data.ulp_type;
14733
14734 if (CHIP_IS_E3(bp)) {
14735 int idx = BP_FW_MB_IDX(bp);
14736 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14737 int path = BP_PATH(bp);
14738 int port = BP_PORT(bp);
14739 int i;
14740 u32 scratch_offset;
14741 u32 *host_addr;
14742
14743
14744 if (ulp_type == CNIC_ULP_ISCSI)
14745 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14746 else if (ulp_type == CNIC_ULP_FCOE)
14747 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14748 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14749
14750 if ((ulp_type != CNIC_ULP_FCOE) ||
14751 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14752 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14753 break;
14754
14755
14756 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14757 if (!scratch_offset)
14758 break;
14759 scratch_offset += offsetof(struct glob_ncsi_oem_data,
14760 fcoe_features[path][port]);
14761 host_addr = (u32 *) &(ctl->data.register_data.
14762 fcoe_features);
14763 for (i = 0; i < sizeof(struct fcoe_capabilities);
14764 i += 4)
14765 REG_WR(bp, scratch_offset + i,
14766 *(host_addr + i/4));
14767 }
14768 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14769 break;
14770 }
14771
14772 case DRV_CTL_ULP_UNREGISTER_CMD: {
14773 int ulp_type = ctl->data.ulp_type;
14774
14775 if (CHIP_IS_E3(bp)) {
14776 int idx = BP_FW_MB_IDX(bp);
14777 u32 cap;
14778
14779 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14780 if (ulp_type == CNIC_ULP_ISCSI)
14781 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14782 else if (ulp_type == CNIC_ULP_FCOE)
14783 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14784 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14785 }
14786 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14787 break;
14788 }
14789
14790 default:
14791 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14792 rc = -EINVAL;
14793 }
14794
14795
14796 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14797 switch (ctl->drv_state) {
14798 case DRV_NOP:
14799 break;
14800 case DRV_ACTIVE:
14801 bnx2x_set_os_driver_state(bp,
14802 OS_DRIVER_STATE_ACTIVE);
14803 break;
14804 case DRV_INACTIVE:
14805 bnx2x_set_os_driver_state(bp,
14806 OS_DRIVER_STATE_DISABLED);
14807 break;
14808 case DRV_UNLOADED:
14809 bnx2x_set_os_driver_state(bp,
14810 OS_DRIVER_STATE_NOT_LOADED);
14811 break;
14812 default:
14813 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14814 }
14815 }
14816
14817 return rc;
14818}
14819
14820static int bnx2x_get_fc_npiv(struct net_device *dev,
14821 struct cnic_fc_npiv_tbl *cnic_tbl)
14822{
14823 struct bnx2x *bp = netdev_priv(dev);
14824 struct bdn_fc_npiv_tbl *tbl = NULL;
14825 u32 offset, entries;
14826 int rc = -EINVAL;
14827 int i;
14828
14829 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14830 goto out;
14831
14832 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14833
14834 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14835 if (!tbl) {
14836 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14837 goto out;
14838 }
14839
14840 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14841 if (!offset) {
14842 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14843 goto out;
14844 }
14845 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
14846
14847
14848 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
14849 BNX2X_ERR("Failed to read FC-NPIV table\n");
14850 goto out;
14851 }
14852
	/* The number of entries is stored big-endian in NVRAM; convert it to
	 * host order before using it.
	 */
14856 entries = tbl->fc_npiv_cfg.num_of_npiv;
14857 entries = (__force u32)be32_to_cpu((__force __be32)entries);
14858 tbl->fc_npiv_cfg.num_of_npiv = entries;
14859
14860 if (!tbl->fc_npiv_cfg.num_of_npiv) {
14861 DP(BNX2X_MSG_MCP,
14862 "No FC-NPIV table [valid, simply not present]\n");
14863 goto out;
14864 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
14865 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
14866 tbl->fc_npiv_cfg.num_of_npiv);
14867 goto out;
14868 } else {
14869 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
14870 tbl->fc_npiv_cfg.num_of_npiv);
14871 }
14872
14873
14874 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
14875 for (i = 0; i < cnic_tbl->count; i++) {
14876 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
14877 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
14878 }
14879
14880 rc = 0;
14881out:
14882 kfree(tbl);
14883 return rc;
14884}
14885
14886void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
14887{
14888 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14889
14890 if (bp->flags & USING_MSIX_FLAG) {
14891 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
14892 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
14893 cp->irq_arr[0].vector = bp->msix_table[1].vector;
14894 } else {
14895 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
14896 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
14897 }
14898 if (!CHIP_IS_E1x(bp))
14899 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
14900 else
14901 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
14902
14903 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
14904 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
14905 cp->irq_arr[1].status_blk = bp->def_status_blk;
14906 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
14907 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
14908
14909 cp->num_irq = 2;
14910}
14911
14912void bnx2x_setup_cnic_info(struct bnx2x *bp)
14913{
14914 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14915
14916 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14917 bnx2x_cid_ilt_lines(bp);
14918 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14919 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14920 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14921
14922 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
14923 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
14924 cp->iscsi_l2_cid);
14925
14926 if (NO_ISCSI_OOO(bp))
14927 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14928}
14929
14930static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
14931 void *data)
14932{
14933 struct bnx2x *bp = netdev_priv(dev);
14934 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14935 int rc;
14936
14937 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
14938
14939 if (ops == NULL) {
14940 BNX2X_ERR("NULL ops received\n");
14941 return -EINVAL;
14942 }
14943
14944 if (!CNIC_SUPPORT(bp)) {
14945 BNX2X_ERR("Can't register CNIC when not supported\n");
14946 return -EOPNOTSUPP;
14947 }
14948
14949 if (!CNIC_LOADED(bp)) {
14950 rc = bnx2x_load_cnic(bp);
14951 if (rc) {
14952 BNX2X_ERR("CNIC-related load failed\n");
14953 return rc;
14954 }
14955 }
14956
14957 bp->cnic_enabled = true;
14958
14959 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
14960 if (!bp->cnic_kwq)
14961 return -ENOMEM;
14962
14963 bp->cnic_kwq_cons = bp->cnic_kwq;
14964 bp->cnic_kwq_prod = bp->cnic_kwq;
14965 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
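	/* The KWQ ring occupies a single page; producer and consumer wrap
	 * back to the base once they reach cnic_kwq_last.
	 */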
14966
14967 bp->cnic_spq_pending = 0;
14968 bp->cnic_kwq_pending = 0;
14969
14970 bp->cnic_data = data;
14971
14972 cp->num_irq = 0;
14973 cp->drv_state |= CNIC_DRV_STATE_REGD;
14974 cp->iro_arr = bp->iro_arr;
14975
14976 bnx2x_setup_cnic_irq_info(bp);
14977
14978 rcu_assign_pointer(bp->cnic_ops, ops);
14979
14980
14981 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14982
14983 return 0;
14984}
14985
14986static int bnx2x_unregister_cnic(struct net_device *dev)
14987{
14988 struct bnx2x *bp = netdev_priv(dev);
14989 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14990
14991 mutex_lock(&bp->cnic_mutex);
14992 cp->drv_state = 0;
14993 RCU_INIT_POINTER(bp->cnic_ops, NULL);
14994 mutex_unlock(&bp->cnic_mutex);
14995 synchronize_rcu();
14996 bp->cnic_enabled = false;
14997 kfree(bp->cnic_kwq);
14998 bp->cnic_kwq = NULL;
14999
15000 return 0;
15001}
15002
15003static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
15004{
15005 struct bnx2x *bp = netdev_priv(dev);
15006 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15007
	/* If both iSCSI and FCoE are disabled - return NULL in order to
	 * indicate to CNIC that it should not try to work with this device.
	 */
15012 if (NO_ISCSI(bp) && NO_FCOE(bp))
15013 return NULL;
15014
15015 cp->drv_owner = THIS_MODULE;
15016 cp->chip_id = CHIP_ID(bp);
15017 cp->pdev = bp->pdev;
15018 cp->io_base = bp->regview;
15019 cp->io_base2 = bp->doorbells;
15020 cp->max_kwqe_pending = 8;
15021 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
15022 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15023 bnx2x_cid_ilt_lines(bp);
15024 cp->ctx_tbl_len = CNIC_ILT_LINES;
15025 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15026 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
15027 cp->drv_ctl = bnx2x_drv_ctl;
15028 cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
15029 cp->drv_register_cnic = bnx2x_register_cnic;
15030 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
15031 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15032 cp->iscsi_l2_client_id =
15033 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
15034 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15035
15036 if (NO_ISCSI_OOO(bp))
15037 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
15038
15039 if (NO_ISCSI(bp))
15040 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
15041
15042 if (NO_FCOE(bp))
15043 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
15044
15045 BNX2X_DEV_INFO(
15046 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
15047 cp->ctx_blk_size,
15048 cp->ctx_tbl_offset,
15049 cp->ctx_tbl_len,
15050 cp->starting_cid);
15051 return cp;
15052}
15053
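/* Return the BAR offset of the USTORM Rx producers for this fastpath
 * queue; VFs obtain it through the VF-specific helper instead.
 */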
15054static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
15055{
15056 struct bnx2x *bp = fp->bp;
15057 u32 offset = BAR_USTRORM_INTMEM;
15058
15059 if (IS_VF(bp))
15060 return bnx2x_vf_ustorm_prods_offset(bp, fp);
15061 else if (!CHIP_IS_E1x(bp))
15062 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
15063 else
15064 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
15065
15066 return offset;
15067}
15068
15069
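/* Program the HW "pretend" register so that subsequent register accesses
 * are performed on behalf of @pretend_func_val. On E1H the value is
 * limited to the legal function range; the read-back flushes the write.
 */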
15074int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
15075{
15076 u32 pretend_reg;
15077
15078 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
15079 return -1;
15080
15081
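 /* get my own pretend register */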
15082 pretend_reg = bnx2x_get_pretend_reg(bp);
15083 REG_WR(bp, pretend_reg, pretend_func_val);
15084 REG_RD(bp, pretend_reg);
15085 return 0;
15086}
15087
15088static void bnx2x_ptp_task(struct work_struct *work)
15089{
15090 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
15091 int port = BP_PORT(bp);
15092 u32 val_seq;
15093 u64 timestamp, ns;
15094 struct skb_shared_hwtstamps shhwtstamps;
15095 bool bail = true;
15096 int i;
15097
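 /* FW may take a while to latch the Tx timestamp; poll with an
  * exponential backoff and give up if it never becomes valid.
  */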
15101 for (i = 0; i < 10; i++) {
15102
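 /* Read the Tx timestamp buffer sequence-ID register */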
15103 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15104 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
15105 if (val_seq & 0x10000) {
15106 bail = false;
15107 break;
15108 }
15109 msleep(1 << i);
15110 }
15111
15112 if (!bail) {
15113
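 /* There is a valid timestamp value */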
15114 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
15115 NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
15116 timestamp <<= 32;
15117 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
15118 NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
15119
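 /* Reset the timestamp register to allow a new timestamp */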
15120 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15121 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15122 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15123
15124 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
15125 shhwtstamps.hwtstamp = ns_to_ktime(ns);
15126 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
15127
15128 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
15129 timestamp, ns);
15130 } else {
15131 DP(BNX2X_MSG_PTP,
15132 "Tx timestamp is not recorded (register read=%u)\n",
15133 val_seq);
15134 bp->eth_stats.ptp_skip_tx_ts++;
15135 }
15136
15137 dev_kfree_skb_any(bp->ptp_tx_skb);
15138 bp->ptp_tx_skb = NULL;
15139}
15140
15141void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
15142{
15143 int port = BP_PORT(bp);
15144 u64 timestamp, ns;
15145
15146 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
15147 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
15148 timestamp <<= 32;
15149 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
15150 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
15151
15152
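 /* Reset the timestamp register to allow a new timestamp */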
15153 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15154 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15155
15156 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15157
15158 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
15159
15160 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
15161 timestamp, ns);
15162}
15163
15164
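/* Read the free-running PTP hardware clock (PHC) counter */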
15165static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
15166{
15167 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
15168 int port = BP_PORT(bp);
15169 u32 wb_data[2];
15170 u64 phc_cycles;
15171
15172 REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
15173 NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
15174 phc_cycles = wb_data[1];
15175 phc_cycles = (phc_cycles << 32) + wb_data[0];
15176
15177 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
15178
15179 return phc_cycles;
15180}
15181
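/* With mult = 1 and shift = 0 the timecounter treats raw PHC cycles as
 * nanoseconds, i.e. the hardware counter is assumed to tick once per ns.
 */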
15182static void bnx2x_init_cyclecounter(struct bnx2x *bp)
15183{
15184 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
15185 bp->cyclecounter.read = bnx2x_cyclecounter_read;
15186 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
15187 bp->cyclecounter.shift = 0;
15188 bp->cyclecounter.mult = 1;
15189}
15190
15191static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
15192{
15193 struct bnx2x_func_state_params func_params = {NULL};
15194 struct bnx2x_func_set_timesync_params *set_timesync_params =
15195 &func_params.params.set_timesync;
15196
15197
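 /* Prepare parameters for function state transitions */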
15198 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
15199 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
15200
15201 func_params.f_obj = &bp->func_obj;
15202 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
15203
15204
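 /* Function parameters: reset the drift adjustment, keep the offset */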
15205 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
15206 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
15207
15208 return bnx2x_func_state_change(bp, &func_params);
15209}
15210
15211static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
15212{
15213 struct bnx2x_queue_state_params q_params;
15214 int rc, i;
15215
15216
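 /* Send a queue-update ramrod to enable PTP packets */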
15217 memset(&q_params, 0, sizeof(q_params));
15218 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
15219 q_params.cmd = BNX2X_Q_CMD_UPDATE;
15220 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
15221 &q_params.params.update.update_flags);
15222 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
15223 &q_params.params.update.update_flags);
15224
15225
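 /* Send the ramrod on all the queues of the PF */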
15226 for_each_eth_queue(bp, i) {
15227 struct bnx2x_fastpath *fp = &bp->fp[i];
15228
15229
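 /* Set the appropriate Queue object */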
15230 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
15231
15232
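 /* Update the Queue state */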
15233 rc = bnx2x_queue_state_change(bp, &q_params);
15234 if (rc) {
15235 BNX2X_ERR("Failed to enable PTP packets\n");
15236 return rc;
15237 }
15238 }
15239
15240 return 0;
15241}
15242
15243#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
15244#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
15245#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
15246#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
15247#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
15248#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
15249#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
15250#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
15251#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
15252#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
15253#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
15254#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
15255
15256int bnx2x_configure_ptp_filters(struct bnx2x *bp)
15257{
15258 int port = BP_PORT(bp);
15259 u32 param, rule;
15260 int rc;
15261
15262 if (!bp->hwtstamp_ioctl_called)
15263 return 0;
15264
15265 param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15266 NIG_REG_P0_TLLH_PTP_PARAM_MASK;
15267 rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15268 NIG_REG_P0_TLLH_PTP_RULE_MASK;
15269 switch (bp->tx_type) {
15270 case HWTSTAMP_TX_ON:
15271 bp->flags |= TX_TIMESTAMPING_EN;
15272 REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
15273 REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
15274 break;
15275 case HWTSTAMP_TX_ONESTEP_SYNC:
15276 case HWTSTAMP_TX_ONESTEP_P2P:
15277 BNX2X_ERR("One-step timestamping is not supported\n");
15278 return -ERANGE;
15279 }
15280
15281 param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15282 NIG_REG_P0_LLH_PTP_PARAM_MASK;
15283 rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15284 NIG_REG_P0_LLH_PTP_RULE_MASK;
15285 switch (bp->rx_filter) {
15286 case HWTSTAMP_FILTER_NONE:
15287 break;
15288 case HWTSTAMP_FILTER_ALL:
15289 case HWTSTAMP_FILTER_SOME:
15290 case HWTSTAMP_FILTER_NTP_ALL:
15291 bp->rx_filter = HWTSTAMP_FILTER_NONE;
15292 break;
15293 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
15294 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
15295 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
15296 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
15297
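 /* Initialize PTP detection for PTPv1 UDP/IPv4 events */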
15298 REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
15299 REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
15300 break;
15301 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
15302 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
15303 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
15304 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
15305
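 /* Initialize PTP detection for PTPv2 UDP/IPv4 or UDP/IPv6 events */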
15306 REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
15307 REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
15308 break;
15309 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
15310 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
15311 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
15312 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
15313
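 /* Initialize PTP detection for PTPv2 L2 (802.3) events */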
15314 REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
15315 REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
15316
15317 break;
15318 case HWTSTAMP_FILTER_PTP_V2_EVENT:
15319 case HWTSTAMP_FILTER_PTP_V2_SYNC:
15320 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
15321 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
15322
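 /* Initialize PTP detection for PTPv2 L2, UDP/IPv4 or UDP/IPv6 events */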
15323 REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
15324 REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
15325 break;
15326 }
15327
15328
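 /* Tell the FW to start handling PTP packets on the Rx queues */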
15329 rc = bnx2x_enable_ptp_packets(bp);
15330 if (rc)
15331 return rc;
15332
15333
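 /* Configure HW to deliver PTP packets and their timestamps to the host */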
15334 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15335 NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
15336
15337 return 0;
15338}
15339
15340static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
15341{
15342 struct hwtstamp_config config;
15343 int rc;
15344
15345 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
15346
15347 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
15348 return -EFAULT;
15349
15350 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
15351 config.tx_type, config.rx_filter);
15352
15353 if (config.flags) {
15354 BNX2X_ERR("config.flags is reserved for future use\n");
15355 return -EINVAL;
15356 }
15357
15358 bp->hwtstamp_ioctl_called = true;
15359 bp->tx_type = config.tx_type;
15360 bp->rx_filter = config.rx_filter;
15361
15362 rc = bnx2x_configure_ptp_filters(bp);
15363 if (rc)
15364 return rc;
15365
15366 config.rx_filter = bp->rx_filter;
15367
15368 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
15369 -EFAULT : 0;
15370}
15371
15372
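/* Configure the HW for PTP operation */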
15373static int bnx2x_configure_ptp(struct bnx2x *bp)
15374{
15375 int rc, port = BP_PORT(bp);
15376 u32 wb_data[2];
15377
15378
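 /* Reset PTP event detection rules - they will be set by the HWTSTAMP
  * ioctl via bnx2x_configure_ptp_filters()
  */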
15379 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15380 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
15381 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15382 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
15383 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15384 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
15385 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15386 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
15387
15388
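 /* Disable PTP packets to host - will be enabled in the ioctl */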
15389 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15390 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
15391
15392
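 /* Enable PTP in HW */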
15393 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
15394 NIG_REG_P0_PTP_EN, 0x3F);
15395
15396
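 /* Clear the timesync general control register with a 64-bit DMAE write */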
15397 wb_data[0] = 0;
15398 wb_data[1] = 0;
15399 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
15400
15401
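 /* Reset the PHC drift adjustment (the offset register is kept) */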
15402 rc = bnx2x_send_reset_timesync_ramrod(bp);
15403 if (rc) {
15404 BNX2X_ERR("Failed to reset PHC drift register\n");
15405 return -EFAULT;
15406 }
15407
15408
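 /* Reset possibly stale Rx/Tx timestamp buffers */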
15409 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15410 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15411 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15412 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15413
15414 return 0;
15415}
15416
15417
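/* Called during load to initialize PTP-related state */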
15418void bnx2x_init_ptp(struct bnx2x *bp)
15419{
15420 int rc;
15421
15422
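 /* Configure PTP in HW */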
15423 rc = bnx2x_configure_ptp(bp);
15424 if (rc) {
15425 BNX2X_ERR("Stopping PTP initialization\n");
15426 return;
15427 }
15428
15429
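 /* Init work queue for Tx timestamping */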
15430 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
15431
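 /* Init cyclecounter and timecounter only on the first load; doing it
  * on every load would reset the PHC under a running PTP application
  * (e.g. across an MTU change, which unloads and reloads the driver).
  */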
15436 if (!bp->timecounter_init_done) {
15437 bnx2x_init_cyclecounter(bp);
15438 timecounter_init(&bp->timecounter, &bp->cyclecounter,
15439 ktime_to_ns(ktime_get_real()));
15440 bp->timecounter_init_done = true;
15441 }
15442
15443 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
15444}
15445