/* bnx2x_main.c: QLogic Everest network driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Eliezer Tamir
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

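/* FW files */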
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] =
	"QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
		 "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr[2];
	u32 umac_val[2];
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

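/* indexed by board_type, above */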
static struct {
	char *name;
} board_info[] = {
	[BCM57710]	= { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711]	= { "QLogic BCM57711 10 Gigabit PCIe" },
	[BCM57711E]	= { "QLogic BCM57711E 10 Gigabit PCIe" },
	[BCM57712]	= { "QLogic BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF]	= { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF]	= { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800]	= { "QLogic BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF]	= { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF]	= { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810]	= { "QLogic BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF]	= { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF]	= { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10]	= { "QLogic BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20]	= { "QLogic BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811]	= { "QLogic BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF]	= { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O]	= { "QLogic BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF]	= { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
};

189
190#ifndef PCI_DEVICE_ID_NX2_57710
191#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
192#endif
193#ifndef PCI_DEVICE_ID_NX2_57711
194#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
195#endif
196#ifndef PCI_DEVICE_ID_NX2_57711E
197#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
198#endif
199#ifndef PCI_DEVICE_ID_NX2_57712
200#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
201#endif
202#ifndef PCI_DEVICE_ID_NX2_57712_MF
203#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
204#endif
205#ifndef PCI_DEVICE_ID_NX2_57712_VF
206#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
207#endif
208#ifndef PCI_DEVICE_ID_NX2_57800
209#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
210#endif
211#ifndef PCI_DEVICE_ID_NX2_57800_MF
212#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
213#endif
214#ifndef PCI_DEVICE_ID_NX2_57800_VF
215#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
216#endif
217#ifndef PCI_DEVICE_ID_NX2_57810
218#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
219#endif
220#ifndef PCI_DEVICE_ID_NX2_57810_MF
221#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
222#endif
223#ifndef PCI_DEVICE_ID_NX2_57840_O
224#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
225#endif
226#ifndef PCI_DEVICE_ID_NX2_57810_VF
227#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
228#endif
229#ifndef PCI_DEVICE_ID_NX2_57840_4_10
230#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
231#endif
232#ifndef PCI_DEVICE_ID_NX2_57840_2_20
233#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
234#endif
235#ifndef PCI_DEVICE_ID_NX2_57840_MFO
236#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
237#endif
238#ifndef PCI_DEVICE_ID_NX2_57840_MF
239#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
240#endif
241#ifndef PCI_DEVICE_ID_NX2_57840_VF
242#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
243#endif
244#ifndef PCI_DEVICE_ID_NX2_57811
245#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
246#endif
247#ifndef PCI_DEVICE_ID_NX2_57811_MF
248#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
249#endif
250#ifndef PCI_DEVICE_ID_NX2_57811_VF
251#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
252#endif
253
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

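/* Global resources for unloading a previously loaded device */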
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/* Forward declarations */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

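/* Helpers that write driver state into the STORM processors' internal
 * memories (XSTORM/TSTORM/CSTORM/USTORM) through their BAR offsets.
 */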
static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);

	REG_WR16(bp, addr, eq_prod);
}

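/* Indirect register access through the PCICFG GRC address/data window;
 * used only at init time, before regular BAR-based access is available.
 */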
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

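/* copy command into DMAE command memory and set DMAE command go */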
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

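/* issue a DMAE command over the init channel and wait for completion */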
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * in case of parallel requests issued from bottom-half context
	 * (e.g. the statistics flow).
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);

	return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	/* split long writes into chunks of at most dmae_wr_max dwords */
	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
	XSTORM,
	TSTORM,
	CSTORM,
	USTORM,
	MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
					      enum storms storm,
					      int entry)
{
	switch (storm) {
	case XSTORM:
		return XSTORM_ASSERT_LIST_OFFSET(entry);
	case TSTORM:
		return TSTORM_ASSERT_LIST_OFFSET(entry);
	case CSTORM:
		return CSTORM_ASSERT_LIST_OFFSET(entry);
	case USTORM:
		return USTORM_ASSERT_LIST_OFFSET(entry);
	case MAX_STORMS:
	default:
		BNX2X_ERR("unknown storm\n");
	}
	return -EINVAL;
}

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	enum storms storm;
	u32 regs[REGS_IN_ENTRY];
	u32 bar_storm_intmem[STORMS_NUM] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};
	u32 storm_assert_list_index[STORMS_NUM] = {
		XSTORM_ASSERT_LIST_INDEX_OFFSET,
		TSTORM_ASSERT_LIST_INDEX_OFFSET,
		CSTORM_ASSERT_LIST_INDEX_OFFSET,
		USTORM_ASSERT_LIST_INDEX_OFFSET
	};
	char *storms_string[STORMS_NUM] = {
		"XSTORM",
		"TSTORM",
		"CSTORM",
		"USTORM"
	};

	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
				   storm_assert_list_index[storm]);
		if (last_idx)
			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
				  storms_string[storm], last_idx);

		/* print the asserts */
		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
			/* read a single assert entry */
			for (j = 0; j < REGS_IN_ENTRY; j++)
				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
					  bnx2x_get_assert_list_entry(bp,
								      storm,
								      i) +
					  sizeof(u32) * j);

			/* log entry if it contains a valid assert */
			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storms_string[storm], i, regs[3],
					  regs[2], regs[1], regs[0]);
				rc++;
			} else {
				break;
			}
		}
	}

	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
		  CHIP_IS_E1(bp) ? "everest1" :
		  CHIP_IS_E1H(bp) ? "everest1h" :
		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
		  BCM_5710_FW_MAJOR_VERSION,
		  BCM_5710_FW_MINOR_VERSION,
		  BCM_5710_FW_REVISION_VERSION);

	return rc;
}

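/* The MCP trace buffer occupies the last MCPR_TRACE_BUFFER_SIZE bytes of the
 * MCP scratchpad, immediately below the shmem region.
 */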
#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		      (bp->common.bc_ver & 0xff0000) >> 16,
		      (bp->common.bc_ver & 0xff00) >> 8,
		      (bp->common.bc_ver & 0xff));

	if (pci_channel_offline(bp->pdev)) {
		BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
		return;
	}

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	/* sanity: the trace buffer must lie inside the MCP scratchpad */
	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

	/* validate the trace buffer signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read the cyclic buffer pointer (mark) */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* In E1 we must use only PCI configuration space to disable MSI/MSIX
	 * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in
	 * the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the
		 * mask register to prevent the HC from sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	if (IS_PF(bp) && disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	if (IS_PF(bp)) {
		struct host_sp_status_block *def_sb = bp->def_status_blk;
		int data_size, cstorm_offset;

		BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
			  bp->def_idx, bp->def_att_idx, bp->attn_state,
			  bp->spq_prod_idx, bp->stats_counter);
		BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
			  def_sb->atten_status_block.attn_bits,
			  def_sb->atten_status_block.attn_bits_ack,
			  def_sb->atten_status_block.status_block_id,
			  def_sb->atten_status_block.attn_bits_index);
		BNX2X_ERR(" def (");
		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
			pr_cont("0x%x%s",
				def_sb->sp_sb.index_values[i],
				(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

		data_size = sizeof(struct hc_sp_status_block_data) /
			    sizeof(u32);
		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
		for (i = 0; i < data_size; i++)
			*((u32 *)&sp_sb_data + i) =
				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
					   i * sizeof(u32));

		pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
			sp_sb_data.igu_sb_id,
			sp_sb_data.igu_seg_id,
			sp_sb_data.p_func.pf_id,
			sp_sb_data.p_func.vnic_id,
			sp_sb_data.p_func.vf_id,
			sp_sb_data.p_func.vf_valid,
			sp_sb_data.state);
	}

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos) {
			if (!fp->txdata_ptr[cos])
				break;

			txdata = *fp->txdata_ptr[cos];

			if (!txdata.tx_cons_sb)
				continue;

			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");

		/* VF cannot access the FW reflection in register space */
		if (IS_VF(bp))
			continue;

		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (IS_PF(bp)) {
		/* event queue */
		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
		for (i = 0; i < NUM_EQ_DESC; i++) {
			u32 *data = (u32 *)&bp->eq_ring[i].message.data;

			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
				  i, bp->eq_ring[i].message.opcode,
				  bp->eq_ring[i].message.error);
			BNX2X_ERR("data: %x %x %x\n",
				  data[0], data[1], data[2]);
		}
	}

	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			if (!fp->txdata_ptr[cos])
				break;

			if (!txdata->tx_cons_sb)
				continue;

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	if (IS_PF(bp)) {
		int tmp_msg_en = bp->msg_enable;

		bnx2x_fw_dump(bp);
		bp->msg_enable |= NETIF_MSG_HW;
		BNX2X_ERR("Idle check (1st round) ----------\n");
		bnx2x_idle_chk(bp);
		BNX2X_ERR("Idle check (2nd round) ----------\n");
		bnx2x_idle_chk(bp);
		bp->msg_enable = tmp_msg_en;
		bnx2x_mc_assert(bp);
	}

	BNX2X_ERR("end crash dump -----------------\n");
}
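
/* FLR (Function Level Reset) cleanup support: the helpers below poll the HW
 * usage counters and flush the PBF queues so a function-level reset can fully
 * drain before the function is re-initialized.
 */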
#define FLR_WAIT_USEC		10000
#define FLR_WAIT_INTERVAL	50
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL)

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

/* Common routines with VF FLR cleanup */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}

	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

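/* Return non-zero if the device reports PCIe transactions still pending */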
u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

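/* PF FLR specific routines */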
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    CFC_REG_NUM_LCIDS_INSIDE_PF,
					    "CFC PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    DORQ_REG_PF_USAGE_CNT,
					    "DQ PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
					    "QM PF usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for Timers PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
					    "Timers VNIC usage counter timed out",
					    poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
					    "Timers NUM_SCANS usage counter timed out",
					    poll_cnt))
		return 1;

	/* Wait for DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
					    dmae_reg_go_c[INIT_DMAE_C(bp)],
					    "DMAE command register timed out",
					    poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/* Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function FLR
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	/* ensure that HC_CONFIG is written before leading/trailing edge
	 * config
	 */
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	/* Clean previous status - need to configure igu prior to ack */
	if ((!msix) || single_msix) {
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
		bnx2x_ack_int(bp);
	}

	val |= IGU_PF_CONF_FUNC_EN;

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}
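
/**
 * bnx2x_trylock_hw_lock - try to take a HW lock without waiting
 *
 * @bp:		driver handle
 * @resource:	resource bit to lock
 *
 * Returns true if the lock was taken, false otherwise.
 */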
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
			(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}
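
/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only two engines are supported.
 */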
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}
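
/**
 * bnx2x_trylock_leader_lock - try to take the leadership lock
 *
 * @bp: driver handle
 *
 * Tries to take the leader lock of the current engine.
 */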
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
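
/* schedule the sp task and mark that an interrupt occurred (runs from ISR) */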
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* Mark that an interrupt occurred so that bnx2x_sp_task() knows it
	 * must ack the interrupt at the end of its run.
	 */
	atomic_set(&bp->interrupt_occurred, 1);

	/* The sp_task must observe interrupt_occurred once it starts running;
	 * this barrier orders the flag update before the work is queued.
	 */
	smp_wmb();

	/* schedule sp_task to the workqueue */
	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slowpath object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command the first
		 * place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end
		 *
		 * mark pending ACK to MCP bit, prevent the case that both
		 * bits are cleared. At the end of load/unload the driver
		 * checks that sp_state is cleared, and this order prevents
		 * races
		 */
		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		/* schedule the sp task as mcp ack is required */
		bnx2x_schedule_sp_task(bp);
	}

	return;
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops && (bp->cnic_eth_dev.drv_state &
				      CNIC_DRV_STATE_HANDLES_IRQ))
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {

		/* schedule sp task to perform default status block work and
		 * ack attention interrupts
		 */
		bnx2x_schedule_sp_task(bp);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */
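
/*
 * General service functions
 */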
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
			(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		usleep_range(5000, 10000);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}
2036
2037int bnx2x_release_leader_lock(struct bnx2x *bp)
2038{
2039 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2040}
2041
2042int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2043{
2044 u32 lock_status;
2045 u32 resource_bit = (1 << resource);
2046 int func = BP_FUNC(bp);
2047 u32 hw_lock_control_reg;
2048
2049
2050 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2051 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2052 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2053 return -EINVAL;
2054 }
2055
2056 if (func <= 5) {
2057 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2058 } else {
2059 hw_lock_control_reg =
2060 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2061 }
2062
	/* Validating that the resource is currently taken */
2064 lock_status = REG_RD(bp, hw_lock_control_reg);
2065 if (!(lock_status & resource_bit)) {
2066 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2067 lock_status, resource_bit);
2068 return -EFAULT;
2069 }
2070
2071 REG_WR(bp, hw_lock_control_reg, resource_bit);
2072 return 0;
2073}
2074
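/* Read the current value of a GPIO pin, accounting for port swapping */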
2075int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2076{
	/* The GPIO should be swapped if swap register is set and active */
2078 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2079 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2080 int gpio_shift = gpio_num +
2081 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2082 u32 gpio_mask = (1 << gpio_shift);
2083 u32 gpio_reg;
2084 int value;
2085
2086 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2087 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2088 return -EINVAL;
2089 }
2090
	/* read GPIO value */
2092 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2093
	/* get the requested pin value */
2095 if ((gpio_reg & gpio_mask) == gpio_mask)
2096 value = 1;
2097 else
2098 value = 0;
2099
2100 return value;
2101}
2102
2103int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2104{
	/* The GPIO should be swapped if swap register is set and active */
2106 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2107 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2108 int gpio_shift = gpio_num +
2109 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2110 u32 gpio_mask = (1 << gpio_shift);
2111 u32 gpio_reg;
2112
2113 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2114 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2115 return -EINVAL;
2116 }
2117
2118 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
2120 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2121
2122 switch (mode) {
2123 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2124 DP(NETIF_MSG_LINK,
2125 "Set GPIO %d (shift %d) -> output low\n",
2126 gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
2128 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2129 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2130 break;
2131
2132 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2133 DP(NETIF_MSG_LINK,
2134 "Set GPIO %d (shift %d) -> output high\n",
2135 gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
2137 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2138 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2139 break;
2140
2141 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2142 DP(NETIF_MSG_LINK,
2143 "Set GPIO %d (shift %d) -> input\n",
2144 gpio_num, gpio_shift);
		/* set FLOAT */
2146 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2147 break;
2148
2149 default:
2150 break;
2151 }
2152
2153 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2154 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2155
2156 return 0;
2157}
2158
2159int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2160{
2161 u32 gpio_reg = 0;
2162 int rc = 0;
2163
	/* Any port swapping should be handled by caller */
2165
2166 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and clear the float/set/clr bits for the requested pins */
2168 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2169 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2170 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2171 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2172
2173 switch (mode) {
2174 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2175 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
2177 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2178 break;
2179
2180 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2181 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
2183 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2184 break;
2185
2186 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2187 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
2189 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2190 break;
2191
2192 default:
2193 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2194 rc = -EINVAL;
2195 break;
2196 }
2197
2198 if (rc == 0)
2199 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2200
2201 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2202
2203 return rc;
2204}
2205
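/* Set or clear the interrupt indication of a GPIO pin */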
2206int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2207{
	/* The GPIO should be swapped if swap register is set and active */
2209 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2210 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2211 int gpio_shift = gpio_num +
2212 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2213 u32 gpio_mask = (1 << gpio_shift);
2214 u32 gpio_reg;
2215
2216 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2217 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2218 return -EINVAL;
2219 }
2220
2221 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
2223 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2224
2225 switch (mode) {
2226 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2227 DP(NETIF_MSG_LINK,
2228 "Clear GPIO INT %d (shift %d) -> output low\n",
2229 gpio_num, gpio_shift);
		/* clear SET and set CLR */
2231 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2232 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2233 break;
2234
2235 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2236 DP(NETIF_MSG_LINK,
2237 "Set GPIO INT %d (shift %d) -> output high\n",
2238 gpio_num, gpio_shift);
		/* clear CLR and set SET */
2240 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2241 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2242 break;
2243
2244 default:
2245 break;
2246 }
2247
2248 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2249 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2250
2251 return 0;
2252}
2253
2254static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2255{
2256 u32 spio_reg;
2257
	/* Only 2 SPIOs are configurable */
2259 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2260 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2261 return -EINVAL;
2262 }
2263
2264 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
2266 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2267
2268 switch (mode) {
2269 case MISC_SPIO_OUTPUT_LOW:
2270 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
		/* clear FLOAT and set CLR */
2272 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2273 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2274 break;
2275
2276 case MISC_SPIO_OUTPUT_HIGH:
2277 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
		/* clear FLOAT and set SET */
2279 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2280 spio_reg |= (spio << MISC_SPIO_SET_POS);
2281 break;
2282
2283 case MISC_SPIO_INPUT_HI_Z:
2284 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
		/* set FLOAT */
2286 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2287 break;
2288
2289 default:
2290 break;
2291 }
2292
2293 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2294 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2295
2296 return 0;
2297}
2298
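/* Translate the negotiated IEEE flow control mode into the Pause bits
 * advertised through ethtool.
 */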
2299void bnx2x_calc_fc_adv(struct bnx2x *bp)
2300{
2301 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2302
2303 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2304 ADVERTISED_Pause);
2305 switch (bp->link_vars.ieee_fc &
2306 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2307 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2308 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2309 ADVERTISED_Pause);
2310 break;
2311
2312 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2313 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2314 break;
2315
2316 default:
2317 break;
2318 }
2319}
2320
2321static void bnx2x_set_requested_fc(struct bnx2x *bp)
2322{
	/* Initialize link parameters structure variables
	 * It is recommended to turn off RX FC for jumbo frames
	 * for better performance
	 */
2327 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2328 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2329 else
2330 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2331}
2332
2333static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2334{
2335 u32 pause_enabled = 0;
2336
2337 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2338 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2339 pause_enabled = 1;
2340
2341 REG_WR(bp, BAR_USTRORM_INTMEM +
2342 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2343 pause_enabled);
2344 }
2345
2346 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2347 pause_enabled ? "enabled" : "disabled");
2348}
2349
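/* Bring up the PHY and link; in diagnostic and loopback load modes the
 * requested line speed is temporarily overridden and restored on exit.
 */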
2350int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2351{
2352 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2353 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2354
2355 if (!BP_NOMCP(bp)) {
2356 bnx2x_set_requested_fc(bp);
2357 bnx2x_acquire_phy_lock(bp);
2358
2359 if (load_mode == LOAD_DIAG) {
2360 struct link_params *lp = &bp->link_params;
2361 lp->loopback_mode = LOOPBACK_XGXS;
2362
2363 if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2364 if (lp->speed_cap_mask[cfx_idx] &
2365 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2366 lp->req_line_speed[cfx_idx] =
2367 SPEED_20000;
2368 else if (lp->speed_cap_mask[cfx_idx] &
2369 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2370 lp->req_line_speed[cfx_idx] =
2371 SPEED_10000;
2372 else
2373 lp->req_line_speed[cfx_idx] =
2374 SPEED_1000;
2375 }
2376 }
2377
2378 if (load_mode == LOAD_LOOPBACK_EXT) {
2379 struct link_params *lp = &bp->link_params;
2380 lp->loopback_mode = LOOPBACK_EXT;
2381 }
2382
2383 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2384
2385 bnx2x_release_phy_lock(bp);
2386
2387 bnx2x_init_dropless_fc(bp);
2388
2389 bnx2x_calc_fc_adv(bp);
2390
2391 if (bp->link_vars.link_up) {
2392 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2393 bnx2x_link_report(bp);
2394 }
2395 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2396 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2397 return rc;
2398 }
2399 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2400 return -EINVAL;
2401}
2402
2403void bnx2x_link_set(struct bnx2x *bp)
2404{
2405 if (!BP_NOMCP(bp)) {
2406 bnx2x_acquire_phy_lock(bp);
2407 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2408 bnx2x_release_phy_lock(bp);
2409
2410 bnx2x_init_dropless_fc(bp);
2411
2412 bnx2x_calc_fc_adv(bp);
2413 } else
2414 BNX2X_ERR("Bootcode is missing - can not set link\n");
2415}
2416
2417static void bnx2x__link_reset(struct bnx2x *bp)
2418{
2419 if (!BP_NOMCP(bp)) {
2420 bnx2x_acquire_phy_lock(bp);
2421 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2422 bnx2x_release_phy_lock(bp);
2423 } else
2424 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2425}
2426
2427void bnx2x_force_link_reset(struct bnx2x *bp)
2428{
2429 bnx2x_acquire_phy_lock(bp);
2430 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2431 bnx2x_release_phy_lock(bp);
2432}
2433
2434u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2435{
2436 u8 rc = 0;
2437
2438 if (!BP_NOMCP(bp)) {
2439 bnx2x_acquire_phy_lock(bp);
2440 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2441 is_serdes);
2442 bnx2x_release_phy_lock(bp);
2443 } else
2444 BNX2X_ERR("Bootcode is missing - can not test link\n");
2445
2446 return rc;
2447}
2448

/* Calculates the sum of vn_min_rates.
 * It's needed for further normalizing of the min_rates.
 * Returns:
 *   sum of vn_min_rates, or
 *   0 - if all the min_rates are 0.
 * In the latter case fairness algorithm should be deactivated.
 * If not all min_rates are zero then those that are zeroes will be set to 1.
 */
2458static void bnx2x_calc_vn_min(struct bnx2x *bp,
2459 struct cmng_init_input *input)
2460{
2461 int all_zero = 1;
2462 int vn;
2463
2464 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2465 u32 vn_cfg = bp->mf_config[vn];
2466 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2467 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2468
		/* Skip hidden vns */
2470 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2471 vn_min_rate = 0;
		/* If min rate is zero - set it to 1 */
2473 else if (!vn_min_rate)
2474 vn_min_rate = DEF_MIN_RATE;
2475 else
2476 all_zero = 0;
2477
2478 input->vnic_min_rate[vn] = vn_min_rate;
2479 }
2480
	/* if ETS is enabled, per-VN fairness is deactivated */
2482 if (BNX2X_IS_ETS_ENABLED(bp)) {
2483 input->flags.cmng_enables &=
2484 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2485 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2486 } else if (all_zero) {
2487 input->flags.cmng_enables &=
2488 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP,
		   "All MIN values are zeroes, fairness will be disabled\n");
2491 } else
2492 input->flags.cmng_enables |=
2493 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2494}
2495
2496static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2497 struct cmng_init_input *input)
2498{
2499 u16 vn_max_rate;
2500 u32 vn_cfg = bp->mf_config[vn];
2501
2502 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2503 vn_max_rate = 0;
2504 else {
2505 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2506
2507 if (IS_MF_PERCENT_BW(bp)) {
			/* maxCfg in percents of linkspeed */
2509 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2510 } else
			/* maxCfg is fixed in 100Mb units */
2512 vn_max_rate = maxCfg * 100;
2513 }
2514
2515 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2516
2517 input->vnic_max_rate[vn] = vn_max_rate;
2518}
2519
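/* Decide whether min/max (congestion management) rating is needed:
 * only multi-function devices on real silicon use it.
 */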
2520static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2521{
2522 if (CHIP_REV_IS_SLOW(bp))
2523 return CMNG_FNS_NONE;
2524 if (IS_MF(bp))
2525 return CMNG_FNS_MINMAX;
2526
2527 return CMNG_FNS_NONE;
2528}
2529
2530void bnx2x_read_mf_cfg(struct bnx2x *bp)
2531{
2532 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2533
2534 if (BP_NOMCP(bp))
2535 return;
2536
	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */
2548 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2549 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2550
2551 if (func >= E1H_FUNC_MAX)
2552 break;
2553
2554 bp->mf_config[vn] =
2555 MF_CFG_RD(bp, func_mf_config[func].config);
2556 }
2557 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2558 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2559 bp->flags |= MF_FUNC_DIS;
2560 } else {
2561 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2562 bp->flags &= ~MF_FUNC_DIS;
2563 }
2564}
2565
2566static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2567{
2568 struct cmng_init_input input;
2569 memset(&input, 0, sizeof(struct cmng_init_input));
2570
2571 input.port_rate = bp->link_vars.line_speed;
2572
2573 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2574 int vn;
2575
		/* read mf conf from shmem */
2577 if (read_cfg)
2578 bnx2x_read_mf_cfg(bp);
2579
		/* vn_weight_sum and enable fairness if not 0 */
2581 bnx2x_calc_vn_min(bp, &input);
2582
		/* calculate and set min-max rate for each vn */
2584 if (bp->port.pmf)
2585 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2586 bnx2x_calc_vn_max(bp, vn, &input);
2587
		/* always enable rate shaping and fairness */
2589 input.flags.cmng_enables |=
2590 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2591
2592 bnx2x_init_cmng(&input, &bp->cmng);
2593 return;
2594 }
2595
	/* rate shaping and fairness are disabled */
2597 DP(NETIF_MSG_IFUP,
2598 "rate shaping and fairness are disabled\n");
2599}
2600
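/* Write the per-port and per-VN congestion management parameters into
 * XSTORM internal memory.
 */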
2601static void storm_memset_cmng(struct bnx2x *bp,
2602 struct cmng_init *cmng,
2603 u8 port)
2604{
2605 int vn;
2606 size_t size = sizeof(struct cmng_struct_per_port);
2607
2608 u32 addr = BAR_XSTRORM_INTMEM +
2609 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2610
2611 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2612
2613 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2614 int func = func_by_vn(bp, vn);
2615
2616 addr = BAR_XSTRORM_INTMEM +
2617 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2618 size = sizeof(struct rate_shaping_vars_per_vn);
2619 __storm_memset_struct(bp, addr, size,
2620 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2621
2622 addr = BAR_XSTRORM_INTMEM +
2623 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2624 size = sizeof(struct fairness_vars_per_vn);
2625 __storm_memset_struct(bp, addr, size,
2626 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2627 }
2628}
2629
/* init cmng mode in HW according to local configuration */
2631void bnx2x_set_local_cmng(struct bnx2x *bp)
2632{
2633 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2634
2635 if (cmng_fns != CMNG_FNS_NONE) {
2636 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2637 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2638 } else {
		/* rate shaping and fairness are disabled */
2640 DP(NETIF_MSG_IFUP,
2641 "single function mode without fairness\n");
2642 }
2643}
2644
/* This function is called upon link interrupt */
2646static void bnx2x_link_attn(struct bnx2x *bp)
2647{
	/* Make sure that we are synced with the current statistics */
2649 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2650
2651 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2652
2653 bnx2x_init_dropless_fc(bp);
2654
2655 if (bp->link_vars.link_up) {
2656
2657 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2658 struct host_port_stats *pstats;
2659
2660 pstats = bnx2x_sp(bp, port_stats);
2661
2662 memset(&(pstats->mac_stx[0]), 0,
2663 sizeof(struct mac_stx));
2664 }
2665 if (bp->state == BNX2X_STATE_OPEN)
2666 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2667 }
2668
2669 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2670 bnx2x_set_local_cmng(bp);
2671
2672 __bnx2x_link_report(bp);
2673
2674 if (IS_MF(bp))
2675 bnx2x_link_sync_notify(bp);
2676}
2677
2678void bnx2x__link_status_update(struct bnx2x *bp)
2679{
2680 if (bp->state != BNX2X_STATE_OPEN)
2681 return;
2682
	/* read updated dcb configuration */
2684 if (IS_PF(bp)) {
2685 bnx2x_dcbx_pmf_update(bp);
2686 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2687 if (bp->link_vars.link_up)
2688 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2689 else
2690 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2691
2692 bnx2x_link_report(bp);
2693
2694 } else {
2695 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2696 SUPPORTED_10baseT_Full |
2697 SUPPORTED_100baseT_Half |
2698 SUPPORTED_100baseT_Full |
2699 SUPPORTED_1000baseT_Full |
2700 SUPPORTED_2500baseX_Full |
2701 SUPPORTED_10000baseT_Full |
2702 SUPPORTED_TP |
2703 SUPPORTED_FIBRE |
2704 SUPPORTED_Autoneg |
2705 SUPPORTED_Pause |
2706 SUPPORTED_Asym_Pause);
2707 bp->port.advertising[0] = bp->port.supported[0];
2708
2709 bp->link_params.bp = bp;
2710 bp->link_params.port = BP_PORT(bp);
2711 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2712 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2713 bp->link_params.req_line_speed[0] = SPEED_10000;
2714 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2715 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2716 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2717 bp->link_vars.line_speed = SPEED_10000;
2718 bp->link_vars.link_status =
2719 (LINK_STATUS_LINK_UP |
2720 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2721 bp->link_vars.link_up = 1;
2722 bp->link_vars.duplex = DUPLEX_FULL;
2723 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2724 __bnx2x_link_report(bp);
2725
2726 bnx2x_sample_bulletin(bp);
2727
		/* if bulletin board did not have an update for link status
		 * __bnx2x_link_report will report current status
		 * but it will NOT duplicate report in case of already reported
		 * during sampling bulletin board.
		 */
2733 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2734 }
2735}
2736
2737static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2738 u16 vlan_val, u8 allowed_prio)
2739{
2740 struct bnx2x_func_state_params func_params = {NULL};
2741 struct bnx2x_func_afex_update_params *f_update_params =
2742 &func_params.params.afex_update;
2743
2744 func_params.f_obj = &bp->func_obj;
2745 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2746
	/* no need to wait for RAMROD completion, so don't
	 * set RAMROD_COMP_WAIT flag
	 */

2751 f_update_params->vif_id = vifid;
2752 f_update_params->afex_default_vlan = vlan_val;
2753 f_update_params->allowed_priorities = allowed_prio;
2754
	/* if ramrod can not be sent, respond to MCP immediately */
2756 if (bnx2x_func_state_change(bp, &func_params) < 0)
2757 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2758
2759 return 0;
2760}
2761
2762static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2763 u16 vif_index, u8 func_bit_map)
2764{
2765 struct bnx2x_func_state_params func_params = {NULL};
2766 struct bnx2x_func_afex_viflists_params *update_params =
2767 &func_params.params.afex_viflists;
2768 int rc;
2769 u32 drv_msg_code;
2770
	/* sanity - only GET and SET commands are expected here */
2772 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2773 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2774 cmd_type);
2775
2776 func_params.f_obj = &bp->func_obj;
2777 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2778
	/* set parameters according to cmd_type */
2780 update_params->afex_vif_list_command = cmd_type;
2781 update_params->vif_list_index = vif_index;
2782 update_params->func_bit_map =
2783 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2784 update_params->func_to_clear = 0;
2785 drv_msg_code =
2786 (cmd_type == VIF_LIST_RULE_GET) ?
2787 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2788 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2789
	/* if ramrod can not be sent, respond to MCP immediately for
	 * SET and GET requests (other are not triggered from MCP)
	 */
2793 rc = bnx2x_func_state_change(bp, &func_params);
2794 if (rc < 0)
2795 bnx2x_fw_command(bp, drv_msg_code, 0);
2796
2797 return 0;
2798}
2799
2800static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2801{
2802 struct afex_stats afex_stats;
2803 u32 func = BP_ABS_FUNC(bp);
2804 u32 mf_config;
2805 u16 vlan_val;
2806 u32 vlan_prio;
2807 u16 vif_id;
2808 u8 allowed_prio;
2809 u8 vlan_mode;
2810 u32 addr_to_write, vifid, addrs, stats_type, i;
2811
2812 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2813 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2814 DP(BNX2X_MSG_MCP,
2815 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2816 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2817 }
2818
2819 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2820 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2821 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2822 DP(BNX2X_MSG_MCP,
2823 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2824 vifid, addrs);
2825 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2826 addrs);
2827 }
2828
2829 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2830 addr_to_write = SHMEM2_RD(bp,
2831 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2832 stats_type = SHMEM2_RD(bp,
2833 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2834
2835 DP(BNX2X_MSG_MCP,
2836 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2837 addr_to_write);
2838
2839 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2840
		/* write response to scratchpad, for MCP */
2842 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2843 REG_WR(bp, addr_to_write + i*sizeof(u32),
2844 *(((u32 *)(&afex_stats))+i));
2845
		/* send ack message to MCP */
2847 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2848 }
2849
2850 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2851 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2852 bp->mf_config[BP_VN(bp)] = mf_config;
2853 DP(BNX2X_MSG_MCP,
2854 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2855 mf_config);
2856
		/* if VIF_SET is "enabled" */
2858 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2859
2860 struct cmng_init_input cmng_input;
2861 struct rate_shaping_vars_per_vn m_rs_vn;
2862 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2863 u32 addr = BAR_XSTRORM_INTMEM +
2864 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2865
2866 bp->mf_config[BP_VN(bp)] = mf_config;
2867
2868 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2869 m_rs_vn.vn_counter.rate =
2870 cmng_input.vnic_max_rate[BP_VN(bp)];
2871 m_rs_vn.vn_counter.quota =
2872 (m_rs_vn.vn_counter.rate *
2873 RS_PERIODIC_TIMEOUT_USEC) / 8;
2874
2875 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2876
			/* read relevant values from mf_cfg struct in shmem */
2878 vif_id =
2879 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2880 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2881 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2882 vlan_val =
2883 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2884 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2885 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2886 vlan_prio = (mf_config &
2887 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2888 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2889 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2890 vlan_mode =
2891 (MF_CFG_RD(bp,
2892 func_mf_config[func].afex_config) &
2893 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2894 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2895 allowed_prio =
2896 (MF_CFG_RD(bp,
2897 func_mf_config[func].afex_config) &
2898 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2899 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2900
			/* send ramrod to FW, return in case of failure */
2902 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2903 allowed_prio))
2904 return;
2905
2906 bp->afex_def_vlan_tag = vlan_val;
2907 bp->afex_vlan_mode = vlan_mode;
2908 } else {
			/* notify link down because BP->flags is disabled */
2910 bnx2x_link_report(bp);
2911
			/* send INVALID VIF ramrod to FW */
2913 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2914
			/* Reset the default afex VLAN */
2916 bp->afex_def_vlan_tag = -1;
2917 }
2918 }
2919}
2920
2921static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2922{
2923 struct bnx2x_func_switch_update_params *switch_update_params;
2924 struct bnx2x_func_state_params func_params;
2925
2926 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2927 switch_update_params = &func_params.params.switch_update;
2928 func_params.f_obj = &bp->func_obj;
2929 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2930
	/* Prepare parameters for function state transitions */
2932 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2933 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2934
2935 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2936 int func = BP_ABS_FUNC(bp);
2937 u32 val;
2938
		/* Re-learn the S-tag from shmem */
2940 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2941 FUNC_MF_CFG_E1HOV_TAG_MASK;
2942 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2943 bp->mf_ov = val;
2944 } else {
2945 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2946 goto fail;
2947 }
2948
		/* Configure new S-tag in LLH */
2950 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2951 bp->mf_ov);
2952
		/* Send Ramrod to update FW of change */
2954 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2955 &switch_update_params->changes);
2956 switch_update_params->vlan = bp->mf_ov;
2957
2958 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2959 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2960 bp->mf_ov);
2961 goto fail;
2962 } else {
2963 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2964 bp->mf_ov);
2965 }
2966 } else {
2967 goto fail;
2968 }
2969
2970 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2971 return;
2972fail:
2973 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2974}
2975
2976static void bnx2x_pmf_update(struct bnx2x *bp)
2977{
2978 int port = BP_PORT(bp);
2979 u32 val;
2980
2981 bp->port.pmf = 1;
2982 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2983
	/*
	 * We need the mb() to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
	 */
2988 smp_mb();
2989
	/* queue a periodic task */
2991 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2992
2993 bnx2x_dcbx_pmf_update(bp);
2994
	/* enable nig attention */
2996 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2997 if (bp->common.int_block == INT_BLOCK_HC) {
2998 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2999 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
3000 } else if (!CHIP_IS_E1x(bp)) {
3001 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
3002 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
3003 }
3004
3005 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3006}
3007
/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
3017u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3018{
3019 int mb_idx = BP_FW_MB_IDX(bp);
3020 u32 seq;
3021 u32 rc = 0;
3022 u32 cnt = 1;
3023 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3024
3025 mutex_lock(&bp->fw_mb_mutex);
3026 seq = ++bp->fw_seq;
3027 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3028 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3029
3030 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3031 (command | seq), param);
3032
3033 do {
		/* let the FW do its magic ... */
3035 msleep(delay);
3036
3037 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3038
		/* Give the FW up to 5 second (500*10ms) */
3040 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3041
3042 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3043 cnt*delay, rc, seq);
3044
	/* is this a reply to our command? */
3046 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3047 rc &= FW_MSG_CODE_MASK;
3048 else {
		/* FW BUG! */
3050 BNX2X_ERR("FW failed to respond!\n");
3051 bnx2x_fw_dump(bp);
3052 rc = 0;
3053 }
3054 mutex_unlock(&bp->fw_mb_mutex);
3055
3056 return rc;
3057}
3058
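/* Write the function's common TSTORM configuration into internal memory */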
3059static void storm_memset_func_cfg(struct bnx2x *bp,
3060 struct tstorm_eth_function_common_config *tcfg,
3061 u16 abs_fid)
3062{
3063 size_t size = sizeof(struct tstorm_eth_function_common_config);
3064
3065 u32 addr = BAR_TSTRORM_INTMEM +
3066 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3067
3068 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3069}
3070
3071void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3072{
3073 if (CHIP_IS_E1x(bp)) {
3074 struct tstorm_eth_function_common_config tcfg = {0};
3075
3076 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3077 }
3078
	/* Enable the function in the FW */
3080 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3081 storm_memset_func_en(bp, p->func_id, 1);
3082
	/* spq */
3084 if (p->spq_active) {
3085 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3086 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3087 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3088 }
3089}
3090
/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */
3100static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3101 struct bnx2x_fastpath *fp,
3102 bool zero_stats)
3103{
3104 unsigned long flags = 0;
3105
	/* PF driver will always initialize the Queue to an ACTIVE state */
3107 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3108

	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
	 */
3114 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3115 if (zero_stats)
3116 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3117
3118 if (bp->flags & TX_SWITCHING)
3119 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3120
3121 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3122 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3123
3124#ifdef BNX2X_STOP_ON_ERROR
3125 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3126#endif
3127
3128 return flags;
3129}
3130
3131static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3132 struct bnx2x_fastpath *fp,
3133 bool leading)
3134{
3135 unsigned long flags = 0;
3136
	/* calculate other queue flags */
3138 if (IS_MF_SD(bp))
3139 __set_bit(BNX2X_Q_FLG_OV, &flags);
3140
3141 if (IS_FCOE_FP(fp)) {
3142 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
		/* For FCoE - force usage of default priority (for afex) */
3144 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3145 }
3146
3147 if (fp->mode != TPA_MODE_DISABLED) {
3148 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3149 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3150 if (fp->mode == TPA_MODE_GRO)
3151 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3152 }
3153
3154 if (leading) {
3155 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3156 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3157 }
3158
	/* Always set HW VLAN stripping */
3160 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3161
	/* configure silent vlan removal */
3163 if (IS_MF_AFEX(bp))
3164 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3165
3166 return flags | bnx2x_get_common_flags(bp, fp, true);
3167}
3168
3169static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3170 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3171 u8 cos)
3172{
3173 gen_init->stat_id = bnx2x_stats_id(fp);
3174 gen_init->spcl_id = fp->cl_id;
3175
	/* Always use mini-jumbo MTU for FCoE L2 ring */
3177 if (IS_FCOE_FP(fp))
3178 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3179 else
3180 gen_init->mtu = bp->dev->mtu;
3181
3182 gen_init->cos = cos;
3183
3184 gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3185}
3186
3187static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3188 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3189 struct bnx2x_rxq_setup_params *rxq_init)
3190{
3191 u8 max_sge = 0;
3192 u16 sge_sz = 0;
3193 u16 tpa_agg_size = 0;
3194
3195 if (fp->mode != TPA_MODE_DISABLED) {
3196 pause->sge_th_lo = SGE_TH_LO(bp);
3197 pause->sge_th_hi = SGE_TH_HI(bp);
3198
		/* validate SGE ring has enough to cross high threshold */
3200 WARN_ON(bp->dropless_fc &&
3201 pause->sge_th_hi + FW_PREFETCH_CNT >
3202 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3203
3204 tpa_agg_size = TPA_AGG_SIZE;
3205 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3206 SGE_PAGE_SHIFT;
3207 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3208 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3209 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3210 }
3211
	/* pause - not for e1 */
3213 if (!CHIP_IS_E1(bp)) {
3214 pause->bd_th_lo = BD_TH_LO(bp);
3215 pause->bd_th_hi = BD_TH_HI(bp);
3216
3217 pause->rcq_th_lo = RCQ_TH_LO(bp);
3218 pause->rcq_th_hi = RCQ_TH_HI(bp);
3219
		/* validate that rings have enough entries to cross
		 * high thresholds
		 */
3223 WARN_ON(bp->dropless_fc &&
3224 pause->bd_th_hi + FW_PREFETCH_CNT >
3225 bp->rx_ring_size);
3226 WARN_ON(bp->dropless_fc &&
3227 pause->rcq_th_hi + FW_PREFETCH_CNT >
3228 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3229
3230 pause->pri_map = 1;
3231 }
3232
	/* rxq setup */
3234 rxq_init->dscr_map = fp->rx_desc_mapping;
3235 rxq_init->sge_map = fp->rx_sge_mapping;
3236 rxq_init->rcq_map = fp->rx_comp_mapping;
3237 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3238
	/* This should be a maximum number of data bytes that may be
	 * placed on the BD (not including paddings).
	 */
3242 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3243 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3244
3245 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3246 rxq_init->tpa_agg_sz = tpa_agg_size;
3247 rxq_init->sge_buf_sz = sge_sz;
3248 rxq_init->max_sges_pkt = max_sge;
3249 rxq_init->rss_engine_id = BP_FUNC(bp);
3250 rxq_init->mcast_engine_id = BP_FUNC(bp);
3251
	/* Maximum number of simultaneous TPA aggregations for this Queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */
3257 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3258
3259 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3260 rxq_init->fw_sb_id = fp->fw_sb_id;
3261
3262 if (IS_FCOE_FP(fp))
3263 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3264 else
3265 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3266
	/* configure silent vlan removal
	 * if multi function mode is afex, then mask default vlan
	 */
3269 if (IS_MF_AFEX(bp)) {
3270 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3271 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3272 }
3273}
3274
3275static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3276 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3277 u8 cos)
3278{
3279 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3280 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3281 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3282 txq_init->fw_sb_id = fp->fw_sb_id;
3283
	/*
	 * set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
3288 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3289
3290 if (IS_FCOE_FP(fp)) {
3291 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3292 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3293 }
3294}
3295
3296static void bnx2x_pf_init(struct bnx2x *bp)
3297{
3298 struct bnx2x_func_init_params func_init = {0};
3299 struct event_ring_data eq_data = { {0} };
3300
3301 if (!CHIP_IS_E1x(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
3304 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3305 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3306 (CHIP_MODE_IS_4_PORT(bp) ?
3307 BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
3309 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3310 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3311 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3312 (CHIP_MODE_IS_4_PORT(bp) ?
3313 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3314 }
3315
3316 func_init.spq_active = true;
3317 func_init.pf_id = BP_FUNC(bp);
3318 func_init.func_id = BP_FUNC(bp);
3319 func_init.spq_map = bp->spq_mapping;
3320 func_init.spq_prod = bp->spq_prod_idx;
3321
3322 bnx2x_func_init(bp, &func_init);
3323
3324 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3325
	/*
	 * Congestion management values depend on the link rate
	 * There is no active link so initial link rate is set to 10 Gbps.
	 * When the link comes up the congestion management values are
	 * re-calculated according to the actual link rate.
	 */
3332 bp->link_vars.line_speed = SPEED_10000;
3333 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3334
	/* Only the PMF sets the HW */
3336 if (bp->port.pmf)
3337 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3338
	/* init Event Queue - PCI bus guarantees correct endianity */
3340 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3341 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3342 eq_data.producer = bp->eq_prod;
3343 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3344 eq_data.sb_id = DEF_SB_ID;
3345 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3346}
3347
3348static void bnx2x_e1h_disable(struct bnx2x *bp)
3349{
3350 int port = BP_PORT(bp);
3351
3352 bnx2x_tx_disable(bp);
3353
3354 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3355}
3356
3357static void bnx2x_e1h_enable(struct bnx2x *bp)
3358{
3359 int port = BP_PORT(bp);
3360
3361 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3362 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3363
	/* Tx queue should be only re-enabled */
3365 netif_tx_wake_all_queues(bp->dev);
3366
	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
3371}
3372
3373#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3374
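/* Fill the ether_stat block of drv_info_to_mcp for an MCP drv_info request */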
3375static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3376{
3377 struct eth_stats_info *ether_stat =
3378 &bp->slowpath->drv_info_to_mcp.ether_stat;
3379 struct bnx2x_vlan_mac_obj *mac_obj =
3380 &bp->sp_objs->mac_obj;
3381 int i;
3382
3383 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3384 ETH_STAT_INFO_VERSION_LEN);
3385
	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
	 * mac_local field in ether_stat struct. The base address is offset by 2
	 * bytes to account for the field being 8 bytes but a mac address is
	 * only 6 bytes. Likewise, the stride for the get_n_elements function is
	 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
	 * needed for the ether_stat struct, i.e. the mac in mac_local is not 8
	 * bytes.
	 */
3394 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3395 memset(ether_stat->mac_local + i, 0,
3396 sizeof(ether_stat->mac_local[0]));
3397 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3398 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3399 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3400 ETH_ALEN);
3401 ether_stat->mtu_size = bp->dev->mtu;
3402 if (bp->dev->features & NETIF_F_RXCSUM)
3403 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3404 if (bp->dev->features & NETIF_F_TSO)
3405 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3406 ether_stat->feature_flags |= bp->common.boot_mode;
3407
3408 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3409
3410 ether_stat->txq_size = bp->tx_ring_size;
3411 ether_stat->rxq_size = bp->rx_ring_size;
3412
3413#ifdef CONFIG_BNX2X_SRIOV
3414 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3415#endif
3416}
3417
3418static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3419{
3420 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3421 struct fcoe_stats_info *fcoe_stat =
3422 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3423
3424 if (!CNIC_LOADED(bp))
3425 return;
3426
3427 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3428
3429 fcoe_stat->qos_priority =
3430 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3431
	/* insert FCoE stats from ramrod response */
3433 if (!NO_FCOE(bp)) {
3434 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3435 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3436 tstorm_queue_statistics;
3437
3438 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3439 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3440 xstorm_queue_statistics;
3441
3442 struct fcoe_statistics_params *fw_fcoe_stat =
3443 &bp->fw_stats_data->fcoe;
3444
3445 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3446 fcoe_stat->rx_bytes_lo,
3447 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3448
3449 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3450 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3451 fcoe_stat->rx_bytes_lo,
3452 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3453
3454 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3455 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3456 fcoe_stat->rx_bytes_lo,
3457 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3458
3459 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3460 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3461 fcoe_stat->rx_bytes_lo,
3462 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3463
3464 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3465 fcoe_stat->rx_frames_lo,
3466 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3467
3468 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3469 fcoe_stat->rx_frames_lo,
3470 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3471
3472 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3473 fcoe_stat->rx_frames_lo,
3474 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3475
3476 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3477 fcoe_stat->rx_frames_lo,
3478 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3479
3480 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3481 fcoe_stat->tx_bytes_lo,
3482 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3483
3484 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3485 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3486 fcoe_stat->tx_bytes_lo,
3487 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3488
3489 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3490 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3491 fcoe_stat->tx_bytes_lo,
3492 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3493
3494 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3495 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3496 fcoe_stat->tx_bytes_lo,
3497 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3498
3499 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3500 fcoe_stat->tx_frames_lo,
3501 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3502
3503 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3504 fcoe_stat->tx_frames_lo,
3505 fcoe_q_xstorm_stats->ucast_pkts_sent);
3506
3507 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3508 fcoe_stat->tx_frames_lo,
3509 fcoe_q_xstorm_stats->bcast_pkts_sent);
3510
3511 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3512 fcoe_stat->tx_frames_lo,
3513 fcoe_q_xstorm_stats->mcast_pkts_sent);
3514 }
3515
	/* ask L5 driver to add data to the struct */
3517 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3518}
3519
3520static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3521{
3522 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3523 struct iscsi_stats_info *iscsi_stat =
3524 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3525
3526 if (!CNIC_LOADED(bp))
3527 return;
3528
3529 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3530 ETH_ALEN);
3531
3532 iscsi_stat->qos_priority =
3533 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3534
	/* ask L5 driver to add data to the struct */
3536 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3537}
3538
/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify others function about the change
 */
3544static void bnx2x_config_mf_bw(struct bnx2x *bp)
3545{
	/* Workaround for MFW bug.
	 * MFW is not supposed to generate BW attention in
	 * single function mode
	 */
3550 if (!IS_MF(bp)) {
3551 DP(BNX2X_MSG_MCP,
3552 "Ignoring MF BW config in single function mode\n");
3553 return;
3554 }
3555
3556 if (bp->link_vars.link_up) {
3557 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3558 bnx2x_link_sync_notify(bp);
3559 }
3560 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3561}
3562
3563static void bnx2x_set_mf_bw(struct bnx2x *bp)
3564{
3565 bnx2x_config_mf_bw(bp);
3566 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3567}
3568
3569static void bnx2x_handle_eee_event(struct bnx2x *bp)
3570{
3571 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3572 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3573}
3574
3575#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3576#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3577
3578static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3579{
3580 enum drv_info_opcode op_code;
3581 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3582 bool release = false;
3583 int wait;
3584
	/* if drv_info version supported by MFW doesn't match - send NACK */
3586 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3587 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3588 return;
3589 }
3590
3591 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3592 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3593
	/* Must prevent other flows from accessing drv_info_to_mcp */
3595 mutex_lock(&bp->drv_info_mutex);
3596
3597 memset(&bp->slowpath->drv_info_to_mcp, 0,
3598 sizeof(union drv_info_to_mcp));
3599
3600 switch (op_code) {
3601 case ETH_STATS_OPCODE:
3602 bnx2x_drv_info_ether_stat(bp);
3603 break;
3604 case FCOE_STATS_OPCODE:
3605 bnx2x_drv_info_fcoe_stat(bp);
3606 break;
3607 case ISCSI_STATS_OPCODE:
3608 bnx2x_drv_info_iscsi_stat(bp);
3609 break;
3610 default:
		/* if op code isn't supported - send NACK */
3612 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3613 goto out;
3614 }
3615
	/* if we got drv_info attn from MFW then these fields are defined in
	 * shmem2 for sure
	 */
3619 SHMEM2_WR(bp, drv_info_host_addr_lo,
3620 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3621 SHMEM2_WR(bp, drv_info_host_addr_hi,
3622 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3623
3624 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3625
	/* Management is done with the data once it reads the indication bit;
	 * poll mfw_drv_indication until the MFW clears the bit, bounded by
	 * BNX2X_UPDATE_DRV_INFO_IND_COUNT iterations of
	 * BNX2X_UPDATE_DRV_INFO_IND_LENGTH ms each.
	 */
3630 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3631 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3632 } else if (!bp->drv_info_mng_owner) {
3633 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3634
3635 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3636 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3637
			/* once MFW reads the data, release the indication */
3639 if (indication & bit) {
3640 SHMEM2_WR(bp, mfw_drv_indication,
3641 indication & ~bit);
3642 release = true;
3643 break;
3644 }
3645
3646 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3647 }
3648 }
3649 if (!release) {
3650 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3651 bp->drv_info_mng_owner = true;
3652 }
3653
3654out:
3655 mutex_unlock(&bp->drv_info_mutex);
3656}
3657
3658static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3659{
3660 u8 vals[4];
3661 int i = 0;
3662
3663 if (bnx2x_format) {
3664 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3665 &vals[0], &vals[1], &vals[2], &vals[3]);
3666 if (i > 0)
3667 vals[0] -= '0';
3668 } else {
3669 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3670 &vals[0], &vals[1], &vals[2], &vals[3]);
3671 }
3672
3673 while (i < 4)
3674 vals[i++] = 0;
3675
3676 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3677}
3678
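/* Publish the ETH/iSCSI/FCoE driver versions to the management FW
 * through shmem2.
 */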
3679void bnx2x_update_mng_version(struct bnx2x *bp)
3680{
3681 u32 iscsiver = DRV_VER_NOT_LOADED;
3682 u32 fcoever = DRV_VER_NOT_LOADED;
3683 u32 ethver = DRV_VER_NOT_LOADED;
3684 int idx = BP_FW_MB_IDX(bp);
3685 u8 *version;
3686
3687 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3688 return;
3689
3690 mutex_lock(&bp->drv_info_mutex);
3691
3692 if (bp->drv_info_mng_owner)
3693 goto out;
3694
3695 if (bp->state != BNX2X_STATE_OPEN)
3696 goto out;
3697
	/* Parse ethernet driver version */
3699 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3700 if (!CNIC_LOADED(bp))
3701 goto out;
3702
	/* Try getting storage driver version via CNIC */
3704 memset(&bp->slowpath->drv_info_to_mcp, 0,
3705 sizeof(union drv_info_to_mcp));
3706 bnx2x_drv_info_iscsi_stat(bp);
3707 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3708 iscsiver = bnx2x_update_mng_version_utility(version, false);
3709
3710 memset(&bp->slowpath->drv_info_to_mcp, 0,
3711 sizeof(union drv_info_to_mcp));
3712 bnx2x_drv_info_fcoe_stat(bp);
3713 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3714 fcoever = bnx2x_update_mng_version_utility(version, false);
3715
3716out:
3717 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3718 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3719 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3720
3721 mutex_unlock(&bp->drv_info_mutex);
3722
3723 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3724 ethver, iscsiver, fcoever);
3725}
3726
3727void bnx2x_update_mfw_dump(struct bnx2x *bp)
3728{
3729 u32 drv_ver;
3730 u32 valid_dump;
3731
3732 if (!SHMEM2_HAS(bp, drv_info))
3733 return;
3734
	/* Update Driver load time, possibly broken in y2038 */
3736 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3737
3738 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3739 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3740
3741 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3742
	/* Check & notify On-Chip dump. */
3744 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3745
3746 if (valid_dump & FIRST_DUMP_VALID)
3747 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3748
3749 if (valid_dump & SECOND_DUMP_VALID)
3750 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3751}
3752
3753static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3754{
3755 u32 cmd_ok, cmd_fail;
3756
	/* sanity - DCC and OEM events should not arrive simultaneously */
3758 if (event & DRV_STATUS_DCC_EVENT_MASK &&
3759 event & DRV_STATUS_OEM_EVENT_MASK) {
3760 BNX2X_ERR("Received simultaneous events %08x\n", event);
3761 return;
3762 }
3763
3764 if (event & DRV_STATUS_DCC_EVENT_MASK) {
3765 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3766 cmd_ok = DRV_MSG_CODE_DCC_OK;
3767 } else {
3768 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3769 cmd_ok = DRV_MSG_CODE_OEM_OK;
3770 }
3771
3772 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3773
3774 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3775 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
		/* This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
3780 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3781 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3782 bp->flags |= MF_FUNC_DIS;
3783
3784 bnx2x_e1h_disable(bp);
3785 } else {
3786 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3787 bp->flags &= ~MF_FUNC_DIS;
3788
3789 bnx2x_e1h_enable(bp);
3790 }
3791 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3792 DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3793 }
3794
3795 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3796 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3797 bnx2x_config_mf_bw(bp);
3798 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3799 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3800 }
3801
	/* Report results to MCP */
3803 if (event)
3804 bnx2x_fw_command(bp, cmd_fail, 0);
3805 else
3806 bnx2x_fw_command(bp, cmd_ok, 0);
3807}
3808
/* must be called under the spq lock */
3810static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3811{
3812 struct eth_spe *next_spe = bp->spq_prod_bd;
3813
3814 if (bp->spq_prod_bd == bp->spq_last_bd) {
3815 bp->spq_prod_bd = bp->spq;
3816 bp->spq_prod_idx = 0;
3817 DP(BNX2X_MSG_SP, "end of spq\n");
3818 } else {
3819 bp->spq_prod_bd++;
3820 bp->spq_prod_idx++;
3821 }
3822 return next_spe;
3823}
3824
/* must be called under the spq lock */
3826static void bnx2x_sp_prod_update(struct bnx2x *bp)
3827{
3828 int func = BP_FUNC(bp);
3829
	/*
	 * Make sure that BD data is updated before writing the producer:
	 * BD data is written to the memory, the producer is read from the
	 * memory, thus we need a full memory barrier to ensure the ordering.
	 */
3835 mb();
3836
3837 REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3838 bp->spq_prod_idx);
3839}
3840
/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command type
 */
3847static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3848{
3849 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3850 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3851 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3852 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3853 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3854 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3855 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3856 return true;
3857 else
3858 return false;
3859}
3860
/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */
3875int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3876 u32 data_hi, u32 data_lo, int cmd_type)
3877{
3878 struct eth_spe *spe;
3879 u16 type;
3880 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3881
3882#ifdef BNX2X_STOP_ON_ERROR
3883 if (unlikely(bp->panic)) {
3884 BNX2X_ERR("Can't post SP when there is panic\n");
3885 return -EIO;
3886 }
3887#endif
3888
3889 spin_lock_bh(&bp->spq_lock);
3890
3891 if (common) {
3892 if (!atomic_read(&bp->eq_spq_left)) {
3893 BNX2X_ERR("BUG! EQ ring full!\n");
3894 spin_unlock_bh(&bp->spq_lock);
3895 bnx2x_panic();
3896 return -EBUSY;
3897 }
3898 } else if (!atomic_read(&bp->cq_spq_left)) {
3899 BNX2X_ERR("BUG! SPQ ring full!\n");
3900 spin_unlock_bh(&bp->spq_lock);
3901 bnx2x_panic();
3902 return -EBUSY;
3903 }
3904
3905 spe = bnx2x_sp_get_next(bp);
3906
	/* CID needs port number to be encoded in it */
3908 spe->hdr.conn_and_cmd_data =
3909 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3910 HW_CID(bp, cid));
3911
	/* In some cases, type may already contain the func-id
	 * mainly in SRIOV related use cases, so we add it here only
	 * if it's not already set.
	 */
3916 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3917 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3918 SPE_HDR_CONN_TYPE;
3919 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3920 SPE_HDR_FUNCTION_ID);
3921 } else {
3922 type = cmd_type;
3923 }
3924
3925 spe->hdr.type = cpu_to_le16(type);
3926
3927 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3928 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3929
	/*
	 * It's ok if the actual decrement is issued towards the memory
	 * somewhere between the spin_lock and spin_unlock. Thus no
	 * more explicit memory barrier is needed.
	 */
3935 if (common)
3936 atomic_dec(&bp->eq_spq_left);
3937 else
3938 atomic_dec(&bp->cq_spq_left);
3939
3940 DP(BNX2X_MSG_SP,
3941 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3942 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3943 (u32)(U64_LO(bp->spq_mapping) +
3944 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3945 HW_CID(bp, cid), data_hi, data_lo, type,
3946 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3947
3948 bnx2x_sp_prod_update(bp);
3949 spin_unlock_bh(&bp->spq_lock);
3950 return 0;
3951}
3952
/* acquire split MCP access lock register */
3954static int bnx2x_acquire_alr(struct bnx2x *bp)
3955{
3956 u32 j, val;
3957 int rc = 0;
3958
3959 might_sleep();
3960 for (j = 0; j < 1000; j++) {
3961 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3962 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3963 if (val & MCPR_ACCESS_LOCK_LOCK)
3964 break;
3965
3966 usleep_range(5000, 10000);
3967 }
3968 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3969 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3970 rc = -EBUSY;
3971 }
3972
3973 return rc;
3974}
3975
/* release split MCP access lock register */
3977static void bnx2x_release_alr(struct bnx2x *bp)
3978{
3979 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3980}
3981
3982#define BNX2X_DEF_SB_ATT_IDX 0x0001
3983#define BNX2X_DEF_SB_IDX 0x0002
3984
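/* Sample the default status block indices and return flags indicating
 * which of them (attention index / slowpath index) have changed.
 */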
3985static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3986{
3987 struct host_sp_status_block *def_sb = bp->def_status_blk;
3988 u16 rc = 0;
3989
3990 barrier();
3991 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3992 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3993 rc |= BNX2X_DEF_SB_ATT_IDX;
3994 }
3995
3996 if (bp->def_idx != def_sb->sp_sb.running_index) {
3997 bp->def_idx = def_sb->sp_sb.running_index;
3998 rc |= BNX2X_DEF_SB_IDX;
3999 }
4000
	/* Do not reorder: indices reading should complete before handling */
4002 barrier();
4003 return rc;
4004}
4005
/*
 * slow path service functions
 */

4010static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4011{
4012 int port = BP_PORT(bp);
4013 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4014 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4015 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4016 NIG_REG_MASK_INTERRUPT_PORT0;
4017 u32 aeu_mask;
4018 u32 nig_mask = 0;
4019 u32 reg_addr;
4020
4021 if (bp->attn_state & asserted)
4022 BNX2X_ERR("IGU ERROR\n");
4023
4024 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4025 aeu_mask = REG_RD(bp, aeu_addr);
4026
4027 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4028 aeu_mask, asserted);
4029 aeu_mask &= ~(asserted & 0x3ff);
4030 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4031
4032 REG_WR(bp, aeu_addr, aeu_mask);
4033 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4034
4035 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4036 bp->attn_state |= asserted;
4037 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4038
4039 if (asserted & ATTN_HARD_WIRED_MASK) {
4040 if (asserted & ATTN_NIG_FOR_FUNC) {
4041
4042 bnx2x_acquire_phy_lock(bp);
4043
			/* save nig interrupt mask */
4045 nig_mask = REG_RD(bp, nig_int_mask_addr);
4046
			/* If nig_mask is not set, no need to call the update
			 * function.
			 */
4050 if (nig_mask) {
4051 REG_WR(bp, nig_int_mask_addr, 0);
4052
4053 bnx2x_link_attn(bp);
4054 }
4055
4056
4057 }
4058 if (asserted & ATTN_SW_TIMER_4_FUNC)
4059 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4060
4061 if (asserted & GPIO_2_FUNC)
4062 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4063
4064 if (asserted & GPIO_3_FUNC)
4065 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4066
4067 if (asserted & GPIO_4_FUNC)
4068 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4069
4070 if (port == 0) {
4071 if (asserted & ATTN_GENERAL_ATTN_1) {
4072 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4073 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4074 }
4075 if (asserted & ATTN_GENERAL_ATTN_2) {
4076 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4077 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4078 }
4079 if (asserted & ATTN_GENERAL_ATTN_3) {
4080 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4081 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4082 }
4083 } else {
4084 if (asserted & ATTN_GENERAL_ATTN_4) {
4085 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4086 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4087 }
4088 if (asserted & ATTN_GENERAL_ATTN_5) {
4089 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4090 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4091 }
4092 if (asserted & ATTN_GENERAL_ATTN_6) {
4093 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4094 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4095 }
4096 }
4097
4098 }
4099
4100 if (bp->common.int_block == INT_BLOCK_HC)
4101 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4102 COMMAND_REG_ATTN_BITS_SET);
4103 else
4104 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4105
4106 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4107 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4108 REG_WR(bp, reg_addr, asserted);
4109
	/* now set back the mask */
4111 if (asserted & ATTN_NIG_FOR_FUNC) {
		/* Verify that IGU ack through BAR was written before restoring
		 * NIG mask. This loop should exit only if IGU ack was written
		 * before this function.
		 */
4115 if (bp->common.int_block != INT_BLOCK_HC) {
4116 u32 cnt = 0, igu_acked;
4117 do {
4118 igu_acked = REG_RD(bp,
4119 IGU_REG_ATTENTION_ACK_BITS);
4120 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4121 (++cnt < MAX_IGU_ATTN_ACK_TO));
4122 if (!igu_acked)
4123 DP(NETIF_MSG_HW,
4124 "Failed to verify IGU ack on time\n");
4125 barrier();
4126 }
4127 REG_WR(bp, nig_int_mask_addr, nig_mask);
4128 bnx2x_release_phy_lock(bp);
4129 }
4130}
4131
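/* Record the failure in the external PHY config in shmem, warn the user
 * and schedule the card shutdown in process context.
 */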
4132static void bnx2x_fan_failure(struct bnx2x *bp)
4133{
4134 int port = BP_PORT(bp);
4135 u32 ext_phy_config;
4136
4137 ext_phy_config =
4138 SHMEM_RD(bp,
4139 dev_info.port_hw_config[port].external_phy_config);
4140
4141 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4142 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4143 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4144 ext_phy_config);
4145
	/* log the failure */
4147 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4148 "Please contact OEM Support for assistance\n");
4149
	/* Schedule device reset (unload)
	 * This is due to some boards consuming sufficient power when driver is
	 * up to overheat if fan fails.
	 */
4154 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4155}
4156
4157static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4158{
4159 int port = BP_PORT(bp);
4160 int reg_offset;
4161 u32 val;
4162
4163 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4164 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4165
4166 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4167
4168 val = REG_RD(bp, reg_offset);
4169 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4170 REG_WR(bp, reg_offset, val);
4171
4172 BNX2X_ERR("SPIO5 hw attention\n");
4173
		/* Fan failure attention */
4175 bnx2x_hw_reset_phy(&bp->link_params);
4176 bnx2x_fan_failure(bp);
4177 }
4178
4179 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4180 bnx2x_acquire_phy_lock(bp);
4181 bnx2x_handle_module_detect_int(&bp->link_params);
4182 bnx2x_release_phy_lock(bp);
4183 }
4184
4185 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4186
4187 val = REG_RD(bp, reg_offset);
4188 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4189 REG_WR(bp, reg_offset, val);
4190
4191 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4192 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4193 bnx2x_panic();
4194 }
4195}
4196
4197static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4198{
4199 u32 val;
4200
4201 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4202
4203 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4204 BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
4206 if (val & 0x2)
4207 BNX2X_ERR("FATAL error from DORQ\n");
4208 }
4209
4210 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4211
4212 int port = BP_PORT(bp);
4213 int reg_offset;
4214
4215 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4216 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4217
4218 val = REG_RD(bp, reg_offset);
4219 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4220 REG_WR(bp, reg_offset, val);
4221
4222 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4223 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4224 bnx2x_panic();
4225 }
4226}
4227
4228static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4229{
4230 u32 val;
4231
4232 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4233
4234 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4235 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4236
4237 if (val & 0x2)
4238 BNX2X_ERR("FATAL error from CFC\n");
4239 }
4240
4241 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4242 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4243 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4244
4245 if (val & 0x18000)
4246 BNX2X_ERR("FATAL error from PXP\n");
4247
4248 if (!CHIP_IS_E1x(bp)) {
4249 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4250 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4251 }
4252 }
4253
4254 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4255
4256 int port = BP_PORT(bp);
4257 int reg_offset;
4258
4259 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4260 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4261
4262 val = REG_RD(bp, reg_offset);
4263 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4264 REG_WR(bp, reg_offset, val);
4265
4266 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4267 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4268 bnx2x_panic();
4269 }
4270}
4271
4272static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4273{
4274 u32 val;
4275
4276 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4277
4278 if (attn & BNX2X_PMF_LINK_ASSERT) {
4279 int func = BP_FUNC(bp);
4280
4281 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4282 bnx2x_read_mf_cfg(bp);
4283 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4284 func_mf_config[BP_ABS_FUNC(bp)].config);
4285 val = SHMEM_RD(bp,
4286 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4287
4288 if (val & (DRV_STATUS_DCC_EVENT_MASK |
4289 DRV_STATUS_OEM_EVENT_MASK))
4290 bnx2x_oem_event(bp,
4291 (val & (DRV_STATUS_DCC_EVENT_MASK |
4292 DRV_STATUS_OEM_EVENT_MASK)));
4293
4294 if (val & DRV_STATUS_SET_MF_BW)
4295 bnx2x_set_mf_bw(bp);
4296
4297 if (val & DRV_STATUS_DRV_INFO_REQ)
4298 bnx2x_handle_drv_info_req(bp);
4299
4300 if (val & DRV_STATUS_VF_DISABLED)
4301 bnx2x_schedule_iov_task(bp,
4302 BNX2X_IOV_HANDLE_FLR);
4303
4304 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4305 bnx2x_pmf_update(bp);
4306
			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
			    bp->dcbx_enabled > 0)
				/* start DCBX state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
4313 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4314 bnx2x_handle_afex_cmd(bp,
4315 val & DRV_STATUS_AFEX_EVENT_MASK);
4316 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4317 bnx2x_handle_eee_event(bp);
4318
4319 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4320 bnx2x_schedule_sp_rtnl(bp,
4321 BNX2X_SP_RTNL_UPDATE_SVID, 0);
4322
4323 if (bp->link_vars.periodic_flags &
4324 PERIODIC_FLAGS_LINK_EVENT) {
4325
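				/* sync with link */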
4326 bnx2x_acquire_phy_lock(bp);
4327 bp->link_vars.periodic_flags &=
4328 ~PERIODIC_FLAGS_LINK_EVENT;
4329 bnx2x_release_phy_lock(bp);
4330 if (IS_MF(bp))
4331 bnx2x_link_sync_notify(bp);
4332 bnx2x_link_report(bp);
4333 }
4334
4335
4336
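			/* Always call it here: bnx2x_link_report() will
			 * prevent the link indication duplication.
			 */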
4337 bnx2x__link_status_update(bp);
4338 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4339
4340 BNX2X_ERR("MC assert!\n");
4341 bnx2x_mc_assert(bp);
4342 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4343 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4344 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4345 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4346 bnx2x_panic();
4347
4348 } else if (attn & BNX2X_MCP_ASSERT) {
4349
4350 BNX2X_ERR("MCP assert!\n");
4351 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4352 bnx2x_fw_dump(bp);
4353
4354 } else
4355 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4356 }
4357
4358 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4359 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4360 if (attn & BNX2X_GRC_TIMEOUT) {
4361 val = CHIP_IS_E1(bp) ? 0 :
4362 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4363 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4364 }
4365 if (attn & BNX2X_GRC_RSV) {
4366 val = CHIP_IS_E1(bp) ? 0 :
4367 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4368 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4369 }
4370 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4371 }
4372}
4373
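/*
 * BNX2X_RECOVERY_GLOB_REG bit layout:
 * bits 0-7   - path 0 PF load mask (one bit per PF)
 * bits 8-15  - path 1 PF load mask
 * bit  16    - path 0 RESET_IN_PROGRESS
 * bit  17    - path 1 RESET_IN_PROGRESS
 * bit  18    - GLOBAL_RESET
 */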
4388#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4389
4390#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4391#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4392#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4393#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4394#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4395#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4396#define BNX2X_GLOBAL_RESET_BIT 0x00040000
4397
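/*
 * Set the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */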
4403void bnx2x_set_reset_global(struct bnx2x *bp)
4404{
4405 u32 val;
4406 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4407 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4408 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4409 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4410}
4411
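/*
 * Clear the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */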
4417static void bnx2x_clear_reset_global(struct bnx2x *bp)
4418{
4419 u32 val;
4420 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4421 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4422 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4423 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4424}
4425
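/*
 * Check the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock.
 */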
4431static bool bnx2x_reset_is_global(struct bnx2x *bp)
4432{
4433 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4434
4435 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4436 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4437}
4438
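/*
 * Clear the RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock.
 */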
4444static void bnx2x_set_reset_done(struct bnx2x *bp)
4445{
4446 u32 val;
4447 u32 bit = BP_PATH(bp) ?
4448 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4449 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4450 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4451
4452
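	/* Clear the bit */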
4453 val &= ~bit;
4454 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4455
4456 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4457}
4458
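/*
 * Set the RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock.
 */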
4464void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4465{
4466 u32 val;
4467 u32 bit = BP_PATH(bp) ?
4468 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4469 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4470 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4471
4472
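	/* Set the bit */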
4473 val |= bit;
4474 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4475 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4476}
4477
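/*
 * Check the RESET_IN_PROGRESS bit for the given engine.
 *
 * Should be run under rtnl lock.
 */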
4482bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4483{
4484 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4485 u32 bit = engine ?
4486 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4487
4488
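	/* return false if the bit is set */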
4489 return (val & bit) ? false : true;
4490}
4491
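/*
 * Mark this PF as loaded in the shared load mask.
 *
 * Should be run under rtnl lock.
 */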
4497void bnx2x_set_pf_load(struct bnx2x *bp)
4498{
4499 u32 val1, val;
4500 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4501 BNX2X_PATH0_LOAD_CNT_MASK;
4502 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4503 BNX2X_PATH0_LOAD_CNT_SHIFT;
4504
4505 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4506 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4507
4508 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* set bit of this PF */
	val1 |= (1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);
4521
4522 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4523 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4524}
4525
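/*
 * Clear this PF's bit in the shared load mask.
 *
 * Should be run under rtnl lock. Returns true if any other PF on
 * this path is still marked as loaded.
 */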
4535bool bnx2x_clear_pf_load(struct bnx2x *bp)
4536{
4537 u32 val1, val;
4538 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4539 BNX2X_PATH0_LOAD_CNT_MASK;
4540 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4541 BNX2X_PATH0_LOAD_CNT_SHIFT;
4542
4543 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4544 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4545 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* clear bit of this PF */
	val1 &= ~(1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);
4558
4559 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4560 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4561 return val1 != 0;
4562}
4563
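/*
 * Read the load status of all PFs on the given engine.
 *
 * Should be run under rtnl lock.
 */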
4569static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4570{
4571 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4572 BNX2X_PATH0_LOAD_CNT_MASK);
4573 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4574 BNX2X_PATH0_LOAD_CNT_SHIFT);
4575 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4576
4577 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4578
4579 val = (val & mask) >> shift;
4580
4581 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4582 engine, val);
4583
4584 return val != 0;
4585}
4586
4587static void _print_parity(struct bnx2x *bp, u32 reg)
4588{
4589 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4590}
4591
4592static void _print_next_block(int idx, const char *blk)
4593{
4594 pr_cont("%s%s", idx ? ", " : "", blk);
4595}
4596
4597static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4598 int *par_num, bool print)
4599{
4600 u32 cur_bit;
4601 bool res;
4602 int i;
4603
4604 res = false;
4605
4606 for (i = 0; sig; i++) {
4607 cur_bit = (0x1UL << i);
4608 if (sig & cur_bit) {
			res = true;
4610
4611 if (print) {
4612 switch (cur_bit) {
4613 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4614 _print_next_block((*par_num)++, "BRB");
4615 _print_parity(bp,
4616 BRB1_REG_BRB1_PRTY_STS);
4617 break;
4618 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4619 _print_next_block((*par_num)++,
4620 "PARSER");
4621 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4622 break;
4623 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4624 _print_next_block((*par_num)++, "TSDM");
4625 _print_parity(bp,
4626 TSDM_REG_TSDM_PRTY_STS);
4627 break;
4628 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4629 _print_next_block((*par_num)++,
4630 "SEARCHER");
4631 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4632 break;
4633 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4634 _print_next_block((*par_num)++, "TCM");
4635 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4636 break;
4637 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4638 _print_next_block((*par_num)++,
4639 "TSEMI");
4640 _print_parity(bp,
4641 TSEM_REG_TSEM_PRTY_STS_0);
4642 _print_parity(bp,
4643 TSEM_REG_TSEM_PRTY_STS_1);
4644 break;
4645 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4646 _print_next_block((*par_num)++, "XPB");
4647 _print_parity(bp, GRCBASE_XPB +
4648 PB_REG_PB_PRTY_STS);
4649 break;
4650 }
4651 }
4652
4653
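			/* Clear the bit */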
4654 sig &= ~cur_bit;
4655 }
4656 }
4657
4658 return res;
4659}
4660
4661static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4662 int *par_num, bool *global,
4663 bool print)
4664{
4665 u32 cur_bit;
4666 bool res;
4667 int i;
4668
4669 res = false;
4670
4671 for (i = 0; sig; i++) {
4672 cur_bit = (0x1UL << i);
4673 if (sig & cur_bit) {
			res = true;
4675 switch (cur_bit) {
4676 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4677 if (print) {
4678 _print_next_block((*par_num)++, "PBF");
4679 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4680 }
4681 break;
4682 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4683 if (print) {
4684 _print_next_block((*par_num)++, "QM");
4685 _print_parity(bp, QM_REG_QM_PRTY_STS);
4686 }
4687 break;
4688 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4689 if (print) {
4690 _print_next_block((*par_num)++, "TM");
4691 _print_parity(bp, TM_REG_TM_PRTY_STS);
4692 }
4693 break;
4694 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4695 if (print) {
4696 _print_next_block((*par_num)++, "XSDM");
4697 _print_parity(bp,
4698 XSDM_REG_XSDM_PRTY_STS);
4699 }
4700 break;
4701 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4702 if (print) {
4703 _print_next_block((*par_num)++, "XCM");
4704 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4705 }
4706 break;
4707 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4708 if (print) {
4709 _print_next_block((*par_num)++,
4710 "XSEMI");
4711 _print_parity(bp,
4712 XSEM_REG_XSEM_PRTY_STS_0);
4713 _print_parity(bp,
4714 XSEM_REG_XSEM_PRTY_STS_1);
4715 }
4716 break;
4717 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4718 if (print) {
4719 _print_next_block((*par_num)++,
4720 "DOORBELLQ");
4721 _print_parity(bp,
4722 DORQ_REG_DORQ_PRTY_STS);
4723 }
4724 break;
4725 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4726 if (print) {
4727 _print_next_block((*par_num)++, "NIG");
4728 if (CHIP_IS_E1x(bp)) {
4729 _print_parity(bp,
4730 NIG_REG_NIG_PRTY_STS);
4731 } else {
4732 _print_parity(bp,
4733 NIG_REG_NIG_PRTY_STS_0);
4734 _print_parity(bp,
4735 NIG_REG_NIG_PRTY_STS_1);
4736 }
4737 }
4738 break;
4739 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4740 if (print)
4741 _print_next_block((*par_num)++,
4742 "VAUX PCI CORE");
4743 *global = true;
4744 break;
4745 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4746 if (print) {
4747 _print_next_block((*par_num)++,
4748 "DEBUG");
4749 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4750 }
4751 break;
4752 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4753 if (print) {
4754 _print_next_block((*par_num)++, "USDM");
4755 _print_parity(bp,
4756 USDM_REG_USDM_PRTY_STS);
4757 }
4758 break;
4759 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4760 if (print) {
4761 _print_next_block((*par_num)++, "UCM");
4762 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4763 }
4764 break;
4765 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4766 if (print) {
4767 _print_next_block((*par_num)++,
4768 "USEMI");
4769 _print_parity(bp,
4770 USEM_REG_USEM_PRTY_STS_0);
4771 _print_parity(bp,
4772 USEM_REG_USEM_PRTY_STS_1);
4773 }
4774 break;
4775 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4776 if (print) {
4777 _print_next_block((*par_num)++, "UPB");
4778 _print_parity(bp, GRCBASE_UPB +
4779 PB_REG_PB_PRTY_STS);
4780 }
4781 break;
4782 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4783 if (print) {
4784 _print_next_block((*par_num)++, "CSDM");
4785 _print_parity(bp,
4786 CSDM_REG_CSDM_PRTY_STS);
4787 }
4788 break;
4789 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4790 if (print) {
4791 _print_next_block((*par_num)++, "CCM");
4792 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4793 }
4794 break;
4795 }
4796
4797
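			/* Clear the bit */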
4798 sig &= ~cur_bit;
4799 }
4800 }
4801
4802 return res;
4803}
4804
4805static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4806 int *par_num, bool print)
4807{
4808 u32 cur_bit;
4809 bool res;
4810 int i;
4811
4812 res = false;
4813
4814 for (i = 0; sig; i++) {
4815 cur_bit = (0x1UL << i);
4816 if (sig & cur_bit) {
4817 res = true;
4818 if (print) {
4819 switch (cur_bit) {
4820 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4821 _print_next_block((*par_num)++,
4822 "CSEMI");
4823 _print_parity(bp,
4824 CSEM_REG_CSEM_PRTY_STS_0);
4825 _print_parity(bp,
4826 CSEM_REG_CSEM_PRTY_STS_1);
4827 break;
4828 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4829 _print_next_block((*par_num)++, "PXP");
4830 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4831 _print_parity(bp,
4832 PXP2_REG_PXP2_PRTY_STS_0);
4833 _print_parity(bp,
4834 PXP2_REG_PXP2_PRTY_STS_1);
4835 break;
4836 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4837 _print_next_block((*par_num)++,
4838 "PXPPCICLOCKCLIENT");
4839 break;
4840 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4841 _print_next_block((*par_num)++, "CFC");
4842 _print_parity(bp,
4843 CFC_REG_CFC_PRTY_STS);
4844 break;
4845 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4846 _print_next_block((*par_num)++, "CDU");
4847 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4848 break;
4849 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4850 _print_next_block((*par_num)++, "DMAE");
4851 _print_parity(bp,
4852 DMAE_REG_DMAE_PRTY_STS);
4853 break;
4854 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4855 _print_next_block((*par_num)++, "IGU");
4856 if (CHIP_IS_E1x(bp))
4857 _print_parity(bp,
4858 HC_REG_HC_PRTY_STS);
4859 else
4860 _print_parity(bp,
4861 IGU_REG_IGU_PRTY_STS);
4862 break;
4863 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4864 _print_next_block((*par_num)++, "MISC");
4865 _print_parity(bp,
4866 MISC_REG_MISC_PRTY_STS);
4867 break;
4868 }
4869 }
4870
4871
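			/* Clear the bit */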
4872 sig &= ~cur_bit;
4873 }
4874 }
4875
4876 return res;
4877}
4878
4879static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4880 int *par_num, bool *global,
4881 bool print)
4882{
4883 bool res = false;
4884 u32 cur_bit;
4885 int i;
4886
4887 for (i = 0; sig; i++) {
4888 cur_bit = (0x1UL << i);
4889 if (sig & cur_bit) {
4890 switch (cur_bit) {
4891 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4892 if (print)
4893 _print_next_block((*par_num)++,
4894 "MCP ROM");
4895 *global = true;
4896 res = true;
4897 break;
4898 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4899 if (print)
4900 _print_next_block((*par_num)++,
4901 "MCP UMP RX");
4902 *global = true;
4903 res = true;
4904 break;
4905 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4906 if (print)
4907 _print_next_block((*par_num)++,
4908 "MCP UMP TX");
4909 *global = true;
4910 res = true;
4911 break;
4912 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4913 (*par_num)++;
4914
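				/* clear latched SCPAD parity from the MCP */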
4915 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4916 1UL << 10);
4917 break;
4918 }
4919
4920
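			/* Clear the bit */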
4921 sig &= ~cur_bit;
4922 }
4923 }
4924
4925 return res;
4926}
4927
4928static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4929 int *par_num, bool print)
4930{
4931 u32 cur_bit;
4932 bool res;
4933 int i;
4934
4935 res = false;
4936
4937 for (i = 0; sig; i++) {
4938 cur_bit = (0x1UL << i);
4939 if (sig & cur_bit) {
4940 res = true;
4941 if (print) {
4942 switch (cur_bit) {
4943 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4944 _print_next_block((*par_num)++,
4945 "PGLUE_B");
4946 _print_parity(bp,
4947 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4948 break;
4949 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4950 _print_next_block((*par_num)++, "ATC");
4951 _print_parity(bp,
4952 ATC_REG_ATC_PRTY_STS);
4953 break;
4954 }
4955 }
4956
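			/* Clear the bit */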
4957 sig &= ~cur_bit;
4958 }
4959 }
4960
4961 return res;
4962}
4963
4964static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4965 u32 *sig)
4966{
4967 bool res = false;
4968
4969 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4970 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4971 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4972 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4973 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4974 int par_num = 0;
4975
4976 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4977 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4978 sig[0] & HW_PRTY_ASSERT_SET_0,
4979 sig[1] & HW_PRTY_ASSERT_SET_1,
4980 sig[2] & HW_PRTY_ASSERT_SET_2,
4981 sig[3] & HW_PRTY_ASSERT_SET_3,
4982 sig[4] & HW_PRTY_ASSERT_SET_4);
4983 if (print) {
4984 if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4985 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4986 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4987 (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4988 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4989 netdev_err(bp->dev,
4990 "Parity errors detected in blocks: ");
4991 } else {
4992 print = false;
4993 }
4994 }
4995 res |= bnx2x_check_blocks_with_parity0(bp,
4996 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4997 res |= bnx2x_check_blocks_with_parity1(bp,
4998 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4999 res |= bnx2x_check_blocks_with_parity2(bp,
5000 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
5001 res |= bnx2x_check_blocks_with_parity3(bp,
5002 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
5003 res |= bnx2x_check_blocks_with_parity4(bp,
5004 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
5005
5006 if (print)
5007 pr_cont("\n");
5008 }
5009
5010 return res;
5011}
5012
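/**
 * bnx2x_chk_parity_attn - checks for parity attentions.
 *
 * @bp:		driver handle
 * @global:	set to true if a global parity attention is detected
 * @print:	log detected parity attentions to syslog
 */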
5020bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5021{
5022 struct attn_route attn = { {0} };
5023 int port = BP_PORT(bp);
5024
5025 attn.sig[0] = REG_RD(bp,
5026 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5027 port*4);
5028 attn.sig[1] = REG_RD(bp,
5029 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5030 port*4);
5031 attn.sig[2] = REG_RD(bp,
5032 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5033 port*4);
5034 attn.sig[3] = REG_RD(bp,
5035 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5036 port*4);
5037
5038
5039
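	/* Since MCP attentions can't be disabled inside the block, we need to
	 * read AEU registers to see whether they're currently masked
	 */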
5040 attn.sig[3] &= ((REG_RD(bp,
5041 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5042 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5043 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5044 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5045
5046 if (!CHIP_IS_E1x(bp))
5047 attn.sig[4] = REG_RD(bp,
5048 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5049 port*4);
5050
5051 return bnx2x_parity_attn(bp, global, print, attn.sig);
5052}
5053
5054static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5055{
5056 u32 val;
5057 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5058
5059 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5060 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5061 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5062 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5063 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5064 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5065 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5066 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5067 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5068 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5069 if (val &
5070 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5071 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5072 if (val &
5073 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5074 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5075 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5076 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5077 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5078 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5079 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5080 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5081 }
5082 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5083 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5084 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5085 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5086 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5087 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5088 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5089 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5090 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5091 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5092 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5093 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5094 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5095 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5096 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5097 }
5098
5099 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5100 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5101 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5102 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5103 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5104 }
5105}
5106
5107static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5108{
5109 struct attn_route attn, *group_mask;
5110 int port = BP_PORT(bp);
5111 int index;
5112 u32 reg_addr;
5113 u32 val;
5114 u32 aeu_mask;
5115 bool global = false;
5116
5117
5118
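	/* need to take HW lock because MCP or another port
	 * might also try to handle this event
	 */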
5119 bnx2x_acquire_alr(bp);
5120
5121 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5122#ifndef BNX2X_STOP_ON_ERROR
5123 bp->recovery_state = BNX2X_RECOVERY_INIT;
5124 schedule_delayed_work(&bp->sp_rtnl_task, 0);

		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		/* In case of parity errors don't handle attentions so that
		 * other functions would "see" the parity errors.
		 */
5130#else
5131 bnx2x_panic();
5132#endif
5133 bnx2x_release_alr(bp);
5134 return;
5135 }
5136
5137 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5138 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5139 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5140 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5141 if (!CHIP_IS_E1x(bp))
5142 attn.sig[4] =
5143 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5144 else
5145 attn.sig[4] = 0;
5146
5147 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5148 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5149
5150 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5151 if (deasserted & (1 << index)) {
5152 group_mask = &bp->attn_group[index];
5153
5154 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5155 index,
5156 group_mask->sig[0], group_mask->sig[1],
5157 group_mask->sig[2], group_mask->sig[3],
5158 group_mask->sig[4]);
5159
5160 bnx2x_attn_int_deasserted4(bp,
5161 attn.sig[4] & group_mask->sig[4]);
5162 bnx2x_attn_int_deasserted3(bp,
5163 attn.sig[3] & group_mask->sig[3]);
5164 bnx2x_attn_int_deasserted1(bp,
5165 attn.sig[1] & group_mask->sig[1]);
5166 bnx2x_attn_int_deasserted2(bp,
5167 attn.sig[2] & group_mask->sig[2]);
5168 bnx2x_attn_int_deasserted0(bp,
5169 attn.sig[0] & group_mask->sig[0]);
5170 }
5171 }
5172
5173 bnx2x_release_alr(bp);
5174
5175 if (bp->common.int_block == INT_BLOCK_HC)
5176 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5177 COMMAND_REG_ATTN_BITS_CLR);
5178 else
5179 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5180
5181 val = ~deasserted;
5182 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5183 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5184 REG_WR(bp, reg_addr, val);
5185
5186 if (~bp->attn_state & deasserted)
5187 BNX2X_ERR("IGU ERROR\n");
5188
5189 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5190 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5191
5192 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5193 aeu_mask = REG_RD(bp, reg_addr);
5194
5195 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
5196 aeu_mask, deasserted);
5197 aeu_mask |= (deasserted & 0x3ff);
5198 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5199
5200 REG_WR(bp, reg_addr, aeu_mask);
5201 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5202
5203 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5204 bp->attn_state &= ~deasserted;
5205 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5206}
5207
5208static void bnx2x_attn_int(struct bnx2x *bp)
5209{
5210
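	/* read local copy of bits */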
5211 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5212 attn_bits);
5213 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5214 attn_bits_ack);
5215 u32 attn_state = bp->attn_state;
5216
5217
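	/* look for changed bits */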
5218 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
5219 u32 deasserted = ~attn_bits & attn_ack & attn_state;
5220
5221 DP(NETIF_MSG_HW,
5222 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
5223 attn_bits, attn_ack, asserted, deasserted);
5224
5225 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5226 BNX2X_ERR("BAD attention state\n");
5227
5228
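	/* handle bits that were raised */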
5229 if (asserted)
5230 bnx2x_attn_int_asserted(bp, asserted);
5231
5232 if (deasserted)
5233 bnx2x_attn_int_deasserted(bp, deasserted);
5234}
5235
5236void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5237 u16 index, u8 op, u8 update)
5238{
5239 u32 igu_addr = bp->igu_base_addr;
5240 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5241 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5242 igu_addr);
5243}
5244
5245static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5246{
5247
5248 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5249}
5250
5251static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5252 union event_ring_elem *elem)
5253{
5254 u8 err = elem->message.error;
5255
5256 if (!bp->cnic_eth_dev.starting_cid ||
5257 (cid < bp->cnic_eth_dev.starting_cid &&
5258 cid != bp->cnic_eth_dev.iscsi_l2_cid))
5259 return 1;
5260
5261 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5262
5263 if (unlikely(err)) {
5264
5265 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5266 cid);
5267 bnx2x_panic_dump(bp, false);
5268 }
5269 bnx2x_cnic_cfc_comp(bp, cid, err);
5270 return 0;
5271}
5272
5273static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5274{
5275 struct bnx2x_mcast_ramrod_params rparam;
5276 int rc;
5277
5278 memset(&rparam, 0, sizeof(rparam));
5279
5280 rparam.mcast_obj = &bp->mcast_obj;
5281
5282 netif_addr_lock_bh(bp->dev);

	/* Clear pending state for the last command */
	bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
5288 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5289 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5290 if (rc < 0)
5291 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5292 rc);
5293 }
5294
5295 netif_addr_unlock_bh(bp->dev);
5296}
5297
5298static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5299 union event_ring_elem *elem)
5300{
5301 unsigned long ramrod_flags = 0;
5302 int rc = 0;
5303 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5304 u32 cid = echo & BNX2X_SWCID_MASK;
5305 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5306
5307
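	/* Always push next commands out, don't wait here */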
5308 __set_bit(RAMROD_CONT, &ramrod_flags);
5309
5310 switch (echo >> BNX2X_SWCID_SHIFT) {
5311 case BNX2X_FILTER_MAC_PENDING:
5312 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5313 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5314 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5315 else
5316 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5317
5318 break;
5319 case BNX2X_FILTER_VLAN_PENDING:
5320 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5321 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5322 break;
5323 case BNX2X_FILTER_MCAST_PENDING:
5324 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5325
5326
5327
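		/* This is only relevant for 57710 where multicast MACs are
		 * configured as unicast MACs using the same ramrod.
		 */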
5328 bnx2x_handle_mcast_eqe(bp);
5329 return;
5330 default:
5331 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5332 return;
5333 }
5334
5335 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5336
5337 if (rc < 0)
5338 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5339 else if (rc > 0)
5340 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5341}
5342
5343static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5344
5345static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5346{
5347 netif_addr_lock_bh(bp->dev);
5348
5349 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5350
5351
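	/* Send rx_mode command again if it was requested */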
5352 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5353 bnx2x_set_storm_rx_mode(bp);
5354 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5355 &bp->sp_state))
5356 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5357 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5358 &bp->sp_state))
5359 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5360
5361 netif_addr_unlock_bh(bp->dev);
5362}
5363
5364static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5365 union event_ring_elem *elem)
5366{
5367 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5368 DP(BNX2X_MSG_SP,
5369 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5370 elem->message.data.vif_list_event.func_bit_map);
5371 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5372 elem->message.data.vif_list_event.func_bit_map);
5373 } else if (elem->message.data.vif_list_event.echo ==
5374 VIF_LIST_RULE_SET) {
5375 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5376 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5377 }
5378}
5379
5380
5381static void bnx2x_after_function_update(struct bnx2x *bp)
5382{
5383 int q, rc;
5384 struct bnx2x_fastpath *fp;
5385 struct bnx2x_queue_state_params queue_params = {NULL};
5386 struct bnx2x_queue_update_params *q_update_params =
5387 &queue_params.params.update;
5388
5389
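	/* Send Q update command with afex vlan removal values for all Qs */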
5390 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5391
5392
5393 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5394 &q_update_params->update_flags);
5395 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5396 &q_update_params->update_flags);
5397 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5398
5399
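	/* set silent vlan removal values according to vlan mode */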
5400 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5401 q_update_params->silent_removal_value = 0;
5402 q_update_params->silent_removal_mask = 0;
5403 } else {
5404 q_update_params->silent_removal_value =
5405 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5406 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5407 }
5408
5409 for_each_eth_queue(bp, q) {
5410
5411 fp = &bp->fp[q];
5412 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5413
5414
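		/* send the ramrod */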
5415 rc = bnx2x_queue_state_change(bp, &queue_params);
5416 if (rc < 0)
5417 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5418 q);
5419 }
5420
5421 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5422 fp = &bp->fp[FCOE_IDX(bp)];
5423 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* clear pending completion bit */
		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

		/* mark latest Q bit */
		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		/* send Q update ramrod for FCoE Q */
5434 rc = bnx2x_queue_state_change(bp, &queue_params);
5435 if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for FCoE Q %d\n",
				  FCOE_IDX(bp));
5438 } else {
5439
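		/* If no FCoE ring - ACK MCP now */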
5440 bnx2x_link_report(bp);
5441 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5442 }
5443}
5444
5445static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5446 struct bnx2x *bp, u32 cid)
5447{
5448 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5449
5450 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5451 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5452 else
5453 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5454}
5455
5456static void bnx2x_eq_int(struct bnx2x *bp)
5457{
5458 u16 hw_cons, sw_cons, sw_prod;
5459 union event_ring_elem *elem;
5460 u8 echo;
5461 u32 cid;
5462 u8 opcode;
5463 int rc, spqe_cnt = 0;
5464 struct bnx2x_queue_sp_obj *q_obj;
5465 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5466 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5467
5468 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5469
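	/* hw_cons counts EQ descriptors including the unusable last
	 * element of each page, so skip over the next-page element.
	 */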
5475 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5476 hw_cons++;
5477
5478
5479
5480
5481
5482 sw_cons = bp->eq_cons;
5483 sw_prod = bp->eq_prod;
5484
5485 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5486 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5487
5488 for (; sw_cons != hw_cons;
5489 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5490
5491 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5492
5493 rc = bnx2x_iov_eq_sp_event(bp, elem);
5494 if (!rc) {
5495 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5496 rc);
5497 goto next_spqe;
5498 }
5499
5500 opcode = elem->message.opcode;
5501
5502
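		/* handle eq element */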
5503 switch (opcode) {
5504 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5505 bnx2x_vf_mbx_schedule(bp,
5506 &elem->message.data.vf_pf_event);
5507 continue;
5508
5509 case EVENT_RING_OPCODE_STAT_QUERY:
5510 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5511 "got statistics comp event %d\n",
5512 bp->stats_comp++);
5513
5514 goto next_spqe;
5515
5516 case EVENT_RING_OPCODE_CFC_DEL:
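			/* handle according to cid range */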
5524 cid = SW_CID(elem->message.data.cfc_del_event.cid);
5525
5526 DP(BNX2X_MSG_SP,
5527 "got delete ramrod for MULTI[%d]\n", cid);
5528
5529 if (CNIC_LOADED(bp) &&
5530 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5531 goto next_spqe;
5532
5533 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5534
5535 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5536 break;
5537
5538 goto next_spqe;
5539
5540 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5541 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5542 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5543 if (f_obj->complete_cmd(bp, f_obj,
5544 BNX2X_F_CMD_TX_STOP))
5545 break;
5546 goto next_spqe;
5547
5548 case EVENT_RING_OPCODE_START_TRAFFIC:
5549 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5550 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5551 if (f_obj->complete_cmd(bp, f_obj,
5552 BNX2X_F_CMD_TX_START))
5553 break;
5554 goto next_spqe;
5555
5556 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5557 echo = elem->message.data.function_update_event.echo;
5558 if (echo == SWITCH_UPDATE) {
5559 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5560 "got FUNC_SWITCH_UPDATE ramrod\n");
5561 if (f_obj->complete_cmd(
5562 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5563 break;
5564
5565 } else {
5566 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5567
5568 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5569 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5570 f_obj->complete_cmd(bp, f_obj,
5571 BNX2X_F_CMD_AFEX_UPDATE);
5572
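				/* We will perform the Queues update from
				 * the sp_rtnl task as all Queue SP
				 * operations should run under rtnl_lock.
				 */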
5577 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5578 }
5579
5580 goto next_spqe;
5581
5582 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5583 f_obj->complete_cmd(bp, f_obj,
5584 BNX2X_F_CMD_AFEX_VIFLISTS);
5585 bnx2x_after_afex_vif_lists(bp, elem);
5586 goto next_spqe;
5587 case EVENT_RING_OPCODE_FUNCTION_START:
5588 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5589 "got FUNC_START ramrod\n");
5590 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5591 break;
5592
5593 goto next_spqe;
5594
5595 case EVENT_RING_OPCODE_FUNCTION_STOP:
5596 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5597 "got FUNC_STOP ramrod\n");
5598 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5599 break;
5600
5601 goto next_spqe;
5602
5603 case EVENT_RING_OPCODE_SET_TIMESYNC:
5604 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5605 "got set_timesync ramrod completion\n");
5606 if (f_obj->complete_cmd(bp, f_obj,
5607 BNX2X_F_CMD_SET_TIMESYNC))
5608 break;
5609 goto next_spqe;
5610 }
5611
5612 switch (opcode | bp->state) {
5613 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5614 BNX2X_STATE_OPEN):
5615 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5616 BNX2X_STATE_OPENING_WAIT4_PORT):
5617 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5618 BNX2X_STATE_CLOSING_WAIT4_HALT):
5619 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5620 SW_CID(elem->message.data.eth_event.echo));
5621 rss_raw->clear_pending(rss_raw);
5622 break;
5623
5624 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5625 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5626 case (EVENT_RING_OPCODE_SET_MAC |
5627 BNX2X_STATE_CLOSING_WAIT4_HALT):
5628 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5629 BNX2X_STATE_OPEN):
5630 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5631 BNX2X_STATE_DIAG):
5632 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5633 BNX2X_STATE_CLOSING_WAIT4_HALT):
5634 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5635 bnx2x_handle_classification_eqe(bp, elem);
5636 break;
5637
5638 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5639 BNX2X_STATE_OPEN):
5640 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5641 BNX2X_STATE_DIAG):
5642 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5643 BNX2X_STATE_CLOSING_WAIT4_HALT):
5644 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5645 bnx2x_handle_mcast_eqe(bp);
5646 break;
5647
5648 case (EVENT_RING_OPCODE_FILTERS_RULES |
5649 BNX2X_STATE_OPEN):
5650 case (EVENT_RING_OPCODE_FILTERS_RULES |
5651 BNX2X_STATE_DIAG):
5652 case (EVENT_RING_OPCODE_FILTERS_RULES |
5653 BNX2X_STATE_CLOSING_WAIT4_HALT):
5654 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5655 bnx2x_handle_rx_mode_eqe(bp);
5656 break;
5657 default:
5658
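			/* unknown event - log the error and continue */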
5659 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5660 elem->message.opcode, bp->state);
5661 }
5662next_spqe:
5663 spqe_cnt++;
5664 }
5665
5666 smp_mb__before_atomic();
5667 atomic_add(spqe_cnt, &bp->eq_spq_left);
5668
5669 bp->eq_cons = sw_cons;
5670 bp->eq_prod = sw_prod;
5671
5672 smp_wmb();
5673
5674
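	/* update producer */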
5675 bnx2x_update_eq_prod(bp, bp->eq_prod);
5676}
5677
5678static void bnx2x_sp_task(struct work_struct *work)
5679{
5680 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5681
5682 DP(BNX2X_MSG_SP, "sp task invoked\n");
5683
5684
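	/* make sure the atomic interrupt_occurred has been written */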
5685 smp_rmb();
5686 if (atomic_read(&bp->interrupt_occurred)) {
5687
5688
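		/* an interrupt has occurred - update the status block
		 * indices to learn what work needs to be performed
		 */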
5689 u16 status = bnx2x_update_dsb_idx(bp);
5690
5691 DP(BNX2X_MSG_SP, "status %x\n", status);
5692 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5693 atomic_set(&bp->interrupt_occurred, 0);
5694
5695
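		/* HW attentions */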
5696 if (status & BNX2X_DEF_SB_ATT_IDX) {
5697 bnx2x_attn_int(bp);
5698 status &= ~BNX2X_DEF_SB_ATT_IDX;
5699 }
5700
5701
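		/* SP events: STAT_QUERY and others */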
5702 if (status & BNX2X_DEF_SB_IDX) {
5703 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5704
5705 if (FCOE_INIT(bp) &&
5706 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5707
5708
5709
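				/* schedule NAPI from process context -
				 * the raised softirq runs when bottom
				 * halves are re-enabled
				 */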
5710 local_bh_disable();
5711 napi_schedule(&bnx2x_fcoe(bp, napi));
5712 local_bh_enable();
5713 }
5714
5715
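			/* Handle EQ completions */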
5716 bnx2x_eq_int(bp);
5717 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5718 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5719
5720 status &= ~BNX2X_DEF_SB_IDX;
5721 }
5722
5723
5724 if (unlikely(status))
5725 DP(BNX2X_MSG_SP,
5726 "got an unknown interrupt! (status 0x%x)\n", status);
5727
5728
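		/* ack status block only if something was actually handled */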
5729 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5730 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5731 }
5732
5733
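	/* afex: poll whether a VIFSET_ACK should be sent to the MFW */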
5734 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5735 &bp->sp_state)) {
5736 bnx2x_link_report(bp);
5737 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5738 }
5739}
5740
5741irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5742{
5743 struct net_device *dev = dev_instance;
5744 struct bnx2x *bp = netdev_priv(dev);
5745
5746 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5747 IGU_INT_DISABLE, 0);
5748
5749#ifdef BNX2X_STOP_ON_ERROR
5750 if (unlikely(bp->panic))
5751 return IRQ_HANDLED;
5752#endif
5753
5754 if (CNIC_LOADED(bp)) {
5755 struct cnic_ops *c_ops;
5756
5757 rcu_read_lock();
5758 c_ops = rcu_dereference(bp->cnic_ops);
5759 if (c_ops)
5760 c_ops->cnic_handler(bp->cnic_data, NULL);
5761 rcu_read_unlock();
5762 }
5763
5764
5765
5766
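	/* schedule the sp task to perform default status block work and
	 * ack attention interrupts; the ack is done in the sp task itself
	 */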
5767 bnx2x_schedule_sp_task(bp);
5768
5769 return IRQ_HANDLED;
5770}
5771
5772
5773
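/* end of slow path */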
5774void bnx2x_drv_pulse(struct bnx2x *bp)
5775{
5776 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5777 bp->fw_drv_pulse_wr_seq);
5778}
5779
5780static void bnx2x_timer(struct timer_list *t)
5781{
5782 struct bnx2x *bp = from_timer(bp, t, timer);
5783
5784 if (!netif_running(bp->dev))
5785 return;
5786
5787 if (IS_PF(bp) &&
5788 !BP_NOMCP(bp)) {
5789 int mb_idx = BP_FW_MB_IDX(bp);
5790 u16 drv_pulse;
5791 u16 mcp_pulse;
5792
5793 ++bp->fw_drv_pulse_wr_seq;
5794 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5795 drv_pulse = bp->fw_drv_pulse_wr_seq;
5796 bnx2x_drv_pulse(bp);
5797
5798 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5799 MCP_PULSE_SEQ_MASK);
5800
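		/* The delta between driver pulse and MCP response should
		 * not get too big; if the MFW falls more than 5 pulses
		 * behind, warn the user.
		 */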
5805 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
			BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
5808 }
5809
5810 if (bp->state == BNX2X_STATE_OPEN)
5811 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5812
5813
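	/* sample the VF bulletin board for new posts from the PF */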
5814 if (IS_VF(bp))
5815 bnx2x_timer_sriov(bp);
5816
5817 mod_timer(&bp->timer, jiffies + bp->current_interval);
5818}
5819
5820
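/* nic init service functions */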
5828static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5829{
5830 u32 i;
5831 if (!(len%4) && !(addr%4))
5832 for (i = 0; i < len; i += 4)
5833 REG_WR(bp, addr + i, fill);
5834 else
5835 for (i = 0; i < len; i++)
5836 REG_WR8(bp, addr + i, fill);
5837}
5838
5839
5840static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5841 int fw_sb_id,
5842 u32 *sb_data_p,
5843 u32 data_size)
5844{
5845 int index;
5846 for (index = 0; index < data_size; index++)
5847 REG_WR(bp, BAR_CSTRORM_INTMEM +
5848 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5849 sizeof(u32)*index,
5850 *(sb_data_p + index));
5851}
5852
5853static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5854{
5855 u32 *sb_data_p;
5856 u32 data_size = 0;
5857 struct hc_status_block_data_e2 sb_data_e2;
5858 struct hc_status_block_data_e1x sb_data_e1x;
5859
5860
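	/* disable the function first */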
5861 if (!CHIP_IS_E1x(bp)) {
5862 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5863 sb_data_e2.common.state = SB_DISABLED;
5864 sb_data_e2.common.p_func.vf_valid = false;
5865 sb_data_p = (u32 *)&sb_data_e2;
5866 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5867 } else {
5868 memset(&sb_data_e1x, 0,
5869 sizeof(struct hc_status_block_data_e1x));
5870 sb_data_e1x.common.state = SB_DISABLED;
5871 sb_data_e1x.common.p_func.vf_valid = false;
5872 sb_data_p = (u32 *)&sb_data_e1x;
5873 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5874 }
5875 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5876
5877 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5878 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5879 CSTORM_STATUS_BLOCK_SIZE);
5880 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5881 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5882 CSTORM_SYNC_BLOCK_SIZE);
5883}
5884
5885
5886static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5887 struct hc_sp_status_block_data *sp_sb_data)
5888{
5889 int func = BP_FUNC(bp);
5890 int i;
5891 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5892 REG_WR(bp, BAR_CSTRORM_INTMEM +
5893 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5894 i*sizeof(u32),
5895 *((u32 *)sp_sb_data + i));
5896}
5897
5898static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5899{
5900 int func = BP_FUNC(bp);
5901 struct hc_sp_status_block_data sp_sb_data;
5902 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5903
5904 sp_sb_data.state = SB_DISABLED;
5905 sp_sb_data.p_func.vf_valid = false;
5906
5907 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5908
5909 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5910 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5911 CSTORM_SP_STATUS_BLOCK_SIZE);
5912 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5913 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5914 CSTORM_SP_SYNC_BLOCK_SIZE);
5915}
5916
5917static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5918 int igu_sb_id, int igu_seg_id)
5919{
5920 hc_sm->igu_sb_id = igu_sb_id;
5921 hc_sm->igu_seg_id = igu_seg_id;
5922 hc_sm->timer_value = 0xFF;
5923 hc_sm->time_to_expire = 0xFFFFFFFF;
5924}
5925
5926
5927static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5928{
5929
5930
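	/* zero out the state-machine id of all RX and TX indices */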
5931 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5932
5933
5934 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5935 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5936 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5937 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5938
5939
5940
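	/* map the RX index to the RX state machine and the
	 * TX indices to the TX state machine
	 */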
5941 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5942 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5943
5944
5945 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5946 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5947 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5948 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5949 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5950 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5951 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5952 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5953}
5954
5955void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5956 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5957{
5958 int igu_seg_id;
5959
5960 struct hc_status_block_data_e2 sb_data_e2;
5961 struct hc_status_block_data_e1x sb_data_e1x;
5962 struct hc_status_block_sm *hc_sm_p;
5963 int data_size;
5964 u32 *sb_data_p;
5965
5966 if (CHIP_INT_MODE_IS_BC(bp))
5967 igu_seg_id = HC_SEG_ACCESS_NORM;
5968 else
5969 igu_seg_id = IGU_SEG_ACCESS_NORM;
5970
5971 bnx2x_zero_fp_sb(bp, fw_sb_id);
5972
5973 if (!CHIP_IS_E1x(bp)) {
5974 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5975 sb_data_e2.common.state = SB_ENABLED;
5976 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5977 sb_data_e2.common.p_func.vf_id = vfid;
5978 sb_data_e2.common.p_func.vf_valid = vf_valid;
5979 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5980 sb_data_e2.common.same_igu_sb_1b = true;
5981 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5982 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5983 hc_sm_p = sb_data_e2.common.state_machine;
5984 sb_data_p = (u32 *)&sb_data_e2;
5985 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5986 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5987 } else {
5988 memset(&sb_data_e1x, 0,
5989 sizeof(struct hc_status_block_data_e1x));
5990 sb_data_e1x.common.state = SB_ENABLED;
5991 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5992 sb_data_e1x.common.p_func.vf_id = 0xff;
5993 sb_data_e1x.common.p_func.vf_valid = false;
5994 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5995 sb_data_e1x.common.same_igu_sb_1b = true;
5996 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5997 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5998 hc_sm_p = sb_data_e1x.common.state_machine;
5999 sb_data_p = (u32 *)&sb_data_e1x;
6000 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
6001 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
6002 }
6003
6004 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
6005 igu_sb_id, igu_seg_id);
6006 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
6007 igu_sb_id, igu_seg_id);
6008
6009 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
6010
6011
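	/* write indices to HW - PCI guarantees endianity of regpairs */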
6012 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
6013}
6014
6015static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
6016 u16 tx_usec, u16 rx_usec)
6017{
6018 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6019 false, rx_usec);
6020 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6021 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
6022 tx_usec);
6023 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6024 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6025 tx_usec);
6026 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6027 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6028 tx_usec);
6029}
6030
6031static void bnx2x_init_def_sb(struct bnx2x *bp)
6032{
6033 struct host_sp_status_block *def_sb = bp->def_status_blk;
6034 dma_addr_t mapping = bp->def_status_blk_mapping;
6035 int igu_sp_sb_index;
6036 int igu_seg_id;
6037 int port = BP_PORT(bp);
6038 int func = BP_FUNC(bp);
6039 int reg_offset, reg_offset_en5;
6040 u64 section;
6041 int index;
6042 struct hc_sp_status_block_data sp_sb_data;
6043 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6044
6045 if (CHIP_INT_MODE_IS_BC(bp)) {
6046 igu_sp_sb_index = DEF_SB_IGU_ID;
6047 igu_seg_id = HC_SEG_ACCESS_DEF;
6048 } else {
6049 igu_sp_sb_index = bp->igu_dsb_id;
6050 igu_seg_id = IGU_SEG_ACCESS_DEF;
6051 }
6052
6053
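	/* attention status block (ATTN) section */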
6054 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6055 atten_status_block);
6056 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6057
6058 bp->attn_state = 0;
6059
6060 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6061 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6062 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6063 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6064 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6065 int sindex;
6066
6067 for (sindex = 0; sindex < 4; sindex++)
6068 bp->attn_group[index].sig[sindex] =
6069 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6070
6071 if (!CHIP_IS_E1x(bp))
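			/* enable5 is separate from the rest of the
			 * attention registers, so the address skip
			 * between groups is 4 and not 16
			 */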
6077 bp->attn_group[index].sig[4] = REG_RD(bp,
6078 reg_offset_en5 + 0x4*index);
6079 else
6080 bp->attn_group[index].sig[4] = 0;
6081 }
6082
6083 if (bp->common.int_block == INT_BLOCK_HC) {
6084 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6085 HC_REG_ATTN_MSG0_ADDR_L);
6086
6087 REG_WR(bp, reg_offset, U64_LO(section));
6088 REG_WR(bp, reg_offset + 4, U64_HI(section));
6089 } else if (!CHIP_IS_E1x(bp)) {
6090 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6091 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6092 }
6093
6094 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6095 sp_sb);
6096
6097 bnx2x_zero_sp_sb(bp);
6098
6099
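	/* PCI guarantees endianity of regpairs */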
6100 sp_sb_data.state = SB_ENABLED;
6101 sp_sb_data.host_sb_addr.lo = U64_LO(section);
6102 sp_sb_data.host_sb_addr.hi = U64_HI(section);
6103 sp_sb_data.igu_sb_id = igu_sp_sb_index;
6104 sp_sb_data.igu_seg_id = igu_seg_id;
6105 sp_sb_data.p_func.pf_id = func;
6106 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6107 sp_sb_data.p_func.vf_id = 0xff;
6108
6109 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6110
6111 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6112}
6113
6114void bnx2x_update_coalesce(struct bnx2x *bp)
6115{
6116 int i;
6117
6118 for_each_eth_queue(bp, i)
6119 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6120 bp->tx_ticks, bp->rx_ticks);
6121}
6122
6123static void bnx2x_init_sp_ring(struct bnx2x *bp)
6124{
6125 spin_lock_init(&bp->spq_lock);
6126 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6127
6128 bp->spq_prod_idx = 0;
6129 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6130 bp->spq_prod_bd = bp->spq;
6131 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6132}
6133
6134static void bnx2x_init_eq_ring(struct bnx2x *bp)
6135{
6136 int i;
6137 for (i = 1; i <= NUM_EQ_PAGES; i++) {
6138 union event_ring_elem *elem =
6139 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6140
6141 elem->next_page.addr.hi =
6142 cpu_to_le32(U64_HI(bp->eq_mapping +
6143 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6144 elem->next_page.addr.lo =
6145 cpu_to_le32(U64_LO(bp->eq_mapping +
6146 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6147 }
6148 bp->eq_cons = 0;
6149 bp->eq_prod = NUM_EQ_DESC;
6150 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6151
6152 atomic_set(&bp->eq_spq_left,
6153 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6154}
6155
6156
6157static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6158 unsigned long rx_mode_flags,
6159 unsigned long rx_accept_flags,
6160 unsigned long tx_accept_flags,
6161 unsigned long ramrod_flags)
6162{
6163 struct bnx2x_rx_mode_ramrod_params ramrod_param;
6164 int rc;
6165
6166 memset(&ramrod_param, 0, sizeof(ramrod_param));
6167
6168
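	/* Prepare ramrod parameters */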
6169 ramrod_param.cid = 0;
6170 ramrod_param.cl_id = cl_id;
6171 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6172 ramrod_param.func_id = BP_FUNC(bp);
6173
6174 ramrod_param.pstate = &bp->sp_state;
6175 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6176
6177 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6178 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6179
6180 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6181
6182 ramrod_param.ramrod_flags = ramrod_flags;
6183 ramrod_param.rx_mode_flags = rx_mode_flags;
6184
6185 ramrod_param.rx_accept_flags = rx_accept_flags;
6186 ramrod_param.tx_accept_flags = tx_accept_flags;
6187
6188 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6189 if (rc < 0) {
6190 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6191 return rc;
6192 }
6193
6194 return 0;
6195}
6196
6197static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6198 unsigned long *rx_accept_flags,
6199 unsigned long *tx_accept_flags)
6200{
6201
6202 *rx_accept_flags = 0;
6203 *tx_accept_flags = 0;
6204
6205 switch (rx_mode) {
6206 case BNX2X_RX_MODE_NONE:
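		/* 'drop all' supersedes any accept flags that may have
		 * been passed to the function.
		 */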
6211 break;
6212 case BNX2X_RX_MODE_NORMAL:
6213 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6214 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6215 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6216
6217
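		/* internal switching mode */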
6218 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6219 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6220 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6221
6222 if (bp->accept_any_vlan) {
6223 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6224 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6225 }
6226
6227 break;
6228 case BNX2X_RX_MODE_ALLMULTI:
6229 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6230 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6231 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6232
6233
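		/* internal switching mode */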
6234 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6235 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6236 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6237
6238 if (bp->accept_any_vlan) {
6239 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6240 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6241 }
6242
6243 break;
6244 case BNX2X_RX_MODE_PROMISC:
6245
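		/* According to definition of SI mode, iface in promisc mode
		 * should receive matched and unmatched (in resolution of port)
		 * unicast packets.
		 */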
6249 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6250 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6251 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6252 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6253
6254
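		/* internal switching mode */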
6255 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6256 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6257
6258 if (IS_MF_SI(bp))
6259 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6260 else
6261 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6262
6263 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6264 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6265
6266 break;
6267 default:
6268 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6269 return -EINVAL;
6270 }
6271
6272 return 0;
6273}
6274
/* called with netif_addr_lock_bh() */
6276static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6277{
6278 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6279 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6280 int rc;
6281
6282 if (!NO_FCOE(bp))
		/* Configure rx_mode of FCoE Queue */
6284 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6285
6286 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6287 &tx_accept_flags);
6288 if (rc)
6289 return rc;
6290
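	/* apply the accept rules in both Rx and Tx directions */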
6291 __set_bit(RAMROD_RX, &ramrod_flags);
6292 __set_bit(RAMROD_TX, &ramrod_flags);
6293
6294 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6295 rx_accept_flags, tx_accept_flags,
6296 ramrod_flags);
6297}
6298
6299static void bnx2x_init_internal_common(struct bnx2x *bp)
6300{
6301 int i;
6302
	/* Zero this manually as its initialization is
	   currently missing in the initTool */
6305 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6306 REG_WR(bp, BAR_USTRORM_INTMEM +
6307 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6308 if (!CHIP_IS_E1x(bp)) {
6309 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6310 CHIP_INT_MODE_IS_BC(bp) ?
6311 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6312 }
6313}
6314
6315static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6316{
6317 switch (load_code) {
6318 case FW_MSG_CODE_DRV_LOAD_COMMON:
6319 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6320 bnx2x_init_internal_common(bp);
		/* fall through */

6323 case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */
		/* fall through */

6327 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
6330 break;
6331
6332 default:
6333 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6334 break;
6335 }
6336}
6337
6338static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6339{
6340 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6341}
6342
6343static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6344{
6345 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6346}
6347
6348static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6349{
6350 if (CHIP_IS_E1x(fp->bp))
6351 return BP_L_ID(fp->bp) + fp->index;
6352 else
6353 return bnx2x_fp_igu_sb_id(fp);
6354}
6355
6356static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6357{
6358 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6359 u8 cos;
6360 unsigned long q_type = 0;
6361 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6362 fp->rx_queue = fp_idx;
6363 fp->cid = fp_idx;
6364 fp->cl_id = bnx2x_fp_cl_id(fp);
6365 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6366 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6367
6368 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
6369
	/* init shortcut */
6371 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6372
	/* Setup SB indices */
6374 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6375
	/* Configure Queue State object */
6377 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6378 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6379
6380 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6381
	/* init tx data */
6383 for_each_cos_in_tx_queue(fp, cos) {
6384 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6385 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6386 FP_COS_TO_TXQ(fp, cos, bp),
6387 BNX2X_TX_SB_INDEX_BASE + cos, fp);
6388 cids[cos] = fp->txdata_ptr[cos]->cid;
6389 }
6390
	/* nothing more for vf to do here */
6392 if (IS_VF(bp))
6393 return;
6394
6395 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6396 fp->fw_sb_id, fp->igu_sb_id);
6397 bnx2x_update_fpsb_idx(fp);
6398 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6399 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6400 bnx2x_sp_mapping(bp, q_rdata), q_type);

	/* Configure classification DBs: Always enable Tx switching */
6405 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6406
6407 DP(NETIF_MSG_IFUP,
6408 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6409 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6410 fp->igu_sb_id);
6411}
6412
6413static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6414{
6415 int i;
6416
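	/* chain the Tx BD pages: the last BD of each page points at
	 * the start of the next page
	 */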
6417 for (i = 1; i <= NUM_TX_RINGS; i++) {
6418 struct eth_tx_next_bd *tx_next_bd =
6419 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6420
6421 tx_next_bd->addr_hi =
6422 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6423 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6424 tx_next_bd->addr_lo =
6425 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6426 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6427 }
6428
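	/* reset producer/consumer indices and prepare the doorbell data */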
6429 *txdata->tx_cons_sb = cpu_to_le16(0);
6430
6431 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6432 txdata->tx_db.data.zero_fill1 = 0;
6433 txdata->tx_db.data.prod = 0;
6434
6435 txdata->tx_pkt_prod = 0;
6436 txdata->tx_pkt_cons = 0;
6437 txdata->tx_bd_prod = 0;
6438 txdata->tx_bd_cons = 0;
6439 txdata->tx_pkt = 0;
6440}
6441
6442static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6443{
6444 int i;
6445
6446 for_each_tx_queue_cnic(bp, i)
6447 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6448}
6449
6450static void bnx2x_init_tx_rings(struct bnx2x *bp)
6451{
6452 int i;
6453 u8 cos;
6454
6455 for_each_eth_queue(bp, i)
6456 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6457 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6458}
6459
6460static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6461{
6462 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6463 unsigned long q_type = 0;
6464
6465 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6466 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6467 BNX2X_FCOE_ETH_CL_ID_IDX);
6468 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6469 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6470 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6471 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6472 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6473 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6474 fp);
6475
6476 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6477
	/* qZone id equals to FW (per path) client id */
6479 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
6481 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6482 bnx2x_rx_ustorm_prods_offset(fp);
6483
	/* Configure Queue State object */
6485 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6486 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6487
	/* No multi-CoS for FCoE L2 client */
6489 BUG_ON(fp->max_cos != 1);
6490
6491 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6492 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6493 bnx2x_sp_mapping(bp, q_rdata), q_type);
6494
6495 DP(NETIF_MSG_IFUP,
6496 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6497 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6498 fp->igu_sb_id);
6499}
6500
6501void bnx2x_nic_init_cnic(struct bnx2x *bp)
6502{
6503 if (!NO_FCOE(bp))
6504 bnx2x_init_fcoe_fp(bp);
6505
6506 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6507 BNX2X_VF_ID_INVALID, false,
6508 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6509
	/* ensure status block indices were read */
6511 rmb();
6512 bnx2x_init_rx_rings_cnic(bp);
6513 bnx2x_init_tx_rings_cnic(bp);
6514
	/* flush all */
6516 mb();
6517}
6518
6519void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6520{
6521 int i;
6522
	/* Setup NIC internals and enable interrupts */
6524 for_each_eth_queue(bp, i)
6525 bnx2x_init_eth_fp(bp, i);
6526
	/* ensure status block indices were read */
6528 rmb();
6529 bnx2x_init_rx_rings(bp);
6530 bnx2x_init_tx_rings(bp);
6531
6532 if (IS_PF(bp)) {
		/* Initialize MOD_ABS interrupts */
6534 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6535 bp->common.shmem_base,
6536 bp->common.shmem2_base, BP_PORT(bp));
6537
		/* initialize the default status block and sp ring */
6539 bnx2x_init_def_sb(bp);
6540 bnx2x_update_dsb_idx(bp);
6541 bnx2x_init_sp_ring(bp);
6542 } else {
6543 bnx2x_memset_stats(bp);
6544 }
6545}
6546
6547void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6548{
6549 bnx2x_init_eq_ring(bp);
6550 bnx2x_init_internal(bp, load_code);
6551 bnx2x_pf_init(bp);
6552 bnx2x_stats_init(bp);
6553
	/* flush all before enabling interrupts */
6555 mb();
6556
6557 bnx2x_int_enable(bp);
6558
	/* Check for SPIO5 */
6560 bnx2x_attn_int_deasserted0(bp,
6561 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6562 AEU_INPUTS_ATTN_BITS_SPIO5);
6563}
6564
/* gzip service functions */
6566static int bnx2x_gunzip_init(struct bnx2x *bp)
6567{
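	/* DMA-coherent scratch buffer that firmware images are inflated into */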
6568 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6569 &bp->gunzip_mapping, GFP_KERNEL);
6570 if (bp->gunzip_buf == NULL)
6571 goto gunzip_nomem1;
6572
6573 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6574 if (bp->strm == NULL)
6575 goto gunzip_nomem2;
6576
6577 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6578 if (bp->strm->workspace == NULL)
6579 goto gunzip_nomem3;
6580
6581 return 0;
6582
6583gunzip_nomem3:
6584 kfree(bp->strm);
6585 bp->strm = NULL;
6586
6587gunzip_nomem2:
6588 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6589 bp->gunzip_mapping);
6590 bp->gunzip_buf = NULL;
6591
6592gunzip_nomem1:
6593 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
6594 return -ENOMEM;
6595}
6596
6597static void bnx2x_gunzip_end(struct bnx2x *bp)
6598{
6599 if (bp->strm) {
6600 vfree(bp->strm->workspace);
6601 kfree(bp->strm);
6602 bp->strm = NULL;
6603 }
6604
6605 if (bp->gunzip_buf) {
6606 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6607 bp->gunzip_mapping);
6608 bp->gunzip_buf = NULL;
6609 }
6610}
6611
6612static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6613{
6614 int n, rc;
6615
	/* check gzip header */
6617 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6618 BNX2X_ERR("Bad gzip header\n");
6619 return -EINVAL;
6620 }
6621
6622 n = 10;
6623
6624#define FNAME 0x8
6625
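	/* if FNAME is set, skip the NUL-terminated original file name */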
6626 if (zbuf[3] & FNAME)
6627 while ((zbuf[n++] != 0) && (n < len));
6628
6629 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6630 bp->strm->avail_in = len - n;
6631 bp->strm->next_out = bp->gunzip_buf;
6632 bp->strm->avail_out = FW_BUF_SIZE;
6633
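	/* negative windowBits: raw deflate data, gzip header skipped above */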
6634 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6635 if (rc != Z_OK)
6636 return rc;
6637
6638 rc = zlib_inflate(bp->strm, Z_FINISH);
6639 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6640 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6641 bp->strm->msg);
6642
6643 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6644 if (bp->gunzip_outlen & 0x3)
6645 netdev_err(bp->dev,
6646 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6647 bp->gunzip_outlen);
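	/* length in 32-bit words, as consumed by the init code */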
6648 bp->gunzip_outlen >>= 2;
6649
6650 zlib_inflateEnd(bp->strm);
6651
6652 if (rc == Z_STREAM_END)
6653 return 0;
6654
6655 return rc;
6656}
6657
/* send a NIG loopback debug packet */
6665static void bnx2x_lb_pckt(struct bnx2x *bp)
6666{
6667 u32 wb_write[3];
6668
	/* Ethernet source and destination addresses */
6670 wb_write[0] = 0x55555555;
6671 wb_write[1] = 0x55555555;
6672 wb_write[2] = 0x20;
6673 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6674
	/* NON-IP protocol */
6676 wb_write[0] = 0x09000000;
6677 wb_write[1] = 0x55555555;
6678 wb_write[2] = 0x10;
6679 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6680}
6681
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
6686static int bnx2x_int_mem_test(struct bnx2x *bp)
6687{
6688 int factor;
6689 int count, i;
6690 u32 val = 0;
6691
6692 if (CHIP_REV_IS_FPGA(bp))
6693 factor = 120;
6694 else if (CHIP_REV_IS_EMUL(bp))
6695 factor = 200;
6696 else
6697 factor = 1;
6698
	/* Disable inputs of parser neighbor blocks */
6700 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6701 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6702 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6703 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6704
	/* Write 0 to parser credits for CFC search request */
6706 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6707
	/* send Ethernet packet */
6709 bnx2x_lb_pckt(bp);
6710
	/* Wait until NIG register shows 1 packet of size 0x10 */
6713 count = 1000 * factor;
6714 while (count) {
6715
6716 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6717 val = *bnx2x_sp(bp, wb_data[0]);
6718 if (val == 0x10)
6719 break;
6720
6721 usleep_range(10000, 20000);
6722 count--;
6723 }
6724 if (val != 0x10) {
6725 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6726 return -1;
6727 }
6728
	/* Wait until PRS register shows 1 packet */
6730 count = 1000 * factor;
6731 while (count) {
6732 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6733 if (val == 1)
6734 break;
6735
6736 usleep_range(10000, 20000);
6737 count--;
6738 }
6739 if (val != 0x1) {
6740 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6741 return -2;
6742 }
6743
	/* Reset and init BRB, PRS */
6745 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6746 msleep(50);
6747 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6748 msleep(50);
6749 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6750 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6751
6752 DP(NETIF_MSG_HW, "part2\n");
6753
	/* Disable inputs of parser neighbor blocks */
6755 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6756 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6757 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6758 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6759
	/* Write 0 to parser credits for CFC search request */
6761 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6762
	/* send 10 Ethernet packets */
6764 for (i = 0; i < 10; i++)
6765 bnx2x_lb_pckt(bp);
6766
	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
6769 count = 1000 * factor;
6770 while (count) {
6771
6772 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6773 val = *bnx2x_sp(bp, wb_data[0]);
6774 if (val == 0xb0)
6775 break;
6776
6777 usleep_range(10000, 20000);
6778 count--;
6779 }
6780 if (val != 0xb0) {
6781 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6782 return -3;
6783 }
6784
	/* Wait until PRS register shows 2 packets */
6786 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6787 if (val != 2)
6788 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6789
	/* Write 1 to parser credits for CFC search request */
6791 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6792
	/* Wait until PRS register shows 3 packets */
6794 msleep(10 * factor);
6795
6796 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6797 if (val != 3)
6798 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6799
	/* clear NIG EOP FIFO */
6801 for (i = 0; i < 11; i++)
6802 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6803 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6804 if (val != 1) {
6805 BNX2X_ERR("clear of NIG failed\n");
6806 return -4;
6807 }
6808
	/* Reset and init BRB, PRS, NIG */
6810 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6811 msleep(50);
6812 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6813 msleep(50);
6814 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6815 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6816 if (!CNIC_SUPPORT(bp))
		/* set NIC mode */
6818 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6819
	/* Enable inputs of parser neighbor blocks */
6821 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6822 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6823 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6824 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6825
6826 DP(NETIF_MSG_HW, "done\n");
6827
6828 return 0;
6829}
6830
6831static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6832{
6833 u32 val;
6834
6835 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6836 if (!CHIP_IS_E1x(bp))
6837 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6838 else
6839 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6840 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6841 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6848 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6849 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6850 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6851 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6852 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6853 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6854
6855
6856 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6857 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6858 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6859
6860
6861 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6862 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6863 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6864 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6865
6866
6867
6868 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6869 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6870 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6871 if (!CHIP_IS_E1x(bp))
6872 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6873 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6874 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6875
6876 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6877 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6878 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6879
6880
6881 if (!CHIP_IS_E1x(bp))
6882
6883 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6884
6885 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6886 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6887
6888 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);
6889}
6890
6891static void bnx2x_reset_common(struct bnx2x *bp)
6892{
6893 u32 val = 0x1400;
6894
6895
6896 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6897 0xd3ffff7f);
6898
6899 if (CHIP_IS_E3(bp)) {
6900 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6901 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6902 }
6903
6904 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6905}
6906
6907static void bnx2x_setup_dmae(struct bnx2x *bp)
6908{
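	/* DMAE is unusable until bnx2x_init_hw_common() sets dmae_ready */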
6909 bp->dmae_ready = 0;
6910 spin_lock_init(&bp->dmae_lock);
6911}
6912
6913static void bnx2x_init_pxp(struct bnx2x *bp)
6914{
6915 u16 devctl;
6916 int r_order, w_order;
6917
6918 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6919 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6920 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6921 if (bp->mrrs == -1)
6922 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6923 else {
6924 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6925 r_order = bp->mrrs;
6926 }
6927
6928 bnx2x_init_pxp_arb(bp, r_order, w_order);
6929}
6930
6931static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6932{
6933 int is_required;
6934 u32 val;
6935 int port;
6936
6937 if (BP_NOMCP(bp))
6938 return;
6939
6940 is_required = 0;
6941 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6942 SHARED_HW_CFG_FAN_FAILURE_MASK;
6943
6944 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6945 is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type
	 * since the power consumption of the board is affected by the PHY.
	 * Currently, fan is required for most designs with SFX7101,
	 * BCM8727 and BCM8481.
	 */
6952 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6953 for (port = PORT_0; port < PORT_MAX; port++) {
6954 is_required |=
6955 bnx2x_fan_failure_det_req(
6956 bp,
6957 bp->common.shmem_base,
6958 bp->common.shmem2_base,
6959 port);
6960 }
6961
6962 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6963
6964 if (is_required == 0)
6965 return;
6966
	/* Fan failure is indicated by SPIO 5 */
6968 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6969
	/* set to active low mode */
6971 val = REG_RD(bp, MISC_REG_SPIO_INT);
6972 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6973 REG_WR(bp, MISC_REG_SPIO_INT, val);
6974
	/* enable interrupt to signal the IGU */
6976 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6977 val |= MISC_SPIO_SPIO5;
6978 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6979}
6980
6981void bnx2x_pf_disable(struct bnx2x *bp)
6982{
6983 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6984 val &= ~IGU_PF_CONF_FUNC_EN;
6985
6986 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6987 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6988 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6989}
6990
6991static void bnx2x__common_init_phy(struct bnx2x *bp)
6992{
6993 u32 shmem_base[2], shmem2_base[2];

	/* Avoid common init in case MFW supports LFA */
6995 if (SHMEM2_RD(bp, size) >
6996 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6997 return;
6998 shmem_base[0] = bp->common.shmem_base;
6999 shmem2_base[0] = bp->common.shmem2_base;
7000 if (!CHIP_IS_E1x(bp)) {
7001 shmem_base[1] =
7002 SHMEM2_RD(bp, other_shmem_base_addr);
7003 shmem2_base[1] =
7004 SHMEM2_RD(bp, other_shmem2_base_addr);
7005 }
7006 bnx2x_acquire_phy_lock(bp);
7007 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
7008 bp->common.chip_id);
7009 bnx2x_release_phy_lock(bp);
7010}
7011
7012static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
7013{
7014 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
7015 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7016 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7017 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7018 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7019
	/* make sure this value is 0 */
7021 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7022
7023 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7024 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7025 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7026 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7027}
7028
7029static void bnx2x_set_endianity(struct bnx2x *bp)
7030{
7031#ifdef __BIG_ENDIAN
7032 bnx2x_config_endianity(bp, 1);
7033#else
7034 bnx2x_config_endianity(bp, 0);
7035#endif
7036}
7037
7038static void bnx2x_reset_endianity(struct bnx2x *bp)
7039{
7040 bnx2x_config_endianity(bp, 0);
7041}
7042
/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 */
7048static int bnx2x_init_hw_common(struct bnx2x *bp)
7049{
7050 u32 val;
7051
7052 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
7053

	/* take the RESET lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
7058 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7059
7060 bnx2x_reset_common(bp);
7061 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7062
7063 val = 0xfffc;
7064 if (CHIP_IS_E3(bp)) {
7065 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7066 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7067 }
7068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7069
7070 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7071
7072 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7073
7074 if (!CHIP_IS_E1x(bp)) {
7075 u8 abs_func_id;

		/* 4-port mode or 2-port mode we need to turn off
		 * master-enable for everyone; after that, turn it back on
		 * for self. So, we disregard multi-function or not, and
		 * always disable for all functions on the given path,
		 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
		 */
7084 for (abs_func_id = BP_PATH(bp);
7085 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7086 if (abs_func_id == BP_ABS_FUNC(bp)) {
7087 REG_WR(bp,
7088 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7089 1);
7090 continue;
7091 }
7092
7093 bnx2x_pretend_func(bp, abs_func_id);
7094
7095 bnx2x_pf_disable(bp);
7096 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7097 }
7098 }
7099
7100 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7101 if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
7104 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7105 }
7106
7107 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7108 bnx2x_init_pxp(bp);
7109 bnx2x_set_endianity(bp);
7110 bnx2x_ilt_init_page_size(bp, INITOP_SET);
7111
7112 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7113 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7114
	/* let the HW do it's magic ... */
7116 msleep(100);

	/* finish PXP init */
7118 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7119 if (val != 1) {
7120 BNX2X_ERR("PXP2 CFG failed\n");
7121 return -EBUSY;
7122 }
7123 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7124 if (val != 1) {
7125 BNX2X_ERR("PXP2 RD_INIT failed\n");
7126 return -EBUSY;
7127 }
7128

	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on. This needs to be
	 * done by the first PF that is loaded in a path (i.e. common phase)
	 */
7134 if (!CHIP_IS_E1x(bp)) {
/* In E2 there is a bug in the timers block that can cause function 6 / 7
 * (i.e. vnic3) to start even if it is marked as "scan-off".
 * This occurs when a different function (func2,3) is being marked
 * as "scan-off". Real-life scenario for example: if a driver is being
 * load-unloaded while func6,7 are down. This will cause the timer to access
 * the ilt, translate to a logical address and send a request to read/write.
 * Since the ilt for the function that is down is not valid, this will cause
 * a translation error which is unrecoverable.
 * The Workaround is intended to make sure that when this happens nothing
 * fatal will occur: the first PF driver that loads on a path writes
 * zero+valid to the entire ILT and points the timers ILT range of the
 * 3rd/4th vnic at the entire range, using the dummy TM client below.
 */
7197 struct ilt_client_info ilt_cli;
7198 struct bnx2x_ilt ilt;
7199 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7200 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7201
		/* initialize dummy TM client */
7203 ilt_cli.start = 0;
7204 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7205 ilt_cli.client_num = ILT_CLIENT_TM;
7206

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and his brother are split registers
		 */
7218 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7219 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7220 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7221
7222 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7223 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7224 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7225 }
7226
7227 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7228 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7229
7230 if (!CHIP_IS_E1x(bp)) {
7231 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7232 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7233 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7234
7235 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7236
		/* let the HW do it's magic ... */
7238 do {
7239 msleep(200);
7240 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7241 } while (factor-- && (val != 1));
7242
7243 if (val != 1) {
7244 BNX2X_ERR("ATC_INIT failed\n");
7245 return -EBUSY;
7246 }
7247 }
7248
7249 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7250
7251 bnx2x_iov_init_dmae(bp);
7252
	/* clean the DMAE memory */
7254 bp->dmae_ready = 1;
7255 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7256
7257 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7258
7259 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7260
7261 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7262
7263 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7264
7265 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7266 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7267 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7268 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7269
7270 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7271
	/* QM queues pointers table */
7273 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7274
	/* soft reset pulse */
7276 REG_WR(bp, QM_REG_SOFT_RESET, 1);
7277 REG_WR(bp, QM_REG_SOFT_RESET, 0);
7278
7279 if (CNIC_SUPPORT(bp))
7280 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7281
7282 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7283
7284 if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
7286 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7287
7288 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7289
7290 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7291 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7292
7293 if (!CHIP_IS_E1(bp))
7294 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7295
7296 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7297 if (IS_MF_AFEX(bp)) {
			/* configure that VNTag and VLAN headers must be
			 * received in afex mode
			 */
7301 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7302 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7303 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7304 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7305 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7306 } else {
			/* Bit-map indicating which L2 hdrs may appear
			 * after the basic Ethernet header
			 */
7310 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7311 bp->path_has_ovlan ? 7 : 6);
7312 }
7313 }
7314
7315 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7316 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7317 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7318 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7319
7320 if (!CHIP_IS_E1x(bp)) {
		/* reset VFC memories */
7322 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7323 VFC_MEMORIES_RST_REG_CAM_RST |
7324 VFC_MEMORIES_RST_REG_RAM_RST);
7325 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7326 VFC_MEMORIES_RST_REG_CAM_RST |
7327 VFC_MEMORIES_RST_REG_RAM_RST);
7328
7329 msleep(20);
7330 }
7331
7332 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7333 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7334 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7335 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7336
	/* sync semi rtc */
7338 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7339 0x80000000);
7340 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7341 0x80000000);
7342
7343 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7344 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7345 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7346
7347 if (!CHIP_IS_E1x(bp)) {
7348 if (IS_MF_AFEX(bp)) {
			/* configure headers for AFEX mode */
7352 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7353 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7354 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7355 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7356 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7357 } else {
7358 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7359 bp->path_has_ovlan ? 7 : 6);
7360 }
7361 }
7362
7363 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7364
7365 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7366
7367 if (CNIC_SUPPORT(bp)) {
7368 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7369 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7370 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7371 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7372 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7373 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7374 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7375 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7376 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7377 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7378 }
7379 REG_WR(bp, SRC_REG_SOFT_RST, 0);
7380
7381 if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
7383 dev_alert(&bp->pdev->dev,
7384 "please adjust the size of cdu_context(%ld)\n",
7385 (long)sizeof(union cdu_context));
7386
7387 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7388 val = (4 << 24) + (0 << 12) + 1024;
7389 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7390
7391 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7392 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
7394 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7395
	/* set the thresholds to prevent CFC/CDU race */
7397 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7398
7399 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7400
7401 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7402 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7403
7404 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7405 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7406
	/* Reset PCIE errors for debug */
7408 REG_WR(bp, 0x2814, 0xffffffff);
7409 REG_WR(bp, 0x3820, 0xffffffff);
7410
7411 if (!CHIP_IS_E1x(bp)) {
7412 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7413 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7414 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7415 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7416 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7417 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7418 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7419 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7420 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7421 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7422 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7423 }
7424
7425 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7426 if (!CHIP_IS_E1(bp)) {
		/* in E3 this done in per-port section */
7428 if (!CHIP_IS_E3(bp))
7429 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7430 }
7431 if (CHIP_IS_E1H(bp))
		/* not applicable for E2 (and above ...) */
7433 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7434
7435 if (CHIP_REV_IS_SLOW(bp))
7436 msleep(200);
7437
	/* finish CFC init */
7439 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7440 if (val != 1) {
7441 BNX2X_ERR("CFC LL_INIT failed\n");
7442 return -EBUSY;
7443 }
7444 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7445 if (val != 1) {
7446 BNX2X_ERR("CFC AC_INIT failed\n");
7447 return -EBUSY;
7448 }
7449 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7450 if (val != 1) {
7451 BNX2X_ERR("CFC CAM_INIT failed\n");
7452 return -EBUSY;
7453 }
7454 REG_WR(bp, CFC_REG_DEBUG0, 0);
7455
7456 if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
7459 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7460 val = *bnx2x_sp(bp, wb_data[0]);
7461
		/* do internal memory self test */
7463 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7464 BNX2X_ERR("internal mem self test failed\n");
7465 return -EBUSY;
7466 }
7467 }
7468
7469 bnx2x_setup_fan_failure_detection(bp);
7470
	/* clear PXP2 attentions */
7472 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7473
7474 bnx2x_enable_blocks_attention(bp);
7475 bnx2x_enable_blocks_parity(bp);
7476
7477 if (!BP_NOMCP(bp)) {
7478 if (CHIP_IS_E1x(bp))
7479 bnx2x__common_init_phy(bp);
7480 } else
7481 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7482
7483 if (SHMEM2_HAS(bp, netproc_fw_ver))
7484 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7485
7486 return 0;
7487}
7488
/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */
7494static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7495{
7496 int rc = bnx2x_init_hw_common(bp);
7497
7498 if (rc)
7499 return rc;
7500
	/* In E2 2-PORT mode, same ext phy is used for the two paths */
7502 if (!BP_NOMCP(bp))
7503 bnx2x__common_init_phy(bp);
7504
7505 return 0;
7506}
7507
7508static int bnx2x_init_hw_port(struct bnx2x *bp)
7509{
7510 int port = BP_PORT(bp);
7511 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7512 u32 low, high;
7513 u32 val, reg;
7514
7515 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7516
7517 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7518
7519 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7520 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7521 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7522

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
7528 if (!CHIP_IS_E1x(bp))
7529 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7530
7531 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7532 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7533 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7534 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7535
7536 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7537 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7538 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7539 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7540
	/* QM cid (connection) count */
7542 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7543
7544 if (CNIC_SUPPORT(bp)) {
7545 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7546 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7547 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7548 }
7549
7550 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7551
7552 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7553
7554 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7555
7556 if (IS_MF(bp))
7557 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7558 else if (bp->dev->mtu > 4096) {
7559 if (bp->flags & ONE_PORT_FLAG)
7560 low = 160;
7561 else {
7562 val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
7564 low = 96 + (val/64) +
7565 ((val % 64) ? 1 : 0);
7566 }
7567 } else
7568 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7569 high = low + 56;
7570 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7571 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7572 }
7573
7574 if (CHIP_MODE_IS_4_PORT(bp))
7575 REG_WR(bp, (BP_PORT(bp) ?
7576 BRB1_REG_MAC_GUARANTIED_1 :
7577 BRB1_REG_MAC_GUARANTIED_0), 40);
7578
7579 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7580 if (CHIP_IS_E3B0(bp)) {
7581 if (IS_MF_AFEX(bp)) {
7582
7583 REG_WR(bp, BP_PORT(bp) ?
7584 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7585 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7586 REG_WR(bp, BP_PORT(bp) ?
7587 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7588 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7589 REG_WR(bp, BP_PORT(bp) ?
7590 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7591 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7592 } else {
			/* Ovlan exists only if we are in multi-function +
			 * switch-dependent mode, in switch-independent there
			 * is no ovlan headers
			 */
7597 REG_WR(bp, BP_PORT(bp) ?
7598 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7599 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7600 (bp->path_has_ovlan ? 7 : 6));
7601 }
7602 }
7603
7604 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7605 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7606 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7607 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7608
7609 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7610 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7611 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7612 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7613
7614 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7615 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7616
7617 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7618
7619 if (CHIP_IS_E1x(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
7621 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7622
		/* update threshold */
7624 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
7626 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7627
		/* probe changes */
7629 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7630 udelay(50);
7631 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7632 }
7633
7634 if (CNIC_SUPPORT(bp))
7635 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7636
7637 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7638 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7639
7640 if (CHIP_IS_E1(bp)) {
7641 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7642 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7643 }
7644 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7645
7646 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7647
7648 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7649
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
7653 val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
7655 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7656 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7657
	/* SCPAD_PARITY should NOT trigger close the gates */
7659 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7660 REG_WR(bp, reg,
7661 REG_RD(bp, reg) &
7662 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7663
7664 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7665 REG_WR(bp, reg,
7666 REG_RD(bp, reg) &
7667 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7668
7669 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7670
7671 if (!CHIP_IS_E1x(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		 * basic Ethernet header
		 */
7675 if (IS_MF_AFEX(bp))
7676 REG_WR(bp, BP_PORT(bp) ?
7677 NIG_REG_P1_HDRS_AFTER_BASIC :
7678 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7679 else
7680 REG_WR(bp, BP_PORT(bp) ?
7681 NIG_REG_P1_HDRS_AFTER_BASIC :
7682 NIG_REG_P0_HDRS_AFTER_BASIC,
7683 IS_MF_SD(bp) ? 7 : 6);
7684
7685 if (CHIP_IS_E3(bp))
7686 REG_WR(bp, BP_PORT(bp) ?
7687 NIG_REG_LLH1_MF_MODE :
7688 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7689 }
7690 if (!CHIP_IS_E3(bp))
7691 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7692
7693 if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
7695 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7696 (IS_MF_SD(bp) ? 0x1 : 0x2));
7697
7698 if (!CHIP_IS_E1x(bp)) {
7699 val = 0;
7700 switch (bp->mf_mode) {
7701 case MULTI_FUNCTION_SD:
7702 val = 1;
7703 break;
7704 case MULTI_FUNCTION_SI:
7705 case MULTI_FUNCTION_AFEX:
7706 val = 2;
7707 break;
7708 }
7709
7710 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7711 NIG_REG_LLH0_CLS_TYPE), val);
7712 }
7713 {
7714 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7715 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7716 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7717 }
7718 }
7719
	/* If SPIO5 is set to generate interrupts, enable it for this port */
7721 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7722 if (val & MISC_SPIO_SPIO5) {
7723 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7724 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7725 val = REG_RD(bp, reg_addr);
7726 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7727 REG_WR(bp, reg_addr, val);
7728 }
7729
7730 if (CHIP_IS_E3B0(bp))
7731 bp->flags |= PTP_SUPPORTED;
7732
7733 return 0;
7734}
7735
7736static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7737{
7738 int reg;
7739 u32 wb_write[2];
7740
7741 if (CHIP_IS_E1(bp))
7742 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7743 else
7744 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7745
7746 wb_write[0] = ONCHIP_ADDR1(addr);
7747 wb_write[1] = ONCHIP_ADDR2(addr);
7748 REG_WR_DMAE(bp, reg, wb_write, 2);
7749}
7750
7751void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7752{
7753 u32 data, ctl, cnt = 100;
7754 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7755 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7756 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7757 u32 sb_bit = 1 << (idu_sb_id%32);
7758 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7759 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7760
	/* Not supported in BC mode */
7762 if (CHIP_INT_MODE_IS_BC(bp))
7763 return;
7764
7765 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7766 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7767 IGU_REGULAR_CLEANUP_SET |
7768 IGU_REGULAR_BCLEANUP;
7769
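	/* build the IGU command: cleanup address, function encode, WR type */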
7770 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7771 func_encode << IGU_CTRL_REG_FID_SHIFT |
7772 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7773
7774 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7775 data, igu_addr_data);
7776 REG_WR(bp, igu_addr_data, data);
7777 barrier();
7778 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7779 ctl, igu_addr_ctl);
7780 REG_WR(bp, igu_addr_ctl, ctl);
7781 barrier();
7782
	/* wait for clean up to finish */
7784 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7785 msleep(20);
7786
7787 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7788 DP(NETIF_MSG_HW,
7789 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7790 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7791 }
7792}
7793
7794static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7795{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7797}
7798
7799static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7800{
7801 u32 i, base = FUNC_ILT_BASE(func);
7802 for (i = base; i < base + ILT_PER_FUNC; i++)
7803 bnx2x_ilt_wr(bp, i, 0);
7804}
7805
7806static void bnx2x_init_searcher(struct bnx2x *bp)
7807{
7808 int port = BP_PORT(bp);
7809 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
	/* T1 hash bits value determines the T1 number of entries */
7811 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7812}
7813
7814static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7815{
7816 int rc;
7817 struct bnx2x_func_state_params func_params = {NULL};
7818 struct bnx2x_func_switch_update_params *switch_update_params =
7819 &func_params.params.switch_update;
7820
	/* Prepare parameters for function state transitions */
7822 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7823 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7824
7825 func_params.f_obj = &bp->func_obj;
7826 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7827
	/* Function parameters */
7829 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7830 &switch_update_params->changes);
7831 if (suspend)
7832 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7833 &switch_update_params->changes);
7834
7835 rc = bnx2x_func_state_change(bp, &func_params);
7836
7837 return rc;
7838}
7839
7840static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7841{
7842 int rc, i, port = BP_PORT(bp);
7843 int vlan_en = 0, mac_en[NUM_MACS];
7844
	/* Close input from network */
7846 if (bp->mf_mode == SINGLE_FUNCTION) {
7847 bnx2x_set_rx_filter(&bp->link_params, 0);
7848 } else {
7849 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7850 NIG_REG_LLH0_FUNC_EN);
7851 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7852 NIG_REG_LLH0_FUNC_EN, 0);
7853 for (i = 0; i < NUM_MACS; i++) {
7854 mac_en[i] = REG_RD(bp, port ?
7855 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7856 4 * i) :
7857 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7858 4 * i));
7859 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7860 4 * i) :
7861 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7862 }
7863 }
7864
	/* Close BMC to host */
7866 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7867 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7868

	/* Suspend Tx switching to the PF. Completion of this ramrod
	 * further guarantees that all the packets of that PF / child
	 * VFs in BRB were processed by the Parser, so it is safe to
	 * change the NIC_MODE register.
	 */
7874 rc = bnx2x_func_switch_update(bp, 1);
7875 if (rc) {
7876 BNX2X_ERR("Can't suspend tx-switching!\n");
7877 return rc;
7878 }
7879
	/* Change NIC_MODE register */
7881 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7882
	/* Open input from network */
7884 if (bp->mf_mode == SINGLE_FUNCTION) {
7885 bnx2x_set_rx_filter(&bp->link_params, 1);
7886 } else {
7887 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7888 NIG_REG_LLH0_FUNC_EN, vlan_en);
7889 for (i = 0; i < NUM_MACS; i++) {
7890 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7891 4 * i) :
7892 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7893 mac_en[i]);
7894 }
7895 }
7896
	/* Open BMC to host */
7898 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7899 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7900
	/* Resume Tx switching */
7902 rc = bnx2x_func_switch_update(bp, 0);
7903 if (rc) {
7904 BNX2X_ERR("Can't resume tx-switching!\n");
7905 return rc;
7906 }
7907
7908 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7909 return 0;
7910}
7911
7912int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7913{
7914 int rc;
7915
7916 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7917
7918 if (CONFIGURE_NIC_MODE(bp)) {
		/* Configure searcher as part of function hw init */
7920 bnx2x_init_searcher(bp);
7921
		/* Reset NIC mode */
7923 rc = bnx2x_reset_nic_mode(bp);
7924 if (rc)
7925 BNX2X_ERR("Can't change NIC mode!\n");
7926 return rc;
7927 }
7928
7929 return 0;
7930}
7931
/* previous driver DMAE transaction may have occurred when pre-boot stage
 * ended and boot began, or when kdump kernel was loaded. Either case would
 * invalidate the addresses of the transaction, resulting in was-error bit
 * set in the pci causing all hw-to-host pcie transactions to timeout. If
 * this happened we want to clear the interrupt which detected this from
 * the pglueb and the was-done bit
 */
7939static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7940{
7941 if (!CHIP_IS_E1x(bp))
7942 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7943 1 << BP_ABS_FUNC(bp));
7944}
7945
7946static int bnx2x_init_hw_func(struct bnx2x *bp)
7947{
7948 int port = BP_PORT(bp);
7949 int func = BP_FUNC(bp);
7950 int init_phase = PHASE_PF0 + func;
7951 struct bnx2x_ilt *ilt = BP_ILT(bp);
7952 u16 cdu_ilt_start;
7953 u32 addr, val;
7954 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7955 int i, main_mem_width, rc;
7956
7957 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7958
	/* FLR cleanup */
7960 if (!CHIP_IS_E1x(bp)) {
7961 rc = bnx2x_pf_flr_clnup(bp);
7962 if (rc) {
7963 bnx2x_fw_dump(bp);
7964 return rc;
7965 }
7966 }
7967
	/* set MSI reconfigure capability */
7969 if (bp->common.int_block == INT_BLOCK_HC) {
7970 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7971 val = REG_RD(bp, addr);
7972 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7973 REG_WR(bp, addr, val);
7974 }
7975
7976 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7977 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7978
7979 ilt = BP_ILT(bp);
7980 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7981
7982 if (IS_SRIOV(bp))
7983 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7984 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7985
	/* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
	 * those of the VFs, so start line should be reset
	 */
7989 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7990 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7991 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7992 ilt->lines[cdu_ilt_start + i].page_mapping =
7993 bp->context[i].cxt_mapping;
7994 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7995 }
7996
7997 bnx2x_ilt_init_op(bp, INITOP_SET);
7998
7999 if (!CONFIGURE_NIC_MODE(bp)) {
8000 bnx2x_init_searcher(bp);
8001 REG_WR(bp, PRS_REG_NIC_MODE, 0);
8002 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
8003 } else {
		/* Set NIC mode */
8005 REG_WR(bp, PRS_REG_NIC_MODE, 1);
8006 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
8007 }
8008
8009 if (!CHIP_IS_E1x(bp)) {
8010 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
8011
		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
8015 if (!(bp->flags & USING_MSIX_FLAG))
8016 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
8017

		/* Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
8023 msleep(20);
8024

		/* Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
8029 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
8031 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8032 }
8033
8034 bp->dmae_ready = 1;
8035
8036 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8037
8038 bnx2x_clean_pglue_errors(bp);
8039
8040 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8041 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8042 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8043 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8044 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8045 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8046 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8047 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8048 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8049 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8050 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8051 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8052 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8053
8054 if (!CHIP_IS_E1x(bp))
8055 REG_WR(bp, QM_REG_PF_EN, 1);
8056
8057 if (!CHIP_IS_E1x(bp)) {
8058 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8059 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8060 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8061 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8062 }
8063 bnx2x_init_block(bp, BLOCK_QM, init_phase);
8064
8065 bnx2x_init_block(bp, BLOCK_TM, init_phase);
8066 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8067 REG_WR(bp, DORQ_REG_MODE_ACT, 1);
8068
8069 bnx2x_iov_init_dq(bp);
8070
8071 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8072 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8073 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8074 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8075 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8076 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8077 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8078 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8079 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8080 if (!CHIP_IS_E1x(bp))
8081 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8082
8083 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8084
8085 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8086
8087 if (!CHIP_IS_E1x(bp))
8088 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8089
8090 if (IS_MF(bp)) {
8091 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8092 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8093 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8094 bp->mf_ov);
8095 }
8096 }
8097
8098 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8099
	/* HC init per function */
8101 if (bp->common.int_block == INT_BLOCK_HC) {
8102 if (CHIP_IS_E1H(bp)) {
8103 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8104
8105 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8106 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8107 }
8108 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8109
8110 } else {
8111 int num_segs, sb_idx, prod_offset;
8112
8113 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8114
8115 if (!CHIP_IS_E1x(bp)) {
8116 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8117 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8118 }
8119
8120 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8121
8122 if (!CHIP_IS_E1x(bp)) {
8123 int dsb_idx = 0;

			/* Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last
			 * 20 producers are for the DSB for each PF.
			 * Each PF has five segments (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
8145 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8146 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8147 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8148 prod_offset = (bp->igu_base_sb + sb_idx) *
8149 num_segs;
8150
8151 for (i = 0; i < num_segs; i++) {
8152 addr = IGU_REG_PROD_CONS_MEMORY +
8153 (prod_offset + i) * 4;
8154 REG_WR(bp, addr, 0);
8155 }
				/* send consumer update with value 0 */
8157 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8158 USTORM_ID, 0, IGU_INT_NOP, 1);
8159 bnx2x_igu_clear_sb(bp,
8160 bp->igu_base_sb + sb_idx);
8161 }
8162
			/* default-status-blocks */
8164 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8165 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8166
8167 if (CHIP_MODE_IS_4_PORT(bp))
8168 dsb_idx = BP_FUNC(bp);
8169 else
8170 dsb_idx = BP_VN(bp);
8171
8172 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8173 IGU_BC_BASE_DSB_PROD + dsb_idx :
8174 IGU_NORM_BASE_DSB_PROD + dsb_idx);

			/* igu prods come in chunks of E1HVN_MAX (4) -
			 * does not matter what the current chip mode is
			 */
8180 for (i = 0; i < (num_segs * E1HVN_MAX);
8181 i += E1HVN_MAX) {
8182 addr = IGU_REG_PROD_CONS_MEMORY +
8183 (prod_offset + i)*4;
8184 REG_WR(bp, addr, 0);
8185 }
8186
8187 if (CHIP_INT_MODE_IS_BC(bp)) {
8188 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8189 USTORM_ID, 0, IGU_INT_NOP, 1);
8190 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8191 CSTORM_ID, 0, IGU_INT_NOP, 1);
8192 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8193 XSTORM_ID, 0, IGU_INT_NOP, 1);
8194 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8195 TSTORM_ID, 0, IGU_INT_NOP, 1);
8196 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8197 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8198 } else {
8199 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8200 USTORM_ID, 0, IGU_INT_NOP, 1);
8201 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8202 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8203 }
8204 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8205
			/* !!! These should become driver const once
			 * rf-tool supports split-68 const
			 */
8208 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8209 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8210 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8211 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8212 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8213 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8214 }
8215 }
8216
	/* Reset PCIE errors for debug */
8218 REG_WR(bp, 0x2114, 0xffffffff);
8219 REG_WR(bp, 0x2120, 0xffffffff);
8220
8221 if (CHIP_IS_E1x(bp)) {
8222 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
8223 main_mem_base = HC_REG_MAIN_MEMORY +
8224 BP_PORT(bp) * (main_mem_size * 4);
8225 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8226 main_mem_width = 8;
8227
8228 val = REG_RD(bp, main_mem_prty_clr);
8229 if (val)
8230 DP(NETIF_MSG_HW,
8231 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8232 val);
8233
		/* Clear "false" parity errors in MSI-X table */
8235 for (i = main_mem_base;
8236 i < main_mem_base + main_mem_size * 4;
8237 i += main_mem_width) {
8238 bnx2x_read_dmae(bp, i, main_mem_width / 4);
8239 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8240 i, main_mem_width / 4);
8241 }
8242
8243 REG_RD(bp, main_mem_prty_clr);
8244 }
8245
8246#ifdef BNX2X_STOP_ON_ERROR
	/* enable STORMs SP logging */
8248 REG_WR8(bp, BAR_USTRORM_INTMEM +
8249 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8250 REG_WR8(bp, BAR_TSTRORM_INTMEM +
8251 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8252 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8253 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8254 REG_WR8(bp, BAR_XSTRORM_INTMEM +
8255 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8256#endif
8257
8258 bnx2x_phy_probe(&bp->link_params);
8259
8260 return 0;
8261}
8262
8263void bnx2x_free_mem_cnic(struct bnx2x *bp)
8264{
8265 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8266
8267 if (!CHIP_IS_E1x(bp))
8268 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8269 sizeof(struct host_hc_status_block_e2));
8270 else
8271 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8272 sizeof(struct host_hc_status_block_e1x));
8273
8274 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8275}
8276
8277void bnx2x_free_mem(struct bnx2x *bp)
8278{
8279 int i;
8280
8281 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8282 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8283
8284 if (IS_VF(bp))
8285 return;
8286
8287 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8288 sizeof(struct host_sp_status_block));
8289
8290 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8291 sizeof(struct bnx2x_slowpath));
8292
8293 for (i = 0; i < L2_ILT_LINES(bp); i++)
8294 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8295 bp->context[i].size);
8296 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8297
8298 BNX2X_FREE(bp->ilt->lines);
8299
8300 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8301
8302 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8303 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8304
8305 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8306
8307 bnx2x_iov_free_mem(bp);
8308}
8309
8310int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8311{
8312 if (!CHIP_IS_E1x(bp)) {
		/* size = the status block + ramrod buffers */
8314 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8315 sizeof(struct host_hc_status_block_e2));
8316 if (!bp->cnic_sb.e2_sb)
8317 goto alloc_mem_err;
8318 } else {
8319 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8320 sizeof(struct host_hc_status_block_e1x));
8321 if (!bp->cnic_sb.e1x_sb)
8322 goto alloc_mem_err;
8323 }
8324
8325 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
		/* allocate searcher T2 table, as it wasn't allocated before */
8327 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8328 if (!bp->t2)
8329 goto alloc_mem_err;
8330 }
8331
	/* write address to which L5 driver should insert its values */
8333 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8334 &bp->slowpath->drv_info_to_mcp;
8335
8336 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8337 goto alloc_mem_err;
8338
8339 return 0;
8340
8341alloc_mem_err:
8342 bnx2x_free_mem_cnic(bp);
8343 BNX2X_ERR("Can't allocate memory\n");
8344 return -ENOMEM;
8345}
8346
8347int bnx2x_alloc_mem(struct bnx2x *bp)
8348{
8349 int i, allocated, context_size;
8350
8351 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
		/* allocate searcher T2 table */
8353 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8354 if (!bp->t2)
8355 goto alloc_mem_err;
8356 }
8357
8358 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8359 sizeof(struct host_sp_status_block));
8360 if (!bp->def_status_blk)
8361 goto alloc_mem_err;
8362
8363 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8364 sizeof(struct bnx2x_slowpath));
8365 if (!bp->slowpath)
8366 goto alloc_mem_err;
8367
	/* Allocate memory for CDU context.
	 * This memory is allocated separately and not in the generic ILT
	 * functions because CDU differs in few aspects:
	 * multiple entities (regular L2, CNIC, SRIOV) allocate context
	 * memory, each controlling its own ILT lines, so the context is
	 * split here into CDU_ILT_PAGE_SZ chunks that map 1:1 to ILT lines.
	 */
8381 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8382
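	/* carve the context into ILT-page-sized pieces; the last piece
	 * may be smaller than a full page
	 */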
8383 for (i = 0, allocated = 0; allocated < context_size; i++) {
8384 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8385 (context_size - allocated));
8386 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8387 bp->context[i].size);
8388 if (!bp->context[i].vcxt)
8389 goto alloc_mem_err;
8390 allocated += bp->context[i].size;
8391 }
8392 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8393 GFP_KERNEL);
8394 if (!bp->ilt->lines)
8395 goto alloc_mem_err;
8396
8397 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8398 goto alloc_mem_err;
8399
8400 if (bnx2x_iov_alloc_mem(bp))
8401 goto alloc_mem_err;
8402
	/* Slow path ring */
8404 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8405 if (!bp->spq)
8406 goto alloc_mem_err;
8407
	/* EQ */
8409 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8410 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8411 if (!bp->eq_ring)
8412 goto alloc_mem_err;
8413
8414 return 0;
8415
8416alloc_mem_err:
8417 bnx2x_free_mem(bp);
8418 BNX2X_ERR("Can't allocate memory\n");
8419 return -ENOMEM;
8420}
8421
/*
 * Init service functions
 */
8426int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8427 struct bnx2x_vlan_mac_obj *obj, bool set,
8428 int mac_type, unsigned long *ramrod_flags)
8429{
8430 int rc;
8431 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8432
8433 memset(&ramrod_param, 0, sizeof(ramrod_param));
8434
	/* Fill general parameters */
8436 ramrod_param.vlan_mac_obj = obj;
8437 ramrod_param.ramrod_flags = *ramrod_flags;
8438
	/* Fill a user request section if needed */
8440 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8441 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8442
8443 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8444
		/* Set the command: ADD or DEL */
8446 if (set)
8447 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8448 else
8449 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8450 }
8451
8452 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8453
8454 if (rc == -EEXIST) {
8455 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
		/* do not treat adding same MAC as error */
8457 rc = 0;
8458 } else if (rc < 0)
8459 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8460
8461 return rc;
8462}
8463
8464int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8465 struct bnx2x_vlan_mac_obj *obj, bool set,
8466 unsigned long *ramrod_flags)
8467{
8468 int rc;
8469 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8470
8471 memset(&ramrod_param, 0, sizeof(ramrod_param));
8472
	/* Fill general parameters */
8474 ramrod_param.vlan_mac_obj = obj;
8475 ramrod_param.ramrod_flags = *ramrod_flags;
8476
	/* Fill a user request section if needed */
8478 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8479 ramrod_param.user_req.u.vlan.vlan = vlan;
8480 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
		/* Set the command: ADD or DEL */
8482 if (set)
8483 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8484 else
8485 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8486 }
8487
8488 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8489
8490 if (rc == -EEXIST) {
		/* do not treat adding same vlan as error */
8492 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8493 rc = 0;
8494 } else if (rc < 0) {
8495 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8496 }
8497
8498 return rc;
8499}
8500
8501void bnx2x_clear_vlan_info(struct bnx2x *bp)
8502{
8503 struct bnx2x_vlan_entry *vlan;
8504
	/* Mark that hw forgot all entries */
8506 list_for_each_entry(vlan, &bp->vlan_reg, link)
8507 vlan->hw = false;
8508
8509 bp->vlan_cnt = 0;
8510}
8511
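/* Delete all currently configured VLANs from the single VLAN-MAC object
 * and mark the driver's VLAN bookkeeping as empty.
 */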
8512static int bnx2x_del_all_vlans(struct bnx2x *bp)
8513{
8514 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8515 unsigned long ramrod_flags = 0, vlan_flags = 0;
8516 int rc;
8517
8518 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8519 __set_bit(BNX2X_VLAN, &vlan_flags);
8520 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
8521 if (rc)
8522 return rc;
8523
8524 bnx2x_clear_vlan_info(bp);
8525
8526 return 0;
8527}
8528
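/**
 * bnx2x_del_all_macs - delete all configured MACs for the specific object
 *
 * @bp:			driver handle
 * @mac_obj:		MAC object handle
 * @mac_type:		type of the MACs to delete (e.g. BNX2X_ETH_MAC)
 * @wait_for_comp:	if 'true' block until completion
 *
 * Removes all MACs of the specific type (e.g. ETH, UC list).
 */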
8529int bnx2x_del_all_macs(struct bnx2x *bp,
8530 struct bnx2x_vlan_mac_obj *mac_obj,
8531 int mac_type, bool wait_for_comp)
8532{
8533 int rc;
8534 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8535
	/* Wait for completion of requested */
8537 if (wait_for_comp)
8538 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8539
	/* Set the mac type of addresses we are going to delete */
8541 __set_bit(mac_type, &vlan_mac_flags);
8542
8543 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8544 if (rc < 0)
8545 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8546
8547 return rc;
8548}
8549
8550int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8551{
8552 if (IS_PF(bp)) {
8553 unsigned long ramrod_flags = 0;
8554
8555 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8556 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8557 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8558 &bp->sp_objs->mac_obj, set,
8559 BNX2X_ETH_MAC, &ramrod_flags);
8560 } else {
8561 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8562 bp->fp->index, set);
8563 }
8564}
8565
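/* Set up the leading queue: directly on the PF, or through the VF-PF
 * channel when running as a VF.
 */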
8566int bnx2x_setup_leading(struct bnx2x *bp)
8567{
8568 if (IS_PF(bp))
8569 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8570 else
8571 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8572}
8573
/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 */
8581int bnx2x_set_int_mode(struct bnx2x *bp)
8582{
8583 int rc = 0;
8584
8585 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8586 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
8587 return -EINVAL;
8588 }
8589
8590 switch (int_mode) {
8591 case BNX2X_INT_MODE_MSIX:
		/* attempt to enable msi-x */
8593 rc = bnx2x_enable_msix(bp);
8594
		/* msix attained */
8596 if (!rc)
8597 return 0;
8598
		/* vfs use only msix */
8600 if (rc && IS_VF(bp))
8601 return rc;
8602
		/* failed to enable multiple MSI-X */
8604 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8605 bp->num_queues,
8606 1 + bp->num_cnic_queues);
8607
		/* fall through */
8609 case BNX2X_INT_MODE_MSI:
8610 bnx2x_enable_msi(bp);
8611
		/* fall through */
8613 case BNX2X_INT_MODE_INTX:
8614 bp->num_ethernet_queues = 1;
8615 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8616 BNX2X_DEV_INFO("set number of queues to 1\n");
8617 break;
8618 default:
8619 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8620 return -EINVAL;
8621 }
8622 return 0;
8623}
8624
/* must be called prior to any HW initializations */
8626static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8627{
8628 if (IS_SRIOV(bp))
8629 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8630 return L2_ILT_LINES(bp);
8631}
8632
8633void bnx2x_ilt_set_info(struct bnx2x *bp)
8634{
8635 struct ilt_client_info *ilt_client;
8636 struct bnx2x_ilt *ilt = BP_ILT(bp);
8637 u16 line = 0;
8638
8639 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8640 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8641
	/* CDU */
8643 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8644 ilt_client->client_num = ILT_CLIENT_CDU;
8645 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8646 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8647 ilt_client->start = line;
8648 line += bnx2x_cid_ilt_lines(bp);
8649
8650 if (CNIC_SUPPORT(bp))
8651 line += CNIC_ILT_LINES;
8652 ilt_client->end = line - 1;
8653
8654 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8655 ilt_client->start,
8656 ilt_client->end,
8657 ilt_client->page_size,
8658 ilt_client->flags,
8659 ilog2(ilt_client->page_size >> 12));
8660
	/* QM */
8662 if (QM_INIT(bp->qm_cid_count)) {
8663 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8664 ilt_client->client_num = ILT_CLIENT_QM;
8665 ilt_client->page_size = QM_ILT_PAGE_SZ;
8666 ilt_client->flags = 0;
8667 ilt_client->start = line;
8668
		/* 4 bytes for each cid */
8670 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8671 QM_ILT_PAGE_SZ);
8672
8673 ilt_client->end = line - 1;
8674
8675 DP(NETIF_MSG_IFUP,
8676 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8677 ilt_client->start,
8678 ilt_client->end,
8679 ilt_client->page_size,
8680 ilt_client->flags,
8681 ilog2(ilt_client->page_size >> 12));
8682 }
8683
8684 if (CNIC_SUPPORT(bp)) {
		/* SRC */
8686 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8687 ilt_client->client_num = ILT_CLIENT_SRC;
8688 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8689 ilt_client->flags = 0;
8690 ilt_client->start = line;
8691 line += SRC_ILT_LINES;
8692 ilt_client->end = line - 1;
8693
8694 DP(NETIF_MSG_IFUP,
8695 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8696 ilt_client->start,
8697 ilt_client->end,
8698 ilt_client->page_size,
8699 ilt_client->flags,
8700 ilog2(ilt_client->page_size >> 12));
8701
		/* TM */
8703 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8704 ilt_client->client_num = ILT_CLIENT_TM;
8705 ilt_client->page_size = TM_ILT_PAGE_SZ;
8706 ilt_client->flags = 0;
8707 ilt_client->start = line;
8708 line += TM_ILT_LINES;
8709 ilt_client->end = line - 1;
8710
8711 DP(NETIF_MSG_IFUP,
8712 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8713 ilt_client->start,
8714 ilt_client->end,
8715 ilt_client->page_size,
8716 ilt_client->flags,
8717 ilog2(ilt_client->page_size >> 12));
8718 }
8719
8720 BUG_ON(line > ILT_MAX_LINES);
8721}
8722
8723
/**
 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @init_params: pointer to parameters structure
 *
 * parameters configured:
 *      - HC configuration
 *      - Queue's CDU context
 */
8734static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8735 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8736{
8737 u8 cos;
8738 int cxt_index, cxt_offset;
8739
	/* FCoE Queue uses Default SB, thus has no HC capabilities */
8741 if (!IS_FCOE_FP(fp)) {
8742 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8743 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8744
		/* If HC is supported, enable host coalescing in the
		 * transition to INIT state.
		 */
8748 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8749 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8750
		/* HC rate */
8752 init_params->rx.hc_rate = bp->rx_ticks ?
8753 (1000000 / bp->rx_ticks) : 0;
8754 init_params->tx.hc_rate = bp->tx_ticks ?
8755 (1000000 / bp->tx_ticks) : 0;
8756
		/* FW SB ID */
8758 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8759 fp->fw_sb_id;
8760
		/* CQ index among the SB indices: FCoE clients use the default
		 * SB, therefore it's different.
		 */
8765 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8766 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8767 }
8768
	/* set maximum number of COSs supported by this queue */
8770 init_params->max_cos = fp->max_cos;
8771
8772 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8773 fp->index, init_params->max_cos);
8774
	/* set the context pointers queue object */
8776 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8777 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8778 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8779 ILT_PAGE_CIDS);
8780 init_params->cxts[cos] =
8781 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8782 }
8783}
8784
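/* Prepare the parameters of a SETUP_TX_ONLY transition for the tx-only
 * connection with index @tx_index and send the ramrod.
 */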
8785static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8786 struct bnx2x_queue_state_params *q_params,
8787 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8788 int tx_index, bool leading)
8789{
8790 memset(tx_only_params, 0, sizeof(*tx_only_params));
8791
	/* Set the command */
8793 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8794
	/* Set tx-only QUEUE flags: don't zero statistics */
8796 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8797
	/* choose the index of the cid to send the slow path on */
8799 tx_only_params->cid_index = tx_index;
8800
	/* Set general SETUP parameters for this queue */
8802 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8803
	/* Set Tx TX_ONLY SETUP parameters */
8805 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8806
8807 DP(NETIF_MSG_IFUP,
8808 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8809 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8810 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8811 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8812
	/* Send the ramrod */
8814 return bnx2x_queue_state_change(bp, q_params);
8815}
8816
/**
 * bnx2x_setup_queue - setup queue
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	is leading
 *
 * This function performs 2 steps in a Queue state machine
 *      actually: 1) RESET->INIT 2) INIT->SETUP
 */
8828int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8829 bool leading)
8830{
8831 struct bnx2x_queue_state_params q_params = {NULL};
8832 struct bnx2x_queue_setup_params *setup_params =
8833 &q_params.params.setup;
8834 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8835 &q_params.params.tx_only;
8836 int rc;
8837 u8 tx_index;
8838
8839 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8840
	/* reset IGU state skip FCoE L2 queue */
8842 if (!IS_FCOE_FP(fp))
8843 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8844 IGU_INT_ENABLE, 0);
8845
8846 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
8848 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8849
	/* Prepare the INIT parameters */
8851 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8852
	/* Set the command */
8854 q_params.cmd = BNX2X_Q_CMD_INIT;
8855
	/* Change the state to INIT */
8857 rc = bnx2x_queue_state_change(bp, &q_params);
8858 if (rc) {
8859 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8860 return rc;
8861 }
8862
8863 DP(NETIF_MSG_IFUP, "init complete\n");
8864
	/* Now move the Queue to the SETUP state */
8866 memset(setup_params, 0, sizeof(*setup_params));
8867
	/* Set QUEUE flags */
8869 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8870
	/* Set general SETUP parameters */
8872 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8873 FIRST_TX_COS_INDEX);
8874
8875 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8876 &setup_params->rxq_params);
8877
8878 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8879 FIRST_TX_COS_INDEX);
8880
	/* Set the command */
8882 q_params.cmd = BNX2X_Q_CMD_SETUP;
8883
8884 if (IS_FCOE_FP(fp))
8885 bp->fcoe_init = true;
8886
	/* Change the state to SETUP */
8888 rc = bnx2x_queue_state_change(bp, &q_params);
8889 if (rc) {
8890 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8891 return rc;
8892 }
8893
	/* loop through the relevant tx-only indices */
8895 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8896 tx_index < fp->max_cos;
8897 tx_index++) {
8898
		/* prepare and send tx-only ramrod */
8900 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8901 tx_only_params, tx_index, leading);
8902 if (rc) {
8903 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8904 fp->index, tx_index);
8905 return rc;
8906 }
8907 }
8908
8909 return rc;
8910}
8911
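/* Tear a queue down: terminate and CFC-delete every tx-only connection,
 * then halt, terminate and CFC-delete the primary connection.
 */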
8912static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8913{
8914 struct bnx2x_fastpath *fp = &bp->fp[index];
8915 struct bnx2x_fp_txdata *txdata;
8916 struct bnx2x_queue_state_params q_params = {NULL};
8917 int rc, tx_index;
8918
8919 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8920
8921 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
8923 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8924
	/* close tx-only connections */
8926 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8927 tx_index < fp->max_cos;
8928 tx_index++){
8929
		/* ascertain this is a normal queue */
8931 txdata = fp->txdata_ptr[tx_index];
8932
8933 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8934 txdata->txq_index);
8935
		/* send halt terminate on tx-only connection */
8937 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8938 memset(&q_params.params.terminate, 0,
8939 sizeof(q_params.params.terminate));
8940 q_params.params.terminate.cid_index = tx_index;
8941
8942 rc = bnx2x_queue_state_change(bp, &q_params);
8943 if (rc)
8944 return rc;
8945
		/* send cfc del on tx-only connection */
8947 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8948 memset(&q_params.params.cfc_del, 0,
8949 sizeof(q_params.params.cfc_del));
8950 q_params.params.cfc_del.cid_index = tx_index;
8951 rc = bnx2x_queue_state_change(bp, &q_params);
8952 if (rc)
8953 return rc;
8954 }
8955
	/* Stop the primary connection: halt the connection */
8957 q_params.cmd = BNX2X_Q_CMD_HALT;
8958 rc = bnx2x_queue_state_change(bp, &q_params);
8959 if (rc)
8960 return rc;
8961
	/* ...terminate the connection */
8963 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8964 memset(&q_params.params.terminate, 0,
8965 sizeof(q_params.params.terminate));
8966 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8967 rc = bnx2x_queue_state_change(bp, &q_params);
8968 if (rc)
8969 return rc;

	/* ...delete cfc entry */
8971 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8972 memset(&q_params.params.cfc_del, 0,
8973 sizeof(q_params.params.cfc_del));
8974 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8975 return bnx2x_queue_state_change(bp, &q_params);
8976}
8977
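/* Shut the function down in FW and HW: disable it in all storms, disable
 * its status blocks, clear its SPQ data and ILT mapping and, on non-E1x
 * chips, disable the PF itself.
 */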
8978static void bnx2x_reset_func(struct bnx2x *bp)
8979{
8980 int port = BP_PORT(bp);
8981 int func = BP_FUNC(bp);
8982 int i;
8983
	/* Disable the function in the FW */
8985 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8986 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8987 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8988 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8989
	/* FP SBs */
8991 for_each_eth_queue(bp, i) {
8992 struct bnx2x_fastpath *fp = &bp->fp[i];
8993 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8994 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8995 SB_DISABLED);
8996 }
8997
8998 if (CNIC_LOADED(bp))
		/* CNIC SB */
9000 REG_WR8(bp, BAR_CSTRORM_INTMEM +
9001 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
9002 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
9003
	/* SP SB */
9005 REG_WR8(bp, BAR_CSTRORM_INTMEM +
9006 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
9007 SB_DISABLED);
9008
	/* Clear the function's SPQ data in internal memory, word by word */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_SPQ_DATA_OFFSET(func) + i * 4, 0);
9012
	/* Configure IGU */
9014 if (bp->common.int_block == INT_BLOCK_HC) {
9015 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
9016 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
9017 } else {
9018 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
9019 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
9020 }
9021
9022 if (CNIC_LOADED(bp)) {
		/* Disable Timer scan */
9024 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9025
		/* Wait for at least 10ms and up to 2 seconds for the timers
		 * scan to complete
		 */
9029 for (i = 0; i < 200; i++) {
9030 usleep_range(10000, 20000);
9031 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
9032 break;
9033 }
9034 }

	/* Clear ILT */
9036 bnx2x_clear_func_ilt(bp, func);
9037
	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
9041 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9042 struct ilt_client_info ilt_cli;
9043
9044 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
9045 ilt_cli.start = 0;
9046 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9047 ilt_cli.client_num = ILT_CLIENT_TM;
9048
9049 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9050 }
9051
	/* this assumes that reset_port() was called before reset_func() */
9053 if (!CHIP_IS_E1x(bp))
9054 bnx2x_pf_disable(bp);
9055
9056 bp->dmae_ready = 0;
9057}
9058
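/* Reset the port: reset the physical link, stop Rx towards the BRB and
 * verify that the BRB has drained.
 */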
9059static void bnx2x_reset_port(struct bnx2x *bp)
9060{
9061 int port = BP_PORT(bp);
9062 u32 val;
9063
	/* Reset physical Link */
9065 bnx2x__link_reset(bp);
9066
9067 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9068
	/* Do not rcv packets to BRB */
9070 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
9072 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9073 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9074
	/* Configure AEU */
9076 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9077
9078 msleep(100);
	/* Check for BRB port occupancy */
9080 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9081 if (val)
9082 DP(NETIF_MSG_IFDOWN,
9083 "BRB1 is not empty %d blocks are occupied\n", val);
9084
	/* TODO: Close Doorbell port? */
9086}
9087
9088static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9089{
9090 struct bnx2x_func_state_params func_params = {NULL};
9091
	/* Prepare parameters for function state transitions */
9093 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9094
9095 func_params.f_obj = &bp->func_obj;
9096 func_params.cmd = BNX2X_F_CMD_HW_RESET;
9097
9098 func_params.params.hw_init.load_phase = load_code;
9099
9100 return bnx2x_func_state_change(bp, &func_params);
9101}
9102
9103static int bnx2x_func_stop(struct bnx2x *bp)
9104{
9105 struct bnx2x_func_state_params func_params = {NULL};
9106 int rc;
9107
	/* Prepare parameters for function state transitions */
9109 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9110 func_params.f_obj = &bp->func_obj;
9111 func_params.cmd = BNX2X_F_CMD_STOP;
9112
	/* Try to stop the function the 'good way'. If it fails (in case
	 * of a parity error during bnx2x_chip_cleanup()) and we are
	 * not in a debug mode, perform a state transaction in order to
	 * enable further HW_RESET transaction.
	 */
9119 rc = bnx2x_func_state_change(bp, &func_params);
9120 if (rc) {
9121#ifdef BNX2X_STOP_ON_ERROR
9122 return rc;
9123#else
9124 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9125 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9126 return bnx2x_func_state_change(bp, &func_params);
9127#endif
9128 }
9129
9130 return 0;
9131}
9132
9133
/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNCTION.
 */
9141u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9142{
9143 u32 reset_code = 0;
9144 int port = BP_PORT(bp);
9145
	/* Select the UNLOAD request mode */
9147 if (unload_mode == UNLOAD_NORMAL)
9148 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9149
9150 else if (bp->flags & NO_WOL_FLAG)
9151 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9152
9153 else if (bp->wol) {
9154 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9155 u8 *mac_addr = bp->dev->dev_addr;
9156 struct pci_dev *pdev = bp->pdev;
9157 u32 val;
9158 u16 pmc;
9159
		/* The mac address is written to entries 1-4 to
		 * preserve entry 0 which is used by the PMF
		 */
9163 u8 entry = (BP_VN(bp) + 1)*8;
9164
9165 val = (mac_addr[0] << 8) | mac_addr[1];
9166 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9167
9168 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9169 (mac_addr[4] << 8) | mac_addr[5];
9170 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9171
		/* Enable the PME and clear the status */
9173 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9174 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9175 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9176
9177 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9178
9179 } else
9180 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9181
	/* Send the request to the MCP */
9183 if (!BP_NOMCP(bp))
9184 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9185 else {
9186 int path = BP_PATH(bp);
9187
9188 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
9189 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9190 bnx2x_load_count[path][2]);
9191 bnx2x_load_count[path][0]--;
9192 bnx2x_load_count[path][1 + port]--;
9193 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
9194 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9195 bnx2x_load_count[path][2]);
9196 if (bnx2x_load_count[path][0] == 0)
9197 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9198 else if (bnx2x_load_count[path][1 + port] == 0)
9199 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9200 else
9201 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9202 }
9203
9204 return reset_code;
9205}
9206
9207
/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */
9213void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9214{
9215 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9216
	/* Report UNLOAD_DONE to MCP */
9218 if (!BP_NOMCP(bp))
9219 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9220}
9221
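/* Wait until the function state-machine returns to the STARTED state.
 * IRQs and the slow-path queue are synced first so that a DCBx
 * STARTED-->TX_STOPPED transition has a chance to complete.
 */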
9222static int bnx2x_func_wait_started(struct bnx2x *bp)
9223{
9224 int tout = 50;
9225 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9226
9227 if (!bp->port.pmf)
9228 return 0;
9229
	/* (assumption: No Attention from MCP at this stage)
	 * PMF is probably in the middle of a TX disable/enable transaction:
	 * 1. Sync IRQ for default SB
	 * 2. Sync SP queue - this guarantees that attention handling started
	 * 3. Wait until the TX disable/enable transaction completes
	 *
	 * 1+2 guarantee that if a DCBx attention was scheduled it already
	 * changed the pending bit of the transaction from STARTED to
	 * TX_STOPPED; if we already received completion for the transaction
	 * the state is TX_STOPPED. The state will return to STARTED after
	 * completion of the TX_STOPPED-->STARTED transition.
	 */
9245 if (msix)
9246 synchronize_irq(bp->msix_table[0].vector);
9247 else
9248 synchronize_irq(bp->pdev->irq);
9249
9250 flush_workqueue(bnx2x_wq);
9251 flush_workqueue(bnx2x_iov_wq);
9252
9253 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9254 BNX2X_F_STATE_STARTED && tout--)
9255 msleep(20);
9256
9257 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9258 BNX2X_F_STATE_STARTED) {
9259#ifdef BNX2X_STOP_ON_ERROR
9260 BNX2X_ERR("Wrong function state\n");
9261 return -EBUSY;
9262#else
		/* Failed to complete the transaction in a "good way"
		 * Force both transactions with CLR bit
		 */
9267 struct bnx2x_func_state_params func_params = {NULL};
9268
9269 DP(NETIF_MSG_IFDOWN,
9270 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9271
9272 func_params.f_obj = &bp->func_obj;
9273 __set_bit(RAMROD_DRV_CLR_ONLY,
9274 &func_params.ramrod_flags);
9275
		/* STARTED-->TX_STOPPED */
9277 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9278 bnx2x_func_state_change(bp, &func_params);
9279
		/* TX_STOPPED-->STARTED */
9281 func_params.cmd = BNX2X_F_CMD_TX_START;
9282 return bnx2x_func_state_change(bp, &func_params);
9283#endif
9284 }
9285
9286 return 0;
9287}
9288
9289static void bnx2x_disable_ptp(struct bnx2x *bp)
9290{
9291 int port = BP_PORT(bp);
9292
	/* Disable sending PTP packets to host */
9294 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9295 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9296
	/* Reset PTP event detection rules */
9298 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9299 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9300 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9301 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9302 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9303 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9304 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9305 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9306
	/* Disable the PTP feature */
9308 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9309 NIG_REG_P0_PTP_EN, 0x0);
9310}
9311
/* Called during unload, to stop PTP-related stuff */
9313static void bnx2x_stop_ptp(struct bnx2x *bp)
9314{
	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */
9318 cancel_work_sync(&bp->ptp_task);
9319
9320 if (bp->ptp_tx_skb) {
9321 dev_kfree_skb_any(bp->ptp_tx_skb);
9322 bp->ptp_tx_skb = NULL;
9323 }
9324
	/* Disable PTP in HW */
9326 bnx2x_disable_ptp(bp);
9327
9328 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9329}
9330
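/* Orderly chip shutdown: drain Tx queues, remove MAC/VLAN/multicast
 * configuration, stop all queues and the function itself, then reset the
 * HW and report UNLOAD_DONE to the MCP.
 */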
9331void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9332{
9333 int port = BP_PORT(bp);
9334 int i, rc = 0;
9335 u8 cos;
9336 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9337 u32 reset_code;
9338
	/* Wait until tx fastpath tasks complete */
9340 for_each_tx_queue(bp, i) {
9341 struct bnx2x_fastpath *fp = &bp->fp[i];
9342
9343 for_each_cos_in_tx_queue(fp, cos)
9344 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9345#ifdef BNX2X_STOP_ON_ERROR
9346 if (rc)
9347 return;
9348#endif
9349 }
9350
	/* Give HW time to discard old tx messages */
9352 usleep_range(1000, 2000);
9353
	/* Clean all ETH MACs */
9355 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9356 false);
9357 if (rc < 0)
9358 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9359
	/* Clean up UC list */
9361 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9362 true);
9363 if (rc < 0)
9364 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9365 rc);
9366
	/* The whole *vlan_obj structure may be not initialized if VLAN
	 * filtering offload is not supported by hardware. Currently this is
	 * true for all hardware covered by CHIP_IS_E1x().
	 */
9371 if (!CHIP_IS_E1x(bp)) {
		/* Remove all currently configured VLANs */
9373 rc = bnx2x_del_all_vlans(bp);
9374 if (rc < 0)
9375 BNX2X_ERR("Failed to delete all VLANs\n");
9376 }
9377
	/* Disable LLH */
9379 if (!CHIP_IS_E1(bp))
9380 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9381
	/* Set "drop all" (stop Rx).
	 * We need to take a netif_addr_lock() here in order to prevent
	 * a race between the completion code and this code.
	 */
9386 netif_addr_lock_bh(bp->dev);
	/* Schedule the rx_mode command */
9388 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9389 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9390 else if (bp->slowpath)
9391 bnx2x_set_storm_rx_mode(bp);
9392
	/* Cleanup multicast configuration */
9394 rparam.mcast_obj = &bp->mcast_obj;
9395 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9396 if (rc < 0)
9397 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9398
9399 netif_addr_unlock_bh(bp->dev);
9400
9401 bnx2x_iov_chip_cleanup(bp);
9402
	/* Send the UNLOAD_REQUEST to the MCP. This will return the
	 * kind of reset - FUNC, PORT or COMMON - this function should
	 * perform.
	 */
9408 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9409
	/* (assumption: No Attention from MCP at this stage)
	 * PMF is probably in the middle of a TX disable/enable transaction
	 */
9414 rc = bnx2x_func_wait_started(bp);
9415 if (rc) {
9416 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9417#ifdef BNX2X_STOP_ON_ERROR
9418 return;
9419#endif
9420 }
9421
	/* Close multi and leading connections
	 * Completions for ramrods are collected in a synchronous way
	 */
9425 for_each_eth_queue(bp, i)
9426 if (bnx2x_stop_queue(bp, i))
9427#ifdef BNX2X_STOP_ON_ERROR
9428 return;
9429#else
9430 goto unload_error;
9431#endif
9432
9433 if (CNIC_LOADED(bp)) {
9434 for_each_cnic_queue(bp, i)
9435 if (bnx2x_stop_queue(bp, i))
9436#ifdef BNX2X_STOP_ON_ERROR
9437 return;
9438#else
9439 goto unload_error;
9440#endif
9441 }
9442
	/* If SP settings didn't get completed so far - something
	 * very wrong has happened.
	 */
9446 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9447 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9448
9449#ifndef BNX2X_STOP_ON_ERROR
9450unload_error:
9451#endif
9452 rc = bnx2x_func_stop(bp);
9453 if (rc) {
9454 BNX2X_ERR("Function stop failed!\n");
9455#ifdef BNX2X_STOP_ON_ERROR
9456 return;
9457#endif
9458 }
9459
	/* stop_ptp should be after the Tx queues are drained to prevent
	 * additional scheduling to the cancelled PTP work queue. It should
	 * also be after the function stop ramrod is sent, since as part of
	 * this ramrod FW accesses PTP registers.
	 */
9465 if (bp->flags & PTP_SUPPORTED) {
9466 bnx2x_stop_ptp(bp);
9467 if (bp->ptp_clock) {
9468 ptp_clock_unregister(bp->ptp_clock);
9469 bp->ptp_clock = NULL;
9470 }
9471 }
9472
	/* Disable HW interrupts, NAPI */
9474 bnx2x_netif_stop(bp, 1);
	/* Delete all NAPI objects */
9476 bnx2x_del_all_napi(bp);
9477 if (CNIC_LOADED(bp))
9478 bnx2x_del_all_napi_cnic(bp);
9479
	/* Release IRQs */
9481 bnx2x_free_irq(bp);
9482
	/* Reset the chip, unless PCI function is offline. If we reach this
	 * point following a PCI error handling, it means device is really
	 * in a bad state and we're about to remove it, so resetting the chip
	 * is not a good idea.
	 */
9488 if (!pci_channel_offline(bp->pdev)) {
9489 rc = bnx2x_reset_hw(bp, reset_code);
9490 if (rc)
9491 BNX2X_ERR("HW_RESET failed\n");
9492 }
9493
	/* Report UNLOAD_DONE to MCP */
9495 bnx2x_send_unload_done(bp, keep_link);
9496}
9497
9498void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9499{
9500 u32 val;
9501
9502 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9503
9504 if (CHIP_IS_E1(bp)) {
9505 int port = BP_PORT(bp);
9506 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9507 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9508
9509 val = REG_RD(bp, addr);
9510 val &= ~(0x300);
9511 REG_WR(bp, addr, val);
9512 } else {
9513 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9514 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9515 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9516 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9517 }
9518}
9519
/* Close gates #2, #3 and #4 */
9521static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9522{
9523 u32 val;
9524
	/* Gates #2 and #4a are closed/opened for "not E1" only */
9526 if (!CHIP_IS_E1(bp)) {
		/* #4 */
9528 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
		/* #2 */
9530 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9531 }
9532
	/* #3 */
9534 if (CHIP_IS_E1x(bp)) {
		/* Prevent interrupts from HC on both ports */
9536 val = REG_RD(bp, HC_REG_CONFIG_1);
9537 REG_WR(bp, HC_REG_CONFIG_1,
9538 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9539 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9540
9541 val = REG_RD(bp, HC_REG_CONFIG_0);
9542 REG_WR(bp, HC_REG_CONFIG_0,
9543 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9544 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9545 } else {
		/* Prevent incoming interrupts in IGU */
9547 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9548
9549 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9550 (!close) ?
9551 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9552 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9553 }
9554
9555 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9556 close ? "closing" : "opening");
9557}
9558
9559#define SHARED_MF_CLP_MAGIC 0x80000000
9560
9561static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9562{
	/* Save the current 'magic' bit value and then set it */
9564 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9565 *magic_val = val & SHARED_MF_CLP_MAGIC;
9566 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9567}
9568
9569
/**
 * bnx2x_clp_reset_done - restore the value of the 'magic' bit.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the 'magic' bit
 */
9575static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9576{
	/* Restore the 'magic' bit value */
9578 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9579 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9580 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9581}
9582
/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the 'magic' bit
 *
 * Takes care of CLP configurations.
 */
9591static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9592{
9593 u32 shmem;
9594 u32 validity_offset;
9595
9596 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9597
	/* Set the 'magic' bit in order to preserve the MF config */
9599 if (!CHIP_IS_E1(bp))
9600 bnx2x_clp_reset_prep(bp, magic_val);
9601
	/* Get shmem offset */
9603 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9604 validity_offset =
9605 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9606
	/* Clear validity map flags */
9608 if (shmem > 0)
9609 REG_WR(bp, shmem + validity_offset, 0);
9610}
9611
9612#define MCP_TIMEOUT 5000
9613#define MCP_ONE_TIMEOUT 100
9614
/**
 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
 *
 * @bp:	driver handle
 */
9620static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9621{
	/* special handling for emulation and FPGA,
	 * wait 10 times longer
	 */
9624 if (CHIP_REV_IS_SLOW(bp))
9625 msleep(MCP_ONE_TIMEOUT*10);
9626 else
9627 msleep(MCP_ONE_TIMEOUT);
9628}
9629
9630
/* initialize shmem_base and wait for the validity signature to appear */
9633static int bnx2x_init_shmem(struct bnx2x *bp)
9634{
9635 int cnt = 0;
9636 u32 val = 0;
9637
9638 do {
9639 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9640
		/* If we read all 0xFFs, we are in PCI error state and
		 * should bail out to avoid crashes on adapter's FW reads
		 */
9644 if (bp->common.shmem_base == 0xFFFFFFFF) {
9645 bp->flags |= NO_MCP_FLAG;
9646 return -ENODEV;
9647 }
9648
9649 if (bp->common.shmem_base) {
9650 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9651 if (val & SHR_MEM_VALIDITY_MB)
9652 return 0;
9653 }
9654
9655 bnx2x_mcp_wait_one(bp);
9656
9657 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9658
9659 BNX2X_ERR("BAD MCP validity signature\n");
9660
9661 return -ENODEV;
9662}
9663
9664static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9665{
9666 int rc = bnx2x_init_shmem(bp);
9667
	/* Restore the 'magic' bit value */
9669 if (!CHIP_IS_E1(bp))
9670 bnx2x_clp_reset_done(bp, magic_val);
9671
9672 return rc;
9673}
9674
9675static void bnx2x_pxp_prep(struct bnx2x *bp)
9676{
9677 if (!CHIP_IS_E1(bp)) {
9678 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9679 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9680 }
9681}
9682
/* Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
9693static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9694{
9695 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9696 u32 global_bits2, stay_reset2;
9697
	/* Bits that have to be set in reset_mask2 if we want to reset
	 * 'global' (per chip) blocks.
	 */
9702 global_bits2 =
9703 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9704 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9705
	/* Don't reset the following blocks.
	 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
	 *            reset, as in a 4-port device they might still be owned
	 *            by the MCP (there is only one leader per path).
	 */
9711 not_reset_mask1 =
9712 MISC_REGISTERS_RESET_REG_1_RST_HC |
9713 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9714 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9715
9716 not_reset_mask2 =
9717 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9718 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9719 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9720 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9721 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9722 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9723 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9724 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9725 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9726 MISC_REGISTERS_RESET_REG_2_PGLC |
9727 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9728 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9729 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9730 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9731 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9732 MISC_REGISTERS_RESET_REG_2_UMAC1;
9733
	/* Keep the following blocks in reset:
	 *  - all xxMACs are handled by the bnx2x link code.
	 */
9738 stay_reset2 =
9739 MISC_REGISTERS_RESET_REG_2_XMAC |
9740 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9741
	/* Full reset masks according to the chip */
9743 reset_mask1 = 0xffffffff;
9744
9745 if (CHIP_IS_E1(bp))
9746 reset_mask2 = 0xffff;
9747 else if (CHIP_IS_E1H(bp))
9748 reset_mask2 = 0x1ffff;
9749 else if (CHIP_IS_E2(bp))
9750 reset_mask2 = 0xfffff;
9751 else
9752 reset_mask2 = 0x3ffffff;
9753
	/* Don't reset global blocks unless we need to */
9755 if (!global)
9756 reset_mask2 &= ~global_bits2;
9757
	/* In case of attention in the QM, we need to reset PXP
	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM,
	 * because otherwise QM reset would release 'close the gates'
	 * shortly before resetting the PXP, then the PSWRQ would send a
	 * write request to PGLUE. Then when PXP is reset, PGLUE would try
	 * to read the payload data from PSWWR, but PSWWR would not respond.
	 * The write queue in PGLUE would get stuck and dmae commands would
	 * not return. Therefore it's important to reset the second reset
	 * register (containing the RST_PXP_RQ_RD_WR bit) before the first
	 * one (containing the RST_QM bit).
	 */
9772 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9773 reset_mask2 & (~not_reset_mask2));
9774
9775 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9776 reset_mask1 & (~not_reset_mask1));
9777
9778 barrier();
9779
9780 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9781 reset_mask2 & (~stay_reset2));
9782
9783 barrier();
9784
9785 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9786}
9787
/**
 * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
 *
 * @bp:	driver handle
 *
 * It should get cleared in no more than 1s. Returns 0 if the
 * pending writes bit gets cleared.
 */
9797static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9798{
	int cnt = 1000;	/* must go negative on timeout, see check below */
9800 u32 pend_bits = 0;
9801
9802 do {
9803 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9804
9805 if (pend_bits == 0)
9806 break;
9807
9808 usleep_range(1000, 2000);
9809 } while (cnt-- > 0);
9810
	if (cnt < 0) {
9812 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9813 pend_bits);
9814 return -EBUSY;
9815 }
9816
9817 return 0;
9818}
9819
9820static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9821{
9822 int cnt = 1000;
9823 u32 val = 0;
9824 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9825 u32 tags_63_32 = 0;
9826
	/* Empty the Tetris buffer, wait for 1s */
9828 do {
9829 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9830 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9831 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9832 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9833 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9834 if (CHIP_IS_E3(bp))
9835 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9836
9837 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9838 ((port_is_idle_0 & 0x1) == 0x1) &&
9839 ((port_is_idle_1 & 0x1) == 0x1) &&
9840 (pgl_exp_rom2 == 0xffffffff) &&
9841 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9842 break;
9843 usleep_range(1000, 2000);
9844 } while (cnt-- > 0);
9845
9846 if (cnt <= 0) {
9847 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9848 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9849 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9850 pgl_exp_rom2);
9851 return -EAGAIN;
9852 }
9853
9854 barrier();
9855
	/* Close gates #2, #3 and #4 */
9857 bnx2x_set_234_gates(bp, true);
9858
	/* Poll for IGU VQs for 57712 and newer chips */
9860 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9861 return -EAGAIN;
9862
	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
9866 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9867 barrier();
9868
	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
9872 usleep_range(1000, 2000);
9873
	/* Prepare to chip reset: */
	/* MCP */
9876 if (global)
9877 bnx2x_reset_mcp_prep(bp, &val);
9878
	/* PXP */
9880 bnx2x_pxp_prep(bp);
9881 barrier();
9882
	/* reset the chip */
9884 bnx2x_process_kill_chip_reset(bp, global);
9885 barrier();
9886
	/* clear errors in PGB */
9888 if (!CHIP_IS_E1x(bp))
9889 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9890
	/* Recover after reset: */
	/* MCP */
9893 if (global && bnx2x_reset_mcp_comp(bp, val))
9894 return -EAGAIN;
9895
	/* TBD: Add resetting the NO_MCP mode DB here */

	/* Open the gates #2, #3 and #4 */
9899 bnx2x_set_234_gates(bp, false);
9900
	/* TBD: IGU/AEU preparation: bring back the AEU/IGU to a
	 * reset state, re-enable attentions.
	 */
9904 return 0;
9905}
9906
9907static int bnx2x_leader_reset(struct bnx2x *bp)
9908{
9909 int rc = 0;
9910 bool global = bnx2x_reset_is_global(bp);
9911 u32 load_code;
9912
	/* If not going to reset MCP, load a "fake" driver to reset HW while
	 * the driver is the owner of the HW.
	 */
9916 if (!global && !BP_NOMCP(bp)) {
9917 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9918 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9919 if (!load_code) {
9920 BNX2X_ERR("MCP response failure, aborting\n");
9921 rc = -EAGAIN;
9922 goto exit_leader_reset;
9923 }
9924 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9925 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9926 BNX2X_ERR("MCP unexpected resp, aborting\n");
9927 rc = -EAGAIN;
9928 goto exit_leader_reset2;
9929 }
9930 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9931 if (!load_code) {
9932 BNX2X_ERR("MCP response failure, aborting\n");
9933 rc = -EAGAIN;
9934 goto exit_leader_reset2;
9935 }
9936 }
9937
	/* Try to recover after the failure */
9939 if (bnx2x_process_kill(bp, global)) {
9940 BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
9941 BP_PATH(bp));
9942 rc = -EAGAIN;
9943 goto exit_leader_reset2;
9944 }
9945
	/* Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
	 * state.
	 */
9950 bnx2x_set_reset_done(bp);
9951 if (global)
9952 bnx2x_clear_reset_global(bp);
9953
9954exit_leader_reset2:
	/* unload the "fake driver" if it was loaded */
9956 if (!global && !BP_NOMCP(bp)) {
9957 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9958 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9959 }
9960exit_leader_reset:
9961 bp->is_leader = 0;
9962 bnx2x_release_leader_lock(bp);
9963 smp_mb();
9964 return rc;
9965}
9966
9967static void bnx2x_recovery_failed(struct bnx2x *bp)
9968{
9969 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9970
	/* Disconnect this device */
9972 netif_device_detach(bp->dev);
9973
	/* Block ifup for all functions of this ASIC until "process kill"
	 * or power cycle.
	 */
9978 bnx2x_set_reset_in_progress(bp);
9979
	/* Shut down the power */
9981 bnx2x_set_power_state(bp, PCI_D3hot);
9982
9983 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9984
9985 smp_mb();
9986}
9987
/* Assumption: runs under rtnl lock. This, together with the fact
 * that it's called only from bnx2x_sp_rtnl_task(), ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
9993static void bnx2x_parity_recover(struct bnx2x *bp)
9994{
9995 u32 error_recovered, error_unrecovered;
9996 bool is_parity, global = false;
9997#ifdef CONFIG_BNX2X_SRIOV
9998 int vf_idx;
9999
10000 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
10001 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
10002
10003 if (vf)
10004 vf->state = VF_LOST;
10005 }
10006#endif
10007 DP(NETIF_MSG_HW, "Handling parity\n");
10008 while (1) {
10009 switch (bp->recovery_state) {
10010 case BNX2X_RECOVERY_INIT:
10011 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
10012 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
10013 WARN_ON(!is_parity);
10014
			/* Try to get a LEADER_LOCK HW lock */
10016 if (bnx2x_trylock_leader_lock(bp)) {
10017 bnx2x_set_reset_in_progress(bp);
10018
				/* Check if there is a global attention and
				 * if there was one, set the global reset
				 * bit.
				 */
10024 if (global)
10025 bnx2x_set_reset_global(bp);
10026
10027 bp->is_leader = 1;
10028 }
10029
			/* Stop the driver */
			/* If interface has been removed - break */
10032 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
10033 return;
10034
10035 bp->recovery_state = BNX2X_RECOVERY_WAIT;
10036
			/* Ensure "is_leader", MCP command sequence and
			 * "recovery_state" update values are seen on other
			 * CPUs.
			 */
10041 smp_mb();
10042 break;
10043
10044 case BNX2X_RECOVERY_WAIT:
10045 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
10046 if (bp->is_leader) {
10047 int other_engine = BP_PATH(bp) ? 0 : 1;
10048 bool other_load_status =
10049 bnx2x_get_load_status(bp, other_engine);
10050 bool load_status =
10051 bnx2x_get_load_status(bp, BP_PATH(bp));
10052 global = bnx2x_reset_is_global(bp);
10053
				/* In case of a parity in a global block, let
				 * the first leader that performs a
				 * leader_reset() reset the global blocks in
				 * order to clear global attentions. Otherwise
				 * the gates will remain closed for that
				 * engine.
				 */
10062 if (load_status ||
10063 (global && other_load_status)) {
10064
					/* Wait until all other functions get
					 * down.
					 */
10067 schedule_delayed_work(&bp->sp_rtnl_task,
10068 HZ/10);
10069 return;
10070 } else {
10071
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
10076 if (bnx2x_leader_reset(bp)) {
10077 bnx2x_recovery_failed(bp);
10078 return;
10079 }
10080
					/* If we are here, the leader has
					 * succeeded and doesn't want to be a
					 * leader any more. Try to continue
					 * as a non-leader.
					 */
10086 break;
10087 }
10088 } else {
10089 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10090
					/* Try to get a LEADER_LOCK HW lock,
					 * as a former leader may have been
					 * unloaded by the user or released
					 * leadership for another reason.
					 */
10096 if (bnx2x_trylock_leader_lock(bp)) {
						/* I'm a leader now! Restart
						 * the switch case.
						 */
10100 bp->is_leader = 1;
10101 break;
10102 }
10103
10104 schedule_delayed_work(&bp->sp_rtnl_task,
10105 HZ/10);
10106 return;
10107
10108 } else {
					/* If there was a global attention,
					 * wait for it to be cleared.
					 */
10113 if (bnx2x_reset_is_global(bp)) {
10114 schedule_delayed_work(
10115 &bp->sp_rtnl_task,
10116 HZ/10);
10117 return;
10118 }
10119
10120 error_recovered =
10121 bp->eth_stats.recoverable_error;
10122 error_unrecovered =
10123 bp->eth_stats.unrecoverable_error;
10124 bp->recovery_state =
10125 BNX2X_RECOVERY_NIC_LOADING;
10126 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10127 error_unrecovered++;
10128 netdev_err(bp->dev,
10129 "Recovery failed. Power cycle needed\n");
						/* Disconnect this device */
10131 netif_device_detach(bp->dev);
						/* Shut down the power */
10133 bnx2x_set_power_state(
10134 bp, PCI_D3hot);
10135 smp_mb();
10136 } else {
10137 bp->recovery_state =
10138 BNX2X_RECOVERY_DONE;
10139 error_recovered++;
10140 smp_mb();
10141 }
10142 bp->eth_stats.recoverable_error =
10143 error_recovered;
10144 bp->eth_stats.unrecoverable_error =
10145 error_unrecovered;
10146
10147 return;
10148 }
10149 }
10150 default:
10151 return;
10152 }
10153 }
10154}
10155
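/* Push the currently configured VXLAN and GENEVE destination UDP ports
 * to the FW via a SWITCH_UPDATE ramrod.
 */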
10156static int bnx2x_udp_port_update(struct bnx2x *bp)
10157{
10158 struct bnx2x_func_switch_update_params *switch_update_params;
10159 struct bnx2x_func_state_params func_params = {NULL};
10160 struct bnx2x_udp_tunnel *udp_tunnel;
10161 u16 vxlan_port = 0, geneve_port = 0;
10162 int rc;
10163
10164 switch_update_params = &func_params.params.switch_update;
10165
	/* Prepare parameters for function state transitions */
10167 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10168 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10169
10170 func_params.f_obj = &bp->func_obj;
10171 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10172
	/* Function parameters */
10174 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10175 &switch_update_params->changes);
10176
10177 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
10178 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10179 geneve_port = udp_tunnel->dst_port;
10180 switch_update_params->geneve_dst_port = geneve_port;
10181 }
10182
10183 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
10184 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10185 vxlan_port = udp_tunnel->dst_port;
10186 switch_update_params->vxlan_dst_port = vxlan_port;
10187 }
10188
	/* Re-enable inner-rss for the offloaded UDP tunnels */
10190 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10191 &switch_update_params->changes);
10192
10193 rc = bnx2x_func_state_change(bp, &func_params);
10194 if (rc)
10195 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10196 vxlan_port, geneve_port, rc);
10197 else
10198 DP(BNX2X_MSG_SP,
10199 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10200 vxlan_port, geneve_port);
10201
10202 return rc;
10203}
10204
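/* Take a reference on a tunnel UDP port. The device supports a single
 * destination port per tunnel type, so a request for a second, different
 * port is refused.
 */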
10205static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
10206 enum bnx2x_udp_port_type type)
10207{
10208 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10209
10210 if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp))
10211 return;
10212
10213 if (udp_port->count && udp_port->dst_port == port) {
10214 udp_port->count++;
10215 return;
10216 }
10217
10218 if (udp_port->count) {
10219 DP(BNX2X_MSG_SP,
10220 "UDP tunnel [%d] - destination port limit reached\n",
10221 type);
10222 return;
10223 }
10224
10225 udp_port->dst_port = port;
10226 udp_port->count = 1;
10227 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10228}
10229
10230static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
10231 enum bnx2x_udp_port_type type)
10232{
10233 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10234
10235 if (!IS_PF(bp) || CHIP_IS_E1x(bp))
10236 return;
10237
10238 if (!udp_port->count || udp_port->dst_port != port) {
10239 DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
10240 type);
10241 return;
10242 }
10243

	/* Remove reference, and make certain it's no longer in use */
10245 udp_port->count--;
10246 if (udp_port->count)
10247 return;
10248 udp_port->dst_port = 0;
10249
10250 if (netif_running(bp->dev))
10251 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10252 else
10253 DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
10254 type, port);
10255}
10256
10257static void bnx2x_udp_tunnel_add(struct net_device *netdev,
10258 struct udp_tunnel_info *ti)
10259{
10260 struct bnx2x *bp = netdev_priv(netdev);
10261 u16 t_port = ntohs(ti->port);
10262
10263 switch (ti->type) {
10264 case UDP_TUNNEL_TYPE_VXLAN:
10265 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10266 break;
10267 case UDP_TUNNEL_TYPE_GENEVE:
10268 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10269 break;
10270 default:
10271 break;
10272 }
10273}
10274
10275static void bnx2x_udp_tunnel_del(struct net_device *netdev,
10276 struct udp_tunnel_info *ti)
10277{
10278 struct bnx2x *bp = netdev_priv(netdev);
10279 u16 t_port = ntohs(ti->port);
10280
10281 switch (ti->type) {
10282 case UDP_TUNNEL_TYPE_VXLAN:
10283 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10284 break;
10285 case UDP_TUNNEL_TYPE_GENEVE:
10286 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10287 break;
10288 default:
10289 break;
10290 }
10291}
10292
10293static int bnx2x_close(struct net_device *dev);
10294
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */
10298static void bnx2x_sp_rtnl_task(struct work_struct *work)
10299{
10300 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10301
10302 rtnl_lock();
10303
10304 if (!netif_running(bp->dev)) {
10305 rtnl_unlock();
10306 return;
10307 }
10308
10309 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10310#ifdef BNX2X_STOP_ON_ERROR
10311 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10312 "you will need to reboot when done\n");
10313 goto sp_rtnl_not_reset;
10314#endif
10315
		/* Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
10319 bp->sp_rtnl_state = 0;
10320 smp_mb();
10321
10322 bnx2x_parity_recover(bp);
10323
10324 rtnl_unlock();
10325 return;
10326 }
10327
10328 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10329#ifdef BNX2X_STOP_ON_ERROR
10330 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10331 "you will need to reboot when done\n");
10332 goto sp_rtnl_not_reset;
10333#endif
10334
		/* Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
10339 bp->sp_rtnl_state = 0;
10340 smp_mb();
10341
		/* Immediately indicate link as down */
10343 bp->link_vars.link_up = 0;
10344 bp->force_link_down = true;
10345 netif_carrier_off(bp->dev);
10346 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10347
10348 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10349
		/* If the load failed due to a memory allocation failure,
		 * unload and retry once; the error is reported if the retry
		 * fails too.
		 */
10353 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
10354 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10355 if (bnx2x_nic_load(bp, LOAD_NORMAL))
10356 BNX2X_ERR("Open the NIC fails again!\n");
10357 }
10358 rtnl_unlock();
10359 return;
10360 }
10361#ifdef BNX2X_STOP_ON_ERROR
10362sp_rtnl_not_reset:
10363#endif
10364 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10365 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10366 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10367 bnx2x_after_function_update(bp);
10368
	/* In case of fan failure we need to detach and close the device,
	 * since we are trying to prevent permanent overheating damage.
	 */
10373 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10374 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10375 netif_device_detach(bp->dev);
10376 bnx2x_close(bp->dev);
10377 rtnl_unlock();
10378 return;
10379 }
10380
10381 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10382 DP(BNX2X_MSG_SP,
10383 "sending set mcast vf pf channel message from rtnl sp-task\n");
10384 bnx2x_vfpf_set_mcast(bp->dev);
10385 }
10386 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10387 &bp->sp_rtnl_state)){
10388 if (netif_carrier_ok(bp->dev)) {
10389 bnx2x_tx_disable(bp);
10390 BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
10391 }
10392 }
10393
10394 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10395 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10396 bnx2x_set_rx_mode_inner(bp);
10397 }
10398
10399 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10400 &bp->sp_rtnl_state))
10401 bnx2x_pf_set_vfs_vlan(bp);
10402
10403 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10404 bnx2x_dcbx_stop_hw_tx(bp);
10405 bnx2x_dcbx_resume_hw_tx(bp);
10406 }
10407
10408 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10409 &bp->sp_rtnl_state))
10410 bnx2x_update_mng_version(bp);
10411
10412 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10413 bnx2x_handle_update_svid_cmd(bp);
10414
10415 if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
10416 &bp->sp_rtnl_state)) {
10417 if (bnx2x_udp_port_update(bp)) {
			/* On failure, forget the port configuration */
10419 memset(bp->udp_tunnel_ports, 0,
10420 sizeof(struct bnx2x_udp_tunnel) *
10421 BNX2X_UDP_PORT_MAX);
10422 } else {
			/* Since we don't store additional port information,
			 * if no ports are configured for any feature ask for
			 * information about currently configured ports.
			 */
10427 if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
10428 !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
10429 udp_tunnel_get_rx_info(bp->dev);
10430 }
10431 }
10432
10433
	/* work which needs rtnl lock not-taken (as it takes the lock itself
	 * and can be called from other contexts as well)
	 */
10436 rtnl_unlock();
10437
	/* enable SR-IOV if applicable */
10439 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10440 &bp->sp_rtnl_state)) {
10441 bnx2x_disable_sriov(bp);
10442 bnx2x_enable_sriov(bp);
10443 }
10444}
10445
10446static void bnx2x_period_task(struct work_struct *work)
10447{
10448 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10449
10450 if (!netif_running(bp->dev))
10451 goto period_task_exit;
10452
10453 if (CHIP_REV_IS_SLOW(bp)) {
10454 BNX2X_ERR("period task called on emulation, ignoring\n");
10455 goto period_task_exit;
10456 }
10457
10458 bnx2x_acquire_phy_lock(bp);
	/* The barrier is needed to ensure the ordering between the writing to
	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
	 * the reading here.
	 */
10464 smp_mb();
10465 if (bp->port.pmf) {
10466 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10467
		/* Re-queue task in 1 sec */
10469 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10470 }
10471
10472 bnx2x_release_phy_lock(bp);
10473period_task_exit:
10474 return;
10475}
10476
/* Return the GRC "pretend" register address for this function */
10481static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10482{
10483 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10484 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10485 return base + (BP_ABS_FUNC(bp)) * stride;
10486}
10487
10488static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10489 u8 port, u32 reset_reg,
10490 struct bnx2x_mac_vals *vals)
10491{
10492 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10493 u32 base_addr;
10494
10495 if (!(mask & reset_reg))
10496 return false;
10497
10498 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10499 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10500 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10501 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10502 REG_WR(bp, vals->umac_addr[port], 0);
10503
10504 return true;
10505}
10506
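/* Save and disable the Rx side of the MACs (BMAC/EMAC on E1x/E2, XMAC and
 * UMACs on E3) so that the BRB can drain; the saved values are restored
 * after the common reset.
 */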
10507static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10508 struct bnx2x_mac_vals *vals)
10509{
10510 u32 val, base_addr, offset, mask, reset_reg;
10511 bool mac_stopped = false;
10512 u8 port = BP_PORT(bp);
10513
	/* Clear the saved MAC values */
10515 memset(vals, 0, sizeof(*vals));
10516
10517 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10518
10519 if (!CHIP_IS_E3(bp)) {
10520 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10521 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10522 if ((mask & reset_reg) && val) {
10523 u32 wb_data[2];
10524 BNX2X_DEV_INFO("Disable bmac Rx\n");
10525 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10526 : NIG_REG_INGRESS_BMAC0_MEM;
10527 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10528 : BIGMAC_REGISTER_BMAC_CONTROL;
10529
			/* Save the 64-bit BMAC CONTROL value, then clear its
			 * Rx-enable bit and write the value back.
			 */
10536 wb_data[0] = REG_RD(bp, base_addr + offset);
10537 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10538 vals->bmac_addr = base_addr + offset;
10539 vals->bmac_val[0] = wb_data[0];
10540 vals->bmac_val[1] = wb_data[1];
10541 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10542 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10543 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10544 }
10545 BNX2X_DEV_INFO("Disable emac Rx\n");
10546 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10547 vals->emac_val = REG_RD(bp, vals->emac_addr);
10548 REG_WR(bp, vals->emac_addr, 0);
10549 mac_stopped = true;
10550 } else {
10551 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10552 BNX2X_DEV_INFO("Disable xmac Rx\n");
10553 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10554 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10555 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10556 val & ~(1 << 1));
10557 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10558 val | (1 << 1));
10559 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10560 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10561 REG_WR(bp, vals->xmac_addr, 0);
10562 mac_stopped = true;
10563 }
10564
10565 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10566 reset_reg, vals);
10567 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10568 reset_reg, vals);
10569 }
10570
10571 if (mac_stopped)
10572 msleep(20);
10573}
10574
10575#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10576#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10577 0x1848 + ((f) << 4))
10578#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10579#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10580#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
10581
10582#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10583#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10584#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10585
10586static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10587{
	/* UNDI marks its presence in DORQ -
	 * it initializes the CID offset for the normal bell to 0x7
	 */
10591 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10592 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10593 return false;
10594
10595 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10596 BNX2X_DEV_INFO("UNDI previously loaded\n");
10597 return true;
10598 }
10599
10600 return false;
10601}
10602
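/* Advance the Rx BD/RCQ producers that a previously loaded UNDI driver
 * left behind, so that it keeps draining the BRB while we empty it.
 */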
10603static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10604{
10605 u16 rcq, bd;
10606 u32 addr, tmp_reg;
10607
10608 if (BP_FUNC(bp) < 2)
10609 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10610 else
10611 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10612
10613 tmp_reg = REG_RD(bp, addr);
10614 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10615 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10616
10617 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10618 REG_WR(bp, addr, tmp_reg);
10619
10620 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10621 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10622}
10623
10624static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10625{
10626 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10627 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10628 if (!rc) {
10629 BNX2X_ERR("MCP response failure, aborting\n");
10630 return -EBUSY;
10631 }
10632
10633 return 0;
10634}
10635
10636static struct bnx2x_prev_path_list *
10637 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10638{
10639 struct bnx2x_prev_path_list *tmp_list;
10640
10641 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10642 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10643 bp->pdev->bus->number == tmp_list->bus &&
10644 BP_PATH(bp) == tmp_list->path)
10645 return tmp_list;
10646
10647 return NULL;
10648}
10649
10650static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10651{
10652 struct bnx2x_prev_path_list *tmp_list;
10653 int rc;
10654
10655 rc = down_interruptible(&bnx2x_prev_sem);
10656 if (rc) {
10657 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10658 return rc;
10659 }
10660
10661 tmp_list = bnx2x_prev_path_get_entry(bp);
10662 if (tmp_list) {
10663 tmp_list->aer = 1;
10664 rc = 0;
10665 } else {
10666 BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
10667 BP_PATH(bp));
10668 }
10669
10670 up(&bnx2x_prev_sem);
10671
10672 return rc;
10673}
10674
10675static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10676{
10677 struct bnx2x_prev_path_list *tmp_list;
10678 bool rc = false;
10679
10680 if (down_trylock(&bnx2x_prev_sem))
10681 return false;
10682
10683 tmp_list = bnx2x_prev_path_get_entry(bp);
10684 if (tmp_list) {
10685 if (tmp_list->aer) {
10686 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10687 BP_PATH(bp));
10688 } else {
10689 rc = true;
10690 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10691 BP_PATH(bp));
10692 }
10693 }
10694
10695 up(&bnx2x_prev_sem);
10696
10697 return rc;
10698}
10699
10700bool bnx2x_port_after_undi(struct bnx2x *bp)
10701{
10702 struct bnx2x_prev_path_list *entry;
10703 bool val;
10704
10705 down(&bnx2x_prev_sem);
10706
10707 entry = bnx2x_prev_path_get_entry(bp);
10708 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10709
10710 up(&bnx2x_prev_sem);
10711
10712 return val;
10713}
10714
10715static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10716{
10717 struct bnx2x_prev_path_list *tmp_list;
10718 int rc;
10719
10720 rc = down_interruptible(&bnx2x_prev_sem);
10721 if (rc) {
10722 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10723 return rc;
10724 }
10725
	/* Check whether the entry for this path already exists */
10727 tmp_list = bnx2x_prev_path_get_entry(bp);
10728 if (tmp_list) {
10729 if (!tmp_list->aer) {
10730 BNX2X_ERR("Re-Marking the path.\n");
10731 } else {
10732 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10733 BP_PATH(bp));
10734 tmp_list->aer = 0;
10735 }
10736 up(&bnx2x_prev_sem);
10737 return 0;
10738 }
10739 up(&bnx2x_prev_sem);
10740
	/* Create an entry for this path and add it */
10742 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10743 if (!tmp_list) {
10744 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10745 return -ENOMEM;
10746 }
10747
10748 tmp_list->bus = bp->pdev->bus->number;
10749 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10750 tmp_list->path = BP_PATH(bp);
10751 tmp_list->aer = 0;
10752 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10753
10754 rc = down_interruptible(&bnx2x_prev_sem);
10755 if (rc) {
10756 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10757 kfree(tmp_list);
10758 } else {
10759 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10760 BP_PATH(bp));
10761 list_add(&tmp_list->list, &bnx2x_prev_list);
10762 up(&bnx2x_prev_sem);
10763 }
10764
10765 return rc;
10766}
10767
10768static int bnx2x_do_flr(struct bnx2x *bp)
10769{
10770 struct pci_dev *dev = bp->pdev;
10771
10772 if (CHIP_IS_E1x(bp)) {
10773 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10774 return -EINVAL;
10775 }
10776
	/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
10778 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10779 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10780 bp->common.bc_ver);
10781 return -EINVAL;
10782 }
10783
10784 if (!pci_wait_for_pending_transaction(dev))
10785 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10786
10787 BNX2X_DEV_INFO("Initiating FLR\n");
10788 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10789
10790 return 0;
10791}
10792
10793static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10794{
10795 int rc;
10796
10797 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10798
	/* Test if the previous unload already finished for this path */
10800 if (bnx2x_prev_is_path_marked(bp))
10801 return bnx2x_prev_mcp_done(bp);
10802
10803 BNX2X_DEV_INFO("Path is unmarked\n");
10804
	/* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
10806 if (bnx2x_prev_is_after_undi(bp))
10807 goto out;
10808
	/* If the function has FLR capabilities, and the existing FW version
	 * matches the one required, then FLR will be sufficient to clean any
	 * residue left by the previous driver.
	 */
10813 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10814
10815 if (!rc) {
		/* fw version is good */
10817 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10818 rc = bnx2x_do_flr(bp);
10819 }
10820
10821 if (!rc) {
		/* FLR was performed */
10823 BNX2X_DEV_INFO("FLR successful\n");
10824 return 0;
10825 }
10826
10827 BNX2X_DEV_INFO("Could not FLR\n");
10828
10829out:
	/* Close the MCP request, return failure */
10831 rc = bnx2x_prev_mcp_done(bp);
10832 if (!rc)
10833 rc = BNX2X_PREV_WAIT_NEEDED;
10834
10835 return rc;
10836}
10837
10838static int bnx2x_prev_unload_common(struct bnx2x *bp)
10839{
10840 u32 reset_reg, tmp_reg = 0, rc;
10841 bool prev_undi = false;
10842 struct bnx2x_mac_vals mac_vals;
10843
	/* It is possible a previous function received a 'common' answer,
	 * but hasn't loaded yet, therefore creating a scenario of
	 * multiple functions receiving 'common' on the same path.
	 */
10848 BNX2X_DEV_INFO("Common unload Flow\n");
10849
10850 memset(&mac_vals, 0, sizeof(mac_vals));
10851
10852 if (bnx2x_prev_is_path_marked(bp))
10853 return bnx2x_prev_mcp_done(bp);
10854
10855 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10856
10857 /* Reset should be performed after BRB is emptied */
10858 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10859 u32 timer_count = 1000;
10860
10861 /* Close the MAC Rx to prevent BRB from filling up */
10862 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10863
10864 /* close LLH filters for both ports towards the BRB */
10865 bnx2x_set_rx_filter(&bp->link_params, 0);
10866 bp->link_params.port ^= 1;
10867 bnx2x_set_rx_filter(&bp->link_params, 0);
10868 bp->link_params.port ^= 1;
10869
10870 /* Check if the UNDI driver was previously loaded */
10871 if (bnx2x_prev_is_after_undi(bp)) {
10872 prev_undi = true;
10873 /* clear the UNDI indication */
10874 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10875 /* clear possible idle check errors */
10876 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10877 }
10878 if (!CHIP_IS_E1x(bp))
10879 /* block FW from writing to host */
10880 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10881
10882 /* wait until BRB is empty */
10883 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10884 while (timer_count) {
10885 u32 prev_brb = tmp_reg;
10886
10887 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10888 if (!tmp_reg)
10889 break;
10890
10891 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10892
10893 /* reset timer as long as BRB actually gets emptied */
10894 if (prev_brb > tmp_reg)
10895 timer_count = 1000;
10896 else
10897 timer_count--;
10898
10899 /* If UNDI resides in memory, manually increment it */
10900 if (prev_undi)
10901 bnx2x_prev_unload_undi_inc(bp, 1);
10902
10903 udelay(10);
10904 }
10905
10906 if (!timer_count)
10907 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10908 }
10909
10910 /* No packets are in the pipeline, path is ready for reset */
10911 bnx2x_reset_common(bp);
10912
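/* restore the MAC register values saved while closing the MACs above */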
10913 if (mac_vals.xmac_addr)
10914 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10915 if (mac_vals.umac_addr[0])
10916 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10917 if (mac_vals.umac_addr[1])
10918 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10919 if (mac_vals.emac_addr)
10920 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10921 if (mac_vals.bmac_addr) {
10922 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10923 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10924 }
10925
10926 rc = bnx2x_prev_mark_path(bp, prev_undi);
10927 if (rc) {
10928 bnx2x_prev_mcp_done(bp);
10929 return rc;
10930 }
10931
10932 return bnx2x_prev_mcp_done(bp);
10933}
10934
10935static int bnx2x_prev_unload(struct bnx2x *bp)
10936{
10937 int time_counter = 10;
10938 u32 rc, fw, hw_lock_reg, hw_lock_val;
10939 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10940
10941 /* clear hw errors which may be left over from a previous,
10942  * improperly unloaded driver
10943  */
10944 bnx2x_clean_pglue_errors(bp);
10945
10946 /* Release previously held locks */
10947 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10948 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10949 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10950
10951 hw_lock_val = REG_RD(bp, hw_lock_reg);
10952 if (hw_lock_val) {
10953 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10954 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10955 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10956 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10957 }
10958
10959 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10960 REG_WR(bp, hw_lock_reg, 0xffffffff);
10961 } else
10962 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10963
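/* release a previously held MCP access lock (alr), if any */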
10964 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10965 BNX2X_DEV_INFO("Release previously held alr\n");
10966 bnx2x_release_alr(bp);
10967 }
10968
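/* request unload from the MCP; retry up to 10 times while waiting is needed */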
10969 do {
10970 int aer = 0;
10971 /* Lock MCP using an unload request */
10972 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10973 if (!fw) {
10974 BNX2X_ERR("MCP response failure, aborting\n");
10975 rc = -EBUSY;
10976 break;
10977 }
10978
10979 rc = down_interruptible(&bnx2x_prev_sem);
10980 if (rc) {
10981 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10982 rc);
10983 } else {
10984 /* If Path is marked by EEH, ignore unload status */
10985 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10986 bnx2x_prev_path_get_entry(bp)->aer);
10987 up(&bnx2x_prev_sem);
10988 }
10989
10990 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10991 rc = bnx2x_prev_unload_common(bp);
10992 break;
10993 }
10994
10995 /* non-common reply from MCP might require looping */
10996 rc = bnx2x_prev_unload_uncommon(bp);
10997 if (rc != BNX2X_PREV_WAIT_NEEDED)
10998 break;
10999
11000 msleep(20);
11001 } while (--time_counter);
11002
11003 if (!time_counter || rc) {
11004 BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
11005 rc = -EPROBE_DEFER;
11006 }
11007
11008 /* Mark function if its port was used to boot from SAN */
11009 if (bnx2x_port_after_undi(bp))
11010 bp->link_params.feature_config_flags |=
11011 FEATURE_CONFIG_BOOT_FROM_SAN;
11012
11013 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
11014
11015 return rc;
11016}
11017
11018static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
11019{
11020 u32 val, val2, val3, val4, id, boot_mode;
11021 u16 pmc;
11022
11023 /* Get the chip revision id and number. */
11024 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
11025 val = REG_RD(bp, MISC_REG_CHIP_NUM);
11026 id = ((val & 0xffff) << 16);
11027 val = REG_RD(bp, MISC_REG_CHIP_REV);
11028 id |= ((val & 0xf) << 12);
11029
11030 /* Metal is read from PCI regs, but we can't access >=0x400 from
11031  * the configuration space (so we need to reg_rd)
11032  */
11033 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
11034 id |= (((val >> 24) & 0xf) << 4);
11035 val = REG_RD(bp, MISC_REG_BOND_ID);
11036 id |= (val & 0xf);
11037 bp->common.chip_id = id;
11038
11039 /* force 57811 according to MISC register */
11040 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
11041 if (CHIP_IS_57810(bp))
11042 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
11043 (bp->common.chip_id & 0x0000FFFF);
11044 else if (CHIP_IS_57810_MF(bp))
11045 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
11046 (bp->common.chip_id & 0x0000FFFF);
11047 bp->common.chip_id |= 0x1;
11048 }
11049
11050 /* Set doorbell size */
11051 bp->db_size = (1 << BNX2X_DB_SHIFT);
11052
11053 if (!CHIP_IS_E1x(bp)) {
11054 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
11055 if ((val & 1) == 0)
11056 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
11057 else
11058 val = (val >> 1) & 1;
11059 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
11060 "2_PORT_MODE");
11061 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
11062 CHIP_2_PORT_MODE;
11063
11064 if (CHIP_MODE_IS_4_PORT(bp))
11065 bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
11066 else
11067 bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
11068 } else {
11069 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
11070 bp->pfid = bp->pf_num;
11071 }
11072
11073 BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
11074
11075 bp->link_params.chip_id = bp->common.chip_id;
11076 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
11077
11078 val = (REG_RD(bp, 0x2874) & 0x55);
11079 if ((bp->common.chip_id & 0x1) ||
11080 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
11081 bp->flags |= ONE_PORT_FLAG;
11082 BNX2X_DEV_INFO("single port device\n");
11083 }
11084
11085 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
11086 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
11087 (val & MCPR_NVM_CFG4_FLASH_SIZE));
11088 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
11089 bp->common.flash_size, bp->common.flash_size);
11090
11091 bnx2x_init_shmem(bp);
11092
11093 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11094 MISC_REG_GENERIC_CR_1 :
11095 MISC_REG_GENERIC_CR_0));
11096
11097 bp->link_params.shmem_base = bp->common.shmem_base;
11098 bp->link_params.shmem2_base = bp->common.shmem2_base;
11099 if (SHMEM2_RD(bp, size) >
11100 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11101 bp->link_params.lfa_base =
11102 REG_RD(bp, bp->common.shmem2_base +
11103 (u32)offsetof(struct shmem2_region,
11104 lfa_host_addr[BP_PORT(bp)]));
11105 else
11106 bp->link_params.lfa_base = 0;
11107 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
11108 bp->common.shmem_base, bp->common.shmem2_base);
11109
11110 if (!bp->common.shmem_base) {
11111 BNX2X_DEV_INFO("MCP not active\n");
11112 bp->flags |= NO_MCP_FLAG;
11113 return;
11114 }
11115
11116 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11117 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11118
11119 bp->link_params.hw_led_mode = ((bp->common.hw_config &
11120 SHARED_HW_CFG_LED_MODE_MASK) >>
11121 SHARED_HW_CFG_LED_MODE_SHIFT);
11122
11123 bp->link_params.feature_config_flags = 0;
11124 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11125 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
11126 bp->link_params.feature_config_flags |=
11127 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11128 else
11129 bp->link_params.feature_config_flags &=
11130 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11131
11132 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11133 bp->common.bc_ver = val;
11134 BNX2X_DEV_INFO("bc_ver %X\n", val);
11135 if (val < BNX2X_BC_VER) {
11136 /* for now only warn;
11137  * later we might set to panic */
11138 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11139 BNX2X_BC_VER, val);
11140 }
11141 bp->link_params.feature_config_flags |=
11142 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11143 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11144
11145 bp->link_params.feature_config_flags |=
11146 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11147 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11148 bp->link_params.feature_config_flags |=
11149 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11150 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11151 bp->link_params.feature_config_flags |=
11152 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11153 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11154
11155 bp->link_params.feature_config_flags |=
11156 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11157 FEATURE_CONFIG_MT_SUPPORT : 0;
11158
11159 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11160 BC_SUPPORTS_PFC_STATS : 0;
11161
11162 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11163 BC_SUPPORTS_FCOE_FEATURES : 0;
11164
11165 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11166 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11167
11168 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11169 BC_SUPPORTS_RMMOD_CMD : 0;
11170
11171 boot_mode = SHMEM_RD(bp,
11172 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11173 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11174 switch (boot_mode) {
11175 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11176 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11177 break;
11178 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11179 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11180 break;
11181 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11182 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11183 break;
11184 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11185 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11186 break;
11187 }
11188
11189 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11190 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11191
11192 BNX2X_DEV_INFO("%sWoL capable\n",
11193 (bp->flags & NO_WOL_FLAG) ? "not " : "");
11194
11195 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11196 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11197 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11198 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11199
11200 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11201 val, val2, val3, val4);
11202}
11203
11204#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11205#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11206
11207static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11208{
11209 int pfid = BP_FUNC(bp);
11210 int igu_sb_id;
11211 u32 val;
11212 u8 fid, igu_sb_cnt = 0;
11213
11214 bp->igu_base_sb = 0xff;
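/* backward-compatible IGU mode uses a fixed per-function SB layout, so no CAM scan is needed */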
11215 if (CHIP_INT_MODE_IS_BC(bp)) {
11216 int vn = BP_VN(bp);
11217 igu_sb_cnt = bp->igu_sb_cnt;
11218 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11219 FP_SB_MAX_E1x;
11220
11221 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
11222 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11223
11224 return 0;
11225 }
11226
11227 /* IGU in normal mode - read CAM */
11228 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11229 igu_sb_id++) {
11230 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11231 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11232 continue;
11233 fid = IGU_FID(val);
11234 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11235 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11236 continue;
11237 if (IGU_VEC(val) == 0)
11238 /* default status block */
11239 bp->igu_dsb_id = igu_sb_id;
11240 else {
11241 if (bp->igu_base_sb == 0xff)
11242 bp->igu_base_sb = igu_sb_id;
11243 igu_sb_cnt++;
11244 }
11245 }
11246 }
11247
11248#ifdef CONFIG_PCI_MSI
11249 /* Due to new PF resource allocation by MFW T7.4 and above, it's
11250  * optional that the number of CAM entries will not be equal to the
11251  * value advertised in PCI.
11252  * Driver should use the minimal value of both as the actual status
11253  * block count
11254  */
11255 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11256#endif
11257
11258 if (igu_sb_cnt == 0) {
11259 BNX2X_ERR("CAM configuration error\n");
11260 return -EINVAL;
11261 }
11262
11263 return 0;
11264}
11265
11266static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11267{
11268 int cfg_size = 0, idx, port = BP_PORT(bp);
11269
11270 /* Aggregation of supported attributes of all external phys */
11271 bp->port.supported[0] = 0;
11272 bp->port.supported[1] = 0;
11273 switch (bp->link_params.num_phys) {
11274 case 1:
11275 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11276 cfg_size = 1;
11277 break;
11278 case 2:
11279 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11280 cfg_size = 1;
11281 break;
11282 case 3:
11283 if (bp->link_params.multi_phy_config &
11284 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11285 bp->port.supported[1] =
11286 bp->link_params.phy[EXT_PHY1].supported;
11287 bp->port.supported[0] =
11288 bp->link_params.phy[EXT_PHY2].supported;
11289 } else {
11290 bp->port.supported[0] =
11291 bp->link_params.phy[EXT_PHY1].supported;
11292 bp->port.supported[1] =
11293 bp->link_params.phy[EXT_PHY2].supported;
11294 }
11295 cfg_size = 2;
11296 break;
11297 }
11298
11299 if (!(bp->port.supported[0] || bp->port.supported[1])) {
11300 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11301 SHMEM_RD(bp,
11302 dev_info.port_hw_config[port].external_phy_config),
11303 SHMEM_RD(bp,
11304 dev_info.port_hw_config[port].external_phy_config2));
11305 return;
11306 }
11307
11308 if (CHIP_IS_E3(bp))
11309 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11310 else {
11311 switch (switch_cfg) {
11312 case SWITCH_CFG_1G:
11313 bp->port.phy_addr = REG_RD(
11314 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11315 break;
11316 case SWITCH_CFG_10G:
11317 bp->port.phy_addr = REG_RD(
11318 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11319 break;
11320 default:
11321 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11322 bp->port.link_config[0]);
11323 return;
11324 }
11325 }
11326 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11327 /* mask what we support according to speed_cap_mask per configuration */
11328 for (idx = 0; idx < cfg_size; idx++) {
11329 if (!(bp->link_params.speed_cap_mask[idx] &
11330 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11331 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11332
11333 if (!(bp->link_params.speed_cap_mask[idx] &
11334 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11335 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11336
11337 if (!(bp->link_params.speed_cap_mask[idx] &
11338 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11339 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11340
11341 if (!(bp->link_params.speed_cap_mask[idx] &
11342 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11343 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11344
11345 if (!(bp->link_params.speed_cap_mask[idx] &
11346 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11347 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11348 SUPPORTED_1000baseT_Full);
11349
11350 if (!(bp->link_params.speed_cap_mask[idx] &
11351 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11352 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11353
11354 if (!(bp->link_params.speed_cap_mask[idx] &
11355 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11356 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11357
11358 if (!(bp->link_params.speed_cap_mask[idx] &
11359 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11360 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11361 }
11362
11363 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11364 bp->port.supported[1]);
11365}
11366
11367static void bnx2x_link_settings_requested(struct bnx2x *bp)
11368{
11369 u32 link_config, idx, cfg_size = 0;
11370 bp->port.advertising[0] = 0;
11371 bp->port.advertising[1] = 0;
11372 switch (bp->link_params.num_phys) {
11373 case 1:
11374 case 2:
11375 cfg_size = 1;
11376 break;
11377 case 3:
11378 cfg_size = 2;
11379 break;
11380 }
11381 for (idx = 0; idx < cfg_size; idx++) {
11382 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11383 link_config = bp->port.link_config[idx];
11384 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11385 case PORT_FEATURE_LINK_SPEED_AUTO:
11386 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11387 bp->link_params.req_line_speed[idx] =
11388 SPEED_AUTO_NEG;
11389 bp->port.advertising[idx] |=
11390 bp->port.supported[idx];
11391 if (bp->link_params.phy[EXT_PHY1].type ==
11392 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11393 bp->port.advertising[idx] |=
11394 (SUPPORTED_100baseT_Half |
11395 SUPPORTED_100baseT_Full);
11396 } else {
11397 /* force 10G, no AN */
11398 bp->link_params.req_line_speed[idx] =
11399 SPEED_10000;
11400 bp->port.advertising[idx] |=
11401 (ADVERTISED_10000baseT_Full |
11402 ADVERTISED_FIBRE);
11403 continue;
11404 }
11405 break;
11406
11407 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11408 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11409 bp->link_params.req_line_speed[idx] =
11410 SPEED_10;
11411 bp->port.advertising[idx] |=
11412 (ADVERTISED_10baseT_Full |
11413 ADVERTISED_TP);
11414 } else {
11415 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11416 link_config,
11417 bp->link_params.speed_cap_mask[idx]);
11418 return;
11419 }
11420 break;
11421
11422 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11423 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11424 bp->link_params.req_line_speed[idx] =
11425 SPEED_10;
11426 bp->link_params.req_duplex[idx] =
11427 DUPLEX_HALF;
11428 bp->port.advertising[idx] |=
11429 (ADVERTISED_10baseT_Half |
11430 ADVERTISED_TP);
11431 } else {
11432 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11433 link_config,
11434 bp->link_params.speed_cap_mask[idx]);
11435 return;
11436 }
11437 break;
11438
11439 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11440 if (bp->port.supported[idx] &
11441 SUPPORTED_100baseT_Full) {
11442 bp->link_params.req_line_speed[idx] =
11443 SPEED_100;
11444 bp->port.advertising[idx] |=
11445 (ADVERTISED_100baseT_Full |
11446 ADVERTISED_TP);
11447 } else {
11448 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11449 link_config,
11450 bp->link_params.speed_cap_mask[idx]);
11451 return;
11452 }
11453 break;
11454
11455 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11456 if (bp->port.supported[idx] &
11457 SUPPORTED_100baseT_Half) {
11458 bp->link_params.req_line_speed[idx] =
11459 SPEED_100;
11460 bp->link_params.req_duplex[idx] =
11461 DUPLEX_HALF;
11462 bp->port.advertising[idx] |=
11463 (ADVERTISED_100baseT_Half |
11464 ADVERTISED_TP);
11465 } else {
11466 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11467 link_config,
11468 bp->link_params.speed_cap_mask[idx]);
11469 return;
11470 }
11471 break;
11472
11473 case PORT_FEATURE_LINK_SPEED_1G:
11474 if (bp->port.supported[idx] &
11475 SUPPORTED_1000baseT_Full) {
11476 bp->link_params.req_line_speed[idx] =
11477 SPEED_1000;
11478 bp->port.advertising[idx] |=
11479 (ADVERTISED_1000baseT_Full |
11480 ADVERTISED_TP);
11481 } else if (bp->port.supported[idx] &
11482 SUPPORTED_1000baseKX_Full) {
11483 bp->link_params.req_line_speed[idx] =
11484 SPEED_1000;
11485 bp->port.advertising[idx] |=
11486 ADVERTISED_1000baseKX_Full;
11487 } else {
11488 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11489 link_config,
11490 bp->link_params.speed_cap_mask[idx]);
11491 return;
11492 }
11493 break;
11494
11495 case PORT_FEATURE_LINK_SPEED_2_5G:
11496 if (bp->port.supported[idx] &
11497 SUPPORTED_2500baseX_Full) {
11498 bp->link_params.req_line_speed[idx] =
11499 SPEED_2500;
11500 bp->port.advertising[idx] |=
11501 (ADVERTISED_2500baseX_Full |
11502 ADVERTISED_TP);
11503 } else {
11504 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11505 link_config,
11506 bp->link_params.speed_cap_mask[idx]);
11507 return;
11508 }
11509 break;
11510
11511 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11512 if (bp->port.supported[idx] &
11513 SUPPORTED_10000baseT_Full) {
11514 bp->link_params.req_line_speed[idx] =
11515 SPEED_10000;
11516 bp->port.advertising[idx] |=
11517 (ADVERTISED_10000baseT_Full |
11518 ADVERTISED_FIBRE);
11519 } else if (bp->port.supported[idx] &
11520 SUPPORTED_10000baseKR_Full) {
11521 bp->link_params.req_line_speed[idx] =
11522 SPEED_10000;
11523 bp->port.advertising[idx] |=
11524 (ADVERTISED_10000baseKR_Full |
11525 ADVERTISED_FIBRE);
11526 } else {
11527 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11528 link_config,
11529 bp->link_params.speed_cap_mask[idx]);
11530 return;
11531 }
11532 break;
11533 case PORT_FEATURE_LINK_SPEED_20G:
11534 bp->link_params.req_line_speed[idx] = SPEED_20000;
11535
11536 break;
11537 default:
11538 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11539 link_config);
11540 bp->link_params.req_line_speed[idx] =
11541 SPEED_AUTO_NEG;
11542 bp->port.advertising[idx] =
11543 bp->port.supported[idx];
11544 break;
11545 }
11546
11547 bp->link_params.req_flow_ctrl[idx] = (link_config &
11548 PORT_FEATURE_FLOW_CONTROL_MASK);
11549 if (bp->link_params.req_flow_ctrl[idx] ==
11550 BNX2X_FLOW_CTRL_AUTO) {
11551 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11552 bp->link_params.req_flow_ctrl[idx] =
11553 BNX2X_FLOW_CTRL_NONE;
11554 else
11555 bnx2x_set_requested_fc(bp);
11556 }
11557
11558 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11559 bp->link_params.req_line_speed[idx],
11560 bp->link_params.req_duplex[idx],
11561 bp->link_params.req_flow_ctrl[idx],
11562 bp->port.advertising[idx]);
11563 }
11564}
11565
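/* assemble a MAC address from the upper/lower shmem words (big-endian) */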
11566static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11567{
11568 __be16 mac_hi_be = cpu_to_be16(mac_hi);
11569 __be32 mac_lo_be = cpu_to_be32(mac_lo);
11570 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11571 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11572}
11573
11574static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11575{
11576 int port = BP_PORT(bp);
11577 u32 config;
11578 u32 ext_phy_type, ext_phy_config, eee_mode;
11579
11580 bp->link_params.bp = bp;
11581 bp->link_params.port = port;
11582
11583 bp->link_params.lane_config =
11584 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11585
11586 bp->link_params.speed_cap_mask[0] =
11587 SHMEM_RD(bp,
11588 dev_info.port_hw_config[port].speed_capability_mask) &
11589 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11590 bp->link_params.speed_cap_mask[1] =
11591 SHMEM_RD(bp,
11592 dev_info.port_hw_config[port].speed_capability_mask2) &
11593 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11594 bp->port.link_config[0] =
11595 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11596
11597 bp->port.link_config[1] =
11598 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11599
11600 bp->link_params.multi_phy_config =
11601 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11602
11603 /* If the device is capable of WoL, set the default state according
11604  * to the HW */
11605 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11606 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11607 (config & PORT_FEATURE_WOL_ENABLED));
11608
11609 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11610 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11611 bp->flags |= NO_ISCSI_FLAG;
11612 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11613 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11614 bp->flags |= NO_FCOE_FLAG;
11615
11616 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
11617 bp->link_params.lane_config,
11618 bp->link_params.speed_cap_mask[0],
11619 bp->port.link_config[0]);
11620
11621 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11622 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11623 bnx2x_phy_probe(&bp->link_params);
11624 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11625
11626 bnx2x_link_settings_requested(bp);
11627
11628 /*
11629  * If connected directly, work with the internal PHY, otherwise, work
11630  * with the external PHY
11631  */
11632 ext_phy_config =
11633 SHMEM_RD(bp,
11634 dev_info.port_hw_config[port].external_phy_config);
11635 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11636 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11637 bp->mdio.prtad = bp->port.phy_addr;
11638
11639 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11640 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11641 bp->mdio.prtad =
11642 XGXS_EXT_PHY_ADDR(ext_phy_config);
11643
11644 /* Configure link feature according to nvram value */
11645 eee_mode = (((SHMEM_RD(bp, dev_info.
11646 port_feature_config[port].eee_power_mode)) &
11647 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11648 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11649 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11650 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11651 EEE_MODE_ENABLE_LPI |
11652 EEE_MODE_OUTPUT_TIME;
11653 } else {
11654 bp->link_params.eee_mode = 0;
11655 }
11656}
11657
11658void bnx2x_get_iscsi_info(struct bnx2x *bp)
11659{
11660 u32 no_flags = NO_ISCSI_FLAG;
11661 int port = BP_PORT(bp);
11662 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11663 drv_lic_key[port].max_iscsi_conn);
11664
11665 if (!CNIC_SUPPORT(bp)) {
11666 bp->flags |= no_flags;
11667 return;
11668 }
11669
11670 /* Get the number of maximum allowed iSCSI connections */
11671 bp->cnic_eth_dev.max_iscsi_conn =
11672 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11673 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11674
11675 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11676 bp->cnic_eth_dev.max_iscsi_conn);
11677
11678 /*
11679  * If maximum allowed number of connections is zero -
11680  * disable the feature.
11681  */
11682 if (!bp->cnic_eth_dev.max_iscsi_conn)
11683 bp->flags |= no_flags;
11684}
11685
11686static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11687{
11688 /* Port info */
11689 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11690 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11691 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11692 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11693
11694 /* Node info */
11695 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11696 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11697 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11698 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11699}
11700
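/* count the functions that may run FCoE - used below to share the FCoE task pool */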
11701static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11702{
11703 u8 count = 0;
11704
11705 if (IS_MF(bp)) {
11706 u8 fid;
11707
11708 /* iterate over absolute function ids for this path: */
11709 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11710 if (IS_MF_SD(bp)) {
11711 u32 cfg = MF_CFG_RD(bp,
11712 func_mf_config[fid].config);
11713
11714 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11715 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11716 FUNC_MF_CFG_PROTOCOL_FCOE))
11717 count++;
11718 } else {
11719 u32 cfg = MF_CFG_RD(bp,
11720 func_ext_config[fid].
11721 func_cfg);
11722
11723 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11724 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11725 count++;
11726 }
11727 }
11728 } else {
11729 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11730
11731 for (port = 0; port < port_cnt; port++) {
11732 u32 lic = SHMEM_RD(bp,
11733 drv_lic_key[port].max_fcoe_conn) ^
11734 FW_ENCODE_32BIT_PATTERN;
11735 if (lic)
11736 count++;
11737 }
11738 }
11739
11740 return count;
11741}
11742
11743static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11744{
11745 int port = BP_PORT(bp);
11746 int func = BP_ABS_FUNC(bp);
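/* license values are stored in shmem XORed with FW_ENCODE_32BIT_PATTERN */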
11747 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11748 drv_lic_key[port].max_fcoe_conn);
11749 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11750
11751 if (!CNIC_SUPPORT(bp)) {
11752 bp->flags |= NO_FCOE_FLAG;
11753 return;
11754 }
11755
11756 /* Get the number of maximum allowed FCoE connections */
11757 bp->cnic_eth_dev.max_fcoe_conn =
11758 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11759 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11760
11761 /* Calculate the number of maximum allowed FCoE tasks */
11762 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11763
11764 /* check if FCoE resources must be shared between different functions */
11765 if (num_fcoe_func)
11766 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11767
11768 /* Read the WWN: */
11769 if (!IS_MF(bp)) {
11770 /* Port info */
11771 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11772 SHMEM_RD(bp,
11773 dev_info.port_hw_config[port].
11774 fcoe_wwn_port_name_upper);
11775 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11776 SHMEM_RD(bp,
11777 dev_info.port_hw_config[port].
11778 fcoe_wwn_port_name_lower);
11779
11780 /* Node info */
11781 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11782 SHMEM_RD(bp,
11783 dev_info.port_hw_config[port].
11784 fcoe_wwn_node_name_upper);
11785 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11786 SHMEM_RD(bp,
11787 dev_info.port_hw_config[port].
11788 fcoe_wwn_node_name_lower);
11789 } else if (!IS_MF_SD(bp)) {
11790 /* Read the WWN info only if the FCoE feature is enabled for
11791  * this function.
11792  */
11793 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11794 bnx2x_get_ext_wwn_info(bp, func);
11795 } else {
11796 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11797 bnx2x_get_ext_wwn_info(bp, func);
11798 }
11799
11800 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11801
11802 /*
11803  * If maximum allowed number of connections is zero -
11804  * disable the feature.
11805  */
11806 if (!bp->cnic_eth_dev.max_fcoe_conn) {
11807 bp->flags |= NO_FCOE_FLAG;
11808 eth_zero_addr(bp->fip_mac);
11809 }
11810}
11811
11812static void bnx2x_get_cnic_info(struct bnx2x *bp)
11813{
11814 /*
11815  * iSCSI may be dynamically disabled, but reading the
11816  * info here will decrease memory usage by the driver
11817  * if the feature is disabled for good
11818  */
11819 bnx2x_get_iscsi_info(bp);
11820 bnx2x_get_fcoe_info(bp);
11821}
11822
11823static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11824{
11825 u32 val, val2;
11826 int func = BP_ABS_FUNC(bp);
11827 int port = BP_PORT(bp);
11828 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11829 u8 *fip_mac = bp->fip_mac;
11830
11831 if (IS_MF(bp)) {
11832 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
11833  * FCoE MAC then the appropriate feature should be disabled.
11834  * In non-SD mode the features configuration comes from struct
11835  * func_ext_config.
11836  */
11837 if (!IS_MF_SD(bp)) {
11838 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11839 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11840 val2 = MF_CFG_RD(bp, func_ext_config[func].
11841 iscsi_mac_addr_upper);
11842 val = MF_CFG_RD(bp, func_ext_config[func].
11843 iscsi_mac_addr_lower);
11844 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11845 BNX2X_DEV_INFO
11846 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11847 } else {
11848 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11849 }
11850
11851 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11852 val2 = MF_CFG_RD(bp, func_ext_config[func].
11853 fcoe_mac_addr_upper);
11854 val = MF_CFG_RD(bp, func_ext_config[func].
11855 fcoe_mac_addr_lower);
11856 bnx2x_set_mac_buf(fip_mac, val, val2);
11857 BNX2X_DEV_INFO
11858 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11859 } else {
11860 bp->flags |= NO_FCOE_FLAG;
11861 }
11862
11863 bp->mf_ext_config = cfg;
11864
11865 } else {
11866 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11867 /* use primary mac as iscsi mac */
11868 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11869
11870 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11871 BNX2X_DEV_INFO
11872 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11873 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11874 /* use primary mac as fip mac */
11875 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11876 BNX2X_DEV_INFO("SD FCoE MODE\n");
11877 BNX2X_DEV_INFO
11878 ("Read FIP MAC: %pM\n", fip_mac);
11879 }
11880 }
11881
11882 /* If this is a storage-only interface, use SAN mac as
11883  * primary MAC. Notice that for SD this is already the case,
11884  * as the SAN mac was copied from the primary MAC.
11885  */
11886 if (IS_MF_FCOE_AFEX(bp))
11887 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11888 } else {
11889 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11890 iscsi_mac_upper);
11891 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11892 iscsi_mac_lower);
11893 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11894
11895 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11896 fcoe_fip_mac_upper);
11897 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11898 fcoe_fip_mac_lower);
11899 bnx2x_set_mac_buf(fip_mac, val, val2);
11900 }
11901
11902 /* Disable iSCSI OOO if MAC configuration is invalid. */
11903 if (!is_valid_ether_addr(iscsi_mac)) {
11904 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11905 eth_zero_addr(iscsi_mac);
11906 }
11907
11908 /* Disable FCoE if MAC configuration is invalid. */
11909 if (!is_valid_ether_addr(fip_mac)) {
11910 bp->flags |= NO_FCOE_FLAG;
11911 eth_zero_addr(bp->fip_mac);
11912 }
11913}
11914
11915static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11916{
11917 u32 val, val2;
11918 int func = BP_ABS_FUNC(bp);
11919 int port = BP_PORT(bp);
11920
11921 /* Zero primary MAC configuration */
11922 eth_zero_addr(bp->dev->dev_addr);
11923
11924 if (BP_NOMCP(bp)) {
11925 BNX2X_ERROR("warning: random MAC workaround active\n");
11926 eth_hw_addr_random(bp->dev);
11927 } else if (IS_MF(bp)) {
11928 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11929 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
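/* use the MF-configured MAC only if both words differ from the 'default' (unset) value */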
11930 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11931 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11932 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11933
11934 if (CNIC_SUPPORT(bp))
11935 bnx2x_get_cnic_mac_hwinfo(bp);
11936 } else {
11937 /* in SF read MACs from port configuration */
11938 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11939 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11940 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11941
11942 if (CNIC_SUPPORT(bp))
11943 bnx2x_get_cnic_mac_hwinfo(bp);
11944 }
11945
11946 if (!BP_NOMCP(bp)) {
11947 /* Read physical port identifier from shmem */
11948 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11949 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11950 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11951 bp->flags |= HAS_PHYS_PORT_ID;
11952 }
11953
11954 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11955
11956 if (!is_valid_ether_addr(bp->dev->dev_addr))
11957 dev_err(&bp->pdev->dev,
11958 "bad Ethernet MAC address configuration: %pM\n"
11959 "change it manually before bringing up the appropriate network interface\n",
11960 bp->dev->dev_addr);
11961}
11962
11963static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11964{
11965 int tmp;
11966 u32 cfg;
11967
11968 if (IS_VF(bp))
11969 return false;
11970
11971 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11972 /* Take function: tmp = func */
11973 tmp = BP_ABS_FUNC(bp);
11974 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11975 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11976 } else {
11977 /* Take port: tmp = port */
11978 tmp = BP_PORT(bp);
11979 cfg = SHMEM_RD(bp,
11980 dev_info.port_hw_config[tmp].generic_features);
11981 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11982 }
11983 return cfg;
11984}
11985
11986static void validate_set_si_mode(struct bnx2x *bp)
11987{
11988 u8 func = BP_ABS_FUNC(bp);
11989 u32 val;
11990
11991 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11992
11993 /* check for legal mac (upper bytes) */
11994 if (val != 0xffff) {
11995 bp->mf_mode = MULTI_FUNCTION_SI;
11996 bp->mf_config[BP_VN(bp)] =
11997 MF_CFG_RD(bp, func_mf_config[func].config);
11998 } else
11999 BNX2X_DEV_INFO("illegal MAC address for SI\n");
12000}
12001
12002static int bnx2x_get_hwinfo(struct bnx2x *bp)
12003{
12004 int func = BP_ABS_FUNC(bp);
12005 int vn;
12006 u32 val = 0, val2 = 0;
12007 int rc = 0;
12008
12009 /* Validate that chip access is feasible */
12010 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
12011 dev_err(&bp->pdev->dev,
12012 "Chip read returns all Fs. Preventing probe from continuing\n");
12013 return -EINVAL;
12014 }
12015
12016 bnx2x_get_common_hwinfo(bp);
12017
12018 /*
12019  * initialize IGU parameters
12020  */
12021 if (CHIP_IS_E1x(bp)) {
12022 bp->common.int_block = INT_BLOCK_HC;
12023
12024 bp->igu_dsb_id = DEF_SB_IGU_ID;
12025 bp->igu_base_sb = 0;
12026 } else {
12027 bp->common.int_block = INT_BLOCK_IGU;
12028
12029 /* do not allow device reset during IGU info processing */
12030 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
12031
12032 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
12033
12034 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
12035 int tout = 5000;
12036
12037 BNX2X_DEV_INFO("FORCING Normal Mode\n");
12038
12039 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
12040 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
12041 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
12042
12043 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
12044 tout--;
12045 usleep_range(1000, 2000);
12046 }
12047
12048 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
12049 dev_err(&bp->pdev->dev,
12050 "FORCING Normal Mode failed!!!\n");
12051 bnx2x_release_hw_lock(bp,
12052 HW_LOCK_RESOURCE_RESET);
12053 return -EPERM;
12054 }
12055 }
12056
12057 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
12058 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
12059 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
12060 } else
12061 BNX2X_DEV_INFO("IGU Normal Mode\n");
12062
12063 rc = bnx2x_get_igu_cam_info(bp);
12064 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
12065 if (rc)
12066 return rc;
12067 }
12068
12069 /*
12070  * set base FW non-default (fast path) status block id, this value is
12071  * used to initialize the fw_sb_id saved on the fp/queue structure to
12072  * determine the id used by the FW.
12073  */
12074 if (CHIP_IS_E1x(bp))
12075 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
12076 else
12077 /* 57712+ - we currently use one FW SB per IGU SB (Rx and Tx of
12078  * the same queue are indicated on the same IGU SB). So we prefer
12079  * FW and IGU SBs to be the same value.
12080  */
12081 bp->base_fw_ndsb = bp->igu_base_sb;
12082
12083 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
12084 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
12085 bp->igu_sb_cnt, bp->base_fw_ndsb);
12086
12087 /*
12088  * Initialize MF configuration
12089  */
12090 bp->mf_ov = 0;
12091 bp->mf_mode = 0;
12092 bp->mf_sub_mode = 0;
12093 vn = BP_VN(bp);
12094
12095 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12096 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
12097 bp->common.shmem2_base, SHMEM2_RD(bp, size),
12098 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
12099
12100 if (SHMEM2_HAS(bp, mf_cfg_addr))
12101 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12102 else
12103 bp->common.mf_cfg_base = bp->common.shmem_base +
12104 offsetof(struct shmem_region, func_mb) +
12105 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
12106 /*
12107  * get mf configuration:
12108  * 1. Existence of MF configuration
12109  * 2. MAC address must be legal (check only upper bytes)
12110  *    for  Switch-Independent mode;
12111  *    OVLAN must be legal for Switch-Dependent mode
12112  * 3. SF_MODE configures specific MF mode
12113  */
12114 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12115 /* get mf configuration */
12116 val = SHMEM_RD(bp,
12117 dev_info.shared_feature_config.config);
12118 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
12119
12120 switch (val) {
12121 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
12122 validate_set_si_mode(bp);
12123 break;
12124 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
12125 if ((!CHIP_IS_E1x(bp)) &&
12126 (MF_CFG_RD(bp, func_mf_config[func].
12127 mac_upper) != 0xffff) &&
12128 (SHMEM2_HAS(bp,
12129 afex_driver_support))) {
12130 bp->mf_mode = MULTI_FUNCTION_AFEX;
12131 bp->mf_config[vn] = MF_CFG_RD(bp,
12132 func_mf_config[func].config);
12133 } else {
12134 BNX2X_DEV_INFO("can not configure afex mode\n");
12135 }
12136 break;
12137 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
12138 /* get OV configuration */
12139 val = MF_CFG_RD(bp,
12140 func_mf_config[FUNC_0].e1hov_tag);
12141 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12142
12143 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12144 bp->mf_mode = MULTI_FUNCTION_SD;
12145 bp->mf_config[vn] = MF_CFG_RD(bp,
12146 func_mf_config[func].config);
12147 } else
12148 BNX2X_DEV_INFO("illegal OV for SD\n");
12149 break;
12150 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12151 bp->mf_mode = MULTI_FUNCTION_SD;
12152 bp->mf_sub_mode = SUB_MF_MODE_BD;
12153 bp->mf_config[vn] =
12154 MF_CFG_RD(bp,
12155 func_mf_config[func].config);
12156
12157 if (SHMEM2_HAS(bp, mtu_size)) {
12158 int mtu_idx = BP_FW_MB_IDX(bp);
12159 u16 mtu_size;
12160 u32 mtu;
12161
12162 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12163 mtu_size = (u16)mtu;
12164 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12165 mtu_size, mtu);
12166
12167 /* if valid: update device mtu */
12168 if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
12169 (mtu_size <=
12170 ETH_MAX_JUMBO_PACKET_SIZE))
12171 bp->dev->mtu = mtu_size;
12172 }
12173 break;
12174 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12175 bp->mf_mode = MULTI_FUNCTION_SD;
12176 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12177 bp->mf_config[vn] =
12178 MF_CFG_RD(bp,
12179 func_mf_config[func].config);
12180 break;
12181 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12182 bp->mf_config[vn] = 0;
12183 break;
12184 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12185 val2 = SHMEM_RD(bp,
12186 dev_info.shared_hw_config.config_3);
12187 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12188 switch (val2) {
12189 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12190 validate_set_si_mode(bp);
12191 bp->mf_sub_mode =
12192 SUB_MF_MODE_NPAR1_DOT_5;
12193 break;
12194 default:
12195 /* Unknown configuration: combine with SF */
12196 bp->mf_config[vn] = 0;
12197 BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
12198 val);
12199 }
12200 break;
12201 default:
12202 /* Unknown configuration: combine with SF */
12203 bp->mf_config[vn] = 0;
12204 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12205 }
12206 }
12207
12208 BNX2X_DEV_INFO("%s function mode\n",
12209 IS_MF(bp) ? "multi" : "single");
12210
12211 switch (bp->mf_mode) {
12212 case MULTI_FUNCTION_SD:
12213 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12214 FUNC_MF_CFG_E1HOV_TAG_MASK;
12215 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12216 bp->mf_ov = val;
12217 bp->path_has_ovlan = true;
12218
12219 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12220 func, bp->mf_ov, bp->mf_ov);
12221 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12222 (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12223 dev_err(&bp->pdev->dev,
12224 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12225 func);
12226 bp->path_has_ovlan = true;
12227 } else {
12228 dev_err(&bp->pdev->dev,
12229 "No valid MF OV for func %d, aborting\n",
12230 func);
12231 return -EPERM;
12232 }
12233 break;
12234 case MULTI_FUNCTION_AFEX:
12235 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12236 break;
12237 case MULTI_FUNCTION_SI:
12238 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12239 func);
12240 break;
12241 default:
12242 if (vn) {
12243 dev_err(&bp->pdev->dev,
12244 "VN %d is in a single function mode, aborting\n",
12245 vn);
12246 return -EPERM;
12247 }
12248 break;
12249 }
12250
12251 /* In 4-port mode a path may carry an outer VLAN even if this
12252  * function runs in SF mode: check whether the function on the
12253  * other port of the same path has a valid OV tag configured.
12254  */
12255
12256 if (CHIP_MODE_IS_4_PORT(bp) &&
12257 !bp->path_has_ovlan &&
12258 !IS_MF(bp) &&
12259 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12260 u8 other_port = !BP_PORT(bp);
12261 u8 other_func = BP_PATH(bp) + 2*other_port;
12262 val = MF_CFG_RD(bp,
12263 func_mf_config[other_func].e1hov_tag);
12264 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12265 bp->path_has_ovlan = true;
12266 }
12267 }
12268
12269
12270 if (CHIP_IS_E1H(bp) && IS_MF(bp))
12271 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12272
12273
12274 bnx2x_get_port_hwinfo(bp);
12275
12276
12277 bnx2x_get_mac_hwinfo(bp);
12278
12279 bnx2x_get_cnic_info(bp);
12280
12281 return rc;
12282}
12283
12284static void bnx2x_read_fwinfo(struct bnx2x *bp)
12285{
12286 int cnt, i, block_end, rodi;
12287 char vpd_start[BNX2X_VPD_LEN+1];
12288 char str_id_reg[VENDOR_ID_LEN+1];
12289 char str_id_cap[VENDOR_ID_LEN+1];
12290 char *vpd_data;
12291 char *vpd_extended_data = NULL;
12292 u8 len;
12293
12294 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
12295 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12296
12297 if (cnt < BNX2X_VPD_LEN)
12298 goto out_not_found;
12299
12300 /* VPD RO tag should be first tag after identifier string, hence
12301  * we should be able to find it in first BNX2X_VPD_LEN chars
12302  */
12303 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
12304 PCI_VPD_LRDT_RO_DATA);
12305 if (i < 0)
12306 goto out_not_found;
12307
12308 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
12309 pci_vpd_lrdt_size(&vpd_start[i]);
12310
12311 i += PCI_VPD_LRDT_TAG_SIZE;
12312
12313 if (block_end > BNX2X_VPD_LEN) {
12314 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
12315 if (vpd_extended_data == NULL)
12316 goto out_not_found;
12317
12318 /* read rest of vpd image into vpd_extended_data */
12319 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
12320 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
12321 block_end - BNX2X_VPD_LEN,
12322 vpd_extended_data + BNX2X_VPD_LEN);
12323 if (cnt < (block_end - BNX2X_VPD_LEN))
12324 goto out_not_found;
12325 vpd_data = vpd_extended_data;
12326 } else
12327 vpd_data = vpd_start;
12328
12329 /* now vpd_data holds full vpd content in both cases */
12330
12331 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12332 PCI_VPD_RO_KEYWORD_MFR_ID);
12333 if (rodi < 0)
12334 goto out_not_found;
12335
12336 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12337
12338 if (len != VENDOR_ID_LEN)
12339 goto out_not_found;
12340
12341 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12342
12343 /* vendor specific info */
12344 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12345 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
12346 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
12347 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
12348
12349 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12350 PCI_VPD_RO_KEYWORD_VENDOR0);
12351 if (rodi >= 0) {
12352 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12353
12354 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12355
12356 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
12357 memcpy(bp->fw_ver, &vpd_data[rodi], len);
12358 bp->fw_ver[len] = ' ';
12359 }
12360 }
12361 kfree(vpd_extended_data);
12362 return;
12363 }
12364out_not_found:
12365 kfree(vpd_extended_data);
12366 return;
12367}
12368
12369static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12370{
12371 u32 flags = 0;
12372
12373 if (CHIP_REV_IS_FPGA(bp))
12374 SET_FLAGS(flags, MODE_FPGA);
12375 else if (CHIP_REV_IS_EMUL(bp))
12376 SET_FLAGS(flags, MODE_EMUL);
12377 else
12378 SET_FLAGS(flags, MODE_ASIC);
12379
12380 if (CHIP_MODE_IS_4_PORT(bp))
12381 SET_FLAGS(flags, MODE_PORT4);
12382 else
12383 SET_FLAGS(flags, MODE_PORT2);
12384
12385 if (CHIP_IS_E2(bp))
12386 SET_FLAGS(flags, MODE_E2);
12387 else if (CHIP_IS_E3(bp)) {
12388 SET_FLAGS(flags, MODE_E3);
12389 if (CHIP_REV(bp) == CHIP_REV_Ax)
12390 SET_FLAGS(flags, MODE_E3_A0);
12391 else
12392 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12393 }
12394
12395 if (IS_MF(bp)) {
12396 SET_FLAGS(flags, MODE_MF);
12397 switch (bp->mf_mode) {
12398 case MULTI_FUNCTION_SD:
12399 SET_FLAGS(flags, MODE_MF_SD);
12400 break;
12401 case MULTI_FUNCTION_SI:
12402 SET_FLAGS(flags, MODE_MF_SI);
12403 break;
12404 case MULTI_FUNCTION_AFEX:
12405 SET_FLAGS(flags, MODE_MF_AFEX);
12406 break;
12407 }
12408 } else
12409 SET_FLAGS(flags, MODE_SF);
12410
12411#if defined(__LITTLE_ENDIAN)
12412 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12413#else
12414 SET_FLAGS(flags, MODE_BIG_ENDIAN);
12415#endif
12416 INIT_MODE_FLAGS(bp) = flags;
12417}
12418
12419static int bnx2x_init_bp(struct bnx2x *bp)
12420{
12421 int func;
12422 int rc;
12423
12424 mutex_init(&bp->port.phy_mutex);
12425 mutex_init(&bp->fw_mb_mutex);
12426 mutex_init(&bp->drv_info_mutex);
12427 sema_init(&bp->stats_lock, 1);
12428 bp->drv_info_mng_owner = false;
12429 INIT_LIST_HEAD(&bp->vlan_reg);
12430
12431 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12432 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12433 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12434 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12435 if (IS_PF(bp)) {
12436 rc = bnx2x_get_hwinfo(bp);
12437 if (rc)
12438 return rc;
12439 } else {
12440 eth_zero_addr(bp->dev->dev_addr);
12441 }
12442
12443 bnx2x_set_modes_bitmap(bp);
12444
12445 rc = bnx2x_alloc_mem_bp(bp);
12446 if (rc)
12447 return rc;
12448
12449 bnx2x_read_fwinfo(bp);
12450
12451 func = BP_FUNC(bp);
12452
12453 /* need to reset chip if undi was active */
12454 if (IS_PF(bp) && !BP_NOMCP(bp)) {
12455 /* init fw_seq */
12456 bp->fw_seq =
12457 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12458 DRV_MSG_SEQ_NUMBER_MASK;
12459 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12460
12461 rc = bnx2x_prev_unload(bp);
12462 if (rc) {
12463 bnx2x_free_mem_bp(bp);
12464 return rc;
12465 }
12466 }
12467
12468 if (CHIP_REV_IS_FPGA(bp))
12469 dev_err(&bp->pdev->dev, "FPGA detected\n");
12470
12471 if (BP_NOMCP(bp) && (func == 0))
12472 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12473
12474 bp->disable_tpa = disable_tpa;
12475 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
12476 /* Reduce memory usage in kdump environment by disabling TPA */
12477 bp->disable_tpa |= is_kdump_kernel();
12478
12479 /* Set TPA flags */
12480 if (bp->disable_tpa) {
12481 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12482 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12483 }
12484
12485 if (CHIP_IS_E1(bp))
12486 bp->dropless_fc = 0;
12487 else
12488 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12489
12490 bp->mrrs = mrrs;
12491
12492 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12493 if (IS_VF(bp))
12494 bp->rx_ring_size = MAX_RX_AVAIL;
12495
12496 /* make sure that the numbers are in the right granularity */
12497 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12498 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12499
12500 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12501
12502 timer_setup(&bp->timer, bnx2x_timer, 0);
12503 bp->timer.expires = jiffies + bp->current_interval;
12504
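/* enable DCBX only if all LLDP/DCBX shmem2 offsets are present and DCBX is enabled for this port */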
12505 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12506 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12507 SHMEM2_HAS(bp, dcbx_en) &&
12508 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12509 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12510 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12511 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12512 bnx2x_dcbx_init_params(bp);
12513 } else {
12514 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12515 }
12516
12517 if (CHIP_IS_E1x(bp))
12518 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12519 else
12520 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12521
12522 /* multiple tx priority */
12523 if (IS_VF(bp))
12524 bp->max_cos = 1;
12525 else if (CHIP_IS_E1x(bp))
12526 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12527 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12528 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12529 else if (CHIP_IS_E3B0(bp))
12530 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12531 else
12532 BNX2X_ERR("unknown chip %x revision %x\n",
12533 CHIP_NUM(bp), CHIP_REV(bp));
12534 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12535
12536 /* We need at least one default status block for slow-path events,
12537  * a second status block for the L2 queue, and a third status block
12538  * for CNIC if supported.
12539  */
12540 if (IS_VF(bp))
12541 bp->min_msix_vec_cnt = 1;
12542 else if (CNIC_SUPPORT(bp))
12543 bp->min_msix_vec_cnt = 3;
12544 else
12545 bp->min_msix_vec_cnt = 2;
12546 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
12547
12548 bp->dump_preset_idx = 1;
12549
12550 return rc;
12551}
12552
12553 /****************************************************************************
12554 * General service functions
12555 ****************************************************************************/
12556
12557 /* net_device service functions */
12558
12559
12560
12561 /* called with rtnl_lock */
12562static int bnx2x_open(struct net_device *dev)
12563{
12564 struct bnx2x *bp = netdev_priv(dev);
12565 int rc;
12566
12567 bp->stats_init = true;
12568
12569 netif_carrier_off(dev);
12570
12571 bnx2x_set_power_state(bp, PCI_D0);
12572
12573 /* If parity had happened during the unload, then attentions
12574  * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
12575  * want the first function loaded on the current engine to
12576  * complete the recovery.
12577  * Parity recovery is only relevant for PF driver.
12578  */
12579 if (IS_PF(bp)) {
12580 int other_engine = BP_PATH(bp) ? 0 : 1;
12581 bool other_load_status, load_status;
12582 bool global = false;
12583
12584 other_load_status = bnx2x_get_load_status(bp, other_engine);
12585 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12586 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12587 bnx2x_chk_parity_attn(bp, &global, true)) {
12588 do {
12589 /* If there are attentions and they are in
12590  * global blocks, set the GLOBAL_RESET bit
12591  * regardless of whether it will be this function
12592  * that will complete the recovery or not.
12593  */
12594 if (global)
12595 bnx2x_set_reset_global(bp);
12596
12597 /* Only the first function on the current
12598  * engine should try to recover in open. In case
12599  * of attentions in global blocks only the first
12600  * in the chip should try to recover.
12601  */
12602 if ((!load_status &&
12603 (!global || !other_load_status)) &&
12604 bnx2x_trylock_leader_lock(bp) &&
12605 !bnx2x_leader_reset(bp)) {
12606 netdev_info(bp->dev,
12607 "Recovered in open\n");
12608 break;
12609 }
12610
12611 /* recovery has failed... */
12612 bnx2x_set_power_state(bp, PCI_D3hot);
12613 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12614
12615 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12616 "If you still see this message after a few retries then power cycle is required.\n");
12617
12618 return -EAGAIN;
12619 } while (0);
12620 }
12621 }
12622
12623 bp->recovery_state = BNX2X_RECOVERY_DONE;
12624 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12625 if (rc)
12626 return rc;
12627
12628 if (IS_PF(bp))
12629 udp_tunnel_get_rx_info(dev);
12630
12631 return 0;
12632}
12633
12634 /* called with rtnl_lock */
12635static int bnx2x_close(struct net_device *dev)
12636{
12637 struct bnx2x *bp = netdev_priv(dev);
12638
12639 /* Unload the driver, release IRQs */
12640 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12641
12642 return 0;
12643}
12644
12645struct bnx2x_mcast_list_elem_group
12646{
12647 struct list_head mcast_group_link;
12648 struct bnx2x_mcast_list_elem mcast_elems[];
12649};
12650
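/* number of multicast list elements that fit in one page-backed group */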
12651#define MCAST_ELEMS_PER_PG \
12652 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12653 sizeof(struct bnx2x_mcast_list_elem))
12654
12655static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
12656{
12657 struct bnx2x_mcast_list_elem_group *current_mcast_group;
12658
12659 while (!list_empty(mcast_group_list)) {
12660 current_mcast_group = list_first_entry(mcast_group_list,
12661 struct bnx2x_mcast_list_elem_group,
12662 mcast_group_link);
12663 list_del(&current_mcast_group->mcast_group_link);
12664 free_page((unsigned long)current_mcast_group);
12665 }
12666}
12667
12668static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12669 struct bnx2x_mcast_ramrod_params *p,
12670 struct list_head *mcast_group_list)
12671{
12672 struct bnx2x_mcast_list_elem *mc_mac;
12673 struct netdev_hw_addr *ha;
12674 struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
12675 int mc_count = netdev_mc_count(bp->dev);
12676 int offset = 0;
12677
12678 INIT_LIST_HEAD(&p->mcast_list);
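/* pack the device's mc addresses into page-sized element groups */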
12679 netdev_for_each_mc_addr(ha, bp->dev) {
12680 if (!offset) {
12681 current_mcast_group =
12682 (struct bnx2x_mcast_list_elem_group *)
12683 __get_free_page(GFP_ATOMIC);
12684 if (!current_mcast_group) {
12685 bnx2x_free_mcast_macs_list(mcast_group_list);
12686 BNX2X_ERR("Failed to allocate mc MAC list\n");
12687 return -ENOMEM;
12688 }
12689 list_add(&current_mcast_group->mcast_group_link,
12690 mcast_group_list);
12691 }
12692 mc_mac = &current_mcast_group->mcast_elems[offset];
12693 mc_mac->mac = bnx2x_mc_addr(ha);
12694 list_add_tail(&mc_mac->link, &p->mcast_list);
12695 offset++;
12696 if (offset == MCAST_ELEMS_PER_PG)
12697 offset = 0;
12698 }
12699 p->mcast_list_len = mc_count;
12700 return 0;
12701}
12702
12703 /**
12704  * bnx2x_set_uc_list - configure a new unicast MACs list.
12705  *
12706  * @bp: driver handle
12707  *
12708  * We will use zero (0) as a MAC type for these MACs.
12709  */
12710static int bnx2x_set_uc_list(struct bnx2x *bp)
12711{
12712 int rc;
12713 struct net_device *dev = bp->dev;
12714 struct netdev_hw_addr *ha;
12715 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12716 unsigned long ramrod_flags = 0;
12717
12718 /* first schedule a cleanup of the old configuration */
12719 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12720 if (rc < 0) {
12721 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12722 return rc;
12723 }
12724
12725 netdev_for_each_uc_addr(ha, dev) {
12726 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12727 BNX2X_UC_LIST_MAC, &ramrod_flags);
12728 if (rc == -EEXIST) {
12729 DP(BNX2X_MSG_SP,
12730 "Failed to schedule ADD operations: %d\n", rc);
12731 /* do not treat adding same MAC as error */
12732 rc = 0;
12733
12734 } else if (rc < 0) {
12735
12736 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12737 rc);
12738 return rc;
12739 }
12740 }
12741
12742 /* Execute the pending commands */
12743 __set_bit(RAMROD_CONT, &ramrod_flags);
12744 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12745 BNX2X_UC_LIST_MAC, &ramrod_flags);
12746}
12747
12748static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
12749{
12750 LIST_HEAD(mcast_group_list);
12751 struct net_device *dev = bp->dev;
12752 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12753 int rc = 0;
12754
12755 rparam.mcast_obj = &bp->mcast_obj;
12756
12757 /* first, clear all configured multicast MACs */
12758 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12759 if (rc < 0) {
12760 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12761 return rc;
12762 }
12763
12764 /* then, configure a new MACs list */
12765 if (netdev_mc_count(dev)) {
12766 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12767 if (rc)
12768 return rc;
12769
12770 /* Now add the new MACs */
12771 rc = bnx2x_config_mcast(bp, &rparam,
12772 BNX2X_MCAST_CMD_ADD);
12773 if (rc < 0)
12774 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12775 rc);
12776
12777 bnx2x_free_mcast_macs_list(&mcast_group_list);
12778 }
12779
12780 return rc;
12781}
12782
12783static int bnx2x_set_mc_list(struct bnx2x *bp)
12784{
12785 LIST_HEAD(mcast_group_list);
12786 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12787 struct net_device *dev = bp->dev;
12788 int rc = 0;
12789
12790 /* On older adapters, we need to flush and re-add filters */
12791 if (CHIP_IS_E1x(bp))
12792 return bnx2x_set_mc_list_e1x(bp);
12793
12794 rparam.mcast_obj = &bp->mcast_obj;
12795
12796 if (netdev_mc_count(dev)) {
12797 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12798 if (rc)
12799 return rc;
12800
12801 /* Override the currently configured set of mc filters */
12802 rc = bnx2x_config_mcast(bp, &rparam,
12803 BNX2X_MCAST_CMD_SET);
12804 if (rc < 0)
12805 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12806 rc);
12807
12808 bnx2x_free_mcast_macs_list(&mcast_group_list);
12809 } else {
12810 /* If no mc addresses are required, flush the configuration */
12811 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12812 if (rc < 0)
12813 BNX2X_ERR("Failed to clear multicast configuration %d\n",
12814 rc);
12815 }
12816
12817 return rc;
12818}
12819
12820 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12821static void bnx2x_set_rx_mode(struct net_device *dev)
12822{
12823 struct bnx2x *bp = netdev_priv(dev);
12824
12825 if (bp->state != BNX2X_STATE_OPEN) {
12826 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12827 return;
12828 } else {
12829 /* Schedule an SP task to handle rest of change */
12830 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12831 NETIF_MSG_IFUP);
12832 }
12833}
12834
12835void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12836{
12837 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12838
12839 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12840
12841 netif_addr_lock_bh(bp->dev);
12842
12843 if (bp->dev->flags & IFF_PROMISC) {
12844 rx_mode = BNX2X_RX_MODE_PROMISC;
12845 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12846 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12847 CHIP_IS_E1(bp))) {
12848 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12849 } else {
12850 if (IS_PF(bp)) {
12851 /* some multicasts */
12852 if (bnx2x_set_mc_list(bp) < 0)
12853 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12854
12855 /* release bh lock, as bnx2x_set_uc_list might sleep */
12856 netif_addr_unlock_bh(bp->dev);
12857 if (bnx2x_set_uc_list(bp) < 0)
12858 rx_mode = BNX2X_RX_MODE_PROMISC;
12859 netif_addr_lock_bh(bp->dev);
12860 } else {
12861
			/* configuring mcast to a vf involves sleeping (when we
			 * wait for the pf's response).
			 */
12864 bnx2x_schedule_sp_rtnl(bp,
12865 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12866 }
12867 }
12868
12869 bp->rx_mode = rx_mode;
12870
12871 if (IS_MF_ISCSI_ONLY(bp))
12872 bp->rx_mode = BNX2X_RX_MODE_NONE;
12873
	/* Schedule the rx_mode command */
12875 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12876 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12877 netif_addr_unlock_bh(bp->dev);
12878 return;
12879 }
12880
12881 if (IS_PF(bp)) {
12882 bnx2x_set_storm_rx_mode(bp);
12883 netif_addr_unlock_bh(bp->dev);
12884 } else {
12885
		/* VF will need to request the PF to make this change, and so
		 * the VF needs to release the bottom-half lock prior to the
		 * request (as it will likely require sleep on the VF side)
		 */
12889 netif_addr_unlock_bh(bp->dev);
12890 bnx2x_vfpf_storm_rx_mode(bp);
12891 }
12892}
12893
/* called with rtnl_lock */
12895static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12896 int devad, u16 addr)
12897{
12898 struct bnx2x *bp = netdev_priv(netdev);
12899 u16 value;
12900 int rc;
12901
12902 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12903 prtad, devad, addr);
12904
	/* The HW expects different devad if CL22 is used */
12906 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12907
12908 bnx2x_acquire_phy_lock(bp);
12909 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12910 bnx2x_release_phy_lock(bp);
12911 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12912
12913 if (!rc)
12914 rc = value;
12915 return rc;
12916}
12917
/* called with rtnl_lock */
12919static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12920 u16 addr, u16 value)
12921{
12922 struct bnx2x *bp = netdev_priv(netdev);
12923 int rc;
12924
12925 DP(NETIF_MSG_LINK,
12926 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12927 prtad, devad, addr, value);
12928
	/* The HW expects different devad if CL22 is used */
12930 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12931
12932 bnx2x_acquire_phy_lock(bp);
12933 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12934 bnx2x_release_phy_lock(bp);
12935 return rc;
12936}
12937
/* called with rtnl_lock */
12939static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12940{
12941 struct bnx2x *bp = netdev_priv(dev);
12942 struct mii_ioctl_data *mdio = if_mii(ifr);
12943
12944 if (!netif_running(dev))
12945 return -EAGAIN;
12946
12947 switch (cmd) {
12948 case SIOCSHWTSTAMP:
12949 return bnx2x_hwtstamp_ioctl(bp, ifr);
12950 default:
12951 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12952 mdio->phy_id, mdio->reg_num, mdio->val_in);
12953 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12954 }
12955}
12956
12957static int bnx2x_validate_addr(struct net_device *dev)
12958{
12959 struct bnx2x *bp = netdev_priv(dev);
12960
	/* query the bulletin board for mac address configured by the PF */
12962 if (IS_VF(bp))
12963 bnx2x_sample_bulletin(bp);
12964
12965 if (!is_valid_ether_addr(dev->dev_addr)) {
12966 BNX2X_ERR("Non-valid Ethernet address\n");
12967 return -EADDRNOTAVAIL;
12968 }
12969 return 0;
12970}
12971
12972static int bnx2x_get_phys_port_id(struct net_device *netdev,
12973 struct netdev_phys_item_id *ppid)
12974{
12975 struct bnx2x *bp = netdev_priv(netdev);
12976
12977 if (!(bp->flags & HAS_PHYS_PORT_ID))
12978 return -EOPNOTSUPP;
12979
12980 ppid->id_len = sizeof(bp->phys_port_id);
12981 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12982
12983 return 0;
12984}
12985
12986static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12987 struct net_device *dev,
12988 netdev_features_t features)
12989{
	/* A skb with gso_size + header length > 9700 will cause a
	 * firmware panic, so disable GSO for such packets. For speed,
	 * only skbs with gso_size larger than 9000 are validated,
	 * assuming headers otherwise stay below 700 bytes.
	 */
13003 if (unlikely(skb_is_gso(skb) &&
13004 (skb_shinfo(skb)->gso_size > 9000) &&
13005 !skb_gso_validate_mac_len(skb, 9700)))
13006 features &= ~NETIF_F_GSO_MASK;
13007
13008 features = vlan_features_check(skb, features);
13009 return vxlan_features_check(skb, features);
13010}
13011
13012static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
13013{
13014 int rc;
13015
13016 if (IS_PF(bp)) {
13017 unsigned long ramrod_flags = 0;
13018
13019 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13020 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
13021 add, &ramrod_flags);
13022 } else {
13023 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
13024 }
13025
13026 return rc;
13027}
13028
13029static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
13030{
13031 struct bnx2x_vlan_entry *vlan;
13032 int rc = 0;
13033
	/* Configure all non-configured entries */
13035 list_for_each_entry(vlan, &bp->vlan_reg, link) {
13036 if (vlan->hw)
13037 continue;
13038
13039 if (bp->vlan_cnt >= bp->vlan_credit)
13040 return -ENOBUFS;
13041
13042 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
13043 if (rc) {
13044 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
13045 return rc;
13046 }
13047
13048 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
13049 vlan->hw = true;
13050 bp->vlan_cnt++;
13051 }
13052
13053 return 0;
13054}
13055
13056static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
13057{
13058 bool need_accept_any_vlan;
13059
13060 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
13061
13062 if (bp->accept_any_vlan != need_accept_any_vlan) {
13063 bp->accept_any_vlan = need_accept_any_vlan;
13064 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
13065 bp->accept_any_vlan ? "raised" : "cleared");
13066 if (set_rx_mode) {
13067 if (IS_PF(bp))
13068 bnx2x_set_rx_mode_inner(bp);
13069 else
13070 bnx2x_vfpf_storm_rx_mode(bp);
13071 }
13072 }
13073}
13074
13075int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
13076{
13077
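	/* Don't set rx mode here. Our caller will do it. */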
13078 bnx2x_vlan_configure(bp, false);
13079
13080 return 0;
13081}
13082
13083static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
13084{
13085 struct bnx2x *bp = netdev_priv(dev);
13086 struct bnx2x_vlan_entry *vlan;
13087
13088 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
13089
13090 vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
13091 if (!vlan)
13092 return -ENOMEM;
13093
13094 vlan->vid = vid;
13095 vlan->hw = false;
13096 list_add_tail(&vlan->link, &bp->vlan_reg);
13097
13098 if (netif_running(dev))
13099 bnx2x_vlan_configure(bp, true);
13100
13101 return 0;
13102}
13103
13104static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
13105{
13106 struct bnx2x *bp = netdev_priv(dev);
13107 struct bnx2x_vlan_entry *vlan;
13108 bool found = false;
13109 int rc = 0;
13110
13111 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
13112
13113 list_for_each_entry(vlan, &bp->vlan_reg, link)
13114 if (vlan->vid == vid) {
13115 found = true;
13116 break;
13117 }
13118
13119 if (!found) {
13120 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
13121 return -EINVAL;
13122 }
13123
13124 if (netif_running(dev) && vlan->hw) {
13125 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
13126 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
13127 bp->vlan_cnt--;
13128 }
13129
13130 list_del(&vlan->link);
13131 kfree(vlan);
13132
13133 if (netif_running(dev))
13134 bnx2x_vlan_configure(bp, true);
13135
13136 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
13137
13138 return rc;
13139}
13140
13141static const struct net_device_ops bnx2x_netdev_ops = {
13142 .ndo_open = bnx2x_open,
13143 .ndo_stop = bnx2x_close,
13144 .ndo_start_xmit = bnx2x_start_xmit,
13145 .ndo_select_queue = bnx2x_select_queue,
13146 .ndo_set_rx_mode = bnx2x_set_rx_mode,
13147 .ndo_set_mac_address = bnx2x_change_mac_addr,
13148 .ndo_validate_addr = bnx2x_validate_addr,
13149 .ndo_do_ioctl = bnx2x_ioctl,
13150 .ndo_change_mtu = bnx2x_change_mtu,
13151 .ndo_fix_features = bnx2x_fix_features,
13152 .ndo_set_features = bnx2x_set_features,
13153 .ndo_tx_timeout = bnx2x_tx_timeout,
13154 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
13155 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
13156 .ndo_setup_tc = __bnx2x_setup_tc,
13157#ifdef CONFIG_BNX2X_SRIOV
13158 .ndo_set_vf_mac = bnx2x_set_vf_mac,
13159 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
13160 .ndo_get_vf_config = bnx2x_get_vf_config,
13161 .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk,
13162#endif
13163#ifdef NETDEV_FCOE_WWNN
13164 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
13165#endif
13166
13167 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
13168 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
13169 .ndo_features_check = bnx2x_features_check,
13170 .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add,
13171 .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del,
13172};
13173
13174static int bnx2x_set_coherency_mask(struct bnx2x *bp)
13175{
13176 struct device *dev = &bp->pdev->dev;
13177
13178 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
13179 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
13180 dev_err(dev, "System does not support DMA, aborting\n");
13181 return -EIO;
13182 }
13183
13184 return 0;
13185}
13186
13187static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13188{
13189 if (bp->flags & AER_ENABLED) {
13190 pci_disable_pcie_error_reporting(bp->pdev);
13191 bp->flags &= ~AER_ENABLED;
13192 }
13193}
13194
13195static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13196 struct net_device *dev, unsigned long board_type)
13197{
13198 int rc;
13199 u32 pci_cfg_dword;
13200 bool chip_is_e1x = (board_type == BCM57710 ||
13201 board_type == BCM57711 ||
13202 board_type == BCM57711E);
13203
13204 SET_NETDEV_DEV(dev, &pdev->dev);
13205
13206 bp->dev = dev;
13207 bp->pdev = pdev;
13208
13209 rc = pci_enable_device(pdev);
13210 if (rc) {
13211 dev_err(&bp->pdev->dev,
13212 "Cannot enable PCI device, aborting\n");
13213 goto err_out;
13214 }
13215
13216 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13217 dev_err(&bp->pdev->dev,
13218 "Cannot find PCI device base address, aborting\n");
13219 rc = -ENODEV;
13220 goto err_out_disable;
13221 }
13222
13223 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13224 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13225 rc = -ENODEV;
13226 goto err_out_disable;
13227 }
13228
13229 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13230 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13231 PCICFG_REVESION_ID_ERROR_VAL) {
13232 pr_err("PCI device error, probably due to fan failure, aborting\n");
13233 rc = -ENODEV;
13234 goto err_out_disable;
13235 }
13236
13237 if (atomic_read(&pdev->enable_cnt) == 1) {
13238 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13239 if (rc) {
13240 dev_err(&bp->pdev->dev,
13241 "Cannot obtain PCI resources, aborting\n");
13242 goto err_out_disable;
13243 }
13244
13245 pci_set_master(pdev);
13246 pci_save_state(pdev);
13247 }
13248
13249 if (IS_PF(bp)) {
13250 if (!pdev->pm_cap) {
13251 dev_err(&bp->pdev->dev,
13252 "Cannot find power management capability, aborting\n");
13253 rc = -EIO;
13254 goto err_out_release;
13255 }
13256 }
13257
13258 if (!pci_is_pcie(pdev)) {
13259 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13260 rc = -EIO;
13261 goto err_out_release;
13262 }
13263
13264 rc = bnx2x_set_coherency_mask(bp);
13265 if (rc)
13266 goto err_out_release;
13267
13268 dev->mem_start = pci_resource_start(pdev, 0);
13269 dev->base_addr = dev->mem_start;
13270 dev->mem_end = pci_resource_end(pdev, 0);
13271
13272 dev->irq = pdev->irq;
13273
13274 bp->regview = pci_ioremap_bar(pdev, 0);
13275 if (!bp->regview) {
13276 dev_err(&bp->pdev->dev,
13277 "Cannot map register space, aborting\n");
13278 rc = -ENOMEM;
13279 goto err_out_release;
13280 }
13281
	/* In E1/E1H use pci device function given by kernel.
	 * In E2/E3 read physical function from ME register since these chips
	 * support Physical Device Assignment where kernel BDF may be arbitrary
	 * (depends on hypervisor).
	 */
13287 if (chip_is_e1x) {
13288 bp->pf_num = PCI_FUNC(pdev->devfn);
13289 } else {
13290
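		/* chip is E2/3 */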
13291 pci_read_config_dword(bp->pdev,
13292 PCICFG_ME_REGISTER, &pci_cfg_dword);
13293 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13294 ME_REG_ABS_PF_NUM_SHIFT);
13295 }
13296 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
13297
	/* clean indirect addresses */
13299 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13300 PCICFG_VENDOR_ID_OFFSET);
13301
	/* Set PCIe reset type to fundamental for EEH recovery */
13303 pdev->needs_freset = 1;
13304
	/* AER (Advanced Error reporting) configuration */
13306 rc = pci_enable_pcie_error_reporting(pdev);
13307 if (!rc)
13308 bp->flags |= AER_ENABLED;
13309 else
		BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);
13311
	/* Clean the following indirect addresses for all functions since it
	 * is not used by the driver.
	 */
13316 if (IS_PF(bp)) {
13317 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13318 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13319 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13320 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13321
13322 if (chip_is_e1x) {
13323 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13324 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13325 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13326 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13327 }
13328
		/* Enable internal target-read (in case we are probed after PF
		 * FLR). Must be done prior to any BAR read access. Only for
		 * 57712 and up.
		 */
13333 if (!chip_is_e1x)
13334 REG_WR(bp,
13335 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13336 }
13337
13338 dev->watchdog_timeo = TX_TIMEOUT;
13339
13340 dev->netdev_ops = &bnx2x_netdev_ops;
13341 bnx2x_set_ethtool_ops(bp, dev);
13342
13343 dev->priv_flags |= IFF_UNICAST_FLT;
13344
13345 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13346 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13347 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
13348 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
13349 if (!chip_is_e1x) {
13350 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13351 NETIF_F_GSO_IPXIP4 |
13352 NETIF_F_GSO_UDP_TUNNEL |
13353 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13354 NETIF_F_GSO_PARTIAL;
13355
13356 dev->hw_enc_features =
13357 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13358 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13359 NETIF_F_GSO_IPXIP4 |
13360 NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13361 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
13362 NETIF_F_GSO_PARTIAL;
13363
13364 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
13365 NETIF_F_GSO_UDP_TUNNEL_CSUM;
13366 }
13367
13368 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13369 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13370
13371 if (IS_PF(bp)) {
13372 if (chip_is_e1x)
13373 bp->accept_any_vlan = true;
13374 else
13375 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13376 }
13377
	/* For VF we'll know whether to enable VLAN filtering after
	 * getting a response to CHANNEL_TLV_ACQUIRE from PF.
	 */
13381 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13382 dev->features |= NETIF_F_HIGHDMA;
13383 if (dev->features & NETIF_F_LRO)
13384 dev->features &= ~NETIF_F_GRO_HW;
13385
	/* Add Loopback capability to the device */
13387 dev->hw_features |= NETIF_F_LOOPBACK;
13388
13389#ifdef BCM_DCBNL
13390 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13391#endif
13392
	/* MTU range, 46 - 9600 */
13394 dev->min_mtu = ETH_MIN_PACKET_SIZE;
13395 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
13396
13397
13398 bp->mdio.prtad = MDIO_PRTAD_NONE;
13399 bp->mdio.mmds = 0;
13400 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13401 bp->mdio.dev = dev;
13402 bp->mdio.mdio_read = bnx2x_mdio_read;
13403 bp->mdio.mdio_write = bnx2x_mdio_write;
13404
13405 return 0;
13406
13407err_out_release:
13408 if (atomic_read(&pdev->enable_cnt) == 1)
13409 pci_release_regions(pdev);
13410
13411err_out_disable:
13412 pci_disable_device(pdev);
13413
13414err_out:
13415 return rc;
13416}
13417
13418static int bnx2x_check_firmware(struct bnx2x *bp)
13419{
13420 const struct firmware *firmware = bp->firmware;
13421 struct bnx2x_fw_file_hdr *fw_hdr;
13422 struct bnx2x_fw_file_section *sections;
13423 u32 offset, len, num_ops;
13424 __be16 *ops_offsets;
13425 int i;
13426 const u8 *fw_ver;
13427
13428 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
13429 BNX2X_ERR("Wrong FW size\n");
13430 return -EINVAL;
13431 }
13432
13433 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13434 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13435
13436
	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
13438 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13439 offset = be32_to_cpu(sections[i].offset);
13440 len = be32_to_cpu(sections[i].len);
13441 if (offset + len > firmware->size) {
13442 BNX2X_ERR("Section %d length is out of bounds\n", i);
13443 return -EINVAL;
13444 }
13445 }
13446
	/* Likewise for the init_ops offsets */
13448 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13449 ops_offsets = (__force __be16 *)(firmware->data + offset);
13450 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13451
13452 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13453 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13454 BNX2X_ERR("Section offset %d is out of bounds\n", i);
13455 return -EINVAL;
13456 }
13457 }
13458
	/* Check FW version */
13460 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13461 fw_ver = firmware->data + offset;
13462 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13463 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13464 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13465 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13466 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13467 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
13468 BCM_5710_FW_MAJOR_VERSION,
13469 BCM_5710_FW_MINOR_VERSION,
13470 BCM_5710_FW_REVISION_VERSION,
13471 BCM_5710_FW_ENGINEERING_VERSION);
13472 return -EINVAL;
13473 }
13474
13475 return 0;
13476}
13477
13478static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13479{
13480 const __be32 *source = (const __be32 *)_source;
13481 u32 *target = (u32 *)_target;
13482 u32 i;
13483
13484 for (i = 0; i < n/4; i++)
13485 target[i] = be32_to_cpu(source[i]);
13486}
13487
/* Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
13492static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13493{
13494 const __be32 *source = (const __be32 *)_source;
13495 struct raw_op *target = (struct raw_op *)_target;
13496 u32 i, j, tmp;
13497
13498 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13499 tmp = be32_to_cpu(source[j]);
13500 target[i].op = (tmp >> 24) & 0xff;
13501 target[i].offset = tmp & 0xffffff;
13502 target[i].raw_data = be32_to_cpu(source[j + 1]);
13503 }
13504}
13505
/* IRO array is stored in the following format:
 * {base(24bit):m1(16bit):m2(16bit):m3(16bit):size(16bit) }
 */
13509static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13510{
13511 const __be32 *source = (const __be32 *)_source;
13512 struct iro *target = (struct iro *)_target;
13513 u32 i, j, tmp;
13514
13515 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13516 target[i].base = be32_to_cpu(source[j]);
13517 j++;
13518 tmp = be32_to_cpu(source[j]);
13519 target[i].m1 = (tmp >> 16) & 0xffff;
13520 target[i].m2 = tmp & 0xffff;
13521 j++;
13522 tmp = be32_to_cpu(source[j]);
13523 target[i].m3 = (tmp >> 16) & 0xffff;
13524 target[i].size = tmp & 0xffff;
13525 j++;
13526 }
13527}
13528
13529static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13530{
13531 const __be16 *source = (const __be16 *)_source;
13532 u16 *target = (u16 *)_target;
13533 u32 i;
13534
13535 for (i = 0; i < n/2; i++)
13536 target[i] = be16_to_cpu(source[i]);
13537}
13538
13539#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13540do { \
13541 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13542 bp->arr = kmalloc(len, GFP_KERNEL); \
13543 if (!bp->arr) \
13544 goto lbl; \
13545 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13546 (u8 *)bp->arr, len); \
13547} while (0)
13548
13549static int bnx2x_init_firmware(struct bnx2x *bp)
13550{
13551 const char *fw_file_name;
13552 struct bnx2x_fw_file_hdr *fw_hdr;
13553 int rc;
13554
13555 if (bp->firmware)
13556 return 0;
13557
13558 if (CHIP_IS_E1(bp))
13559 fw_file_name = FW_FILE_NAME_E1;
13560 else if (CHIP_IS_E1H(bp))
13561 fw_file_name = FW_FILE_NAME_E1H;
13562 else if (!CHIP_IS_E1x(bp))
13563 fw_file_name = FW_FILE_NAME_E2;
13564 else {
13565 BNX2X_ERR("Unsupported chip revision\n");
13566 return -EINVAL;
13567 }
13568 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13569
13570 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13571 if (rc) {
13572 BNX2X_ERR("Can't load firmware file %s\n",
13573 fw_file_name);
13574 goto request_firmware_exit;
13575 }
13576
13577 rc = bnx2x_check_firmware(bp);
13578 if (rc) {
13579 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13580 goto request_firmware_exit;
13581 }
13582
13583 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13584
	/* Initialize the pointers to the init arrays */
	/* Blob */
13587 rc = -ENOMEM;
13588 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13589
	/* Opcodes */
13591 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13592
	/* Offsets */
13594 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13595 be16_to_cpu_n);
13596
	/* STORMs firmware */
13598 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13599 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13600 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13601 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13602 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13603 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13604 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13605 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13606 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13607 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13608 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13609 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13610 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13611 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13612 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13613 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13614
13615 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13616
13617 return 0;
13618
13619iro_alloc_err:
13620 kfree(bp->init_ops_offsets);
13621init_offsets_alloc_err:
13622 kfree(bp->init_ops);
13623init_ops_alloc_err:
13624 kfree(bp->init_data);
13625request_firmware_exit:
13626 release_firmware(bp->firmware);
13627 bp->firmware = NULL;
13628
13629 return rc;
13630}
13631
13632static void bnx2x_release_firmware(struct bnx2x *bp)
13633{
13634 kfree(bp->init_ops_offsets);
13635 kfree(bp->init_ops);
13636 kfree(bp->init_data);
13637 release_firmware(bp->firmware);
13638 bp->firmware = NULL;
13639}
13640
13641static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13642 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13643 .init_hw_cmn = bnx2x_init_hw_common,
13644 .init_hw_port = bnx2x_init_hw_port,
13645 .init_hw_func = bnx2x_init_hw_func,
13646
13647 .reset_hw_cmn = bnx2x_reset_common,
13648 .reset_hw_port = bnx2x_reset_port,
13649 .reset_hw_func = bnx2x_reset_func,
13650
13651 .gunzip_init = bnx2x_gunzip_init,
13652 .gunzip_end = bnx2x_gunzip_end,
13653
13654 .init_fw = bnx2x_init_firmware,
13655 .release_fw = bnx2x_release_firmware,
13656};
13657
13658void bnx2x__init_func_obj(struct bnx2x *bp)
13659{
13660
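	/* Prepare DMAE related driver resources */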
13661 bnx2x_setup_dmae(bp);
13662
13663 bnx2x_init_func_obj(bp, &bp->func_obj,
13664 bnx2x_sp(bp, func_rdata),
13665 bnx2x_sp_mapping(bp, func_rdata),
13666 bnx2x_sp(bp, func_afex_rdata),
13667 bnx2x_sp_mapping(bp, func_afex_rdata),
13668 &bnx2x_func_sp_drv);
13669}
13670
/* must be called after sriov-enable */
13672static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13673{
13674 int cid_count = BNX2X_L2_MAX_CID(bp);
13675
13676 if (IS_SRIOV(bp))
13677 cid_count += BNX2X_VF_CIDS;
13678
13679 if (CNIC_SUPPORT(bp))
13680 cid_count += CNIC_CID_MAX;
13681
13682 return roundup(cid_count, QM_CID_ROUND);
13683}
13684
/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 * @pdev: pci device
 * @cnic_cnt: number of status blocks reserved for CNIC
 *
 * Derives the count from the size of the device's MSI-X table.
 */
13691static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13692{
13693 int index;
13694 u16 control = 0;
13695
	/* If MSI-X is not supported - return number of SBs needed to support
	 * one fast path queue: one FP queue + SB for CNIC
	 */
13700 if (!pdev->msix_cap) {
13701 dev_info(&pdev->dev, "no msix capability found\n");
13702 return 1 + cnic_cnt;
13703 }
13704 dev_info(&pdev->dev, "msix capability found\n");
13705
	/* The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: number of all SBs
	 * without the default SB.
	 * For VFs there is no default SB, then we return (index+1).
	 */
13713 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13714
13715 index = control & PCI_MSIX_FLAGS_QSIZE;
13716
13717 return index;
13718}
13719
13720static int set_max_cos_est(int chip_id)
13721{
13722 switch (chip_id) {
13723 case BCM57710:
13724 case BCM57711:
13725 case BCM57711E:
13726 return BNX2X_MULTI_TX_COS_E1X;
13727 case BCM57712:
13728 case BCM57712_MF:
13729 return BNX2X_MULTI_TX_COS_E2_E3A0;
13730 case BCM57800:
13731 case BCM57800_MF:
13732 case BCM57810:
13733 case BCM57810_MF:
13734 case BCM57840_4_10:
13735 case BCM57840_2_20:
13736 case BCM57840_O:
13737 case BCM57840_MFO:
13738 case BCM57840_MF:
13739 case BCM57811:
13740 case BCM57811_MF:
13741 return BNX2X_MULTI_TX_COS_E3B0;
13742 case BCM57712_VF:
13743 case BCM57800_VF:
13744 case BCM57810_VF:
13745 case BCM57840_VF:
13746 case BCM57811_VF:
13747 return 1;
13748 default:
13749 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13750 return -ENODEV;
13751 }
13752}
13753
13754static int set_is_vf(int chip_id)
13755{
13756 switch (chip_id) {
13757 case BCM57712_VF:
13758 case BCM57800_VF:
13759 case BCM57810_VF:
13760 case BCM57840_VF:
13761 case BCM57811_VF:
13762 return true;
13763 default:
13764 return false;
13765 }
13766}
13767
13768
13769#define tsgen_ctrl 0x0
13770#define tsgen_freecount 0x10
13771#define tsgen_synctime_t0 0x20
13772#define tsgen_offset_t0 0x28
13773#define tsgen_drift_t0 0x30
13774#define tsgen_synctime_t1 0x58
13775#define tsgen_offset_t1 0x60
13776#define tsgen_drift_t1 0x68
13777
13778
13779static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13780 int best_val, int best_period)
13781{
13782 struct bnx2x_func_state_params func_params = {NULL};
13783 struct bnx2x_func_set_timesync_params *set_timesync_params =
13784 &func_params.params.set_timesync;
13785
	/* Prepare parameters for function state transitions */
13787 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13788 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13789
13790 func_params.f_obj = &bp->func_obj;
13791 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13792
	/* Function parameters */
13794 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13795 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13796 set_timesync_params->add_sub_drift_adjust_value =
13797 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13798 set_timesync_params->drift_adjust_value = best_val;
13799 set_timesync_params->drift_adjust_period = best_period;
13800
13801 return bnx2x_func_state_change(bp, &func_params);
13802}
13803
13804static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13805{
13806 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13807 int rc;
13808 int drift_dir = 1;
13809 int val, period, period1, period2, dif, dif1, dif2;
13810 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13811
13812 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13813
13814 if (!netif_running(bp->dev)) {
13815 DP(BNX2X_MSG_PTP,
13816 "PTP adjfreq called while the interface is down\n");
13817 return -ENETDOWN;
13818 }
13819
13820 if (ppb < 0) {
13821 ppb = -ppb;
13822 drift_dir = 0;
13823 }
13824
13825 if (ppb == 0) {
13826 best_val = 1;
13827 best_period = 0x1FFFFFF;
13828 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13829 best_val = 31;
13830 best_period = 1;
13831 } else {
13832
		/* Run through the possible drift-adjust values and pick the
		 * (val, period) pair that best approximates the requested ppb.
		 */
13835 for (val = 0; val <= 31; val++) {
13836 if ((val & 0x7) == 0)
13837 continue;
13838 period1 = val * 1000000 / ppb;
13839 period2 = period1 + 1;
13840 if (period1 != 0)
13841 dif1 = ppb - (val * 1000000 / period1);
13842 else
13843 dif1 = BNX2X_MAX_PHC_DRIFT;
13844 if (dif1 < 0)
13845 dif1 = -dif1;
13846 dif2 = ppb - (val * 1000000 / period2);
13847 if (dif2 < 0)
13848 dif2 = -dif2;
13849 dif = (dif1 < dif2) ? dif1 : dif2;
13850 period = (dif1 < dif2) ? period1 : period2;
13851 if (dif < best_dif) {
13852 best_dif = dif;
13853 best_val = val;
13854 best_period = period;
13855 }
13856 }
13857 }
13858
13859 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13860 best_period);
13861 if (rc) {
13862 BNX2X_ERR("Failed to set drift\n");
13863 return -EFAULT;
13864 }
13865
13866 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13867 best_period);
13868
13869 return 0;
13870}
13871
13872static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13873{
13874 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13875
13876 if (!netif_running(bp->dev)) {
13877 DP(BNX2X_MSG_PTP,
13878 "PTP adjtime called while the interface is down\n");
13879 return -ENETDOWN;
13880 }
13881
13882 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13883
13884 timecounter_adjtime(&bp->timecounter, delta);
13885
13886 return 0;
13887}
13888
13889static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13890{
13891 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13892 u64 ns;
13893
13894 if (!netif_running(bp->dev)) {
13895 DP(BNX2X_MSG_PTP,
13896 "PTP gettime called while the interface is down\n");
13897 return -ENETDOWN;
13898 }
13899
13900 ns = timecounter_read(&bp->timecounter);
13901
13902 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13903
13904 *ts = ns_to_timespec64(ns);
13905
13906 return 0;
13907}
13908
13909static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13910 const struct timespec64 *ts)
13911{
13912 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13913 u64 ns;
13914
13915 if (!netif_running(bp->dev)) {
13916 DP(BNX2X_MSG_PTP,
13917 "PTP settime called while the interface is down\n");
13918 return -ENETDOWN;
13919 }
13920
13921 ns = timespec64_to_ns(ts);
13922
13923 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13924
	/* Re-init the timecounter */
13926 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13927
13928 return 0;
13929}
13930
13931
13932static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13933 struct ptp_clock_request *rq, int on)
13934{
13935 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13936
13937 BNX2X_ERR("PHC ancillary features are not supported\n");
13938 return -ENOTSUPP;
13939}
13940
13941void bnx2x_register_phc(struct bnx2x *bp)
13942{
	/* Fill the ptp_clock_info struct and register PTP clock */
13944 bp->ptp_clock_info.owner = THIS_MODULE;
13945 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13946 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT;
13947 bp->ptp_clock_info.n_alarm = 0;
13948 bp->ptp_clock_info.n_ext_ts = 0;
13949 bp->ptp_clock_info.n_per_out = 0;
13950 bp->ptp_clock_info.pps = 0;
13951 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13952 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13953 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13954 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13955 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13956
13957 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13958 if (IS_ERR(bp->ptp_clock)) {
13959 bp->ptp_clock = NULL;
13960 BNX2X_ERR("PTP clock registration failed\n");
13961 }
13962}
13963
13964static int bnx2x_init_one(struct pci_dev *pdev,
13965 const struct pci_device_id *ent)
13966{
13967 struct net_device *dev = NULL;
13968 struct bnx2x *bp;
13969 int rc, max_non_def_sbs;
13970 int rx_count, tx_count, rss_count, doorbell_size;
13971 int max_cos_est;
13972 bool is_vf;
13973 int cnic_cnt;
13974
	/* Management FW 'remembers' living interfaces. Allow it some time
	 * to forget previously living interfaces, allowing a proper re-load.
	 */
13978 if (is_kdump_kernel()) {
13979 ktime_t now = ktime_get_boottime();
13980 ktime_t fw_ready_time = ktime_set(5, 0);
13981
13982 if (ktime_before(now, fw_ready_time))
13983 msleep(ktime_ms_delta(fw_ready_time, now));
13984 }
13985
	/* An estimated maximum supported CoS number according to the chip
	 * version.
	 * We will try to roughly estimate the maximum number of CoSes this chip
	 * may support in order to minimize the memory allocated for Tx
	 * queue/rings and fastpath interrupt status blocks (fp->txdata).
	 */
13994 max_cos_est = set_max_cos_est(ent->driver_data);
13995 if (max_cos_est < 0)
13996 return max_cos_est;
13997 is_vf = set_is_vf(ent->driver_data);
13998 cnic_cnt = is_vf ? 0 : 1;
13999
14000 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
14001
	/* add another SB for VF as it has no default SB */
14003 max_non_def_sbs += is_vf ? 1 : 0;
14004
	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
14006 rss_count = max_non_def_sbs - cnic_cnt;
14007
14008 if (rss_count < 1)
14009 return -EINVAL;
14010
	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
14012 rx_count = rss_count + cnic_cnt;
14013
	/* Maximum number of netdev Tx queues:
	 * Maximum supported number of CoSes * number of RSS queues + FCoE L2
	 */
14017 tx_count = rss_count * max_cos_est + cnic_cnt;
14018
	/* dev zeroed in init_etherdev */
14020 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
14021 if (!dev)
14022 return -ENOMEM;
14023
14024 bp = netdev_priv(dev);
14025
14026 bp->flags = 0;
14027 if (is_vf)
14028 bp->flags |= IS_VF_FLAG;
14029
14030 bp->igu_sb_cnt = max_non_def_sbs;
14031 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
14032 bp->msg_enable = debug;
14033 bp->cnic_support = cnic_cnt;
14034 bp->cnic_probe = bnx2x_cnic_probe;
14035
14036 pci_set_drvdata(pdev, dev);
14037
14038 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
14039 if (rc < 0) {
14040 free_netdev(dev);
14041 return rc;
14042 }
14043
14044 BNX2X_DEV_INFO("This is a %s function\n",
14045 IS_PF(bp) ? "physical" : "virtual");
14046 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
14047 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
14048 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
14049 tx_count, rx_count);
14050
14051 rc = bnx2x_init_bp(bp);
14052 if (rc)
14053 goto init_one_exit;
14054
	/* Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * l2 connections.
	 */
14059 if (IS_VF(bp)) {
14060 bp->doorbells = bnx2x_vf_doorbells(bp);
14061 rc = bnx2x_vf_pci_alloc(bp);
14062 if (rc)
14063 goto init_one_freemem;
14064 } else {
14065 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
14066 if (doorbell_size > pci_resource_len(pdev, 2)) {
14067 dev_err(&bp->pdev->dev,
14068 "Cannot map doorbells, bar size too small, aborting\n");
14069 rc = -ENOMEM;
14070 goto init_one_freemem;
14071 }
14072 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
14073 doorbell_size);
14074 }
14075 if (!bp->doorbells) {
14076 dev_err(&bp->pdev->dev,
14077 "Cannot map doorbell space, aborting\n");
14078 rc = -ENOMEM;
14079 goto init_one_freemem;
14080 }
14081
14082 if (IS_VF(bp)) {
14083 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
14084 if (rc)
14085 goto init_one_freemem;
14086
14087#ifdef CONFIG_BNX2X_SRIOV
14088
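		/* VF with OLD Hypervisor or old PF do not support filtering */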
14089 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
14090 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14091 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14092 }
14093#endif
14094 }
14095
	/* Enable SRIOV if capability found in configuration space */
14097 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
14098 if (rc)
14099 goto init_one_freemem;
14100
	/* calc qm_cid_count */
14102 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
14103 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
14104
	/* disable FCOE L2 queue for E1x */
14106 if (CHIP_IS_E1x(bp))
14107 bp->flags |= NO_FCOE_FLAG;
14108
	/* Set bp->num_queues for MSI-X mode */
14110 bnx2x_set_num_queues(bp);
14111
	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed.
	 */
14115 rc = bnx2x_set_int_mode(bp);
14116 if (rc) {
14117 dev_err(&pdev->dev, "Cannot set interrupts\n");
14118 goto init_one_freemem;
14119 }
14120 BNX2X_DEV_INFO("set interrupts successfully\n");
14121
14122
14123 rc = register_netdev(dev);
14124 if (rc) {
14125 dev_err(&pdev->dev, "Cannot register net device\n");
14126 goto init_one_freemem;
14127 }
14128 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
14129
14130 if (!NO_FCOE(bp)) {
14131
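		/* Add storage MAC address */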
14132 rtnl_lock();
14133 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14134 rtnl_unlock();
14135 }
14136 BNX2X_DEV_INFO(
14137 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
14138 board_info[ent->driver_data].name,
14139 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
14140 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14141 pcie_print_link_status(bp->pdev);
14142
14143 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14144 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14145
14146 return 0;
14147
14148init_one_freemem:
14149 bnx2x_free_mem_bp(bp);
14150
14151init_one_exit:
14152 bnx2x_disable_pcie_error_reporting(bp);
14153
14154 if (bp->regview)
14155 iounmap(bp->regview);
14156
14157 if (IS_PF(bp) && bp->doorbells)
14158 iounmap(bp->doorbells);
14159
14160 free_netdev(dev);
14161
14162 if (atomic_read(&pdev->enable_cnt) == 1)
14163 pci_release_regions(pdev);
14164
14165 pci_disable_device(pdev);
14166
14167 return rc;
14168}
14169
14170static void __bnx2x_remove(struct pci_dev *pdev,
14171 struct net_device *dev,
14172 struct bnx2x *bp,
14173 bool remove_netdev)
14174{
14175
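	/* Delete storage MAC address */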
14176 if (!NO_FCOE(bp)) {
14177 rtnl_lock();
14178 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14179 rtnl_unlock();
14180 }
14181
14182#ifdef BCM_DCBNL
14183
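	/* Delete app tlvs from dcbnl */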
14184 bnx2x_dcbnl_update_applist(bp, true);
14185#endif
14186
14187 if (IS_PF(bp) &&
14188 !BP_NOMCP(bp) &&
14189 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14190 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14191
	/* Close the interface - either directly or implicitly */
14193 if (remove_netdev) {
14194 unregister_netdev(dev);
14195 } else {
14196 rtnl_lock();
14197 dev_close(dev);
14198 rtnl_unlock();
14199 }
14200
14201 bnx2x_iov_remove_one(bp);
14202
	/* Power on: we can't let PCI layer write to us while we are in D3 */
14204 if (IS_PF(bp)) {
14205 bnx2x_set_power_state(bp, PCI_D0);
14206 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14207
		/* Set endianity registers to reset values in case next driver
		 * boots in different endianity environment.
		 */
14211 bnx2x_reset_endianity(bp);
14212 }
14213
	/* Disable MSI/MSI-X */
14215 bnx2x_disable_msi(bp);
14216
	/* Power off */
14218 if (IS_PF(bp))
14219 bnx2x_set_power_state(bp, PCI_D3hot);
14220
	/* Make sure RESET task is not scheduled before continuing */
14222 cancel_delayed_work_sync(&bp->sp_rtnl_task);
14223
	/* send message via vfpf channel to release the resources of this vf */
14225 if (IS_VF(bp))
14226 bnx2x_vfpf_release(bp);
14227
	/* Assumes no further PCIe PM changes will occur */
14229 if (system_state == SYSTEM_POWER_OFF) {
14230 pci_wake_from_d3(pdev, bp->wol);
14231 pci_set_power_state(pdev, PCI_D3hot);
14232 }
14233
14234 bnx2x_disable_pcie_error_reporting(bp);
14235 if (remove_netdev) {
14236 if (bp->regview)
14237 iounmap(bp->regview);
14238
		/* For vfs, doorbells are part of the regview and were unmapped
		 * along with it. FW is only loaded by PF.
		 */
14242 if (IS_PF(bp)) {
14243 if (bp->doorbells)
14244 iounmap(bp->doorbells);
14245
14246 bnx2x_release_firmware(bp);
14247 } else {
14248 bnx2x_vf_pci_dealloc(bp);
14249 }
14250 bnx2x_free_mem_bp(bp);
14251
14252 free_netdev(dev);
14253
14254 if (atomic_read(&pdev->enable_cnt) == 1)
14255 pci_release_regions(pdev);
14256
14257 pci_disable_device(pdev);
14258 }
14259}
14260
14261static void bnx2x_remove_one(struct pci_dev *pdev)
14262{
14263 struct net_device *dev = pci_get_drvdata(pdev);
14264 struct bnx2x *bp;
14265
14266 if (!dev) {
14267 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14268 return;
14269 }
14270 bp = netdev_priv(dev);
14271
14272 __bnx2x_remove(pdev, dev, bp, true);
14273}
14274
14275static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14276{
14277 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14278
14279 bp->rx_mode = BNX2X_RX_MODE_NONE;
14280
14281 if (CNIC_LOADED(bp))
14282 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14283
	/* Stop Tx */
14285 bnx2x_tx_disable(bp);
14286
14287 bnx2x_del_all_napi(bp);
14288 if (CNIC_LOADED(bp))
14289 bnx2x_del_all_napi_cnic(bp);
14290 netdev_reset_tc(bp->dev);
14291
14292 del_timer_sync(&bp->timer);
14293 cancel_delayed_work_sync(&bp->sp_task);
14294 cancel_delayed_work_sync(&bp->period_task);
14295
14296 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14297 bp->stats_state = STATS_STATE_DISABLED;
14298 up(&bp->stats_lock);
14299 }
14300
14301 bnx2x_save_statistics(bp);
14302
14303 netif_carrier_off(bp->dev);
14304
14305 return 0;
14306}
14307
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
14316static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
14317 pci_channel_state_t state)
14318{
14319 struct net_device *dev = pci_get_drvdata(pdev);
14320 struct bnx2x *bp = netdev_priv(dev);
14321
14322 rtnl_lock();
14323
14324 BNX2X_ERR("IO error detected\n");
14325
14326 netif_device_detach(dev);
14327
14328 if (state == pci_channel_io_perm_failure) {
14329 rtnl_unlock();
14330 return PCI_ERS_RESULT_DISCONNECT;
14331 }
14332
14333 if (netif_running(dev))
14334 bnx2x_eeh_nic_unload(bp);
14335
14336 bnx2x_prev_path_mark_eeh(bp);
14337
14338 pci_disable_device(pdev);
14339
14340 rtnl_unlock();
14341
	/* Request a slot reset */
14343 return PCI_ERS_RESULT_NEED_RESET;
14344}
14345
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
14352static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14353{
14354 struct net_device *dev = pci_get_drvdata(pdev);
14355 struct bnx2x *bp = netdev_priv(dev);
14356 int i;
14357
14358 rtnl_lock();
14359 BNX2X_ERR("IO slot reset initializing...\n");
14360 if (pci_enable_device(pdev)) {
14361 dev_err(&pdev->dev,
14362 "Cannot re-enable PCI device after reset\n");
14363 rtnl_unlock();
14364 return PCI_ERS_RESULT_DISCONNECT;
14365 }
14366
14367 pci_set_master(pdev);
14368 pci_restore_state(pdev);
14369 pci_save_state(pdev);
14370
14371 if (netif_running(dev))
14372 bnx2x_set_power_state(bp, PCI_D0);
14373
14374 if (netif_running(dev)) {
14375 BNX2X_ERR("IO slot reset --> driver unload\n");
14376
		/* MCP should have been reset; Need to wait for validity */
14378 if (bnx2x_init_shmem(bp)) {
14379 rtnl_unlock();
14380 return PCI_ERS_RESULT_DISCONNECT;
14381 }
14382
14383 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14384 u32 v;
14385
14386 v = SHMEM2_RD(bp,
14387 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14388 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14389 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14390 }
14391 bnx2x_drain_tx_queues(bp);
14392 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14393 bnx2x_netif_stop(bp, 1);
14394 bnx2x_free_irq(bp);
14395
		/* Report UNLOAD_DONE to MCP */
14397 bnx2x_send_unload_done(bp, true);
14398
14399 bp->sp_state = 0;
14400 bp->port.pmf = 0;
14401
14402 bnx2x_prev_unload(bp);
14403
		/* Free SKBs, SGEs, TPA pool and driver internals */
14407 bnx2x_squeeze_objects(bp);
14408 bnx2x_free_skbs(bp);
14409 for_each_rx_queue(bp, i)
14410 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14411 bnx2x_free_fp_mem(bp);
14412 bnx2x_free_mem(bp);
14413
14414 bp->state = BNX2X_STATE_CLOSED;
14415 }
14416
14417 rtnl_unlock();
14418
14419 return PCI_ERS_RESULT_RECOVERED;
14420}
14421
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
14429static void bnx2x_io_resume(struct pci_dev *pdev)
14430{
14431 struct net_device *dev = pci_get_drvdata(pdev);
14432 struct bnx2x *bp = netdev_priv(dev);
14433
14434 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14435 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14436 return;
14437 }
14438
14439 rtnl_lock();
14440
14441 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14442 DRV_MSG_SEQ_NUMBER_MASK;
14443
14444 if (netif_running(dev))
14445 bnx2x_nic_load(bp, LOAD_NORMAL);
14446
14447 netif_device_attach(dev);
14448
14449 rtnl_unlock();
14450}
14451
14452static const struct pci_error_handlers bnx2x_err_handler = {
14453 .error_detected = bnx2x_io_error_detected,
14454 .slot_reset = bnx2x_io_slot_reset,
14455 .resume = bnx2x_io_resume,
14456};
14457
14458static void bnx2x_shutdown(struct pci_dev *pdev)
14459{
14460 struct net_device *dev = pci_get_drvdata(pdev);
14461 struct bnx2x *bp;
14462
14463 if (!dev)
14464 return;
14465
14466 bp = netdev_priv(dev);
14467 if (!bp)
14468 return;
14469
14470 rtnl_lock();
14471 netif_device_detach(dev);
14472 rtnl_unlock();
14473
	/* Don't remove the netdevice, as there are scenarios which will cause
	 * the kernel to hang, e.g., when trying to remove bnx2i while the
	 * network stack is down.
	 */
14478 __bnx2x_remove(pdev, dev, bp, false);
14479}
14480
14481static struct pci_driver bnx2x_pci_driver = {
14482 .name = DRV_MODULE_NAME,
14483 .id_table = bnx2x_pci_tbl,
14484 .probe = bnx2x_init_one,
14485 .remove = bnx2x_remove_one,
14486 .driver.pm = &bnx2x_pm_ops,
14487 .err_handler = &bnx2x_err_handler,
14488#ifdef CONFIG_BNX2X_SRIOV
14489 .sriov_configure = bnx2x_sriov_configure,
14490#endif
14491 .shutdown = bnx2x_shutdown,
14492};
14493
14494static int __init bnx2x_init(void)
14495{
14496 int ret;
14497
14498 pr_info("%s", version);
14499
14500 bnx2x_wq = create_singlethread_workqueue("bnx2x");
14501 if (bnx2x_wq == NULL) {
14502 pr_err("Cannot create workqueue\n");
14503 return -ENOMEM;
14504 }
14505 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14506 if (!bnx2x_iov_wq) {
14507 pr_err("Cannot create iov workqueue\n");
14508 destroy_workqueue(bnx2x_wq);
14509 return -ENOMEM;
14510 }
14511
14512 ret = pci_register_driver(&bnx2x_pci_driver);
14513 if (ret) {
14514 pr_err("Cannot register driver\n");
14515 destroy_workqueue(bnx2x_wq);
14516 destroy_workqueue(bnx2x_iov_wq);
14517 }
14518 return ret;
14519}
14520
14521static void __exit bnx2x_cleanup(void)
14522{
14523 struct list_head *pos, *q;
14524
14525 pci_unregister_driver(&bnx2x_pci_driver);
14526
14527 destroy_workqueue(bnx2x_wq);
14528 destroy_workqueue(bnx2x_iov_wq);
14529
	/* Free entries accumulated on the previously-loaded paths list */
14531 list_for_each_safe(pos, q, &bnx2x_prev_list) {
14532 struct bnx2x_prev_path_list *tmp =
14533 list_entry(pos, struct bnx2x_prev_path_list, list);
14534 list_del(pos);
14535 kfree(tmp);
14536 }
14537}
14538
14539void bnx2x_notify_link_changed(struct bnx2x *bp)
14540{
14541 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14542}
14543
14544module_init(bnx2x_init);
14545module_exit(bnx2x_cleanup);
14546
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
14556static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14557{
14558 unsigned long ramrod_flags = 0;
14559
14560 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
14561 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14562 &bp->iscsi_l2_mac_obj, true,
14563 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
14564}
14565
/* count denotes the number of new completions we have seen */
14567static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14568{
14569 struct eth_spe *spe;
14570 int cxt_index, cxt_offset;
14571
14572#ifdef BNX2X_STOP_ON_ERROR
14573 if (unlikely(bp->panic))
14574 return;
14575#endif
14576
14577 spin_lock_bh(&bp->spq_lock);
14578 BUG_ON(bp->cnic_spq_pending < count);
14579 bp->cnic_spq_pending -= count;
14580
14581 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14582 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14583 & SPE_HDR_CONN_TYPE) >>
14584 SPE_HDR_CONN_TYPE_SHIFT;
14585 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14586 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14587
		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
14591 if (type == ETH_CONNECTION_TYPE) {
14592 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14593 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14594 ILT_PAGE_CIDS;
14595 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14596 (cxt_index * ILT_PAGE_CIDS);
14597 bnx2x_set_ctx_validation(bp,
14598 &bp->context[cxt_index].
14599 vcxt[cxt_offset].eth,
14600 BNX2X_ISCSI_ETH_CID(bp));
14601 }
14602 }
14603
		/* There may be not more than 8 L2, not more than 8 L5 SPEs
		 * in the air. We also check that number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
14610 if (type == ETH_CONNECTION_TYPE) {
14611 if (!atomic_read(&bp->cq_spq_left))
14612 break;
14613 else
14614 atomic_dec(&bp->cq_spq_left);
14615 } else if (type == NONE_CONNECTION_TYPE) {
14616 if (!atomic_read(&bp->eq_spq_left))
14617 break;
14618 else
14619 atomic_dec(&bp->eq_spq_left);
14620 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14621 (type == FCOE_CONNECTION_TYPE)) {
14622 if (bp->cnic_spq_pending >=
14623 bp->cnic_eth_dev.max_kwqe_pending)
14624 break;
14625 else
14626 bp->cnic_spq_pending++;
14627 } else {
14628 BNX2X_ERR("Unknown SPE type: %d\n", type);
14629 bnx2x_panic();
14630 break;
14631 }
14632
14633 spe = bnx2x_sp_get_next(bp);
14634 *spe = *bp->cnic_kwq_cons;
14635
14636 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14637 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14638
14639 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14640 bp->cnic_kwq_cons = bp->cnic_kwq;
14641 else
14642 bp->cnic_kwq_cons++;
14643 }
14644 bnx2x_sp_prod_update(bp);
14645 spin_unlock_bh(&bp->spq_lock);
14646}
14647
14648static int bnx2x_cnic_sp_queue(struct net_device *dev,
14649 struct kwqe_16 *kwqes[], u32 count)
14650{
14651 struct bnx2x *bp = netdev_priv(dev);
14652 int i;
14653
14654#ifdef BNX2X_STOP_ON_ERROR
14655 if (unlikely(bp->panic)) {
14656 BNX2X_ERR("Can't post to SP queue while panic\n");
14657 return -EIO;
14658 }
14659#endif
14660
14661 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14662 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14663 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14664 return -EAGAIN;
14665 }
14666
14667 spin_lock_bh(&bp->spq_lock);
14668
14669 for (i = 0; i < count; i++) {
14670 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14671
14672 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14673 break;
14674
14675 *bp->cnic_kwq_prod = *spe;
14676
14677 bp->cnic_kwq_pending++;
14678
14679 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14680 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14681 spe->data.update_data_addr.hi,
14682 spe->data.update_data_addr.lo,
14683 bp->cnic_kwq_pending);
14684
14685 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14686 bp->cnic_kwq_prod = bp->cnic_kwq;
14687 else
14688 bp->cnic_kwq_prod++;
14689 }
14690
14691 spin_unlock_bh(&bp->spq_lock);
14692
14693 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14694 bnx2x_cnic_sp_post(bp, 0);
14695
14696 return i;
14697}
14698
14699static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14700{
14701 struct cnic_ops *c_ops;
14702 int rc = 0;
14703
14704 mutex_lock(&bp->cnic_mutex);
14705 c_ops = rcu_dereference_protected(bp->cnic_ops,
14706 lockdep_is_held(&bp->cnic_mutex));
14707 if (c_ops)
14708 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14709 mutex_unlock(&bp->cnic_mutex);
14710
14711 return rc;
14712}
14713
14714static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14715{
14716 struct cnic_ops *c_ops;
14717 int rc = 0;
14718
14719 rcu_read_lock();
14720 c_ops = rcu_dereference(bp->cnic_ops);
14721 if (c_ops)
14722 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14723 rcu_read_unlock();
14724
14725 return rc;
14726}
14727
/* for commands that have no data */
14731int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14732{
14733 struct cnic_ctl_info ctl = {0};
14734
14735 ctl.cmd = cmd;
14736
14737 return bnx2x_cnic_ctl_send(bp, &ctl);
14738}
14739
14740static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14741{
14742 struct cnic_ctl_info ctl = {0};
14743
	/* first we tell CNIC and only then we count this as a completion */
14745 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14746 ctl.data.comp.cid = cid;
14747 ctl.data.comp.error = err;
14748
14749 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14750 bnx2x_cnic_sp_post(bp, 0);
14751}
14752
/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
14758static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14759{
14760 unsigned long accept_flags = 0, ramrod_flags = 0;
14761 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14762 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14763
14764 if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
14771 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14772 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14773 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14774 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14775
		/* Clear a possibly scheduled STOP when START is requested */
14777 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14778
14779 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14780 } else
14781
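		/* Clear a possibly scheduled START when STOP is requested */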
14782 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14783
14784 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14785 set_bit(sched_state, &bp->sp_state);
14786 else {
14787 __set_bit(RAMROD_RX, &ramrod_flags);
14788 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14789 ramrod_flags);
14790 }
14791}
14792
14793static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14794{
14795 struct bnx2x *bp = netdev_priv(dev);
14796 int rc = 0;
14797
14798 switch (ctl->cmd) {
14799 case DRV_CTL_CTXTBL_WR_CMD: {
14800 u32 index = ctl->data.io.offset;
14801 dma_addr_t addr = ctl->data.io.dma_addr;
14802
14803 bnx2x_ilt_wr(bp, index, addr);
14804 break;
14805 }
14806
14807 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14808 int count = ctl->data.credit.credit_count;
14809
14810 bnx2x_cnic_sp_post(bp, count);
14811 break;
14812 }
14813
	/* rtnl_lock is held.  */
14815 case DRV_CTL_START_L2_CMD: {
14816 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14817 unsigned long sp_bits = 0;
14818
		/* Configure the iSCSI classification object */
14820 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14821 cp->iscsi_l2_client_id,
14822 cp->iscsi_l2_cid, BP_FUNC(bp),
14823 bnx2x_sp(bp, mac_rdata),
14824 bnx2x_sp_mapping(bp, mac_rdata),
14825 BNX2X_FILTER_MAC_PENDING,
14826 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14827 &bp->macs_pool);
14828
		/* Set iSCSI MAC address */
14830 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14831 if (rc)
14832 break;
14833
14834 barrier();
14835
		/* Start accepting on iSCSI L2 ring */
14838 netif_addr_lock_bh(dev);
14839 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14840 netif_addr_unlock_bh(dev);
14841
		/* bits to wait on */
14843 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14844 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14845
14846 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14847 BNX2X_ERR("rx_mode completion timed out!\n");
14848
14849 break;
14850 }
14851
	/* rtnl_lock is held.  */
14853 case DRV_CTL_STOP_L2_CMD: {
14854 unsigned long sp_bits = 0;
14855
		/* Stop accepting on iSCSI L2 ring */
14857 netif_addr_lock_bh(dev);
14858 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14859 netif_addr_unlock_bh(dev);
14860
		/* bits to wait on */
14862 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14863 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14864
14865 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14866 BNX2X_ERR("rx_mode completion timed out!\n");
14867
14868 barrier();
14869
		/* Unset iSCSI L2 MAC */
14871 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14872 BNX2X_ISCSI_ETH_MAC, true);
14873 break;
14874 }
14875 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14876 int count = ctl->data.credit.credit_count;
14877
14878 smp_mb__before_atomic();
14879 atomic_add(count, &bp->cq_spq_left);
14880 smp_mb__after_atomic();
14881 break;
14882 }
14883 case DRV_CTL_ULP_REGISTER_CMD: {
14884 int ulp_type = ctl->data.register_data.ulp_type;
14885
14886 if (CHIP_IS_E3(bp)) {
14887 int idx = BP_FW_MB_IDX(bp);
14888 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14889 int path = BP_PATH(bp);
14890 int port = BP_PORT(bp);
14891 int i;
14892 u32 scratch_offset;
14893 u32 *host_addr;
14894
			/* first write capability to shmem2 */
14896 if (ulp_type == CNIC_ULP_ISCSI)
14897 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14898 else if (ulp_type == CNIC_ULP_FCOE)
14899 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14900 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14901
14902 if ((ulp_type != CNIC_ULP_FCOE) ||
14903 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14904 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14905 break;
14906
			/* if reached here - should write fcoe capabilities */
14908 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14909 if (!scratch_offset)
14910 break;
14911 scratch_offset += offsetof(struct glob_ncsi_oem_data,
14912 fcoe_features[path][port]);
14913 host_addr = (u32 *) &(ctl->data.register_data.
14914 fcoe_features);
14915 for (i = 0; i < sizeof(struct fcoe_capabilities);
14916 i += 4)
14917 REG_WR(bp, scratch_offset + i,
14918 *(host_addr + i/4));
14919 }
14920 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14921 break;
14922 }
14923
14924 case DRV_CTL_ULP_UNREGISTER_CMD: {
14925 int ulp_type = ctl->data.ulp_type;
14926
14927 if (CHIP_IS_E3(bp)) {
14928 int idx = BP_FW_MB_IDX(bp);
14929 u32 cap;
14930
14931 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14932 if (ulp_type == CNIC_ULP_ISCSI)
14933 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14934 else if (ulp_type == CNIC_ULP_FCOE)
14935 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14936 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14937 }
14938 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14939 break;
14940 }
14941
14942 default:
14943 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14944 rc = -EINVAL;
14945 }
14946
	/* For storage-only interfaces, change driver state */
14948 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14949 switch (ctl->drv_state) {
14950 case DRV_NOP:
14951 break;
14952 case DRV_ACTIVE:
14953 bnx2x_set_os_driver_state(bp,
14954 OS_DRIVER_STATE_ACTIVE);
14955 break;
14956 case DRV_INACTIVE:
14957 bnx2x_set_os_driver_state(bp,
14958 OS_DRIVER_STATE_DISABLED);
14959 break;
14960 case DRV_UNLOADED:
14961 bnx2x_set_os_driver_state(bp,
14962 OS_DRIVER_STATE_NOT_LOADED);
14963 break;
14964 default:
14965 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14966 }
14967 }
14968
14969 return rc;
14970}
14971
14972static int bnx2x_get_fc_npiv(struct net_device *dev,
14973 struct cnic_fc_npiv_tbl *cnic_tbl)
14974{
14975 struct bnx2x *bp = netdev_priv(dev);
14976 struct bdn_fc_npiv_tbl *tbl = NULL;
14977 u32 offset, entries;
14978 int rc = -EINVAL;
14979 int i;
14980
14981 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14982 goto out;
14983
14984 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14985
14986 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14987 if (!tbl) {
14988 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14989 goto out;
14990 }
14991
14992 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14993 if (!offset) {
14994 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14995 goto out;
14996 }
14997 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
14998
	/* Read the table contents from nvram */
15000 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
15001 BNX2X_ERR("Failed to read FC-NPIV table\n");
15002 goto out;
15003 }
15004
	/* Since bnx2x_nvram_read() returns data in be32, we need to convert
	 * the number of entries back to cpu endianness.
	 */
15008 entries = tbl->fc_npiv_cfg.num_of_npiv;
15009 entries = (__force u32)be32_to_cpu((__force __be32)entries);
15010 tbl->fc_npiv_cfg.num_of_npiv = entries;
15011
15012 if (!tbl->fc_npiv_cfg.num_of_npiv) {
15013 DP(BNX2X_MSG_MCP,
15014 "No FC-NPIV table [valid, simply not present]\n");
15015 goto out;
15016 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
15017 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
15018 tbl->fc_npiv_cfg.num_of_npiv);
15019 goto out;
15020 } else {
15021 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
15022 tbl->fc_npiv_cfg.num_of_npiv);
15023 }
15024
	/* Copy the data into cnic-provided struct */
15026 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
15027 for (i = 0; i < cnic_tbl->count; i++) {
15028 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
15029 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
15030 }
15031
15032 rc = 0;
15033out:
15034 kfree(tbl);
15035 return rc;
15036}
15037
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (!CHIP_IS_E1x(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

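/* Refresh the CID/ILT layout exported to cnic so it stays consistent with
 * the current L2 CID count.
 */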
void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
	   BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
	   cp->iscsi_l2_cid);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}

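/* cnic entry point: attach a cnic instance to this device. Loads the
 * CNIC-related HW/FW resources on first use and allocates the kwqe ring
 * used to pass slow-path queue elements from cnic.
 */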
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	int rc;

	DP(NETIF_MSG_IFUP, "Register_cnic called\n");

	if (ops == NULL) {
		BNX2X_ERR("NULL ops received\n");
		return -EINVAL;
	}

	if (!CNIC_SUPPORT(bp)) {
		BNX2X_ERR("Can't register CNIC when not supported\n");
		return -EOPNOTSUPP;
	}

	if (!CNIC_LOADED(bp)) {
		rc = bnx2x_load_cnic(bp);
		if (rc) {
			BNX2X_ERR("CNIC-related load failed\n");
			return rc;
		}
	}

	bp->cnic_enabled = true;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state |= CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	/* Schedule driver to read CNIC driver versions */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);

	return 0;
}

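/* cnic entry point: detach the cnic instance. Publishes the NULL ops pointer
 * and waits for in-flight RCU readers before freeing the kwqe ring.
 */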
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	bp->cnic_enabled = false;
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL in order to
	 * indicate CNIC that it should not try to work with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_client_id =
		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	BNX2X_DEV_INFO(
		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
		cp->ctx_blk_size,
		cp->ctx_tbl_offset,
		cp->ctx_tbl_len,
		cp->starting_cid);
	return cp;
}

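/* Return the USTORM internal-memory offset of the Rx producers for the given
 * fastpath; VFs obtain it via the PF, and E2+ vs. E1x use different layouts.
 */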
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	u32 offset = BAR_USTRORM_INTMEM;

	if (IS_VF(bp))
		return bnx2x_vf_ustorm_prods_offset(bp, fp);
	else if (!CHIP_IS_E1x(bp))
		offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

	return offset;
}

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0..7.
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination.
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	/* Read back to make sure the posted write reached the HW */
	REG_RD(bp, pretend_reg);
	return 0;
}

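/* Deferred work item for Tx PTP timestamping: polls the NIG Tx timestamp
 * buffer for roughly a second with exponential backoff, delivers the
 * timestamp to the stack via skb_tstamp_tx(), then releases the skb.
 */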
static void bnx2x_ptp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
	int port = BP_PORT(bp);
	u32 val_seq;
	u64 timestamp, ns;
	struct skb_shared_hwtstamps shhwtstamps;
	bool bail = true;
	int i;

	/* FW may take a while to complete timestamping; poll a bit and if it
	 * doesn't complete, bail out and count the skipped timestamp.
	 */
	for (i = 0; i < 10; i++) {
		/* Read Tx timestamp registers */
		val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
				 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
		if (val_seq & 0x10000) {
			bail = false;
			break;
		}
		msleep(1 << i);
	}

	if (!bail) {
		/* There is a valid timestamp value */
		timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
				   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
		timestamp <<= 32;
		timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
				    NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
		/* Reset timestamp register to allow new timestamp */
		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
		       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
		ns = timecounter_cyc2time(&bp->timecounter, timestamp);

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);

		DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
	} else {
		DP(BNX2X_MSG_PTP,
		   "Tx timestamp is not recorded (register read=%u)\n",
		   val_seq);
		bp->eth_stats.ptp_skip_tx_ts++;
	}

	dev_kfree_skb_any(bp->ptp_tx_skb);
	bp->ptp_tx_skb = NULL;
}

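/* Attach the HW Rx timestamp from the NIG host buffer to @skb and free the
 * buffer for the next PTP packet.
 */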
void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
{
	int port = BP_PORT(bp);
	u64 timestamp, ns;

	timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
			   NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
	timestamp <<= 32;
	timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
			    NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);

	/* Reset timestamp register to allow new timestamp */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);

	ns = timecounter_cyc2time(&bp->timecounter, timestamp);

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);

	DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
	   timestamp, ns);
}

/* Read the PHC */
static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
{
	struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
	int port = BP_PORT(bp);
	u32 wb_data[2];
	u64 phc_cycles;

	REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
		    NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
	phc_cycles = wb_data[1];
	phc_cycles = (phc_cycles << 32) + wb_data[0];

	DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

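/* With mult = 1 and shift = 0, the 64-bit free-running counter value is
 * interpreted directly as nanoseconds by the timecounter layer.
 */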
static void bnx2x_init_cyclecounter(struct bnx2x *bp)
{
	memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
	bp->cyclecounter.read = bnx2x_cyclecounter_read;
	bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
	bp->cyclecounter.shift = 0;
	bp->cyclecounter.mult = 1;
}

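/* Issue a SET_TIMESYNC function ramrod that resets the PHC drift adjustment
 * while keeping the current clock offset.
 */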
static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_set_timesync_params *set_timesync_params =
		&func_params.params.set_timesync;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;

	/* Function parameters */
	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;

	return bnx2x_func_state_change(bp, &func_params);
}

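/* Send a queue-update ramrod on every ETH queue so FW starts classifying
 * PTP packets on them.
 */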
static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
{
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	/* send queue update ramrod to enable PTP packets */
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
		  &q_params.params.update.update_flags);
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
		  &q_params.params.update.update_flags);

	/* send the ramrod on all the queues of the PF */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Set the appropriate Queue object */
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to enable PTP packets\n");
			return rc;
		}
	}

	return 0;
}

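/* NIG PTP classification masks. bnx2x_configure_ptp() writes 0x7FF/0x3FFF to
 * the PARAM/RULE registers to disable all detection; each filter mode below
 * clears a subset of bits in those base masks to enable the rules it needs.
 */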
#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)

int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 param, rule;
	int rc;

	if (!bp->hwtstamp_ioctl_called)
		return 0;

	param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
		NIG_REG_P0_TLLH_PTP_PARAM_MASK;
	rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
		NIG_REG_P0_TLLH_PTP_RULE_MASK;
	switch (bp->tx_type) {
	case HWTSTAMP_TX_ON:
		bp->flags |= TX_TIMESTAMPING_EN;
		REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
	case HWTSTAMP_TX_ONESTEP_P2P:
		BNX2X_ERR("One-step timestamping is not supported\n");
		return -ERANGE;
	}

	param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		NIG_REG_P0_LLH_PTP_PARAM_MASK;
	rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		NIG_REG_P0_LLH_PTP_RULE_MASK;
	switch (bp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		bp->rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection for L2 events */
		REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */
		REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
		break;
	}

	/* Indicate to FW that this queue contains PTP packets */
	rc = bnx2x_enable_ptp_packets(bp);
	if (rc)
		return rc;

	/* Configure HW to redirect PTP packets to host bridge */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);

	return 0;
}

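/* SIOCSHWTSTAMP handler: validate and apply the user's hwtstamp_config, then
 * copy the (possibly downgraded) rx_filter back to user space.
 */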
static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int rc;

	DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
	   config.tx_type, config.rx_filter);

	if (config.flags) {
		BNX2X_ERR("config.flags is reserved for future use\n");
		return -EINVAL;
	}

	bp->hwtstamp_ioctl_called = 1;
	bp->tx_type = config.tx_type;
	bp->rx_filter = config.rx_filter;

	rc = bnx2x_configure_ptp_filters(bp);
	if (rc)
		return rc;

	config.rx_filter = bp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

/* Configure HW for PTP */
static int bnx2x_configure_ptp(struct bnx2x *bp)
{
	int rc, port = BP_PORT(bp);
	u32 wb_data[2];

	/* Reset PTP event detection rules - will be configured in the IOCTL */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable PTP packets to host - will be configured in the IOCTL */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);

	/* Enable the PTP feature */
	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
	       NIG_REG_P0_PTP_EN, 0x3F);

	/* Enable the free-running counter */
	wb_data[0] = 0;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);

	/* Reset drift register (offset register is not affected) */
	rc = bnx2x_send_reset_timesync_ramrod(bp);
	if (rc) {
		BNX2X_ERR("Failed to reset PHC drift register\n");
		return -EFAULT;
	}

	/* Reset possibly old timestamps */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
	       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);

	return 0;
}

/* Called during load, to initialize PTP-related stuff */
void bnx2x_init_ptp(struct bnx2x *bp)
{
	int rc;

	/* Configure PTP in HW */
	rc = bnx2x_configure_ptp(bp);
	if (rc) {
		BNX2X_ERR("Stopping PTP initialization\n");
		return;
	}

	/* Init work queue for Tx timestamping */
	INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);

	/* Init cyclecounter and timecounter. This is done only in the first
	 * load. If done in every load, PTP application will fail when doing
	 * unload / load (e.g. MTU change) while it is running.
	 */
	if (!bp->timecounter_init_done) {
		bnx2x_init_cyclecounter(bp);
		timecounter_init(&bp->timecounter, &bp->cyclecounter,
				 ktime_to_ns(ktime_get_real()));
		bp->timecounter_init_done = 1;
	}

	DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
}
