1
2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4#include <linux/slab.h>
5
6#include "qlge.h"
7
8
9static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
10 u32 reg)
11{
12 u32 register_to_read;
13 u32 reg_val;
14 unsigned int status = 0;
15
16 register_to_read = MPI_NIC_REG_BLOCK
17 | MPI_NIC_READ
18 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
19 | reg;
20 status = ql_read_mpi_reg(qdev, register_to_read, ®_val);
21 if (status != 0)
22 return 0xffffffff;
23
24 return reg_val;
25}
26
27
28static int ql_write_other_func_reg(struct ql_adapter *qdev,
29 u32 reg, u32 reg_val)
30{
31 u32 register_to_read;
32 int status = 0;
33
34 register_to_read = MPI_NIC_REG_BLOCK
35 | MPI_NIC_READ
36 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
37 | reg;
38 status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
39
40 return status;
41}
42
43static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
44 u32 bit, u32 err_bit)
45{
46 u32 temp;
47 int count = 10;
48
49 while (count) {
50 temp = ql_read_other_func_reg(qdev, reg);
51
52
53 if (temp & err_bit)
54 return -1;
55 else if (temp & bit)
56 return 0;
57 mdelay(10);
58 count--;
59 }
60 return -1;
61}
62
63static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
64 u32 *data)
65{
66 int status;
67
68
69 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
70 XG_SERDES_ADDR_RDY, 0);
71 if (status)
72 goto exit;
73
74
75 ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
76
77
78 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
79 XG_SERDES_ADDR_RDY, 0);
80 if (status)
81 goto exit;
82
83
84 *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
85exit:
86 return status;
87}
88
89
90static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
91{
92 int status;
93
94
95 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
96 if (status)
97 goto exit;
98
99
100 ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
101
102
103 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
104 if (status)
105 goto exit;
106
107
108 *data = ql_read32(qdev, XG_SERDES_DATA);
109exit:
110 return status;
111}
112
113static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
114 u32 *direct_ptr, u32 *indirect_ptr,
115 bool direct_valid, bool indirect_valid)
116{
117 unsigned int status;
118
119 status = 1;
120 if (direct_valid)
121 status = ql_read_serdes_reg(qdev, addr, direct_ptr);
122
123 if (status)
124 *direct_ptr = 0xDEADBEEF;
125
126 status = 1;
127 if (indirect_valid)
128 status = ql_read_other_func_serdes_reg(
129 qdev, addr, indirect_ptr);
130
131 if (status)
132 *indirect_ptr = 0xDEADBEEF;
133}
134
/* Dump the SERDES register blocks of both NIC functions into the
 * coredump.  XAUI blocks are skipped when the corresponding lane is
 * powered down; XFI blocks are dumped only when the power-up status
 * bit for the lane is set.  Which coredump arrays are "direct" (this
 * function) vs "indirect" (the other function) depends on whether we
 * are the odd or even function.  Always returns 0; individual failed
 * reads are recorded as 0xDEADBEEF by ql_get_both_serdes().
 */
static int ql_get_serdes_regs(struct ql_adapter *qdev,
			      struct ql_mpi_coredump *mpi_coredump)
{
	int status;
	bool xfi_direct_valid = false, xfi_indirect_valid = false;
	bool xaui_direct_valid = true, xaui_indirect_valid = true;
	unsigned int i;
	u32 *direct_ptr, temp;
	u32 *indirect_ptr;

	/* Probe the other function's XAUI power state; treat a failed
	 * read the same as powered-down and skip that dump.
	 */
	status = ql_read_other_func_serdes_reg(qdev,
					       XG_SERDES_XAUI_HSS_PCS_START,
					       &temp);
	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;

	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
	    XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_indirect_valid = false;

	/* Same check for our own XAUI. */
	status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);

	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;

	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
	    XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_direct_valid = false;

	/* The status register carries the power-up bits for both XFI
	 * lanes; map XFI1/XFI2 onto direct/indirect depending on which
	 * function we are.  A failed read disables both XFI dumps.
	 */
	status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
	if (status)
		temp = 0;

	if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
	    XG_SERDES_ADDR_XFI1_PWR_UP) {
		/* odd function -> XFI1 belongs to the other function */
		if (qdev->func & 1)
			xfi_indirect_valid = true;
		else
			xfi_direct_valid = true;
	}
	if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
	    XG_SERDES_ADDR_XFI2_PWR_UP) {
		/* odd function -> XFI2 is our own lane */
		if (qdev->func & 1)
			xfi_direct_valid = true;
		else
			xfi_indirect_valid = true;
	}

	/* XAUI AN block */
	if (qdev->func & 1) {
		/* odd function: "serdes2" arrays are the direct ones */
		direct_ptr = mpi_coredump->serdes2_xaui_an;
		indirect_ptr = mpi_coredump->serdes_xaui_an;
	} else {
		/* even function: "serdes" arrays are the direct ones */
		direct_ptr = mpi_coredump->serdes_xaui_an;
		indirect_ptr = mpi_coredump->serdes2_xaui_an;
	}

	for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xaui_direct_valid, xaui_indirect_valid);

	/* XAUI HSS PCS block */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
	}

	for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xaui_direct_valid, xaui_indirect_valid);

	/* XFI AN block */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_an;
		indirect_ptr = mpi_coredump->serdes_xfi_an;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_an;
		indirect_ptr = mpi_coredump->serdes2_xfi_an;
	}

	for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* XFI TRAIN block */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes_xfi_train;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_train;
	}

	for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* XFI HSS PCS block */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
	}

	for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* XFI HSS TX block (note: these addresses step by 1, not 4) */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_tx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
	}
	for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* XFI HSS RX block */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_rx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
	}

	for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* XFI HSS PLL block */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
	}
	for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);
	return 0;
}
317
318static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
319 u32 *data)
320{
321 int status = 0;
322
323
324 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
325 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
326 if (status)
327 goto exit;
328
329
330 ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
331
332
333 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
334 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
335 if (status)
336 goto exit;
337
338
339 *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
340exit:
341 return status;
342}
343
344
345
346
347static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
348 unsigned int other_function)
349{
350 int status = 0;
351 int i;
352
353 for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
354
355
356
357 if ((i == 0x00000114) ||
358 (i == 0x00000118) ||
359 (i == 0x0000013c) ||
360 (i == 0x00000140) ||
361 (i > 0x00000150 && i < 0x000001fc) ||
362 (i > 0x00000278 && i < 0x000002a0) ||
363 (i > 0x000002c0 && i < 0x000002cf) ||
364 (i > 0x000002dc && i < 0x000002f0) ||
365 (i > 0x000003c8 && i < 0x00000400) ||
366 (i > 0x00000400 && i < 0x00000410) ||
367 (i > 0x00000410 && i < 0x00000420) ||
368 (i > 0x00000420 && i < 0x00000430) ||
369 (i > 0x00000430 && i < 0x00000440) ||
370 (i > 0x00000440 && i < 0x00000450) ||
371 (i > 0x00000450 && i < 0x00000500) ||
372 (i > 0x0000054c && i < 0x00000568) ||
373 (i > 0x000005c8 && i < 0x00000600)) {
374 if (other_function)
375 status =
376 ql_read_other_func_xgmac_reg(qdev, i, buf);
377 else
378 status = ql_read_xgmac_reg(qdev, i, buf);
379
380 if (status)
381 *buf = 0xdeadbeef;
382 break;
383 }
384 }
385 return status;
386}
387
388static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
389{
390 int i;
391
392 for (i = 0; i < 8; i++, buf++) {
393 ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
394 *buf = ql_read32(qdev, NIC_ETS);
395 }
396
397 for (i = 0; i < 2; i++, buf++) {
398 ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
399 *buf = ql_read32(qdev, CNA_ETS);
400 }
401
402 return 0;
403}
404
405static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
406{
407 int i;
408
409 for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
410 ql_write32(qdev, INTR_EN,
411 qdev->intr_context[i].intr_read_mask);
412 *buf = ql_read32(qdev, INTR_EN);
413 }
414}
415
416static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
417{
418 int i, status;
419 u32 value[3];
420
421 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
422 if (status)
423 return status;
424
425 for (i = 0; i < 16; i++) {
426 status = ql_get_mac_addr_reg(qdev,
427 MAC_ADDR_TYPE_CAM_MAC, i, value);
428 if (status) {
429 netif_err(qdev, drv, qdev->ndev,
430 "Failed read of mac index register\n");
431 goto err;
432 }
433 *buf++ = value[0];
434 *buf++ = value[1];
435 *buf++ = value[2];
436 }
437 for (i = 0; i < 32; i++) {
438 status = ql_get_mac_addr_reg(qdev,
439 MAC_ADDR_TYPE_MULTI_MAC, i, value);
440 if (status) {
441 netif_err(qdev, drv, qdev->ndev,
442 "Failed read of mac index register\n");
443 goto err;
444 }
445 *buf++ = value[0];
446 *buf++ = value[1];
447 }
448err:
449 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
450 return status;
451}
452
453static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
454{
455 int status;
456 u32 value, i;
457
458 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
459 if (status)
460 return status;
461
462 for (i = 0; i < 16; i++) {
463 status = ql_get_routing_reg(qdev, i, &value);
464 if (status) {
465 netif_err(qdev, drv, qdev->ndev,
466 "Failed read of routing index register\n");
467 goto err;
468 } else {
469 *buf++ = value;
470 }
471 }
472err:
473 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
474 return status;
475}
476
477
478static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
479{
480 u32 i;
481 int status;
482
483 for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
484 status = ql_write_mpi_reg(qdev,
485 RISC_124,
486 (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
487 if (status)
488 goto end;
489 status = ql_read_mpi_reg(qdev, RISC_127, buf);
490 if (status)
491 goto end;
492 }
493end:
494 return status;
495}
496
497
498static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
499 u32 offset, u32 count)
500{
501 int i, status = 0;
502 for (i = 0; i < count; i++, buf++) {
503 status = ql_read_mpi_reg(qdev, offset + i, buf);
504 if (status)
505 return status;
506 }
507 return status;
508}
509
510
511static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
512 u32 valid, u32 *buf)
513{
514 u32 module, mux_sel, probe, lo_val, hi_val;
515
516 for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
517 if (!((valid >> module) & 1))
518 continue;
519 for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
520 probe = clock
521 | PRB_MX_ADDR_ARE
522 | mux_sel
523 | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
524 ql_write32(qdev, PRB_MX_ADDR, probe);
525 lo_val = ql_read32(qdev, PRB_MX_DATA);
526 if (mux_sel == 0) {
527 *buf = probe;
528 buf++;
529 }
530 probe |= PRB_MX_ADDR_UP;
531 ql_write32(qdev, PRB_MX_ADDR, probe);
532 hi_val = ql_read32(qdev, PRB_MX_DATA);
533 *buf = lo_val;
534 buf++;
535 *buf = hi_val;
536 buf++;
537 }
538 }
539 return buf;
540}
541
542static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
543{
544
545 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
546 buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
547 PRB_MX_ADDR_VALID_SYS_MOD, buf);
548 buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
549 PRB_MX_ADDR_VALID_PCI_MOD, buf);
550 buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
551 PRB_MX_ADDR_VALID_XGM_MOD, buf);
552 buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
553 PRB_MX_ADDR_VALID_FC_MOD, buf);
554 return 0;
555
556}
557
558
/* Dump the routing index registers.  For each of the 4 types (8 slots
 * for types 0/1, 16 for types 2/3) the slot is selected via RT_IDX and
 * four words are stored: type, index, the readback of RT_IDX, and
 * RT_DATA.  Holds the routing index semaphore for the duration.
 */
static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
{
	int status;
	u32 type, index, index_max;
	u32 result_index;
	u32 result_data;
	u32 val;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	for (type = 0; type < 4; type++) {
		if (type < 2)
			index_max = 8;
		else
			index_max = 16;
		for (index = 0; index < index_max; index++) {
			/* kick off the read of this (type, index) slot */
			val = RT_IDX_RS
				| (type << RT_IDX_TYPE_SHIFT)
				| (index << RT_IDX_IDX_SHIFT);
			ql_write32(qdev, RT_IDX, val);
			result_index = 0;
			/* Busy-poll until the chip sets RT_IDX_MR.
			 * NOTE(review): no timeout here - a wedged chip
			 * would spin forever; confirm this is acceptable
			 * in the coredump path.
			 */
			while ((result_index & RT_IDX_MR) == 0)
				result_index = ql_read32(qdev, RT_IDX);
			result_data = ql_read32(qdev, RT_DATA);
			*buf = type;
			buf++;
			*buf = index;
			buf++;
			*buf = result_index;
			buf++;
			*buf = result_data;
			buf++;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
598
599
/* Dump every MAC protocol address table: for each type, iterate over
 * all (index, offset) pairs, kick off a read through MAC_ADDR_IDX and
 * store the readback of the index register plus MAC_ADDR_DATA into buf
 * (two words per entry word).
 */
static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
{
	u32 result_index, result_data;
	u32 type;
	u32 index;
	u32 offset;
	u32 val;
	u32 initial_val = MAC_ADDR_RS;	/* read strobe, set for every access */
	u32 max_index;
	u32 max_offset;

	for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
		switch (type) {
		/* CAM entries; note MAC_ADDR_ADR is OR'd in here and,
		 * because initial_val is declared outside the loop,
		 * deliberately stays set for all following types.
		 */
		case 0:
			initial_val |= MAC_ADDR_ADR;
			max_index = MAC_ADDR_MAX_CAM_ENTRIES;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 1:
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 2:
		case 3:
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 4:	/* FC MAC entries */
			max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
			break;
		case 5:	/* management MAC entries */
			max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
			break;
		case 6:	/* management VLAN entries */
			max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
			break;
		case 7:	/* management IPv4 entries */
			max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
			break;
		case 8:	/* management IPv6 entries */
			max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
			break;
		case 9:	/* management TCP/UDP dest port entries */
			max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
			break;
		default:
			pr_err("Bad type!!! 0x%08x\n", type);
			max_index = 0;
			max_offset = 0;
			break;
		}
		for (index = 0; index < max_index; index++) {
			for (offset = 0; offset < max_offset; offset++) {
				/* kick off the read of this entry word */
				val = initial_val
					| (type << MAC_ADDR_TYPE_SHIFT)
					| (index << MAC_ADDR_IDX_SHIFT)
					| (offset);
				ql_write32(qdev, MAC_ADDR_IDX, val);
				result_index = 0;
				/* Busy-poll until MAC_ADDR_MR is set.
				 * NOTE(review): no timeout - confirm a
				 * hung chip cannot reach this path.
				 */
				while ((result_index & MAC_ADDR_MR) == 0) {
					result_index = ql_read32(qdev,
								 MAC_ADDR_IDX);
				}
				result_data = ql_read32(qdev, MAC_ADDR_DATA);
				*buf = result_index;
				buf++;
				*buf = result_data;
				buf++;
			}
		}
	}
}
679
680static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
681{
682 u32 func_num, reg, reg_val;
683 int status;
684
685 for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
686 reg = MPI_NIC_REG_BLOCK
687 | (func_num << MPI_NIC_FUNCTION_SHIFT)
688 | (SEM / 4);
689 status = ql_read_mpi_reg(qdev, reg, ®_val);
690 *buf = reg_val;
691
692 if (!status)
693 *buf = 0xdeadbeef;
694 buf++;
695 }
696}
697
698
699static void ql_build_coredump_seg_header(
700 struct mpi_coredump_segment_header *seg_hdr,
701 u32 seg_number, u32 seg_size, u8 *desc)
702{
703 memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
704 seg_hdr->cookie = MPI_COREDUMP_COOKIE;
705 seg_hdr->seg_num = seg_number;
706 seg_hdr->seg_size = seg_size;
707 strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
708}
709
710
711
712
713
714
715
716
/* Capture a full MPI firmware coredump into the caller-allocated
 * mpi_coredump buffer.  The MPI RISC is paused while the register
 * segments are harvested, then unpaused and hard-reset before the RAM
 * areas are dumped.
 *
 * Returns 0 on success, -EINVAL when no buffer was supplied, or the
 * status of the first failing step.
 */
int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
{
	int status;
	int i;

	if (!mpi_coredump) {
		netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
		return -EINVAL;
	}

	/* Try to grab the semaphore, but proceed regardless: if the
	 * firmware died it may still be holding it (note the return
	 * value is deliberately ignored).
	 */
	ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);

	status = ql_pause_mpi_risc(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed RISC pause. Status = 0x%.08x\n", status);
		goto err;
	}

	/* Insert the global header. */
	memset(&(mpi_coredump->mpi_global_header), 0,
	       sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.header_size =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.image_size =
		sizeof(struct ql_mpi_coredump);
	strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.id_string));

	/* Segment headers for the NIC register blocks. */
	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
				     NIC1_CONTROL_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->nic_regs), "NIC1 Registers");

	ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
				     NIC2_CONTROL_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");

	/* Segment headers for the XGMac register blocks. */
	ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
				     NIC1_XGMAC_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
				     NIC2_XGMAC_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");

	if (qdev->func & 1) {
		/* Odd function: direct reads fill the NIC2/xgmac2
		 * arrays, the even function is read indirectly into
		 * the NIC1/xgmac1 arrays.
		 */
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic2_regs[i] =
				ql_read32(qdev, i * sizeof(u32));

		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic_regs[i] =
				ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);

		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
	} else {
		/* Even function: direct reads fill NIC1/xgmac1,
		 * indirect reads fill NIC2/xgmac2.
		 */
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic_regs[i] =
				ql_read32(qdev, i * sizeof(u32));
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic2_regs[i] =
				ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);

		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
	}

	/* Segment headers for all the SERDES register blocks. */
	ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
				     XAUI_AN_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes_xaui_an),
				     "XAUI AN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
				     XAUI_HSS_PCS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes_xaui_hss_pcs),
				     "XAUI HSS PCS Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes_xfi_an),
				     "XFI AN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
				     XFI_TRAIN_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes_xfi_train),
				     "XFI TRAIN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
				     XFI_HSS_PCS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes_xfi_hss_pcs),
				     "XFI HSS PCS Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
				     XFI_HSS_TX_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes_xfi_hss_tx),
				     "XFI HSS TX Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
				     XFI_HSS_RX_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes_xfi_hss_rx),
				     "XFI HSS RX Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
				     XFI_HSS_PLL_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes_xfi_hss_pll),
				     "XFI HSS PLL Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
				     XAUI2_AN_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes2_xaui_an),
				     "XAUI2 AN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
				     XAUI2_HSS_PCS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
				     "XAUI2 HSS PCS Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
				     XFI2_AN_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes2_xfi_an),
				     "XFI2 AN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
				     XFI2_TRAIN_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes2_xfi_train),
				     "XFI2 TRAIN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
				     XFI2_HSS_PCS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
				     "XFI2 HSS PCS Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
				     XFI2_HSS_TX_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes2_xfi_hss_tx),
				     "XFI2 HSS TX Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
				     XFI2_HSS_RX_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes2_xfi_hss_rx),
				     "XFI2 HSS RX Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
				     XFI2_HSS_PLL_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->serdes2_xfi_hss_pll),
				     "XFI2 HSS PLL Registers");

	/* Dump the SERDES registers for both functions. */
	status = ql_get_serdes_regs(qdev, mpi_coredump);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
			  status);
		goto err;
	}

	/* Core segment covers both the core and shadow registers. */
	ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
				     CORE_SEG_NUM,
				     sizeof(mpi_coredump->core_regs_seg_hdr) +
				     sizeof(mpi_coredump->mpi_core_regs) +
				     sizeof(mpi_coredump->mpi_core_sh_regs),
				     "Core Registers");

	/* Get the MPI core registers. */
	status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
				 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
	if (status)
		goto err;
	/* Get the MPI core shadow registers. */
	status = ql_get_mpi_shadow_regs(qdev,
					&mpi_coredump->mpi_core_sh_regs[0]);
	if (status)
		goto err;

	/* Get the Test Logic registers. */
	ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
				     TEST_LOGIC_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->test_logic_regs),
				     "Test Logic Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
				 TEST_REGS_ADDR, TEST_REGS_CNT);
	if (status)
		goto err;

	/* Get the RMII registers. */
	ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
				     RMII_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->rmii_regs),
				     "RMII Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
				 RMII_REGS_ADDR, RMII_REGS_CNT);
	if (status)
		goto err;

	/* Get the FCMAC1 registers. */
	ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
				     FCMAC1_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->fcmac1_regs),
				     "FCMAC1 Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
				 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
	if (status)
		goto err;

	/* Get the FCMAC2 registers. */
	ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
				     FCMAC2_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->fcmac2_regs),
				     "FCMAC2 Registers");

	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
				 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
	if (status)
		goto err;

	/* Get the FC1 MBX registers. */
	ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
				     FC1_MBOX_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->fc1_mbx_regs),
				     "FC1 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
				 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the IDE registers. */
	ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
				     IDE_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->ide_regs),
				     "IDE Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
				 IDE_REGS_ADDR, IDE_REGS_CNT);
	if (status)
		goto err;

	/* Get the NIC1 MBX registers. */
	ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
				     NIC1_MBOX_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->nic1_mbx_regs),
				     "NIC1 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
				 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the SMBus registers. */
	ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
				     SMBUS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->smbus_regs),
				     "SMBus Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
				 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
	if (status)
		goto err;

	/* Get the FC2 MBX registers. */
	ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
				     FC2_MBOX_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->fc2_mbx_regs),
				     "FC2 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
				 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the NIC2 MBX registers. */
	ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
				     NIC2_MBOX_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->nic2_mbx_regs),
				     "NIC2 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
				 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the I2C registers. */
	ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
				     I2C_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->i2c_regs),
				     "I2C Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
				 I2C_REGS_ADDR, I2C_REGS_CNT);
	if (status)
		goto err;

	/* Get the MEMC registers. */
	ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
				     MEMC_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->memc_regs),
				     "MEMC Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
				 MEMC_REGS_ADDR, MEMC_REGS_CNT);
	if (status)
		goto err;

	/* Get the PBus registers. */
	ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
				     PBUS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->pbus_regs),
				     "PBUS Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
				 PBUS_REGS_ADDR, PBUS_REGS_CNT);
	if (status)
		goto err;

	/* Get the MDE registers. */
	ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
				     MDE_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->mde_regs),
				     "MDE Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
				 MDE_REGS_ADDR, MDE_REGS_CNT);
	if (status)
		goto err;

	/* Fill in the miscellaneous NIC information. */
	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
				     MISC_NIC_INFO_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->misc_nic_info),
				     "MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* Get the interrupt states. */
	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
				     INTR_STATES_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->intr_states),
				     "INTR States");
	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	/* Get the CAM entries. */
	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
				     CAM_ENTRIES_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->cam_entries),
				     "CAM Entries");
	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		goto err;

	/* Get the routing words. */
	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
				     ROUTING_WORDS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->nic_routing_words),
				     "Routing Words");
	status = ql_get_routing_entries(qdev,
					&mpi_coredump->nic_routing_words[0]);
	if (status)
		goto err;

	/* Get the ETS registers. */
	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
				     ETS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->ets),
				     "ETS Registers");
	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		goto err;

	/* Get the probe dump. */
	ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
				     PROBE_DUMP_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->probe_dump),
				     "Probe Dump");
	ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);

	/* Get the routing index registers. */
	ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
				     ROUTING_INDEX_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->routing_regs),
				     "Routing Regs");
	status = ql_get_routing_index_registers(qdev,
						&mpi_coredump->routing_regs[0]);
	if (status)
		goto err;

	/* Get the MAC protocol registers. */
	ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
				     MAC_PROTOCOL_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->mac_prot_regs),
				     "MAC Prot Regs");
	ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);

	/* Get the semaphore registers for all functions. */
	ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
				     SEM_REGS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header) +
				     sizeof(mpi_coredump->sem_regs), "Sem Registers");

	ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);

	/* Flag a forced function reset in the MPI test logic register. */
	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);

	/* Clear the pause before resetting the RISC. */
	status = ql_unpause_mpi_risc(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed RISC unpause. Status = 0x%.08x\n", status);
		goto err;
	}

	/* Reset the RISC so we can dump RAM. */
	status = ql_hard_reset_mpi_risc(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed RISC reset. Status = 0x%.08x\n", status);
		goto err;
	}

	/* Dump the WCS (code) RAM. */
	ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
				     WCS_RAM_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->code_ram),
				     "WCS RAM");
	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
				       CODE_RAM_ADDR, CODE_RAM_CNT);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Dump of CODE RAM. Status = 0x%.08x\n",
			  status);
		goto err;
	}

	/* Dump the MEMC RAM. */
	ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
				     MEMC_RAM_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->memc_ram),
				     "MEMC RAM");
	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
				       MEMC_RAM_ADDR, MEMC_RAM_CNT);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
			  status);
		goto err;
	}
err:
	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
	return status;

}
1209
1210static void ql_get_core_dump(struct ql_adapter *qdev)
1211{
1212 if (!ql_own_firmware(qdev)) {
1213 netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
1214 return;
1215 }
1216
1217 if (!netif_running(qdev->ndev)) {
1218 netif_err(qdev, ifup, qdev->ndev,
1219 "Force Coredump can only be done from interface that is up\n");
1220 return;
1221 }
1222 ql_queue_fw_error(qdev);
1223}
1224
/* Capture an abbreviated register dump (no firmware core) into
 * @mpi_coredump.  Fills the global header, then the misc-NIC-info, NIC
 * register, interrupt-state, CAM-entry, routing-word and ETS segments.
 * A helper failure returns early, leaving the remaining segments'
 * headers unbuilt.
 */
static void ql_gen_reg_dump(struct ql_adapter *qdev,
			    struct ql_reg_dump *mpi_coredump)
{
	int i, status;

	memset(&(mpi_coredump->mpi_global_header), 0,
	       sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.header_size =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.image_size =
		sizeof(struct ql_reg_dump);
	/* id_string is a fixed-width field inside the binary dump image;
	 * strncpy's zero padding is intentional here.  "MPI Coredump" is
	 * shorter than the field, so the result is NUL-terminated.
	 */
	strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.id_string));

	/* Segment 16: misc NIC info (ring/irq counts, function number). */
	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
				     MISC_NIC_INFO_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->misc_nic_info),
				     "MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* Segment 16: the first 64 NIC control/status registers. */
	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
				     NIC1_CONTROL_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->nic_regs),
				     "NIC Registers");

	/* Registers live at 4-byte offsets 0x00..0xfc. */
	for (i = 0; i < 64; i++)
		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));

	/* Per-vector interrupt enable state. */
	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
				     INTR_STATES_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->intr_states),
				     "INTR States");
	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
				     CAM_ENTRIES_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->cam_entries),
				     "CAM Entries");
	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		return;

	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
				     ROUTING_WORDS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->nic_routing_words),
				     "Routing Words");
	status = ql_get_routing_entries(qdev,
					&mpi_coredump->nic_routing_words[0]);
	if (status)
		return;

	/* Segment 34: ETS (traffic class arbitration) registers. */
	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
				     ETS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->ets),
				     "ETS Registers");
	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		return;
}
1299
1300void ql_get_dump(struct ql_adapter *qdev, void *buff)
1301{
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1312 if (!ql_core_dump(qdev, buff))
1313 ql_soft_reset_mpi_risc(qdev);
1314 else
1315 netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1316 } else {
1317 ql_gen_reg_dump(qdev, buff);
1318 ql_get_core_dump(qdev);
1319 }
1320}
1321
1322
1323void ql_mpi_core_to_log(struct work_struct *work)
1324{
1325 struct ql_adapter *qdev =
1326 container_of(work, struct ql_adapter, mpi_core_to_log.work);
1327 u32 *tmp, count;
1328 int i;
1329
1330 count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
1331 tmp = (u32 *)qdev->mpi_coredump;
1332 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1333 "Core is dumping to log file!\n");
1334
1335 for (i = 0; i < count; i += 8) {
1336 pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
1337 "%.08x %.08x %.08x\n", i,
1338 tmp[i + 0],
1339 tmp[i + 1],
1340 tmp[i + 2],
1341 tmp[i + 3],
1342 tmp[i + 4],
1343 tmp[i + 5],
1344 tmp[i + 6],
1345 tmp[i + 7]);
1346 msleep(5);
1347 }
1348}
1349
1350#ifdef QL_REG_DUMP
1351static void ql_dump_intr_states(struct ql_adapter *qdev)
1352{
1353 int i;
1354 u32 value;
1355 for (i = 0; i < qdev->intr_count; i++) {
1356 ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
1357 value = ql_read32(qdev, INTR_EN);
1358 pr_err("%s: Interrupt %d is %s\n",
1359 qdev->ndev->name, i,
1360 (value & INTR_EN_EN ? "enabled" : "disabled"));
1361 }
1362}
1363
/* Read XGMAC register "reg" via ql_read_xgmac_reg() and log its value.
 * Caller must hold the XGMAC semaphore (see ql_dump_xgmac_control_regs).
 * "data" is zero-initialized so a failed read logs 0 rather than
 * printing an uninitialized stack value (the read status is ignored).
 */
#define DUMP_XGMAC(qdev, reg)					\
do {								\
	u32 data = 0;						\
	ql_read_xgmac_reg(qdev, reg, &data);			\
	pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
} while (0)
1370
/* Log the XGMAC (10G MAC) control/configuration registers.  Takes the
 * per-port XGMAC semaphore for the duration and releases it on exit;
 * bails out silently (with an error message) if the semaphore cannot
 * be acquired.
 */
void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
		pr_err("%s: Couldn't get xgmac sem\n", __func__);
		return;
	}
	DUMP_XGMAC(qdev, PAUSE_SRC_LO);
	DUMP_XGMAC(qdev, PAUSE_SRC_HI);
	DUMP_XGMAC(qdev, GLOBAL_CFG);
	DUMP_XGMAC(qdev, TX_CFG);
	DUMP_XGMAC(qdev, RX_CFG);
	DUMP_XGMAC(qdev, FLOW_CTL);
	DUMP_XGMAC(qdev, PAUSE_OPCODE);
	DUMP_XGMAC(qdev, PAUSE_TIMER);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
	DUMP_XGMAC(qdev, MAC_TX_PARAMS);
	DUMP_XGMAC(qdev, MAC_RX_PARAMS);
	DUMP_XGMAC(qdev, MAC_SYS_INT);
	DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
	DUMP_XGMAC(qdev, MAC_MGMT_INT);
	DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
	DUMP_XGMAC(qdev, EXT_ARB_MODE);
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
}
1396
/* Intentionally empty stub: ETS register dumping was never implemented.
 * Kept so ql_dump_regs() can call it unconditionally.
 */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
1400
1401static void ql_dump_cam_entries(struct ql_adapter *qdev)
1402{
1403 int i;
1404 u32 value[3];
1405
1406 i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1407 if (i)
1408 return;
1409 for (i = 0; i < 4; i++) {
1410 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
1411 pr_err("%s: Failed read of mac index register\n",
1412 __func__);
1413 return;
1414 } else {
1415 if (value[0])
1416 pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
1417 qdev->ndev->name, i, value[1], value[0],
1418 value[2]);
1419 }
1420 }
1421 for (i = 0; i < 32; i++) {
1422 if (ql_get_mac_addr_reg
1423 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
1424 pr_err("%s: Failed read of mac index register\n",
1425 __func__);
1426 return;
1427 } else {
1428 if (value[0])
1429 pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
1430 qdev->ndev->name, i, value[1], value[0]);
1431 }
1432 }
1433 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1434}
1435
1436void ql_dump_routing_entries(struct ql_adapter *qdev)
1437{
1438 int i;
1439 u32 value;
1440 i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
1441 if (i)
1442 return;
1443 for (i = 0; i < 16; i++) {
1444 value = 0;
1445 if (ql_get_routing_reg(qdev, i, &value)) {
1446 pr_err("%s: Failed read of routing index register\n",
1447 __func__);
1448 return;
1449 } else {
1450 if (value)
1451 pr_err("%s: Routing Mask %d = 0x%.08x\n",
1452 qdev->ndev->name, i, value);
1453 }
1454 }
1455 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
1456}
1457
/* Log one 32-bit NIC register: name (left-padded) and current value. */
#define DUMP_REG(qdev, reg) \
	pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
1460
/* Log every documented per-function NIC control/status register, then
 * the interrupt, XGMAC, ETS, CAM and routing state.  Debug aid only,
 * compiled under QL_REG_DUMP.
 */
void ql_dump_regs(struct ql_adapter *qdev)
{
	pr_err("reg dump for function #%d\n", qdev->func);
	DUMP_REG(qdev, SYS);
	DUMP_REG(qdev, RST_FO);
	DUMP_REG(qdev, FSC);
	DUMP_REG(qdev, CSR);
	DUMP_REG(qdev, ICB_RID);
	DUMP_REG(qdev, ICB_L);
	DUMP_REG(qdev, ICB_H);
	DUMP_REG(qdev, CFG);
	DUMP_REG(qdev, BIOS_ADDR);
	DUMP_REG(qdev, STS);
	DUMP_REG(qdev, INTR_EN);
	DUMP_REG(qdev, INTR_MASK);
	DUMP_REG(qdev, ISR1);
	DUMP_REG(qdev, ISR2);
	DUMP_REG(qdev, ISR3);
	DUMP_REG(qdev, ISR4);
	DUMP_REG(qdev, REV_ID);
	DUMP_REG(qdev, FRC_ECC_ERR);
	DUMP_REG(qdev, ERR_STS);
	DUMP_REG(qdev, RAM_DBG_ADDR);
	DUMP_REG(qdev, RAM_DBG_DATA);
	DUMP_REG(qdev, ECC_ERR_CNT);
	DUMP_REG(qdev, SEM);
	DUMP_REG(qdev, GPIO_1);
	DUMP_REG(qdev, GPIO_2);
	DUMP_REG(qdev, GPIO_3);
	DUMP_REG(qdev, XGMAC_ADDR);
	DUMP_REG(qdev, XGMAC_DATA);
	DUMP_REG(qdev, NIC_ETS);
	DUMP_REG(qdev, CNA_ETS);
	DUMP_REG(qdev, FLASH_ADDR);
	DUMP_REG(qdev, FLASH_DATA);
	DUMP_REG(qdev, CQ_STOP);
	DUMP_REG(qdev, PAGE_TBL_RID);
	DUMP_REG(qdev, WQ_PAGE_TBL_LO);
	DUMP_REG(qdev, WQ_PAGE_TBL_HI);
	DUMP_REG(qdev, CQ_PAGE_TBL_LO);
	DUMP_REG(qdev, CQ_PAGE_TBL_HI);
	DUMP_REG(qdev, COS_DFLT_CQ1);
	DUMP_REG(qdev, COS_DFLT_CQ2);
	DUMP_REG(qdev, SPLT_HDR);
	DUMP_REG(qdev, FC_PAUSE_THRES);
	DUMP_REG(qdev, NIC_PAUSE_THRES);
	DUMP_REG(qdev, FC_ETHERTYPE);
	DUMP_REG(qdev, FC_RCV_CFG);
	DUMP_REG(qdev, NIC_RCV_CFG);
	DUMP_REG(qdev, FC_COS_TAGS);
	DUMP_REG(qdev, NIC_COS_TAGS);
	DUMP_REG(qdev, MGMT_RCV_CFG);
	DUMP_REG(qdev, XG_SERDES_ADDR);
	DUMP_REG(qdev, XG_SERDES_DATA);
	DUMP_REG(qdev, PRB_MX_ADDR);
	DUMP_REG(qdev, PRB_MX_DATA);
	/* Deeper state behind indirect/semaphored access paths. */
	ql_dump_intr_states(qdev);
	ql_dump_xgmac_control_regs(qdev);
	ql_dump_ets_regs(qdev);
	ql_dump_cam_entries(qdev);
	ql_dump_routing_entries(qdev);
}
1523#endif
1524
1525#ifdef QL_STAT_DUMP
1526
/* Log one counter from qdev->nic_stats by field name. */
#define DUMP_STAT(qdev, stat) \
	pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
1529
1530void ql_dump_stat(struct ql_adapter *qdev)
1531{
1532 pr_err("%s: Enter\n", __func__);
1533 DUMP_STAT(qdev, tx_pkts);
1534 DUMP_STAT(qdev, tx_bytes);
1535 DUMP_STAT(qdev, tx_mcast_pkts);
1536 DUMP_STAT(qdev, tx_bcast_pkts);
1537 DUMP_STAT(qdev, tx_ucast_pkts);
1538 DUMP_STAT(qdev, tx_ctl_pkts);
1539 DUMP_STAT(qdev, tx_pause_pkts);
1540 DUMP_STAT(qdev, tx_64_pkt);
1541 DUMP_STAT(qdev, tx_65_to_127_pkt);
1542 DUMP_STAT(qdev, tx_128_to_255_pkt);
1543 DUMP_STAT(qdev, tx_256_511_pkt);
1544 DUMP_STAT(qdev, tx_512_to_1023_pkt);
1545 DUMP_STAT(qdev, tx_1024_to_1518_pkt);
1546 DUMP_STAT(qdev, tx_1519_to_max_pkt);
1547 DUMP_STAT(qdev, tx_undersize_pkt);
1548 DUMP_STAT(qdev, tx_oversize_pkt);
1549 DUMP_STAT(qdev, rx_bytes);
1550 DUMP_STAT(qdev, rx_bytes_ok);
1551 DUMP_STAT(qdev, rx_pkts);
1552 DUMP_STAT(qdev, rx_pkts_ok);
1553 DUMP_STAT(qdev, rx_bcast_pkts);
1554 DUMP_STAT(qdev, rx_mcast_pkts);
1555 DUMP_STAT(qdev, rx_ucast_pkts);
1556 DUMP_STAT(qdev, rx_undersize_pkts);
1557 DUMP_STAT(qdev, rx_oversize_pkts);
1558 DUMP_STAT(qdev, rx_jabber_pkts);
1559 DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
1560 DUMP_STAT(qdev, rx_drop_events);
1561 DUMP_STAT(qdev, rx_fcerr_pkts);
1562 DUMP_STAT(qdev, rx_align_err);
1563 DUMP_STAT(qdev, rx_symbol_err);
1564 DUMP_STAT(qdev, rx_mac_err);
1565 DUMP_STAT(qdev, rx_ctl_pkts);
1566 DUMP_STAT(qdev, rx_pause_pkts);
1567 DUMP_STAT(qdev, rx_64_pkts);
1568 DUMP_STAT(qdev, rx_65_to_127_pkts);
1569 DUMP_STAT(qdev, rx_128_255_pkts);
1570 DUMP_STAT(qdev, rx_256_511_pkts);
1571 DUMP_STAT(qdev, rx_512_to_1023_pkts);
1572 DUMP_STAT(qdev, rx_1024_to_1518_pkts);
1573 DUMP_STAT(qdev, rx_1519_to_max_pkts);
1574 DUMP_STAT(qdev, rx_len_err_pkts);
1575};
1576#endif
1577
1578#ifdef QL_DEV_DUMP
1579
/* Log one scalar qdev field using the given printf conversion. */
#define DUMP_QDEV_FIELD(qdev, type, field) \
	pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
/* Log a DMA-address qdev field; the cast keeps %llx correct on both
 * 32- and 64-bit dma_addr_t configurations.
 */
#define DUMP_QDEV_DMA_FIELD(qdev, field) \
	pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
/* Log member "field" of element "index" of the qdev array "array".
 * Fix vs the original: the expansion no longer ends in ';' -- call
 * sites supply their own semicolon, so the old form produced a stray
 * empty statement and would break inside an unbraced if/else.
 */
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
	pr_err("%s[%d].%s = " type "\n", \
	       #array, index, #field, qdev->array[index].field)
/* Log the adapter's software bookkeeping: device pointers, shadow-area
 * addresses, MSI-X vectors, per-vector interrupt contexts and ring
 * counts.  Debug aid only, compiled under QL_DEV_DUMP.
 */
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;
	DUMP_QDEV_FIELD(qdev, "%lx", flags);
	DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
	DUMP_QDEV_FIELD(qdev, "%p", pdev);
	DUMP_QDEV_FIELD(qdev, "%p", ndev);
	DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
	DUMP_QDEV_FIELD(qdev, "%p", reg_base);
	DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
	DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
	DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	/* msi_x_entry is only allocated when MSI-X is in use. */
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
		}
	for (i = 0; i < qdev->intr_count; i++) {
		DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
	}
	DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
	DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
	DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
	DUMP_QDEV_FIELD(qdev, "%u", lbq_buf_size);
}
1631#endif
1632
1633#ifdef QL_CB_DUMP
1634void ql_dump_wqicb(struct wqicb *wqicb)
1635{
1636 pr_err("Dumping wqicb stuff...\n");
1637 pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
1638 pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
1639 pr_err("wqicb->cq_id_rss = %d\n",
1640 le16_to_cpu(wqicb->cq_id_rss));
1641 pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
1642 pr_err("wqicb->wq_addr = 0x%llx\n",
1643 (unsigned long long) le64_to_cpu(wqicb->addr));
1644 pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
1645 (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
1646}
1647
/* Log the software state of one TX (work) ring: base addresses,
 * doorbell registers, indices and outstanding-send count.  NULL-safe.
 */
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	if (!tx_ring)
		return;
	pr_err("===================== Dumping tx_ring %d ===============\n",
	       tx_ring->wq_id);
	pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
	pr_err("tx_ring->base_dma = 0x%llx\n",
	       (unsigned long long) tx_ring->wq_base_dma);
	/* The consumer index shadow lives in DMA memory; only read it
	 * if the area has been mapped.
	 */
	pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
	       tx_ring->cnsmr_idx_sh_reg,
	       tx_ring->cnsmr_idx_sh_reg
	       ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
	pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
	pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
	pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
	pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
	pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
	pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
	pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
	pr_err("tx_ring->q = %p\n", tx_ring->q);
	pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
}
1671
/* Log an RSS init control block: base CQ, RSS mode flags, the
 * indirection mask/table and the IPv4/IPv6 hash keys.
 */
void ql_dump_ricb(struct ricb *ricb)
{
	int i;
	pr_err("===================== Dumping ricb ===============\n");
	pr_err("Dumping ricb stuff...\n");

	pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
	/* NOTE(review): RSS_L4K is tested against base_cq while the other
	 * mode bits come from flags -- this appears intentional (the L4K
	 * bit presumably lives in the base_cq byte), but confirm against
	 * the ricb layout in qlge.h.
	 */
	pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
	       ricb->flags & RSS_LI ? "RSS_LI " : "",
	       ricb->flags & RSS_LB ? "RSS_LB " : "",
	       ricb->flags & RSS_LM ? "RSS_LM " : "",
	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv4_hash_key[i]));
}
1700
/* Log a completion-queue init control block: interrupt vector, delays,
 * CQ base/length and the large/small buffer queue parameters.
 */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	pr_err("Dumping cqicb stuff...\n");

	pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
	pr_err("cqicb->flags = %x\n", cqicb->flags);
	pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
	pr_err("cqicb->addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->addr));
	pr_err("cqicb->prod_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
	pr_err("cqicb->pkt_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->pkt_delay));
	pr_err("cqicb->irq_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->irq_delay));
	pr_err("cqicb->lbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
	pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	pr_err("cqicb->lbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_len));
	pr_err("cqicb->sbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
	pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	pr_err("cqicb->sbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_len));
}
1729
1730static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
1731{
1732 struct ql_adapter *qdev = rx_ring->qdev;
1733
1734 if (rx_ring->cq_id < qdev->rss_ring_count)
1735 return "RX COMPLETION";
1736 else
1737 return "TX COMPLETION";
1738};
1739
/* Log the software state of one RX (completion) ring, including its
 * large-buffer (lbq) and small-buffer (sbq) queues.  NULL-safe.
 */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (!rx_ring)
		return;
	pr_err("===================== Dumping rx_ring %d ===============\n",
	       rx_ring->cq_id);
	pr_err("Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
	       qlge_rx_ring_type_name(rx_ring));
	pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
	pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
	pr_err("rx_ring->cq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->cq_base_dma);
	pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
	pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
	/* Producer index shadow lives in DMA memory; only read if mapped. */
	pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
	       rx_ring->prod_idx_sh_reg,
	       rx_ring->prod_idx_sh_reg
	       ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
	       rx_ring->cnsmr_idx_db_reg);
	pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
	pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
	pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);

	/* Large-buffer queue (jumbo/paged receive buffers). */
	pr_err("rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
	pr_err("rx_ring->lbq.base_dma = %llx\n",
	       (unsigned long long)rx_ring->lbq.base_dma);
	pr_err("rx_ring->lbq.base_indirect = %p\n",
	       rx_ring->lbq.base_indirect);
	pr_err("rx_ring->lbq.base_indirect_dma = %llx\n",
	       (unsigned long long)rx_ring->lbq.base_indirect_dma);
	pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue);
	pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n",
	       rx_ring->lbq.prod_idx_db_reg);
	pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
	pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
	pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
	pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);

	/* Small-buffer queue (header/small-frame receive buffers). */
	pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
	pr_err("rx_ring->sbq.base_dma = %llx\n",
	       (unsigned long long)rx_ring->sbq.base_dma);
	pr_err("rx_ring->sbq.base_indirect = %p\n",
	       rx_ring->sbq.base_indirect);
	pr_err("rx_ring->sbq.base_indirect_dma = %llx\n",
	       (unsigned long long)rx_ring->sbq.base_indirect_dma);
	pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue);
	pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n",
	       rx_ring->sbq.prod_idx_db_reg);
	pr_err("rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
	pr_err("rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
	pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
}
1798
1799void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
1800{
1801 void *ptr;
1802
1803 pr_err("%s: Enter\n", __func__);
1804
1805 ptr = kmalloc(size, GFP_ATOMIC);
1806 if (!ptr)
1807 return;
1808
1809 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
1810 pr_err("%s: Failed to upload control block!\n", __func__);
1811 goto fail_it;
1812 }
1813 switch (bit) {
1814 case CFG_DRQ:
1815 ql_dump_wqicb((struct wqicb *)ptr);
1816 break;
1817 case CFG_DCQ:
1818 ql_dump_cqicb((struct cqicb *)ptr);
1819 break;
1820 case CFG_DR:
1821 ql_dump_ricb((struct ricb *)ptr);
1822 break;
1823 default:
1824 pr_err("%s: Invalid bit value = %x\n", __func__, bit);
1825 break;
1826 }
1827fail_it:
1828 kfree(ptr);
1829}
1830#endif
1831
1832#ifdef QL_OB_DUMP
1833void ql_dump_tx_desc(struct tx_buf_desc *tbd)
1834{
1835 pr_err("tbd->addr = 0x%llx\n",
1836 le64_to_cpu((u64) tbd->addr));
1837 pr_err("tbd->len = %d\n",
1838 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1839 pr_err("tbd->flags = %s %s\n",
1840 tbd->len & TX_DESC_C ? "C" : ".",
1841 tbd->len & TX_DESC_E ? "E" : ".");
1842 tbd++;
1843 pr_err("tbd->addr = 0x%llx\n",
1844 le64_to_cpu((u64) tbd->addr));
1845 pr_err("tbd->len = %d\n",
1846 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1847 pr_err("tbd->flags = %s %s\n",
1848 tbd->len & TX_DESC_C ? "C" : ".",
1849 tbd->len & TX_DESC_E ? "E" : ".");
1850 tbd++;
1851 pr_err("tbd->addr = 0x%llx\n",
1852 le64_to_cpu((u64) tbd->addr));
1853 pr_err("tbd->len = %d\n",
1854 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1855 pr_err("tbd->flags = %s %s\n",
1856 tbd->len & TX_DESC_C ? "C" : ".",
1857 tbd->len & TX_DESC_E ? "E" : ".");
1858
1859}
1860
1861void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
1862{
1863 struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
1864 (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
1865 struct tx_buf_desc *tbd;
1866 u16 frame_len;
1867
1868 pr_err("%s\n", __func__);
1869 pr_err("opcode = %s\n",
1870 (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
1871 pr_err("flags1 = %s %s %s %s %s\n",
1872 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
1873 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
1874 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
1875 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
1876 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
1877 pr_err("flags2 = %s %s %s\n",
1878 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
1879 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
1880 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
1881 pr_err("flags3 = %s %s %s\n",
1882 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
1883 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
1884 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
1885 pr_err("tid = %x\n", ob_mac_iocb->tid);
1886 pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
1887 pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
1888 if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
1889 pr_err("frame_len = %d\n",
1890 le32_to_cpu(ob_mac_tso_iocb->frame_len));
1891 pr_err("mss = %d\n",
1892 le16_to_cpu(ob_mac_tso_iocb->mss));
1893 pr_err("prot_hdr_len = %d\n",
1894 le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
1895 pr_err("hdr_offset = 0x%.04x\n",
1896 le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
1897 frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
1898 } else {
1899 pr_err("frame_len = %d\n",
1900 le16_to_cpu(ob_mac_iocb->frame_len));
1901 frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
1902 }
1903 tbd = &ob_mac_iocb->tbd[0];
1904 ql_dump_tx_desc(tbd);
1905}
1906
1907void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
1908{
1909 pr_err("%s\n", __func__);
1910 pr_err("opcode = %d\n", ob_mac_rsp->opcode);
1911 pr_err("flags = %s %s %s %s %s %s %s\n",
1912 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
1913 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
1914 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
1915 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
1916 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
1917 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
1918 ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
1919 pr_err("tid = %x\n", ob_mac_rsp->tid);
1920}
1921#endif
1922
1923#ifdef QL_IB_DUMP
/* Decode and log an inbound MAC IOCB response: status flags, multicast
 * match type, error class, RSS classification, data/header buffer
 * addresses and the optional VLAN tag.
 */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
	pr_err("flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");

	/* Multicast match type is a 2-bit field, not independent flags. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		pr_err("%s%s%s Multicast\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");

	pr_err("flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");

	/* Error class is likewise an exclusive multi-bit code. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		pr_err("%s%s%s%s%s error\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");

	pr_err("flags3 = %s%s\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("RSS flags = %s%s%s%s\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");

	pr_err("data_len = %d\n",
	       le32_to_cpu(ib_mac_rsp->data_len));
	pr_err("data_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("rss = %x\n",
		       le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		pr_err("vlan_id = %x\n",
		       le16_to_cpu(ib_mac_rsp->vlan_id));

	pr_err("flags4 = %s%s%s\n",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");

	/* Split-header receive: header landed in a separate buffer. */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		pr_err("hdr length = %d\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		pr_err("hdr addr = 0x%llx\n",
		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
2003#endif
2004
2005#ifdef QL_ALL_DUMP
2006void ql_dump_all(struct ql_adapter *qdev)
2007{
2008 int i;
2009
2010 QL_DUMP_REGS(qdev);
2011 QL_DUMP_QDEV(qdev);
2012 for (i = 0; i < qdev->tx_ring_count; i++) {
2013 QL_DUMP_TX_RING(&qdev->tx_ring[i]);
2014 QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
2015 }
2016 for (i = 0; i < qdev->rx_ring_count; i++) {
2017 QL_DUMP_RX_RING(&qdev->rx_ring[i]);
2018 QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
2019 }
2020}
2021#endif
2022