// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 */

#include <linux/sort.h>
#include <linux/string.h>

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"

static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28},
	{0x7e40, 0x7e44, 0x040, 10},
	{0x7e40, 0x7e44, 0x050, 10},
	{0x7e40, 0x7e44, 0x060, 14},
	{0x7e40, 0x7e44, 0x06F, 1},
	{0x7e40, 0x7e44, 0x070, 6},
	{0x7e40, 0x7e44, 0x130, 18},
	{0x7e40, 0x7e44, 0x145, 19},
	{0x7e40, 0x7e44, 0x160, 1},
	{0x7e40, 0x7e44, 0x230, 25},
	{0x7e40, 0x7e44, 0x24a, 3},
	{0x7e40, 0x7e44, 0x8C0, 1}
};

static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28},
	{0x7e40, 0x7e44, 0x040, 19},
	{0x7e40, 0x7e44, 0x054, 2},
	{0x7e40, 0x7e44, 0x060, 13},
	{0x7e40, 0x7e44, 0x06F, 1},
	{0x7e40, 0x7e44, 0x120, 4},
	{0x7e40, 0x7e44, 0x12b, 2},
	{0x7e40, 0x7e44, 0x12f, 21},
	{0x7e40, 0x7e44, 0x145, 19},
	{0x7e40, 0x7e44, 0x230, 25},
	{0x7e40, 0x7e44, 0x8C0, 1}
};

static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 21},
	{0x7e50, 0x7e54, 0x30, 32},
	{0x7e50, 0x7e54, 0x50, 22},
	{0x7e50, 0x7e54, 0x68, 12}
};

static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 8},
	{0x7e50, 0x7e54, 0x20, 13},
	{0x7e50, 0x7e54, 0x30, 16},
	{0x7e50, 0x7e54, 0x40, 16},
	{0x7e50, 0x7e54, 0x50, 16},
	{0x7e50, 0x7e54, 0x60, 6},
	{0x7e50, 0x7e54, 0x68, 4}
};

static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
	{0x10cc, 0x10d0, 0x0, 16},
	{0x10cc, 0x10d4, 0x0, 16},
};

static const u32 t6_sge_qbase_index_array[] = {
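	/* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */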
	0x1250, 0x1240, 0x1244, 0x1248, 0x124c,
};

static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
	{0x5a04, 0x5a0c, 0x00, 0x20},
	{0x5a04, 0x5a0c, 0x21, 0x20},
	{0x5a04, 0x5a0c, 0x41, 0x10},
};

static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = {
	{0x5a10, 0x5a18, 0x00, 0x20},
	{0x5a10, 0x5a18, 0x21, 0x18},
};

static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = {
	{0x8FD0, 0x8FD4, 0x10000, 0x20},
	{0x8FD0, 0x8FD4, 0x10021, 0x0D},
};

static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
	{0x8FF0, 0x8FF4, 0x10000, 0x20},
	{0x8FF0, 0x8FF4, 0x10021, 0x1D},
};

static const u32 t5_pcie_config_array[][2] = {
	{0x0, 0x34},
	{0x3c, 0x40},
	{0x50, 0x64},
	{0x70, 0x80},
	{0x94, 0xa0},
	{0xb0, 0xb8},
	{0xd0, 0xd4},
	{0x100, 0x128},
	{0x140, 0x148},
	{0x150, 0x164},
	{0x170, 0x178},
	{0x180, 0x194},
	{0x1a0, 0x1b8},
	{0x1c0, 0x208},
};

static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xa000, 23},
	{0x78f8, 0x78fc, 0xa400, 30},
	{0x78f8, 0x78fc, 0xa800, 20}
};

static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xe400, 17},
	{0x78f8, 0x78fc, 0xe640, 13}
};

static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
	{0x7b50, 0x7b54, 0x2000, 0x20, 0},
	{0x7b50, 0x7b54, 0x2080, 0x1d, 0},
	{0x7b50, 0x7b54, 0x00, 0x20, 0},
	{0x7b50, 0x7b54, 0x80, 0x20, 0},
	{0x7b50, 0x7b54, 0x100, 0x11, 0},
	{0x7b50, 0x7b54, 0x200, 0x10, 0},
	{0x7b50, 0x7b54, 0x240, 0x2, 0},
	{0x7b50, 0x7b54, 0x250, 0x2, 0},
	{0x7b50, 0x7b54, 0x260, 0x2, 0},
	{0x7b50, 0x7b54, 0x270, 0x2, 0},
	{0x7b50, 0x7b54, 0x280, 0x20, 0},
	{0x7b50, 0x7b54, 0x300, 0x20, 0},
	{0x7b50, 0x7b54, 0x380, 0x14, 0},
	{0x7b50, 0x7b54, 0x4900, 0x4, 0x4},
	{0x7b50, 0x7b54, 0x4904, 0x4, 0x4},
	{0x7b50, 0x7b54, 0x4908, 0x4, 0x4},
	{0x7b50, 0x7b54, 0x4910, 0x4, 0x4},
	{0x7b50, 0x7b54, 0x4914, 0x4, 0x4},
	{0x7b50, 0x7b54, 0x4920, 0x10, 0x10},
	{0x7b50, 0x7b54, 0x4924, 0x10, 0x10},
	{0x7b50, 0x7b54, 0x4928, 0x10, 0x10},
	{0x7b50, 0x7b54, 0x492c, 0x10, 0x10},
};

static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
	{0x7b50, 0x7b54, 0x2000, 0x20, 0},
	{0x7b50, 0x7b54, 0x2080, 0x19, 0},
	{0x7b50, 0x7b54, 0x00, 0x20, 0},
	{0x7b50, 0x7b54, 0x80, 0x20, 0},
	{0x7b50, 0x7b54, 0x100, 0x11, 0},
	{0x7b50, 0x7b54, 0x200, 0x10, 0},
	{0x7b50, 0x7b54, 0x240, 0x2, 0},
	{0x7b50, 0x7b54, 0x250, 0x2, 0},
	{0x7b50, 0x7b54, 0x260, 0x2, 0},
	{0x7b50, 0x7b54, 0x270, 0x2, 0},
	{0x7b50, 0x7b54, 0x280, 0x20, 0},
	{0x7b50, 0x7b54, 0x300, 0x20, 0},
	{0x7b50, 0x7b54, 0x380, 0x14, 0},
};

static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
	{0x51320, 0x51324, 0xa000, 32}
};

u32 cudbg_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32);
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
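			/* In T6, there is no MC1, so HMA shares the MC1
			 * address space.
			 */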
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_QDESC:
		cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
		break;
	default:
		break;
	}

	return len;
}

static int cudbg_do_compression(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *pin_buff,
				struct cudbg_buffer *dbg_buff)
{
	struct cudbg_buffer temp_in_buff = { 0 };
	int bytes_left, bytes_read, bytes;
	u32 offset = dbg_buff->offset;
	int rc;

	temp_in_buff.offset = pin_buff->offset;
	temp_in_buff.data = pin_buff->data;
	temp_in_buff.size = pin_buff->size;

	bytes_left = pin_buff->size;
	bytes_read = 0;
	while (bytes_left > 0) {
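		/* Do compression in smaller chunks */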
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		temp_in_buff.data = (char *)pin_buff->data + bytes_read;
		temp_in_buff.size = bytes;
		rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
		if (rc)
			return rc;
		bytes_left -= bytes;
		bytes_read += bytes;
	}

	pin_buff->size = dbg_buff->offset - offset;
	return 0;
}

static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
					struct cudbg_buffer *pin_buff,
					struct cudbg_buffer *dbg_buff)
{
	int rc = 0;

	if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
		cudbg_update_buff(pin_buff, dbg_buff);
	} else {
		rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
		if (rc)
			goto out;
	}

out:
	cudbg_put_buff(pdbg_init, pin_buff);
	return rc;
}

static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
		return 0;

	return 1;
}

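/* This function adds padding bytes to the debug buffer so that the next
 * entity starts 4-byte aligned.
 */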
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
			      void *dest)
{
	int vaddr, rc;

	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
	if (vaddr < 0)
		return vaddr;

	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
	if (rc < 0)
		return rc;

	return 0;
}

static int cudbg_mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct cudbg_mem_desc *)a)->base -
	       ((const struct cudbg_mem_desc *)b)->base;
}

int cudbg_fill_meminfo(struct adapter *padap,
		       struct cudbg_meminfo *meminfo_buff)
{
	struct cudbg_mem_desc *md;
	u32 lo, hi, used, alloc;
	int n, i;

	memset(meminfo_buff->avail, 0,
	       ARRAY_SIZE(meminfo_buff->avail) *
	       sizeof(struct cudbg_mem_desc));
	memset(meminfo_buff->mem, 0,
	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
	md = meminfo_buff->mem;

	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
		meminfo_buff->mem[i].limit = 0;
		meminfo_buff->mem[i].idx = i;
	}

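	/* Find and sort the populated memory ranges */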
	i = 0;
	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (lo & EDRAM0_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 0;
		i++;
	}

	if (lo & EDRAM1_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 1;
		i++;
	}

	if (is_t5(padap->params.chip)) {
		if (lo & EXT_MEM0_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 3;
			i++;
		}

		if (lo & EXT_MEM1_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 4;
			i++;
		}
	} else {
		if (lo & EXT_MEM_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 2;
			i++;
		}

		if (lo & HMA_MUX_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 5;
			i++;
		}
	}

	if (!i)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	meminfo_buff->avail_c = i;
	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);
	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);

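	/* the next few have explicit upper bounds */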
	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
	md++;

	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
	md++;

	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
		} else {
			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
			md->base = t4_read_reg(padap,
					       LE_DB_HASH_TBL_BASE_ADDR_A);
		}
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	}
	md++;

#define ulp_region(reg) do { \
	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
} while (0)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region
	md->base = 0;
	md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	if (!is_t4(padap->params.chip)) {
		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
		u32 size = 0;

		if (is_t5(padap->params.chip)) {
			if (sge_ctrl & VFIFO_ENABLE_F)
				size = DBVFIFO_SIZE_G(fifo_size);
		} else {
			size = T6_DBVFIFO_SIZE_G(fifo_size);
		}

		if (size) {
			md->base = BASEADDR_G(t4_read_reg(padap,
							  SGE_DBVFIFO_BADDR_A));
			md->limit = md->base + (size << 2) - 1;
		}
	}

	md++;

	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
	md->limit = 0;
	md++;

	md->base = padap->vres.ocq.start;
	if (padap->vres.ocq.size)
		md->limit = md->base + padap->vres.ocq.size - 1;
	else
		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	md++;

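	/* add any address-space holes; there can be up to 3 */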
	for (n = 0; n < i - 1; n++)
		if (meminfo_buff->avail[n].limit <
		    meminfo_buff->avail[n + 1].base)
			(md++)->base = meminfo_buff->avail[n].limit;

	if (meminfo_buff->avail[n].limit)
		(md++)->base = meminfo_buff->avail[n].limit;

	n = md - meminfo_buff->mem;
	meminfo_buff->mem_c = n;

	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);

	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_ram_lo = lo;
	meminfo_buff->up_ram_hi = hi;

	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_extmem2_lo = lo;
	meminfo_buff->up_extmem2_hi = hi;

	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
	for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
		meminfo_buff->free_rx_cnt +=
			FREERXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_RX_CNT_A));

	meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
	meminfo_buff->rx_pages_data[1] =
		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;

	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
	for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
		meminfo_buff->free_tx_cnt +=
			FREETXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_TX_CNT_A));

	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
	meminfo_buff->tx_pages_data[1] =
		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
	meminfo_buff->tx_pages_data[2] =
		hi >= (1 << 20) ? 'M' : 'K';
	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
	meminfo_buff->p_structs_free_cnt =
		FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));

	for (i = 0; i < 4; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->port_used[i] = used;
		meminfo_buff->port_alloc[i] = alloc;
	}

	for (i = 0; i < padap->params.arch.nchan; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->loopback_used[i] = used;
		meminfo_buff->loopback_alloc[i] = alloc;
	}

	return 0;
}

int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

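	/* Collect FW devlog */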
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 10 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

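	/* collect CIM IBQ */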
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

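	/* t4_read_cim_ibq() returns the number of words read or an error */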
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
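	/* no_of_read_words less than or equal to 0 means error */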
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
	u32 value;

	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
	value = CIMQSIZE_G(value) * 64; /* size in number of words */
	return value * sizeof(u32);
}

static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

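	/* collect CIM OBQ */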
	qsize = cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

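	/* t4_read_cim_obq() returns the number of words read or an error */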
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
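	/* no_of_read_words less than or equal to 0 means error */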
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

static int cudbg_meminfo_get_mem_index(struct adapter *padap,
				       struct cudbg_meminfo *mem_info,
				       u8 mem_type, u8 *idx)
{
	u8 i, flag;

	switch (mem_type) {
	case MEM_EDC0:
		flag = EDC0_FLAG;
		break;
	case MEM_EDC1:
		flag = EDC1_FLAG;
		break;
	case MEM_MC0:
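		/* Some T5 cards have both MC0 and MC1 */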
		flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
		break;
	case MEM_MC1:
		flag = MC1_FLAG;
		break;
	case MEM_HMA:
		flag = HMA_FLAG;
		break;
	default:
		return CUDBG_STATUS_ENTITY_NOT_FOUND;
	}

	for (i = 0; i < mem_info->avail_c; i++) {
		if (mem_info->avail[i].idx == flag) {
			*idx = i;
			return 0;
		}
	}

	return CUDBG_STATUS_ENTITY_NOT_FOUND;
}

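/* Fetch the @region_name's start and end from @meminfo. */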
static int cudbg_get_mem_region(struct adapter *padap,
				struct cudbg_meminfo *meminfo,
				u8 mem_type, const char *region_name,
				struct cudbg_mem_desc *mem_desc)
{
	u8 mc, found = 0;
	u32 idx = 0;
	int rc, i;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
	if (rc)
		return rc;

	i = match_string(cudbg_region, ARRAY_SIZE(cudbg_region), region_name);
	if (i < 0)
		return -EINVAL;

	idx = i;
	for (i = 0; i < meminfo->mem_c; i++) {
		if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
			continue; /* skip holes and hidden regions */

		if (!(meminfo->mem[i].limit))
			meminfo->mem[i].limit =
				i < meminfo->mem_c - 1 ?
				meminfo->mem[i + 1].base - 1 : ~0;

		if (meminfo->mem[i].idx == idx) {
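			/* Check if the region exists in @mem_type memory */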
			if (meminfo->mem[i].base < meminfo->avail[mc].base &&
			    meminfo->mem[i].limit < meminfo->avail[mc].base)
				return -EINVAL;

			if (meminfo->mem[i].base > meminfo->avail[mc].limit)
				return -EINVAL;

			memcpy(mem_desc, &meminfo->mem[i],
			       sizeof(struct cudbg_mem_desc));
			found = 1;
			break;
		}
	}
	if (!found)
		return -EINVAL;

	return 0;
}

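/* Fetch and update the start and end of the requested memory region w.r.t. 0
 * in the corresponding EDC/MC/HMA.
 */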
static int cudbg_get_mem_relative(struct adapter *padap,
				  struct cudbg_meminfo *meminfo,
				  u8 mem_type, u32 *out_base, u32 *out_end)
{
	u8 mc_idx;
	int rc;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
	if (rc)
		return rc;

	if (*out_base < meminfo->avail[mc_idx].base)
		*out_base = 0;
	else
		*out_base -= meminfo->avail[mc_idx].base;

	if (*out_end > meminfo->avail[mc_idx].limit)
		*out_end = meminfo->avail[mc_idx].limit;
	else
		*out_end -= meminfo->avail[mc_idx].base;

	return 0;
}

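/* Get TX/RX payload region range, relative to the containing memory */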
static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
				   const char *region_name,
				   struct cudbg_region_info *payload)
{
	struct cudbg_mem_desc mem_desc = { 0 };
	struct cudbg_meminfo meminfo;
	int rc;

	rc = cudbg_fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
				  &mem_desc);
	if (rc) {
		payload->exist = false;
		return 0;
	}

	payload->exist = true;
	payload->start = mem_desc.base;
	payload->end = mem_desc.limit;

	return cudbg_get_mem_relative(padap, &meminfo, mem_type,
				      &payload->start, &payload->end);
}

static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
			     int mtype, u32 addr, u32 len, void *hbuf)
{
	u32 win_pf, memoffset, mem_aperture, mem_base;
	struct adapter *adap = pdbg_init->adap;
	u32 pos, offset, resid;
	u32 *res_buf;
	u64 *buf;
	int ret;

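	/* Argument sanity checks: the address and host buffer must both be
	 * 32-bit aligned.
	 */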
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;

	buf = (u64 *)hbuf;

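	/* Try to do 64-bit reads; the residual is handled afterwards */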
	resid = len & 0x7;
	len -= resid;

	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
				&mem_aperture);
	if (ret)
		return ret;

	addr = addr + memoffset;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	pos = addr & ~(mem_aperture - 1);
	offset = addr - pos;

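	/* Set up the initial PCI-E Memory Window to cover the start of the
	 * transfer.
	 */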
	t4_memory_update_win(adap, win, pos | win_pf);

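	/* Transfer data from the adapter */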
	while (len > 0) {
		*buf++ = le64_to_cpu((__force __le64)
				     t4_read_reg64(adap, mem_base + offset));
		offset += sizeof(u64);
		len -= sizeof(u64);

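		/* If we have reached the end of the current window aperture,
		 * move the PCI-E Memory Window on to the next one.
		 */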
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	res_buf = (u32 *)buf;

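	/* Read the residual in 32-bit multiples */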
	while (resid > sizeof(u32)) {
		*res_buf++ = le32_to_cpu((__force __le32)
					 t4_read_reg(adap, mem_base + offset));
		offset += sizeof(u32);
		resid -= sizeof(u32);

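		/* If we have reached the end of the current window aperture,
		 * move the PCI-E Memory Window on to the next one.
		 */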
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

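	/* Transfer any residual of less than 32 bits */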
	if (resid)
		t4_memory_rw_residual(adap, resid, mem_base + offset,
				      (u8 *)res_buf, T4_MEMORY_READ);

	return 0;
}

#define CUDBG_YIELD_ITERATION 256

static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	static const char * const region_name[] = { "Tx payload:",
						    "Rx payload:" };
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_region_info payload[2];
	u32 yield_count = 0;
	int rc = 0;
	u8 i;

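	/* Get the TX/RX payload region ranges, if they exist */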
	memset(payload, 0, sizeof(payload));
	for (i = 0; i < ARRAY_SIZE(region_name); i++) {
		rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
					     &payload[i]);
		if (rc)
			return rc;

		if (payload[i].exist) {
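			/* Align start and end to avoid wrap around */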
			payload[i].start = roundup(payload[i].start,
						   CUDBG_CHUNK_SIZE);
			payload[i].end = rounddown(payload[i].end,
						   CUDBG_CHUNK_SIZE);
		}
	}

	bytes_left = tot_len;
	while (bytes_left > 0) {
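		/* As MC size can be huge and is read through PIO access, this
		 * loop can hold the CPU for a long time. The OS may decide
		 * the process is hung and emit CPU stall traces, so yield the
		 * CPU regularly, once every CUDBG_YIELD_ITERATION iterations.
		 */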
		yield_count++;
		if (!(yield_count % CUDBG_YIELD_ITERATION))
			schedule();

		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;

		for (i = 0; i < ARRAY_SIZE(payload); i++)
			if (payload[i].exist &&
			    bytes_read >= payload[i].start &&
			    bytes_read + bytes <= payload[i].end)
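				/* Skip chunks that lie entirely inside a
				 * payload region.
				 */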
				goto skip_read;

		spin_lock(&padap->win0_lock);
		rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
				       bytes_read, bytes, temp_buff.data);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}

skip_read:
		bytes_left -= bytes;
		bytes_read += bytes;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return rc;
}

static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
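		/* Flush uP dcache before reading edcX/mcX */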
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
		if (rc)
			cudbg_err->sys_warn = rc;
	}
}

static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
				 struct cudbg_error *cudbg_err,
				 u8 mem_type, unsigned long *region_size)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_meminfo mem_info;
	u8 mc_idx;
	int rc;

	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
	rc = cudbg_fill_meminfo(padap, &mem_info);
	if (rc) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	cudbg_t4_fwcache(pdbg_init, cudbg_err);
	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
	if (rc) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	if (region_size)
		*region_size = mem_info.avail[mc_idx].limit -
			       mem_info.avail[mc_idx].base;

	return 0;
}

static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
				    struct cudbg_buffer *dbg_buff,
				    struct cudbg_error *cudbg_err,
				    u8 mem_type)
{
	unsigned long size = 0;
	int rc;

	rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
	if (rc)
		return rc;

	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
				 cudbg_err);
}

int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}

int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC0);
}

int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC1);
}

int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_HMA);
}

int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc, nentries;

	nentries = t4_chip_rss_size(padap);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_rss_vf_conf *vfconf;
	int vf, rc, vf_count;

	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pm_stats *pm_stats_buff;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
			    &temp_buff);
	if (rc)
		return rc;

	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_hw_sched *hw_sched_buff;
	int i, rc = 0;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
			    &temp_buff);
	if (rc)
		return rc;

	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
	for (i = 0; i < NTX_SCHED; ++i)
		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
				&hw_sched_buff->ipg[i], true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

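	/* TP_PIO */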
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

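	/* TP_TM_PIO */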
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

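	/* TP_MIB_INDEX */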
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
					      struct sge_qbase_reg_field *qbase,
					      u32 func, bool is_pf)
{
	u32 *buff, i;

	if (is_pf) {
		buff = qbase->pf_data_value[func];
	} else {
		buff = qbase->vf_data_value[func];

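		/* In SGE_QBASE_INDEX, entries 0 to 7 select PF0 to PF7;
		 * VF entries start at index 8.
		 */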
		func += 8;
	}

	t4_write_reg(padap, qbase->reg_addr, func);
	for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
		*buff = t4_read_reg(padap, qbase->reg_data[i]);
}

int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct sge_qbase_reg_field *sge_qbase;
	struct ireg_buf *ch_sge_dbg;
	u8 padap_running = 0;
	int i, rc;
	u32 size;

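	/* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
	 * lead to SGE missing doorbells under heavy traffic. So, only
	 * collect them when the adapter is idle.
	 */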
	for_each_port(padap, i) {
		padap_running = netif_running(padap->port[i]);
		if (padap_running)
			break;
	}

	size = sizeof(*ch_sge_dbg) * 2;
	if (!padap_running)
		size += sizeof(*sge_qbase);

	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}

	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
	    !padap_running) {
		sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
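		/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
		 * SGE_QBASE_MAP[0-3]
		 */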
1855 sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
1856 for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
1857 sge_qbase->reg_data[i] =
1858 t6_sge_qbase_index_array[i + 1];
1859
1860 for (i = 0; i <= PCIE_FW_MASTER_M; i++)
1861 cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1862 i, true);
1863
1864 for (i = 0; i < padap->params.arch.vfcount; i++)
1865 cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1866 i, false);
1867
1868 sge_qbase->vfcount = padap->params.arch.vfcount;
1869 }
1870
1871 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1872}
1873
1874int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
1875 struct cudbg_buffer *dbg_buff,
1876 struct cudbg_error *cudbg_err)
1877{
1878 struct adapter *padap = pdbg_init->adap;
1879 struct cudbg_buffer temp_buff = { 0 };
1880 struct cudbg_ulprx_la *ulprx_la_buff;
1881 int rc;
1882
1883 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
1884 &temp_buff);
1885 if (rc)
1886 return rc;
1887
1888 ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
1889 t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
1890 ulprx_la_buff->size = ULPRX_LA_SIZE;
1891 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1892}
1893
1894int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
1895 struct cudbg_buffer *dbg_buff,
1896 struct cudbg_error *cudbg_err)
1897{
1898 struct adapter *padap = pdbg_init->adap;
1899 struct cudbg_buffer temp_buff = { 0 };
1900 struct cudbg_tp_la *tp_la_buff;
1901 int size, rc;
1902
1903 size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
1904 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1905 if (rc)
1906 return rc;
1907
1908 tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
1909 tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
1910 t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1911 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1912}
1913
1914int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
1915 struct cudbg_buffer *dbg_buff,
1916 struct cudbg_error *cudbg_err)
1917{
1918 struct adapter *padap = pdbg_init->adap;
1919 struct cudbg_buffer temp_buff = { 0 };
1920 struct cudbg_meminfo *meminfo_buff;
1921 struct cudbg_ver_hdr *ver_hdr;
1922 int rc;
1923
1924 rc = cudbg_get_buff(pdbg_init, dbg_buff,
1925 sizeof(struct cudbg_ver_hdr) +
1926 sizeof(struct cudbg_meminfo),
1927 &temp_buff);
1928 if (rc)
1929 return rc;
1930
1931 ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
1932 ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
1933 ver_hdr->revision = CUDBG_MEMINFO_REV;
1934 ver_hdr->size = sizeof(struct cudbg_meminfo);
1935
1936 meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
1937 sizeof(*ver_hdr));
1938 rc = cudbg_fill_meminfo(padap, meminfo_buff);
1939 if (rc) {
1940 cudbg_err->sys_err = rc;
1941 cudbg_put_buff(pdbg_init, &temp_buff);
1942 return rc;
1943 }
1944
1945 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1946}
1947
1948int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
1949 struct cudbg_buffer *dbg_buff,
1950 struct cudbg_error *cudbg_err)
1951{
1952 struct cudbg_cim_pif_la *cim_pif_la_buff;
1953 struct adapter *padap = pdbg_init->adap;
1954 struct cudbg_buffer temp_buff = { 0 };
1955 int size, rc;
1956
1957 size = sizeof(struct cudbg_cim_pif_la) +
1958 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1959 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1960 if (rc)
1961 return rc;
1962
1963 cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
1964 cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1965 t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1966 (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1967 NULL, NULL);
1968 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1969}

int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_clk_info *clk_info_buff;
	u64 tp_tick_us;
	int rc;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
			    &temp_buff);
	if (rc)
		return rc;

	clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
	/* vpd.cclk is in kHz, so this is ps per core clock cycle */
	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk;
	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
	clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
	clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
	/* One TP timer tick, in microseconds */
	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;

	clk_info_buff->dack_timer =
		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
		t4_read_reg(padap, TP_DACK_TIMER_A);
	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
	clk_info_buff->initial_srtt =
		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
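
/* Worked example for the clock conversions above (illustrative numbers
 * only, not from any particular adapter): vpd.cclk is in kHz, so a
 * 250 MHz core clock gives cclk_ps = 1000000000 / 250000 = 4000 ps per
 * core-clock cycle.  With a timer resolution field of tre = 10, one TP
 * timer tick is (4000 << 10) / 1000000 = 4 us, and each TP_* timer
 * register value is multiplied by tp_tick_us to convert hardware ticks
 * to microseconds.
 */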

int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
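
/* The indirect-register walk above (and in the PM/MA/HMA collectors
 * below) follows one pattern: each table row is an
 * { address register, data register, first local offset, count } tuple,
 * and t4_read_indirect() reads "count" registers starting at the local
 * offset by programming the address register and then reading the data
 * register.  A minimal sketch of that access pattern:
 *
 *	while (nregs--) {
 *		t4_write_reg(adap, addr_reg, start_idx++);
 *		*vals++ = t4_read_reg(adap, data_reg);
 *	}
 */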

int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tid_info_region_rev1 *tid1;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tid_info_region *tid;
	u32 para[2], val[2];
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
	if (rc)
		return rc;

	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

	/* If firmware is not attached/alive, the firmware TID ranges
	 * cannot be queried via mailbox.  Skip them and fill only the
	 * TID info cached in the driver.
	 */
	if (!is_fw_attached(pdbg_init))
		goto fill_tid;

#define FW_PARAM_PFVF_A(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))

	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	tid->uotid_base = val[0];
	tid->nuotids = val[1] - val[0] + 1;

	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

#undef FW_PARAM_PFVF_A

fill_tid:
	tid->ntids = padap->tids.ntids;
	tid->nstids = padap->tids.nstids;
	tid->stid_base = padap->tids.stid_base;
	tid->hash_base = padap->tids.hash_base;

	tid->natids = padap->tids.natids;
	tid->nftids = padap->tids.nftids;
	tid->ftid_base = padap->tids.ftid_base;
	tid->aftid_base = padap->tids.aftid_base;
	tid->aftid_end = padap->tids.aftid_end;

	tid->sftid_base = padap->tids.sftid_base;
	tid->nsftids = padap->tids.nsftids;

	tid->flags = padap->flags;
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size, *value, j;
	int i, rc, n;

	size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	value = (u32 *)temp_buff.data;
	for (i = 0; i < n; i++) {
		for (j = t5_pcie_config_array[i][0];
		     j <= t5_pcie_config_array[i][1]; j += 4) {
			t4_hw_pci_read_cfg4(padap, j, value);
			value++;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
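
/* Each {start, end} pair in t5_pcie_config_array is inclusive and is
 * walked in 4-byte steps, so a pair contributes (end - start) / 4 + 1
 * words; e.g. {0x0, 0x34} yields 14 registers and {0x3c, 0x40} yields
 * 2.  CUDBG_NUM_PCIE_CONFIG_REGS is expected to equal the sum of these
 * counts so the buffer above is sized correctly.
 */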

static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
{
	int index, bit, bit_pos = 0;

	switch (type) {
	case CTXT_EGRESS:
		bit_pos = 176;
		break;
	case CTXT_INGRESS:
		bit_pos = 141;
		break;
	case CTXT_FLM:
		bit_pos = 89;
		break;
	}
	index = bit_pos / 32;
	bit = bit_pos % 32;
	return buf[index] & (1U << bit);
}
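
/* Worked example for the valid-bit check above: for CTXT_EGRESS,
 * bit_pos = 176, so index = 176 / 32 = 5 and bit = 176 % 32 = 16,
 * i.e. the egress context is treated as valid iff bit 16 of buf[5]
 * is set.
 */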

static int cudbg_get_ctxt_region_info(struct adapter *padap,
				      struct cudbg_region_info *ctx_info,
				      u8 *mem_type)
{
	struct cudbg_mem_desc mem_desc;
	struct cudbg_meminfo meminfo;
	u32 i, j, value, found;
	u8 flq;
	int rc;

	rc = cudbg_fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	/* Get EGRESS and INGRESS context region sizes */
	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
		found = 0;
		memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
		for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
			rc = cudbg_get_mem_region(padap, &meminfo, j,
						  cudbg_region[i],
						  &mem_desc);
			if (!rc) {
				found = 1;
				rc = cudbg_get_mem_relative(padap, &meminfo, j,
							    &mem_desc.base,
							    &mem_desc.limit);
				if (rc) {
					ctx_info[i].exist = false;
					break;
				}
				ctx_info[i].exist = true;
				ctx_info[i].start = mem_desc.base;
				ctx_info[i].end = mem_desc.limit;
				mem_type[i] = j;
				break;
			}
		}
		if (!found)
			ctx_info[i].exist = false;
	}

	/* Get FLM and CNM max qid */
	value = t4_read_reg(padap, SGE_FLM_CFG_A);

	/* Number of header-start freelist qid bits */
	flq = HDRSTARTFLQ_G(value);
	ctx_info[CTXT_FLM].exist = true;
	ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;

	/* The number of CONM contexts is the same as the number of
	 * freelist queues.
	 */
	ctx_info[CTXT_CNM].exist = true;
	ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;

	return 0;
}
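
/* Sizing sketch for the FLM region above (illustrative arithmetic
 * only): HDRSTARTFLQ reduces the number of addressable freelist qids,
 * so the FLM region holds CUDBG_MAX_FL_QIDS >> flq contexts of
 * SGE_CTXT_SIZE bytes each; e.g. with a hypothetical 1024 maximum FL
 * qids and flq = 3, that would be 1024 >> 3 = 128 contexts.
 */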

int cudbg_dump_context_size(struct adapter *padap)
{
	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
	u32 i, size = 0;
	int rc;

	/* Get max valid qid for each type of queue */
	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
	if (rc)
		return rc;

	for (i = 0; i < CTXT_CNM; i++) {
		if (!region_info[i].exist) {
			if (i == CTXT_EGRESS || i == CTXT_INGRESS)
				size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
					SGE_CTXT_SIZE;
			continue;
		}

		size += (region_info[i].end - region_info[i].start + 1) /
			SGE_CTXT_SIZE;
	}
	return size * sizeof(struct cudbg_ch_cntxt);
}

static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
				enum ctxt_type ctype, u32 *data)
{
	struct adapter *padap = pdbg_init->adap;
	int rc = -1;

	/* Under heavy traffic, the SGE queue context registers are
	 * frequently accessed by firmware.
	 *
	 * To avoid conflicts with firmware, always ask firmware to
	 * fetch the SGE queue contexts via mailbox.  On failure, fall
	 * back to reading them directly from SGE.
	 */
	if (is_fw_attached(pdbg_init))
		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
	if (rc)
		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}

static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
				  u8 ctxt_type,
				  struct cudbg_ch_cntxt **out_buff)
{
	struct cudbg_ch_cntxt *buff = *out_buff;
	int rc;
	u32 j;

	for (j = 0; j < max_qid; j++) {
		cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
		rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
		if (!rc)
			continue;

		buff->cntxt_type = ctxt_type;
		buff->cntxt_id = j;
		buff++;
		if (ctxt_type == CTXT_FLM) {
			cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
			buff->cntxt_type = CTXT_CNM;
			buff->cntxt_id = j;
			buff++;
		}
	}

	*out_buff = buff;
}

int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
	struct adapter *padap = pdbg_init->adap;
	u32 j, size, max_ctx_size, max_ctx_qid;
	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ch_cntxt *buff;
	u8 *ctx_buf;
	u8 i, k;
	int rc;

	/* Get max valid qid for each type of queue */
	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
	if (rc)
		return rc;

	rc = cudbg_dump_context_size(padap);
	if (rc <= 0)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	size = rc;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	/* Get a scratch buffer large enough to hold the biggest
	 * context region in adapter memory.
	 */
	max_ctx_size = max(region_info[CTXT_EGRESS].end -
			   region_info[CTXT_EGRESS].start + 1,
			   region_info[CTXT_INGRESS].end -
			   region_info[CTXT_INGRESS].start + 1);

	ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
	if (!ctx_buf) {
		cudbg_put_buff(pdbg_init, &temp_buff);
		return -ENOMEM;
	}

	buff = (struct cudbg_ch_cntxt *)temp_buff.data;

	/* Collect EGRESS and INGRESS context data.
	 * In case of failures, fall back to collecting them via
	 * firmware or the backdoor SGE path.
	 */
	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
		if (!region_info[i].exist) {
			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
					      &buff);
			continue;
		}

		max_ctx_size = region_info[i].end - region_info[i].start + 1;
		max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;

		/* If firmware is attached, flush the contexts and read
		 * the whole region directly from adapter memory.
		 */
		if (is_fw_attached(pdbg_init)) {
			t4_sge_ctxt_flush(padap, padap->mbox, i);

			rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
					  region_info[i].start, max_ctx_size,
					  (__be32 *)ctx_buf, 1);
		}

		if (rc || !is_fw_attached(pdbg_init)) {
			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
					      &buff);
			continue;
		}

		for (j = 0; j < max_ctx_qid; j++) {
			__be64 *dst_off;
			u64 *src_off;

			src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
			dst_off = (__be64 *)buff->data;

			/* The data is stored in 64-bit cpu order.  Convert
			 * it to big endian before parsing.
			 */
			for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
				dst_off[k] = cpu_to_be64(src_off[k]);

			rc = cudbg_sge_ctxt_check_valid(buff->data, i);
			if (!rc)
				continue;

			buff->cntxt_type = i;
			buff->cntxt_id = j;
			buff++;
		}
	}

	kvfree(ctx_buf);

	/* Collect FREELIST and CONGESTION MANAGER contexts */
	max_ctx_size = region_info[CTXT_FLM].end -
		       region_info[CTXT_FLM].start + 1;
	max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
	/* Since FLM and CONM are 1-to-1 mapped, the call below fetches
	 * both FLM and CONM contexts.
	 */
	cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
	*mask = x | y;
	y = (__force u64)cpu_to_be64(y);
	memcpy(addr, (char *)&y + 2, ETH_ALEN);
}
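
/* The (x, y) pair read from the MPS TCAM follows the usual ternary
 * encoding: a bit position with x = y = 0 is "don't care", so
 * mask = x | y marks the bits that actually participate in the match,
 * with the matched value carried in y.  The byte swap above is needed
 * because the 48-bit MAC address sits in the low 6 bytes of y;
 * converting to big endian and copying from offset 2 extracts those
 * bytes in network order.
 */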

static void cudbg_mps_rpl_backdoor(struct adapter *padap,
				   struct fw_ldst_mps_rplc *mps_rplc)
{
	if (is_t5(padap->params.chip)) {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP3_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP2_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP1_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP0_A));
	} else {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP7_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP6_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP5_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP4_A));
	}
	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}

static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
				    struct cudbg_mps_tcam *tcam, u32 idx)
{
	struct adapter *padap = pdbg_init->adap;
	u64 tcamy, tcamx, val;
	u32 ctl, data2;
	int rc = 0;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
		/* CtlReqID   - 1: use Host Driver Requester ID
		 * CtlCmdType - 0: Read, 1: Write
		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
		 * CtlXYBitSel- 0: Y bit, 1: X bit
		 */

		/* Read tcamy */
		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
		if (idx < 256)
			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
		else
			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);

		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamy = DMACH_G(val) << 32;
		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		tcam->lookup_type = DATALKPTYPE_G(data2);

		/* 0 - Outer header, 1 - Inner header
		 * [71:48] bit locs are overlayed with outer/inner
		 * dip/sip
		 */
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI */
			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
			tcam->dip_hit = data2 & DATADIPHIT_F;
		} else {
			tcam->vlan_vld = data2 & DATAVIDH2_F;
			tcam->ivlan = VIDL_G(val);
		}

		tcam->port_num = DATAPORTNUM_G(data2);

		/* Read tcamx.  Change the control param */
		ctl |= CTLXYBITSEL_V(1);
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamx = DMACH_G(val) << 32;
		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI mask */
			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
		}
	} else {
		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
	}

	/* Skip invalid/empty entries, i.e. where X and Y bits overlap */
	if (tcamx & tcamy)
		return rc;

	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));

	if (is_t5(padap->params.chip))
		tcam->repli = (tcam->cls_lo & REPLICATE_F);
	else if (is_t6(padap->params.chip))
		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);

	if (tcam->repli) {
		struct fw_ldst_cmd ldst_cmd;
		struct fw_ldst_mps_rplc mps_rplc;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
		ldst_cmd.u.mps.rplc.fid_idx =
			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
			      FW_LDST_CMD_IDX_V(idx));

		/* If firmware is not attached/alive, use backdoor register
		 * access to collect the replication map.
		 */
		if (is_fw_attached(pdbg_init))
			rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
					sizeof(ldst_cmd), &ldst_cmd);

		if (rc || !is_fw_attached(pdbg_init)) {
			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
			/* Ignore error since we collected directly from
			 * reading registers.
			 */
			rc = 0;
		} else {
			mps_rplc = ldst_cmd.u.mps.rplc;
		}

		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
		}
	}
	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
	tcam->idx = idx;
	tcam->rplc_size = padap->params.arch.mps_rplc_size;
	return rc;
}

int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size = 0, i, n, total_size = 0;
	struct cudbg_mps_tcam *tcam;
	int rc;

	n = padap->params.arch.mps_tcam_size;
	size = sizeof(struct cudbg_mps_tcam) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
	for (i = 0; i < n; i++) {
		rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
		total_size += sizeof(struct cudbg_mps_tcam);
		tcam++;
	}

	if (!total_size) {
		rc = CUDBG_SYSTEM_ERROR;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	char vpd_str[CUDBG_VPD_VER_LEN + 1];
	struct cudbg_vpd_data *vpd_data;
	struct vpd_params vpd = { 0 };
	u32 vpd_vers, fw_vers;
	int rc;

	rc = t4_get_raw_vpd_params(padap, &vpd);
	if (rc)
		return rc;

	rc = t4_get_fw_version(padap, &fw_vers);
	if (rc)
		return rc;

	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
				vpd_str);
	if (rc)
		return rc;

	vpd_str[CUDBG_VPD_VER_LEN] = '\0';
	rc = kstrtouint(vpd_str, 0, &vpd_vers);
	if (rc)
		return rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
			    &temp_buff);
	if (rc)
		return rc;

	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
	memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
	memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
	memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
	memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
	vpd_data->scfg_vers = t4_read_reg(padap, PCIE_STATIC_SPARE2_A);
	vpd_data->vpd_vers = vpd_vers;
	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
			  struct cudbg_tid_data *tid_data)
{
	struct adapter *padap = pdbg_init->adap;
	int i, cmd_retry = 8;
	u32 val;

	/* Fill REQ_DATA regs with 0's */
	for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
		t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);

	/* Write DBIG command */
	val = DBGICMD_V(4) | DBGITID_V(tid);
	t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
	tid_data->dbig_cmd = val;

	val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
	t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
	tid_data->dbig_conf = val;

	/* Poll the DBGICMDBUSY bit */
	val = 1;
	while (val) {
		val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
		val = val & DBGICMDBUSY_F;
		cmd_retry--;
		if (!cmd_retry)
			return CUDBG_SYSTEM_ERROR;
	}

	/* Check RESP status */
	val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
	tid_data->dbig_rsp_stat = val;
	if (!(val & 1))
		return CUDBG_SYSTEM_ERROR;

	/* Read RESP data */
	for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
		tid_data->data[i] = t4_read_reg(padap,
						LE_DB_DBGI_RSP_DATA_A +
						(i << 2));
	tid_data->tid = tid;
	return 0;
}

static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
{
	int type = LE_ET_UNKNOWN;

	if (tid < tcam_region.server_start)
		type = LE_ET_TCAM_CON;
	else if (tid < tcam_region.filter_start)
		type = LE_ET_TCAM_SERVER;
	else if (tid < tcam_region.clip_start)
		type = LE_ET_TCAM_FILTER;
	else if (tid < tcam_region.routing_start)
		type = LE_ET_TCAM_CLIP;
	else if (tid < tcam_region.tid_hash_base)
		type = LE_ET_TCAM_ROUTING;
	else if (tid < tcam_region.max_tid)
		type = LE_ET_HASH_CON;
	else
		type = LE_ET_INVALID_TID;

	return type;
}
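
/* The range checks above assume the LE TCAM is laid out in increasing
 * tid order as: active connections, servers, filters, CLIP, routing,
 * then the hash region; cudbg_fill_le_tcam_info() below fills
 * tcam_region with exactly these boundaries.  For example, a tid below
 * server_start must be an active (TCAM) connection entry.
 */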

static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
			       struct cudbg_tcam tcam_region)
{
	int ipv6 = 0;
	int le_type;

	le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
	if (tid_data->tid & 1)
		return 0;

	if (le_type == LE_ET_HASH_CON) {
		ipv6 = tid_data->data[16] & 0x8000;
	} else if (le_type == LE_ET_TCAM_CON) {
		ipv6 = tid_data->data[16] & 0x8000;
		if (ipv6)
			ipv6 = tid_data->data[9] == 0x00C00000;
	} else {
		ipv6 = 0;
	}
	return ipv6;
}

void cudbg_fill_le_tcam_info(struct adapter *padap,
			     struct cudbg_tcam *tcam_region)
{
	u32 value;

	/* Get the LE region boundaries */
	value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
	tcam_region->tid_hash_base = value;

	/* Get routing table index */
	value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
	tcam_region->routing_start = value;

	/* Get clip table index.  T6 has a separate CLIP TCAM */
	if (is_t6(padap->params.chip))
		value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
	else
		value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
	tcam_region->clip_start = value;

	/* Get filter table index */
	value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
	tcam_region->filter_start = value;

	/* Get server table index */
	value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
	tcam_region->server_start = value;

	/* Check whether hash is enabled and calculate the max tids */
	value = t4_read_reg(padap, LE_DB_CONFIG_A);
	if ((value >> HASHEN_S) & 1) {
		value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
			tcam_region->max_tid = (value & 0xFFFFF) +
					       tcam_region->tid_hash_base;
		} else {
			value = HASHTIDSIZE_G(value);
			value = 1 << value;
			tcam_region->max_tid = value +
					       tcam_region->tid_hash_base;
		}
	} else { /* hash not enabled */
		if (is_t6(padap->params.chip))
			tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
					       CUDBG_MAX_TID_COMP_EN :
					       CUDBG_MAX_TID_COMP_DIS;
		else
			tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
	}

	if (is_t6(padap->params.chip))
		tcam_region->max_tid += CUDBG_T6_CLIP;
}
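
/* Illustrative max_tid arithmetic for the hash-enabled case above: on
 * T5 and earlier, HASHTIDSIZE is a power-of-two exponent, so a value
 * of 16 yields 1 << 16 = 65536 hash tids on top of tid_hash_base,
 * whereas T6 and later report the tid count directly in the low 20
 * bits.  On T6 an extra CUDBG_T6_CLIP entries are added for the
 * separate CLIP TCAM.
 */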

int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
			  struct cudbg_buffer *dbg_buff,
			  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tcam tcam_region = { 0 };
	struct cudbg_tid_data *tid_data;
	u32 bytes = 0;
	int rc, size;
	u32 i;

	cudbg_fill_le_tcam_info(padap, &tcam_region);

	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
	size += sizeof(struct cudbg_tcam);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
	bytes = sizeof(struct cudbg_tcam);
	tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
	/* Read all tids */
	for (i = 0; i < tcam_region.max_tid; ) {
		rc = cudbg_read_tid(pdbg_init, i, tid_data);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			/* Update tcam header and exit */
			tcam_region.max_tid = i;
			memcpy(temp_buff.data, &tcam_region,
			       sizeof(struct cudbg_tcam));
			goto out;
		}

		if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
			/* T6 CLIP TCAM: ipv6 takes 4 entries */
			if (is_t6(padap->params.chip) &&
			    i >= tcam_region.clip_start &&
			    i < tcam_region.clip_start + CUDBG_T6_CLIP)
				i += 4;
			else /* Main TCAM: ipv6 takes only 2 entries */
				i += 2;
		} else {
			i++;
		}

		tid_data++;
		bytes += sizeof(struct cudbg_tid_data);
	}

out:
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size;
	int rc;

	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_read_cong_tbl(padap, (void *)temp_buff.data);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ma_indr;
	int i, rc, n;
	u32 size, j;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
		ma_indr++;
	}

	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	struct cudbg_ver_hdr *ver_hdr;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_ver_hdr) +
			    sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_ULPTX_LA_REV;
	ver_hdr->size = sizeof(struct cudbg_ulptx_la);

	ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
						  sizeof(*ver_hdr));
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}

	for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
		t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
		ulptx_la_buff->rdptr_asic[i] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
		ulptx_la_buff->rddata_asic[i][0] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
		ulptx_la_buff->rddata_asic[i][1] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
		ulptx_la_buff->rddata_asic[i][2] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
		ulptx_la_buff->rddata_asic[i][3] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
		ulptx_la_buff->rddata_asic[i][4] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
		ulptx_la_buff->rddata_asic[i][5] =
				t4_read_reg(padap, PM_RX_BASE_ADDR);
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 local_offset, local_range;
	struct ireg_buf *up_cim;
	u32 size, j, iter;
	u32 instance = 0;
	int i, rc, n;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
	else
		return CUDBG_STATUS_NOT_IMPLEMENTED;

	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
			instance = t5_up_cim_reg_array[i][4];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
			instance = t6_up_cim_reg_array[i][4];
		}

		switch (instance) {
		case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
			iter = up_cim_reg->ireg_offset_range;
			local_offset = 0x120;
			local_range = 1;
			break;
		case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
			iter = up_cim_reg->ireg_offset_range;
			local_offset = 0x10;
			local_range = 1;
			break;
		default:
			iter = 1;
			local_offset = 0;
			local_range = up_cim_reg->ireg_offset_range;
			break;
		}

		for (j = 0; j < iter; j++, buff++) {
			rc = t4_cim_read(padap,
					 up_cim_reg->ireg_local_offset +
					 (j * local_offset), local_range, buff);
			if (rc) {
				cudbg_put_buff(pdbg_init, &temp_buff);
				return rc;
			}
		}
		up_cim++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pbt_tables *pbt;
	int i, rc;
	u32 addr;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_pbt_tables),
			    &temp_buff);
	if (rc)
		return rc;

	pbt = (struct cudbg_pbt_tables *)temp_buff.data;
	/* PBT dynamic entries */
	addr = CUDBG_CHAC_PBT_ADDR;
	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_dynamic[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* PBT static entries; these hold the per-port nexus values */
	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_static[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* LRF entries */
	addr = CUDBG_CHAC_PBT_LRF;
	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->lrf_table[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* PBT data entries */
	addr = CUDBG_CHAC_PBT_DATA;
	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_data[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct cudbg_buffer temp_buff = { 0 };
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;
	u64 flit;
	u32 size;

	log = padap->mbox_log;
	mbox_cmds = padap->mbox_log->size;
	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
	for (k = 0; k < mbox_cmds; k++) {
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;

		entry = mbox_cmd_log_entry(log, entry_idx);

		/* Skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}
		mboxlog++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
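
/* The mailbox log above is a circular buffer: log->cursor is the
 * oldest entry, so entry_idx = (cursor + k) mod size walks entries
 * from oldest to newest (the subtraction handles the wrap-around).
 * Each 64-bit command flit is then split into hi/lo 32-bit words for
 * the dump format; e.g. flit 0x1122334455667788 is stored as
 * hi = 0x11223344, lo = 0x55667788.
 */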

int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *hma_indr;
	int i, rc, n;
	u32 size;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	hma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *hma_fli = &hma_indr->tp_pio;
		u32 *buff = hma_indr->outbuf;

		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
				 buff, hma_fli->ireg_offset_range,
				 hma_fli->ireg_local_offset);
		hma_indr++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
				   u32 *num, u32 *size)
{
	u32 tot_entries = 0, tot_size = 0;

	/* NIC TXQ, RXQ, FLQ, and CTRLQ */
	tot_entries += MAX_ETH_QSETS * 3;
	tot_entries += MAX_CTRL_QUEUES;

	tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
	tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
	tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
	tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
		    MAX_CTRL_TXQ_DESC_SIZE;

	/* FW_EVTQ and INTRQ */
	tot_entries += INGQ_EXTRAS;
	tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;

	/* PTP_TXQ */
	tot_entries += 1;
	tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;

	/* ULD TXQ, RXQ, and FLQ */
	tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;

	tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
		    MAX_TXQ_DESC_SIZE;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
		    MAX_RXQ_DESC_SIZE;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
		    MAX_FL_DESC_SIZE;

	/* ULD CIQ */
	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
		    MAX_RXQ_DESC_SIZE;

	/* ETHOFLD TXQ, RXQ, and FLQ */
	tot_entries += MAX_OFLD_QSETS * 3;
	tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;

	tot_size += sizeof(struct cudbg_ver_hdr) +
		    sizeof(struct cudbg_qdesc_info) +
		    sizeof(struct cudbg_qdesc_entry) * tot_entries;

	if (num)
		*num = tot_entries;

	if (size)
		*size = tot_size;
}
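
/* The totals above are a worst-case bound computed from compile-time
 * maximums, not the live queue configuration; e.g. the NIC portion
 * alone reserves MAX_ETH_QSETS * 3 entries (one TXQ, RXQ, and FLQ per
 * ethernet queue set) plus MAX_CTRL_QUEUES control queues.
 * cudbg_collect_qdesc() below then clamps the allocation to
 * CUDBG_DUMP_BUFF_SIZE and stops collecting once the buffer is
 * consumed.
 */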

int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	u32 num_queues = 0, tot_entries = 0, size = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_qdesc_entry *qdesc_entry;
	struct cudbg_qdesc_info *qdesc_info;
	struct cudbg_ver_hdr *ver_hdr;
	struct sge *s = &padap->sge;
	u32 i, j, cur_off, tot_len;
	u8 *data;
	int rc;

	cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
	size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);
	tot_len = size;
	data = kvzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ver_hdr = (struct cudbg_ver_hdr *)data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_QDESC_REV;
	ver_hdr->size = sizeof(struct cudbg_qdesc_info);
	size -= sizeof(*ver_hdr);

	qdesc_info = (struct cudbg_qdesc_info *)(data +
						 sizeof(*ver_hdr));
	size -= sizeof(*qdesc_info);
	qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;

#define QDESC_GET(q, desc, type, label) do { \
	if (size <= 0) { \
		goto label; \
	} \
	if (desc) { \
		cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
		size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
		num_queues++; \
		qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
	} \
} while (0)

#define QDESC_GET_TXQ(q, type, label) do { \
	struct sge_txq *txq = (struct sge_txq *)q; \
	QDESC_GET(txq, txq->desc, type, label); \
} while (0)

#define QDESC_GET_RXQ(q, type, label) do { \
	struct sge_rspq *rxq = (struct sge_rspq *)q; \
	QDESC_GET(rxq, rxq->desc, type, label); \
} while (0)

#define QDESC_GET_FLQ(q, type, label) do { \
	struct sge_fl *flq = (struct sge_fl *)q; \
	QDESC_GET(flq, flq->desc, type, label); \
} while (0)

	/* NIC TXQ */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);

	/* NIC RXQ */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);

	/* NIC FLQ */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);

	/* NIC CTRLQ */
	for (i = 0; i < padap->params.nports; i++)
		QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);

	/* FW_EVTQ */
	QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);

	/* INTRQ */
	QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);

	/* PTP_TXQ */
	QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);

	/* ULD queues */
	mutex_lock(&uld_mutex);

	if (s->uld_txq_info) {
		struct sge_uld_txq_info *utxq;

		/* ULD TXQ */
		for (j = 0; j < CXGB4_TX_MAX; j++) {
			if (!s->uld_txq_info[j])
				continue;

			utxq = s->uld_txq_info[j];
			for (i = 0; i < utxq->ntxq; i++)
				QDESC_GET_TXQ(&utxq->uldtxq[i].q,
					      cudbg_uld_txq_to_qtype(j),
					      out_unlock);
		}
	}

	if (s->uld_rxq_info) {
		struct sge_uld_rxq_info *urxq;
		u32 base;

		/* ULD RXQ */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			for (i = 0; i < urxq->nrxq; i++)
				QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
					      cudbg_uld_rxq_to_qtype(j),
					      out_unlock);
		}

		/* ULD FLQ */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			for (i = 0; i < urxq->nrxq; i++)
				QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
					      cudbg_uld_flq_to_qtype(j),
					      out_unlock);
		}

		/* ULD CIQ */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			base = urxq->nrxq;
			for (i = 0; i < urxq->nciq; i++)
				QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
					      cudbg_uld_ciq_to_qtype(j),
					      out_unlock);
		}
	}

	/* ETHOFLD TXQ */
	if (s->eohw_txq)
		for (i = 0; i < s->eoqsets; i++)
			QDESC_GET_TXQ(&s->eohw_txq[i].q,
				      CUDBG_QTYPE_ETHOFLD_TXQ, out);

	/* ETHOFLD RXQ and FLQ */
	if (s->eohw_rxq) {
		for (i = 0; i < s->eoqsets; i++)
			QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
				      CUDBG_QTYPE_ETHOFLD_RXQ, out);

		for (i = 0; i < s->eoqsets; i++)
			QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
				      CUDBG_QTYPE_ETHOFLD_FLQ, out);
	}

out_unlock:
	mutex_unlock(&uld_mutex);

out:
	qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
	qdesc_info->num_queues = num_queues;
	cur_off = 0;
	while (tot_len) {
		u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);

		rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
				    &temp_buff);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out_free;
		}

		memcpy(temp_buff.data, data + cur_off, chunk_size);
		tot_len -= chunk_size;
		cur_off += chunk_size;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out_free;
		}
	}

out_free:
	if (data)
		kvfree(data);

#undef QDESC_GET_FLQ
#undef QDESC_GET_RXQ
#undef QDESC_GET_TXQ
#undef QDESC_GET

	return rc;
}

int cudbg_collect_flash(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	u32 count = padap->params.sf_size, n;
	struct cudbg_buffer temp_buff = {0};
	u32 addr, i;
	int rc = 0;	/* initialized so a zero-sized flash returns success */

	addr = FLASH_EXP_ROM_START;

	for (i = 0; i < count; i += SF_PAGE_SIZE) {
		n = min_t(u32, count - i, SF_PAGE_SIZE);

		rc = cudbg_get_buff(pdbg_init, dbg_buff, n, &temp_buff);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out;
		}
		rc = t4_read_flash(padap, addr, n, (u32 *)temp_buff.data, 0);
		if (rc)
			goto out;

		addr += (n * 4);
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out;
		}
	}

out:
	return rc;
}