/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 *
 */

#include <linux/sort.h>

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"

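/* Compress the data in @pin_buff into @dbg_buff in CUDBG_CHUNK_SIZE pieces
 * and store the total compressed length back in @pin_buff->size.
 */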
static int cudbg_do_compression(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *pin_buff,
				struct cudbg_buffer *dbg_buff)
{
	struct cudbg_buffer temp_in_buff = { 0 };
	int bytes_left, bytes_read, bytes;
	u32 offset = dbg_buff->offset;
	int rc;

	temp_in_buff.offset = pin_buff->offset;
	temp_in_buff.data = pin_buff->data;
	temp_in_buff.size = pin_buff->size;

	bytes_left = pin_buff->size;
	bytes_read = 0;
	while (bytes_left > 0) {
		/* Do compression in smaller chunks */
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		temp_in_buff.data = (char *)pin_buff->data + bytes_read;
		temp_in_buff.size = bytes;
		rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
		if (rc)
			return rc;
		bytes_left -= bytes;
		bytes_read += bytes;
	}

	pin_buff->size = dbg_buff->offset - offset;
	return 0;
}

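/* Move @pin_buff into @dbg_buff, either as-is or compressed depending on
 * the configured compression type, then release the temporary buffer.
 */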
static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
					struct cudbg_buffer *pin_buff,
					struct cudbg_buffer *dbg_buff)
{
	int rc = 0;

	if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
		cudbg_update_buff(pin_buff, dbg_buff);
	} else {
		rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
		if (rc)
			goto out;
	}

out:
	cudbg_put_buff(pdbg_init, pin_buff);
	return rc;
}

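/* Firmware is usable only when it is alive (FW_OK) and backdoor register
 * access has not been requested.
 */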
static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	if (!(padap->flags & FW_OK) || padap->use_bd)
		return 0;

	return 1;
}

/* This function will add additional padding bytes into debug_buffer to make
 * it 4 byte aligned.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

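/* Entity indices are 1-based; entity headers are laid out back-to-back
 * right after the main cudbg header.
 */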
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
			      void *dest)
{
	int vaddr, rc;

	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
	if (vaddr < 0)
		return vaddr;

	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
	if (rc < 0)
		return rc;

	return 0;
}

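/* sort() comparator: order memory descriptors by ascending base address. */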
static int cudbg_mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct cudbg_mem_desc *)a)->base -
	       ((const struct cudbg_mem_desc *)b)->base;
}

int cudbg_fill_meminfo(struct adapter *padap,
		       struct cudbg_meminfo *meminfo_buff)
{
	struct cudbg_mem_desc *md;
	u32 lo, hi, used, alloc;
	int n, i;

	memset(meminfo_buff->avail, 0,
	       ARRAY_SIZE(meminfo_buff->avail) *
	       sizeof(struct cudbg_mem_desc));
	memset(meminfo_buff->mem, 0,
	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
	md = meminfo_buff->mem;

	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
		meminfo_buff->mem[i].limit = 0;
		meminfo_buff->mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (lo & EDRAM0_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 0;
		i++;
	}

	if (lo & EDRAM1_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 1;
		i++;
	}

	if (is_t5(padap->params.chip)) {
		if (lo & EXT_MEM0_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 3;
			i++;
		}

		if (lo & EXT_MEM1_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 4;
			i++;
		}
	} else {
		if (lo & EXT_MEM_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 2;
			i++;
		}

		if (lo & HMA_MUX_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 5;
			i++;
		}
	}

	if (!i) /* no memory available */
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	meminfo_buff->avail_c = i;
	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);
	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
	md++;

	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
	md++;

	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
		} else {
			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
			md->base = t4_read_reg(padap,
					       LE_DB_HASH_TBL_BASE_ADDR_A);
		}
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	}
	md++;

#define ulp_region(reg) do { \
	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
} while (0)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region
	md->base = 0;
	md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	if (!is_t4(padap->params.chip)) {
		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
		u32 size = 0;

		if (is_t5(padap->params.chip)) {
			if (sge_ctrl & VFIFO_ENABLE_F)
				size = DBVFIFO_SIZE_G(fifo_size);
		} else {
			size = T6_DBVFIFO_SIZE_G(fifo_size);
		}

		if (size) {
			md->base = BASEADDR_G(t4_read_reg(padap,
							  SGE_DBVFIFO_BADDR_A));
			md->limit = md->base + (size << 2) - 1;
		}
	}

	md++;

	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
	md->limit = 0;
	md++;

	md->base = padap->vres.ocq.start;
	if (padap->vres.ocq.size)
		md->limit = md->base + padap->vres.ocq.size - 1;
	else
		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (meminfo_buff->avail[n].limit <
		    meminfo_buff->avail[n + 1].base)
			(md++)->base = meminfo_buff->avail[n].limit;

	if (meminfo_buff->avail[n].limit)
		(md++)->base = meminfo_buff->avail[n].limit;

	n = md - meminfo_buff->mem;
	meminfo_buff->mem_c = n;

	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);

	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_ram_lo = lo;
	meminfo_buff->up_ram_hi = hi;

	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_extmem2_lo = lo;
	meminfo_buff->up_extmem2_hi = hi;

	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
	for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
		meminfo_buff->free_rx_cnt +=
			FREERXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_RX_CNT_A));

	meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
	meminfo_buff->rx_pages_data[1] =
		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;

	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
	for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
		meminfo_buff->free_tx_cnt +=
			FREETXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_TX_CNT_A));

	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
	meminfo_buff->tx_pages_data[1] =
		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
	meminfo_buff->tx_pages_data[2] =
		hi >= (1 << 20) ? 'M' : 'K';
	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
	meminfo_buff->p_structs_free_cnt =
		FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));

	for (i = 0; i < 4; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->port_used[i] = used;
		meminfo_buff->port_alloc[i] = alloc;
	}

	for (i = 0; i < padap->params.arch.nchan; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->loopback_used[i] = used;
		meminfo_buff->loopback_alloc[i] = alloc;
	}

	return 0;
}

int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 10 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

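/* Unlike IBQs, OBQ sizes are configurable; read the configured size for
 * @qid from CIM_QUEUE_CONFIG.  CIMQSIZE is in 256-byte units, i.e. 64
 * 32-bit words per unit.
 */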
u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
	u32 value;

	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
	value = CIMQSIZE_G(value) * 64; /* size in u32 words */
	return value * sizeof(u32);
}

static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize = cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

static int cudbg_meminfo_get_mem_index(struct adapter *padap,
				       struct cudbg_meminfo *mem_info,
				       u8 mem_type, u8 *idx)
{
	u8 i, flag;

	switch (mem_type) {
	case MEM_EDC0:
		flag = EDC0_FLAG;
		break;
	case MEM_EDC1:
		flag = EDC1_FLAG;
		break;
	case MEM_MC0:
		/* Some T5 cards have both MC0 and MC1. */
		flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
		break;
	case MEM_MC1:
		flag = MC1_FLAG;
		break;
	case MEM_HMA:
		flag = HMA_FLAG;
		break;
	default:
		return CUDBG_STATUS_ENTITY_NOT_FOUND;
	}

	for (i = 0; i < mem_info->avail_c; i++) {
		if (mem_info->avail[i].idx == flag) {
			*idx = i;
			return 0;
		}
	}

	return CUDBG_STATUS_ENTITY_NOT_FOUND;
}

/* Fetch the @region_name's start and end from @meminfo. */
static int cudbg_get_mem_region(struct adapter *padap,
				struct cudbg_meminfo *meminfo,
				u8 mem_type, const char *region_name,
				struct cudbg_mem_desc *mem_desc)
{
	u8 mc, found = 0;
	u32 i, idx = 0;
	int rc;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
	if (rc)
		return rc;

	for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
		if (!strcmp(cudbg_region[i], region_name)) {
			found = 1;
			idx = i;
			break;
		}
	}
	if (!found)
		return -EINVAL;

	found = 0;
	for (i = 0; i < meminfo->mem_c; i++) {
		if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
			continue; /* Skip holes */

		if (!(meminfo->mem[i].limit))
			meminfo->mem[i].limit =
				i < meminfo->mem_c - 1 ?
				meminfo->mem[i + 1].base - 1 : ~0;

		if (meminfo->mem[i].idx == idx) {
			/* Check if the region exists in @mem_type memory */
			if (meminfo->mem[i].base < meminfo->avail[mc].base &&
			    meminfo->mem[i].limit < meminfo->avail[mc].base)
				return -EINVAL;

			if (meminfo->mem[i].base > meminfo->avail[mc].limit)
				return -EINVAL;

			memcpy(mem_desc, &meminfo->mem[i],
			       sizeof(struct cudbg_mem_desc));
			found = 1;
			break;
		}
	}
	if (!found)
		return -EINVAL;

	return 0;
}

/* Fetch and update the start and end of the requested memory region w.r.t 0
 * in the corresponding EDC/MC/HMA.
 */
static int cudbg_get_mem_relative(struct adapter *padap,
				  struct cudbg_meminfo *meminfo,
				  u8 mem_type, u32 *out_base, u32 *out_end)
{
	u8 mc_idx;
	int rc;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
	if (rc)
		return rc;

	if (*out_base < meminfo->avail[mc_idx].base)
		*out_base = 0;
	else
		*out_base -= meminfo->avail[mc_idx].base;

	if (*out_end > meminfo->avail[mc_idx].limit)
		*out_end = meminfo->avail[mc_idx].limit;
	else
		*out_end -= meminfo->avail[mc_idx].base;

	return 0;
}

/* Get TX and RX Payload region */
static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
				   const char *region_name,
				   struct cudbg_region_info *payload)
{
	struct cudbg_mem_desc mem_desc = { 0 };
	struct cudbg_meminfo meminfo;
	int rc;

	rc = cudbg_fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
				  &mem_desc);
	if (rc) {
		payload->exist = false;
		return 0;
	}

	payload->exist = true;
	payload->start = mem_desc.base;
	payload->end = mem_desc.limit;

	return cudbg_get_mem_relative(padap, &meminfo, mem_type,
				      &payload->start, &payload->end);
}

static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
			     int mtype, u32 addr, u32 len, void *hbuf)
{
	u32 win_pf, memoffset, mem_aperture, mem_base;
	struct adapter *adap = pdbg_init->adap;
	u32 pos, offset, resid;
	u32 *res_buf;
	u64 *buf;
	int ret;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;

	buf = (u64 *)hbuf;

	/* Try to do 64-bit reads.  Residual will be handled later */
	resid = len & 0x7;
	len -= resid;

	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
				&mem_aperture);
	if (ret)
		return ret;

	addr = addr + memoffset;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	pos = addr & ~(mem_aperture - 1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.
	 */
	t4_memory_update_win(adap, win, pos | win_pf);

	/* Transfer data from the adapter */
	while (len > 0) {
		*buf++ = le64_to_cpu((__force __le64)
				     t4_read_reg64(adap, mem_base + offset));
		offset += sizeof(u64);
		len -= sizeof(u64);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	res_buf = (u32 *)buf;
	/* Read residual in 32-bit multiples */
	while (resid > sizeof(u32)) {
		*res_buf++ = le32_to_cpu((__force __le32)
					 t4_read_reg(adap, mem_base + offset));
		offset += sizeof(u32);
		resid -= sizeof(u32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	/* Transfer residual < 32-bits */
	if (resid)
		t4_memory_rw_residual(adap, resid, mem_base + offset,
				      (u8 *)res_buf, T4_MEMORY_READ);

	return 0;
}

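/* Reading large adapter memories via PIO can monopolize the CPU; yield it
 * once every CUDBG_YIELD_ITERATION chunk reads (see cudbg_read_fw_mem()).
 */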
#define CUDBG_YIELD_ITERATION 256

static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	static const char * const region_name[] = { "Tx payload:",
						    "Rx payload:" };
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_region_info payload[2];
	u32 yield_count = 0;
	int rc = 0;
	u8 i;

	/* Get TX/RX Payload region range if they exist */
	memset(payload, 0, sizeof(payload));
	for (i = 0; i < ARRAY_SIZE(region_name); i++) {
		rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
					     &payload[i]);
		if (rc)
			return rc;

		if (payload[i].exist) {
			/* Align start and end to avoid wrap around */
			payload[i].start = roundup(payload[i].start,
						   CUDBG_CHUNK_SIZE);
			payload[i].end = rounddown(payload[i].end,
						   CUDBG_CHUNK_SIZE);
		}
	}

	bytes_left = tot_len;
	while (bytes_left > 0) {
		/* As MC size is huge and read through PIO access, this
		 * loop will hold cpu for a longer time. OS may think that
		 * the process is hanged and will generate CPU stall traces.
		 * So yield the cpu regularly, after every
		 * CUDBG_YIELD_ITERATION chunks.
		 */
		yield_count++;
		if (!(yield_count % CUDBG_YIELD_ITERATION))
			schedule();

		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;

		for (i = 0; i < ARRAY_SIZE(payload); i++)
			if (payload[i].exist &&
			    bytes_read >= payload[i].start &&
			    bytes_read + bytes <= payload[i].end)
				/* TX and RX Payload regions can't overlap */
				goto skip_read;

		spin_lock(&padap->win0_lock);
		rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
				       bytes_read, bytes, temp_buff.data);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}

skip_read:
		bytes_left -= bytes;
		bytes_read += bytes;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return rc;
}

static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
		/* Flush uP dcache before reading edcX/mcX */
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
		if (rc)
			cudbg_err->sys_warn = rc;
	}
}

static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
				    struct cudbg_buffer *dbg_buff,
				    struct cudbg_error *cudbg_err,
				    u8 mem_type)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_meminfo mem_info;
	unsigned long size;
	u8 mc_idx;
	int rc;

	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
	rc = cudbg_fill_meminfo(padap, &mem_info);
	if (rc)
		return rc;

	cudbg_t4_fwcache(pdbg_init, cudbg_err);
	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
	if (rc)
		return rc;

	size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
				 cudbg_err);
}

int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}

int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC0);
}

int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC1);
}

int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_HMA);
}

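/* Dump the RSS lookup table; the number of entries is chip-dependent and
 * reported by t4_chip_rss_size().
 */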
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc, nentries;

	nentries = t4_chip_rss_size(padap);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_rss_vf_conf *vfconf;
	int vf, rc, vf_count;

	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pm_stats *pm_stats_buff;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
			    &temp_buff);
	if (rc)
		return rc;

	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_hw_sched *hw_sched_buff;
	int i, rc = 0;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
			    &temp_buff);

	if (rc)
		return rc;

	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
	for (i = 0; i < NTX_SCHED; ++i)
		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
				&hw_sched_buff->ipg[i], true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
					      struct sge_qbase_reg_field *qbase,
					      u32 func, bool is_pf)
{
	u32 *buff, i;

	if (is_pf) {
		buff = qbase->pf_data_value[func];
	} else {
		buff = qbase->vf_data_value[func];
		/* In SGE_QBASE_INDEX,
		 * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
		 */
		func += 8;
	}

	t4_write_reg(padap, qbase->reg_addr, func);
	for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
		*buff = t4_read_reg(padap, qbase->reg_data[i]);
}

int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct sge_qbase_reg_field *sge_qbase;
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
			    &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}

	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
		sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
		/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
		 * SGE_QBASE_MAP[0-3]
		 */
		sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
		for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
			sge_qbase->reg_data[i] =
				t6_sge_qbase_index_array[i + 1];

		for (i = 0; i <= PCIE_FW_MASTER_M; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, true);

		for (i = 0; i < padap->params.arch.vfcount; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, false);

		sge_qbase->vfcount = padap->params.arch.vfcount;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulprx_la *ulprx_la_buff;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
	ulprx_la_buff->size = ULPRX_LA_SIZE;
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tp_la *tp_la_buff;
	int size, rc;

	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
			  struct cudbg_buffer *dbg_buff,
			  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_meminfo *meminfo_buff;
	struct cudbg_ver_hdr *ver_hdr;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_ver_hdr) +
			    sizeof(struct cudbg_meminfo),
			    &temp_buff);
	if (rc)
		return rc;

	ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_MEMINFO_REV;
	ver_hdr->size = sizeof(struct cudbg_meminfo);

	meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
						sizeof(*ver_hdr));
	rc = cudbg_fill_meminfo(padap, meminfo_buff);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_cim_pif_la *cim_pif_la_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = sizeof(struct cudbg_cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_clk_info *clk_info_buff;
	u64 tp_tick_us;
	int rc;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
			    &temp_buff);
	if (rc)
		return rc;

	clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
	clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
	clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
	/* in us */
	clk_info_buff->dack_timer =
		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
		t4_read_reg(padap, TP_DACK_TIMER_A);
	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
	clk_info_buff->initial_srtt =
		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tid_info_region_rev1 *tid1;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tid_info_region *tid;
	u32 para[2], val[2];
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
	if (rc)
		return rc;

	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

	/* If firmware is not attached/alive, use backdoor register
	 * access to collect dump.
	 */
	if (!is_fw_attached(pdbg_init))
		goto fill_tid;

#define FW_PARAM_PFVF_A(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))

	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	tid->uotid_base = val[0];
	tid->nuotids = val[1] - val[0] + 1;

	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

#undef FW_PARAM_PFVF_A

fill_tid:
	tid->ntids = padap->tids.ntids;
	tid->nstids = padap->tids.nstids;
	tid->stid_base = padap->tids.stid_base;
	tid->hash_base = padap->tids.hash_base;

	tid->natids = padap->tids.natids;
	tid->nftids = padap->tids.nftids;
	tid->ftid_base = padap->tids.ftid_base;
	tid->aftid_base = padap->tids.aftid_base;
	tid->aftid_end = padap->tids.aftid_end;

	tid->sftid_base = padap->tids.sftid_base;
	tid->nsftids = padap->tids.nsftids;

	tid->flags = padap->flags;
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

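/* Dump the PCIe config space registers; t5_pcie_config_array holds
 * [start, end] address pairs that are read 4 bytes at a time.
 */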
int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size, *value, j;
	int i, rc, n;

	size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	value = (u32 *)temp_buff.data;
	for (i = 0; i < n; i++) {
		for (j = t5_pcie_config_array[i][0];
		     j <= t5_pcie_config_array[i][1]; j += 4) {
			t4_hw_pci_read_cfg4(padap, j, value);
			value++;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

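/* Each SGE context type keeps a "valid" flag at a fixed bit offset within
 * the raw context data; only contexts with this bit set are collected.
 */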
static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
{
	int index, bit, bit_pos = 0;

	switch (type) {
	case CTXT_EGRESS:
		bit_pos = 176;
		break;
	case CTXT_INGRESS:
		bit_pos = 141;
		break;
	case CTXT_FLM:
		bit_pos = 89;
		break;
	}
	index = bit_pos / 32;
	bit = bit_pos % 32;
	return buf[index] & (1U << bit);
}

static int cudbg_get_ctxt_region_info(struct adapter *padap,
				      struct cudbg_region_info *ctx_info,
				      u8 *mem_type)
{
	struct cudbg_mem_desc mem_desc;
	struct cudbg_meminfo meminfo;
	u32 i, j, value, found;
	u8 flq;
	int rc;

	rc = cudbg_fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	/* Get EGRESS and INGRESS context region size */
	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
		found = 0;
		memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
		for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
			rc = cudbg_get_mem_region(padap, &meminfo, j,
						  cudbg_region[i],
						  &mem_desc);
			if (!rc) {
				found = 1;
				rc = cudbg_get_mem_relative(padap, &meminfo, j,
							    &mem_desc.base,
							    &mem_desc.limit);
				if (rc) {
					ctx_info[i].exist = false;
					break;
				}
				ctx_info[i].exist = true;
				ctx_info[i].start = mem_desc.base;
				ctx_info[i].end = mem_desc.limit;
				mem_type[i] = j;
				break;
			}
		}
		if (!found)
			ctx_info[i].exist = false;
	}

	/* Get FLM and CNM max qid. */
	value = t4_read_reg(padap, SGE_FLM_CFG_A);

	/* Get number of data freelist queues */
	flq = HDRSTARTFLQ_G(value);
	ctx_info[CTXT_FLM].exist = true;
	ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;

	/* The number of CONM contexts are same as number of freelist
	 * queues.
	 */
	ctx_info[CTXT_CNM].exist = true;
	ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;

	return 0;
}

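/* Compute the buffer space needed to dump all SGE contexts.  EGRESS and
 * INGRESS regions that can't be located in memory fall back to a fixed
 * low-memory queue count (CUDBG_LOWMEM_MAX_CTXT_QIDS).
 */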
int cudbg_dump_context_size(struct adapter *padap)
{
	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
	u32 i, size = 0;
	int rc;

	/* Get max valid qid for each type of queue */
	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
	if (rc)
		return rc;

	for (i = 0; i < CTXT_CNM; i++) {
		if (!region_info[i].exist) {
			if (i == CTXT_EGRESS || i == CTXT_INGRESS)
				size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
					SGE_CTXT_SIZE;
			continue;
		}

		size += (region_info[i].end - region_info[i].start + 1) /
			SGE_CTXT_SIZE;
	}
	return size * sizeof(struct cudbg_ch_cntxt);
}

static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
				enum ctxt_type ctype, u32 *data)
{
	struct adapter *padap = pdbg_init->adap;
	int rc = -1;

	/* Under heavy traffic, the SGE Queue contexts registers will be
	 * frequently accessed by firmware.
	 *
	 * To avoid conflicts with firmware, always ask firmware to fetch
	 * the SGE Queue contexts via mailbox. On failure, fallback to
	 * accessing hardware registers directly.
	 */
	if (is_fw_attached(pdbg_init))
		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
	if (rc)
		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}

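/* Read contexts 0..max_qid-1 of @ctxt_type and append only the valid ones
 * to *@out_buff.  Since FLM and CNM contexts are 1-to-1 mapped, each valid
 * FLM context is immediately followed by its CNM context.
 */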
static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
				  u8 ctxt_type,
				  struct cudbg_ch_cntxt **out_buff)
{
	struct cudbg_ch_cntxt *buff = *out_buff;
	int rc;
	u32 j;

	for (j = 0; j < max_qid; j++) {
		cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
		rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
		if (!rc)
			continue;

		buff->cntxt_type = ctxt_type;
		buff->cntxt_id = j;
		buff++;
		if (ctxt_type == CTXT_FLM) {
			cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
			buff->cntxt_type = CTXT_CNM;
			buff->cntxt_id = j;
			buff++;
		}
	}

	*out_buff = buff;
}

1965int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
1966 struct cudbg_buffer *dbg_buff,
1967 struct cudbg_error *cudbg_err)
1968{
1969 struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
1970 struct adapter *padap = pdbg_init->adap;
1971 u32 j, size, max_ctx_size, max_ctx_qid;
1972 u8 mem_type[CTXT_INGRESS + 1] = { 0 };
1973 struct cudbg_buffer temp_buff = { 0 };
1974 struct cudbg_ch_cntxt *buff;
1975 u64 *dst_off, *src_off;
1976 u8 *ctx_buf;
1977 u8 i, k;
1978 int rc;
1979
1980
1981 rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
1982 if (rc)
1983 return rc;
1984
1985 rc = cudbg_dump_context_size(padap);
1986 if (rc <= 0)
1987 return CUDBG_STATUS_ENTITY_NOT_FOUND;
1988
1989 size = rc;
1990 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1991 if (rc)
1992 return rc;
1993
1994
1995
1996
1997 max_ctx_size = max(region_info[CTXT_EGRESS].end -
1998 region_info[CTXT_EGRESS].start + 1,
1999 region_info[CTXT_INGRESS].end -
2000 region_info[CTXT_INGRESS].start + 1);
2001
2002 ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
2003 if (!ctx_buf) {
2004 cudbg_put_buff(pdbg_init, &temp_buff);
2005 return -ENOMEM;
2006 }
2007
2008 buff = (struct cudbg_ch_cntxt *)temp_buff.data;
2009
2010
2011
2012
2013
2014 for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
2015 if (!region_info[i].exist) {
2016 max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
2017 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
2018 &buff);
2019 continue;
2020 }
2021
2022 max_ctx_size = region_info[i].end - region_info[i].start + 1;
2023 max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
2024
2025
2026
2027
2028 if (is_fw_attached(pdbg_init)) {
2029 t4_sge_ctxt_flush(padap, padap->mbox, i);
2030
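			/* Read the entire context region from adapter memory */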
			rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
					  region_info[i].start, max_ctx_size,
					  (__be32 *)ctx_buf, 1);
		}

		if (rc || !is_fw_attached(pdbg_init)) {
			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
					      &buff);
			continue;
		}

		for (j = 0; j < max_ctx_qid; j++) {
			src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
			dst_off = (u64 *)buff->data;

			/* The data is stored in 64-bit cpu order.  Convert
			 * it to big endian before parsing.
			 */
			for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
				dst_off[k] = cpu_to_be64(src_off[k]);

			rc = cudbg_sge_ctxt_check_valid(buff->data, i);
			if (!rc)
				continue;

			buff->cntxt_type = i;
			buff->cntxt_id = j;
			buff++;
		}
	}

	kvfree(ctx_buf);

	/* Collect FREELIST and CONGESTION MANAGER contexts */
	max_ctx_size = region_info[CTXT_FLM].end -
		       region_info[CTXT_FLM].start + 1;
	max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;

	/* Since FLM and CONM contexts are 1-to-1 mapped, the call below
	 * fetches both FLM and CONM contexts.
	 */
	cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
	*mask = x | y;
	y = (__force u64)cpu_to_be64(y);
	memcpy(addr, (char *)&y + 2, ETH_ALEN);
}

static void cudbg_mps_rpl_backdoor(struct adapter *padap,
				   struct fw_ldst_mps_rplc *mps_rplc)
{
	if (is_t5(padap->params.chip)) {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP3_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP2_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP1_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP0_A));
	} else {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP7_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP6_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP5_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP4_A));
	}
	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}

static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
				    struct cudbg_mps_tcam *tcam, u32 idx)
{
	struct adapter *padap = pdbg_init->adap;
	u64 tcamy, tcamx, val;
	u32 ctl, data2;
	int rc = 0;

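	/* T6 and later read the MPS TCAM indirectly through the DATA2
	 * control register; earlier chips read the TCAM X/Y pair directly.
	 */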
	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
		/* CtlReqID   - 1: use Host Driver Requester ID
		 * CtlCmdType - 0: Read, 1: Write
		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
		 * CtlXYBitSel- 0: Y bit, 1: X bit
		 */
		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
		if (idx < 256)
			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
		else
			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);

		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamy = DMACH_G(val) << 32;
		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		tcam->lookup_type = DATALKPTYPE_G(data2);

		/* 0 - Outer header, 1 - Inner header
		 * [71:48] bit locations are overloaded for
		 * outer vs. inner lookup types.
		 */
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI */
			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
			tcam->dip_hit = data2 & DATADIPHIT_F;
		} else {
			tcam->vlan_vld = data2 & DATAVIDH2_F;
			tcam->ivlan = VIDL_G(val);
		}

		tcam->port_num = DATAPORTNUM_G(data2);

		/* Read tcamx. Change the control param */
		ctl |= CTLXYBITSEL_V(1);
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamx = DMACH_G(val) << 32;
		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI mask */
			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
		}
	} else {
		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
	}

	/* If no entry, return */
	if (tcamx & tcamy)
		return rc;

	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));

	if (is_t5(padap->params.chip))
		tcam->repli = (tcam->cls_lo & REPLICATE_F);
	else if (is_t6(padap->params.chip))
		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);

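	/* For replicated entries, also fetch the VF replication map */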
	if (tcam->repli) {
		struct fw_ldst_cmd ldst_cmd;
		struct fw_ldst_mps_rplc mps_rplc;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
		ldst_cmd.u.mps.rplc.fid_idx =
			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
			      FW_LDST_CMD_IDX_V(idx));

		/* If firmware is not attached/alive, use backdoor register
		 * access to collect dump.
		 */
		if (is_fw_attached(pdbg_init))
			rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
					sizeof(ldst_cmd), &ldst_cmd);

		if (rc || !is_fw_attached(pdbg_init)) {
			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
			/* Ignore error since we collected directly from
			 * reading registers.
			 */
			rc = 0;
		} else {
			mps_rplc = ldst_cmd.u.mps.rplc;
		}

		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
		}
	}
	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
	tcam->idx = idx;
	tcam->rplc_size = padap->params.arch.mps_rplc_size;
	return rc;
}

int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size = 0, i, n, total_size = 0;
	struct cudbg_mps_tcam *tcam;
	int rc;

	n = padap->params.arch.mps_tcam_size;
	size = sizeof(struct cudbg_mps_tcam) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
	for (i = 0; i < n; i++) {
		rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
		total_size += sizeof(struct cudbg_mps_tcam);
		tcam++;
	}

	if (!total_size) {
		rc = CUDBG_SYSTEM_ERROR;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	char vpd_str[CUDBG_VPD_VER_LEN + 1];
	u32 scfg_vers, vpd_vers, fw_vers;
	struct cudbg_vpd_data *vpd_data;
	struct vpd_params vpd = { 0 };
	int rc, ret;

	rc = t4_get_raw_vpd_params(padap, &vpd);
	if (rc)
		return rc;

	rc = t4_get_fw_version(padap, &fw_vers);
	if (rc)
		return rc;

	/* The Serial Configuration Version is located beyond the PF's vpd
	 * size. Temporarily give access to the entire EEPROM to read it.
	 */
	rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
	if (rc < 0)
		return rc;

	ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
				 &scfg_vers);

	/* Restore back to the original PF's vpd size */
	rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
	if (rc < 0)
		return rc;

	if (ret)
		return ret;

	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
				vpd_str);
	if (rc)
		return rc;

	vpd_str[CUDBG_VPD_VER_LEN] = '\0';
	rc = kstrtouint(vpd_str, 0, &vpd_vers);
	if (rc)
		return rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
			    &temp_buff);
	if (rc)
		return rc;

	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
	memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
	memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
	memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
	memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
	vpd_data->scfg_vers = scfg_vers;
	vpd_data->vpd_vers = vpd_vers;
	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
			  struct cudbg_tid_data *tid_data)
{
	struct adapter *padap = pdbg_init->adap;
	int i, cmd_retry = 8;
	u32 val;

	/* Fill REQ_DATA regs with 0's */
	for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
		t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);

	/* Write DBIG command */
	val = DBGICMD_V(4) | DBGITID_V(tid);
	t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
	tid_data->dbig_cmd = val;

	val = DBGICMDSTRT_F | DBGICMDMODE_V(1);
	t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
	tid_data->dbig_conf = val;

	/* Poll the DBGICMDBUSY bit */
	val = 1;
	while (val) {
		val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
		val = val & DBGICMDBUSY_F;
		cmd_retry--;
		if (!cmd_retry)
			return CUDBG_SYSTEM_ERROR;
	}

	/* Check RESP status */
	val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
	tid_data->dbig_rsp_stat = val;
	if (!(val & 1))
		return CUDBG_SYSTEM_ERROR;

	/* Read RESP data */
	for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
		tid_data->data[i] = t4_read_reg(padap,
						LE_DB_DBGI_RSP_DATA_A +
						(i << 2));
	tid_data->tid = tid;
	return 0;
}

static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
{
	int type = LE_ET_UNKNOWN;

	if (tid < tcam_region.server_start)
		type = LE_ET_TCAM_CON;
	else if (tid < tcam_region.filter_start)
		type = LE_ET_TCAM_SERVER;
	else if (tid < tcam_region.clip_start)
		type = LE_ET_TCAM_FILTER;
	else if (tid < tcam_region.routing_start)
		type = LE_ET_TCAM_CLIP;
	else if (tid < tcam_region.tid_hash_base)
		type = LE_ET_TCAM_ROUTING;
	else if (tid < tcam_region.max_tid)
		type = LE_ET_HASH_CON;
	else
		type = LE_ET_INVALID_TID;

	return type;
}

static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
			       struct cudbg_tcam tcam_region)
{
	int ipv6 = 0;
	int le_type;

	le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
	if (tid_data->tid & 1)
		return 0;

	if (le_type == LE_ET_HASH_CON) {
		ipv6 = tid_data->data[16] & 0x8000;
	} else if (le_type == LE_ET_TCAM_CON) {
		ipv6 = tid_data->data[16] & 0x8000;
		if (ipv6)
			ipv6 = tid_data->data[9] == 0x00C00000;
	} else {
		ipv6 = 0;
	}
	return ipv6;
}

void cudbg_fill_le_tcam_info(struct adapter *padap,
			     struct cudbg_tcam *tcam_region)
{
	u32 value;

	/* Get the LE regions */
	value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
	tcam_region->tid_hash_base = value;

	/* Get routing table index */
	value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
	tcam_region->routing_start = value;

	/* Get clip table index. For T6 there is separate CLIP TCAM */
	if (is_t6(padap->params.chip))
		value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
	else
		value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
	tcam_region->clip_start = value;

	/* Get filter table index */
	value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
	tcam_region->filter_start = value;

	/* Get server table index */
	value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
	tcam_region->server_start = value;

	/* Check whether hash is enabled and calculate the max tids */
	value = t4_read_reg(padap, LE_DB_CONFIG_A);
	if ((value >> HASHEN_S) & 1) {
		value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
			tcam_region->max_tid = (value & 0xFFFFF) +
					       tcam_region->tid_hash_base;
		} else {
			value = HASHTIDSIZE_G(value);
			value = 1 << value;
			tcam_region->max_tid = value +
					       tcam_region->tid_hash_base;
		}
	} else { /* hash not enabled */
		if (is_t6(padap->params.chip))
			tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
					       CUDBG_MAX_TID_COMP_EN :
					       CUDBG_MAX_TID_COMP_DIS;
		else
			tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
	}

	if (is_t6(padap->params.chip))
		tcam_region->max_tid += CUDBG_T6_CLIP;
}

int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
			  struct cudbg_buffer *dbg_buff,
			  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tcam tcam_region = { 0 };
	struct cudbg_tid_data *tid_data;
	u32 bytes = 0;
	int rc, size;
	u32 i;

	cudbg_fill_le_tcam_info(padap, &tcam_region);

	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
	size += sizeof(struct cudbg_tcam);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

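	/* Write the tcam region info first, followed by each tid's data */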
	memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
	bytes = sizeof(struct cudbg_tcam);
	tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);

	for (i = 0; i < tcam_region.max_tid; ) {
		rc = cudbg_read_tid(pdbg_init, i, tid_data);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			/* Update tcam header and exit */
			tcam_region.max_tid = i;
			memcpy(temp_buff.data, &tcam_region,
			       sizeof(struct cudbg_tcam));
			goto out;
		}

		if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
			/* T6 CLIP TCAM: an ipv6 entry takes 4 tids */
			if (is_t6(padap->params.chip) &&
			    i >= tcam_region.clip_start &&
			    i < tcam_region.clip_start + CUDBG_T6_CLIP)
				i += 4;
			else /* Main TCAM: an ipv6 entry takes 2 tids */
				i += 2;
		} else {
			i++;
		}

		tid_data++;
		bytes += sizeof(struct cudbg_tid_data);
	}

out:
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size;
	int rc;

	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_read_cong_tbl(padap, (void *)temp_buff.data);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ma_indr;
	int i, rc, n;
	u32 size, j;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
		ma_indr++;
	}

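	/* These MA registers are read one location at a time, stepping
	 * the local offset by 0x20 between reads.
	 */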
	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	struct cudbg_ver_hdr *ver_hdr;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_ver_hdr) +
			    sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_ULPTX_LA_REV;
	ver_hdr->size = sizeof(struct cudbg_ulptx_la);

	ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
						  sizeof(*ver_hdr));
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}

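	/* Dump the ULP TX ASIC debug capture registers */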
	for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
		t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
		ulptx_la_buff->rdptr_asic[i] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
		ulptx_la_buff->rddata_asic[i][0] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
		ulptx_la_buff->rddata_asic[i][1] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
		ulptx_la_buff->rddata_asic[i][2] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
		ulptx_la_buff->rddata_asic[i][3] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
		ulptx_la_buff->rddata_asic[i][4] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
		ulptx_la_buff->rddata_asic[i][5] =
				t4_read_reg(padap, PM_RX_BASE_ADDR);
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 local_offset, local_range;
	struct ireg_buf *up_cim;
	u32 size, j, iter;
	u32 instance = 0;
	int i, rc, n;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
	else
		return CUDBG_STATUS_NOT_IMPLEMENTED;

	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
			instance = t5_up_cim_reg_array[i][4];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
			instance = t6_up_cim_reg_array[i][4];
		}

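		/* Pick the read count and stride based on the register
		 * instance type.
		 */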
		switch (instance) {
		case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
			iter = up_cim_reg->ireg_offset_range;
			local_offset = 0x120;
			local_range = 1;
			break;
		case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
			iter = up_cim_reg->ireg_offset_range;
			local_offset = 0x10;
			local_range = 1;
			break;
		default:
			iter = 1;
			local_offset = 0;
			local_range = up_cim_reg->ireg_offset_range;
			break;
		}

		for (j = 0; j < iter; j++, buff++) {
			rc = t4_cim_read(padap,
					 up_cim_reg->ireg_local_offset +
					 (j * local_offset), local_range, buff);
			if (rc) {
				cudbg_put_buff(pdbg_init, &temp_buff);
				return rc;
			}
		}
		up_cim++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pbt_tables *pbt;
	int i, rc;
	u32 addr;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_pbt_tables),
			    &temp_buff);
	if (rc)
		return rc;

	pbt = (struct cudbg_pbt_tables *)temp_buff.data;

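	/* PBT dynamic entries */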
	addr = CUDBG_CHAC_PBT_ADDR;
	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_dynamic[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* PBT static entries */
	/* Static entries start when bit 6 of the address is set */
	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_static[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* LRF entries */
	addr = CUDBG_CHAC_PBT_LRF;
	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->lrf_table[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* PBT data entries */
	addr = CUDBG_CHAC_PBT_DATA;
	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_data[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct cudbg_buffer temp_buff = { 0 };
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;
	u64 flit;
	u32 size;

	log = padap->mbox_log;
	mbox_cmds = padap->mbox_log->size;
	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
	for (k = 0; k < mbox_cmds; k++) {
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;

		entry = mbox_cmd_log_entry(log, entry_idx);

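		/* Skip over unused entries */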
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}
		mboxlog++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *hma_indr;
	int i, rc, n;
	u32 size;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	hma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *hma_fli = &hma_indr->tp_pio;
		u32 *buff = hma_indr->outbuf;

		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
				 buff, hma_fli->ireg_offset_range,
				 hma_fli->ireg_local_offset);
		hma_indr++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
				   u32 *num, u32 *size)
{
	u32 tot_entries = 0, tot_size = 0;

	/* NIC TXQ, RXQ, FLQ, and CTRLQ */
	tot_entries += MAX_ETH_QSETS * 3;
	tot_entries += MAX_CTRL_QUEUES;

	tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
	tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
	tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
	tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
		    MAX_CTRL_TXQ_DESC_SIZE;

	/* FW_EVTQ and INTRQ */
	tot_entries += INGQ_EXTRAS;
	tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;

	/* PTP_TXQ */
	tot_entries += 1;
	tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;

	/* ULD TXQ, RXQ, and FLQ */
	tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;

	tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
		    MAX_TXQ_DESC_SIZE;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
		    MAX_RXQ_DESC_SIZE;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
		    MAX_FL_DESC_SIZE;

	/* ULD CIQ */
	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
		    MAX_RXQ_DESC_SIZE;

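	/* Account for the version header, qdesc info header, and
	 * per-queue entry headers.
	 */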
	tot_size += sizeof(struct cudbg_ver_hdr) +
		    sizeof(struct cudbg_qdesc_info) +
		    sizeof(struct cudbg_qdesc_entry) * tot_entries;

	if (num)
		*num = tot_entries;

	if (size)
		*size = tot_size;
}

int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	u32 num_queues = 0, tot_entries = 0, size = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_qdesc_entry *qdesc_entry;
	struct cudbg_qdesc_info *qdesc_info;
	struct cudbg_ver_hdr *ver_hdr;
	struct sge *s = &padap->sge;
	u32 i, j, cur_off, tot_len;
	u8 *data;
	int rc;

	cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
	size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);
	tot_len = size;
	data = kvzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ver_hdr = (struct cudbg_ver_hdr *)data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_QDESC_REV;
	ver_hdr->size = sizeof(struct cudbg_qdesc_info);
	size -= sizeof(*ver_hdr);

	qdesc_info = (struct cudbg_qdesc_info *)(data +
						 sizeof(*ver_hdr));
	size -= sizeof(*qdesc_info);
	qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;

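/* Helpers to snapshot one queue's descriptor ring into the dump buffer.
 * Collection bails out to the given label once the precomputed buffer
 * space is exhausted.
 */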
#define QDESC_GET(q, desc, type, label) do { \
	if (size <= 0) { \
		goto label; \
	} \
	if (desc) { \
		cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
		size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
		num_queues++; \
		qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
	} \
} while (0)

#define QDESC_GET_TXQ(q, type, label) do { \
	struct sge_txq *txq = (struct sge_txq *)q; \
	QDESC_GET(txq, txq->desc, type, label); \
} while (0)

#define QDESC_GET_RXQ(q, type, label) do { \
	struct sge_rspq *rxq = (struct sge_rspq *)q; \
	QDESC_GET(rxq, rxq->desc, type, label); \
} while (0)

#define QDESC_GET_FLQ(q, type, label) do { \
	struct sge_fl *flq = (struct sge_fl *)q; \
	QDESC_GET(flq, flq->desc, type, label); \
} while (0)

	/* Collect NIC TXQs */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);

	/* Collect NIC RXQs */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);

	/* Collect NIC FLQs */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);

	/* Collect CTRLQs */
	for (i = 0; i < padap->params.nports; i++)
		QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);

	/* Collect FW_EVTQ */
	QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);

	/* Collect INTRQ */
	QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);

	/* Collect PTP_TXQ */
	QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);

	/* Collect ULD queues */
	mutex_lock(&uld_mutex);

	if (s->uld_txq_info) {
		struct sge_uld_txq_info *utxq;

		/* Collect ULD TXQs */
		for (j = 0; j < CXGB4_TX_MAX; j++) {
			if (!s->uld_txq_info[j])
				continue;

			utxq = s->uld_txq_info[j];
			for (i = 0; i < utxq->ntxq; i++)
				QDESC_GET_TXQ(&utxq->uldtxq[i].q,
					      cudbg_uld_txq_to_qtype(j),
					      out_unlock);
		}
	}

	if (s->uld_rxq_info) {
		struct sge_uld_rxq_info *urxq;
		u32 base;

		/* Collect ULD RXQs */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			for (i = 0; i < urxq->nrxq; i++)
				QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
					      cudbg_uld_rxq_to_qtype(j),
					      out_unlock);
		}

		/* Collect ULD FLQs */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			for (i = 0; i < urxq->nrxq; i++)
				QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
					      cudbg_uld_flq_to_qtype(j),
					      out_unlock);
		}

		/* Collect ULD CIQs */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			base = urxq->nrxq;
			for (i = 0; i < urxq->nciq; i++)
				QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
					      cudbg_uld_ciq_to_qtype(j),
					      out_unlock);
		}
	}

out_unlock:
	mutex_unlock(&uld_mutex);

out:
	qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
	qdesc_info->num_queues = num_queues;
	cur_off = 0;
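	/* Copy the collected data into the dump buffer in chunks */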
	while (tot_len) {
		u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);

		rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
				    &temp_buff);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out_free;
		}

		memcpy(temp_buff.data, data + cur_off, chunk_size);
		tot_len -= chunk_size;
		cur_off += chunk_size;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out_free;
		}
	}

out_free:
	if (data)
		kvfree(data);

#undef QDESC_GET_FLQ
#undef QDESC_GET_RXQ
#undef QDESC_GET_TXQ
#undef QDESC_GET

	return rc;
}