1
2
3
4
5
6
7
8
9
10
11#ifdef CONFIG_DEBUG_FS
12
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
17
18#include "rvu_struct.h"
19#include "rvu_reg.h"
20#include "rvu.h"
21#include "cgx.h"
22#include "npc.h"
23
24#define DEBUGFS_DIR_NAME "octeontx2"
25
/* Generic index names for CGX hardware statistics counters.
 * The same CGX_STATn index maps to a different counter for RX and TX;
 * the human-readable meanings are in cgx_rx_stats_fields[] and
 * cgx_tx_stats_fields[] below.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47
48
/* Per-LF NIX transmit statistics: values are the hardware stat register
 * indices used when reading NIX_AF_LFX_TX_STATX.
 */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};
57
58
/* Per-LF NIX receive statistics: values are the hardware stat register
 * indices used when reading NIX_AF_LFX_RX_STATX.
 */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};
74
/* Human-readable labels for the CGX RX stat counters, indexed by the
 * CGX_STATn enum above.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
90
91static char *cgx_tx_stats_fields[] = {
92 [CGX_STAT0] = "Packets dropped due to excessive collisions",
93 [CGX_STAT1] = "Packets dropped due to excessive deferral",
94 [CGX_STAT2] = "Multiple collisions before successful transmission",
95 [CGX_STAT3] = "Single collisions before successful transmission",
96 [CGX_STAT4] = "Total octets sent on the interface",
97 [CGX_STAT5] = "Total frames sent on the interface",
98 [CGX_STAT6] = "Packets sent with an octet count < 64",
99 [CGX_STAT7] = "Packets sent with an octet count == 64",
100 [CGX_STAT8] = "Packets sent with an octet count of 65–127",
101 [CGX_STAT9] = "Packets sent with an octet count of 128-255",
102 [CGX_STAT10] = "Packets sent with an octet count of 256-511",
103 [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
104 [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
105 [CGX_STAT13] = "Packets sent with an octet count of > 1518",
106 [CGX_STAT14] = "Packets sent to a broadcast DMAC",
107 [CGX_STAT15] = "Packets sent to the multicast DMAC",
108 [CGX_STAT16] = "Transmit underflow and were truncated",
109 [CGX_STAT17] = "Control/PAUSE packets sent",
110};
111
/* CPT (crypto accelerator) engine types */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,	/* asymmetric engine */
	CPT_SE_TYPE = 2,	/* symmetric engine */
	CPT_IE_TYPE = 3,	/* IPsec engine */
};
117
/* Number of banks in an NDC block, read from its NDC_AF_CONST register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Stubs so the FOPS macros below accept "NULL" for an unused handler:
 * token pasting turns NULL into rvu_dbg_NULL / rvu_dbg_open_NULL.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare a seq_file based debugfs file: generates an open handler that
 * binds read_op to single_open(), plus the file_operations table.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declare a raw (non seq_file) debugfs file with direct read/write ops */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
145
146static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
147
148
149static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
150 char __user *buffer,
151 size_t count, loff_t *ppos)
152{
153 int index, off = 0, flag = 0, go_back = 0, off_prev;
154 struct rvu *rvu = filp->private_data;
155 int lf, pf, vf, pcifunc;
156 struct rvu_block block;
157 int bytes_not_copied;
158 int buf_size = 2048;
159 char *buf;
160
161
162 if (*ppos != 0)
163 return 0;
164
165 buf = kzalloc(buf_size, GFP_KERNEL);
166 if (!buf)
167 return -ENOSPC;
168 off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
169 for (index = 0; index < BLK_COUNT; index++)
170 if (strlen(rvu->hw->block[index].name))
171 off += scnprintf(&buf[off], buf_size - 1 - off,
172 "%*s\t", (index - 1) * 2,
173 rvu->hw->block[index].name);
174 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
175 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
176 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
177 pcifunc = pf << 10 | vf;
178 if (!pcifunc)
179 continue;
180
181 if (vf) {
182 go_back = scnprintf(&buf[off],
183 buf_size - 1 - off,
184 "PF%d:VF%d\t\t", pf,
185 vf - 1);
186 } else {
187 go_back = scnprintf(&buf[off],
188 buf_size - 1 - off,
189 "PF%d\t\t", pf);
190 }
191
192 off += go_back;
193 for (index = 0; index < BLKTYPE_MAX; index++) {
194 block = rvu->hw->block[index];
195 if (!strlen(block.name))
196 continue;
197 off_prev = off;
198 for (lf = 0; lf < block.lf.max; lf++) {
199 if (block.fn_map[lf] != pcifunc)
200 continue;
201 flag = 1;
202 off += scnprintf(&buf[off], buf_size - 1
203 - off, "%3d,", lf);
204 }
205 if (flag && off_prev != off)
206 off--;
207 else
208 go_back++;
209 off += scnprintf(&buf[off], buf_size - 1 - off,
210 "\t");
211 }
212 if (!flag)
213 off -= go_back;
214 else
215 flag = 0;
216 off--;
217 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
218 }
219 }
220
221 bytes_not_copied = copy_to_user(buffer, buf, off);
222 kfree(buf);
223
224 if (bytes_not_copied)
225 return -EFAULT;
226
227 *ppos = off;
228 return off;
229}
230
231RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
232
233static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
234{
235 struct rvu *rvu = filp->private;
236 struct pci_dev *pdev = NULL;
237 char cgx[10], lmac[10];
238 struct rvu_pfvf *pfvf;
239 int pf, domain, blkid;
240 u8 cgx_id, lmac_id;
241 u16 pcifunc;
242
243 domain = 2;
244 seq_puts(filp, "PCI dev\t\tRVU PF Func\tNIX block\tCGX\tLMAC\n");
245 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
246 if (!is_pf_cgxmapped(rvu, pf))
247 continue;
248
249 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
250 if (!pdev)
251 continue;
252
253 cgx[0] = 0;
254 lmac[0] = 0;
255 pcifunc = pf << 10;
256 pfvf = rvu_get_pfvf(rvu, pcifunc);
257
258 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
259 blkid = 0;
260 else
261 blkid = 1;
262
263 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
264 &lmac_id);
265 sprintf(cgx, "CGX%d", cgx_id);
266 sprintf(lmac, "LMAC%d", lmac_id);
267 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
268 dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
269 }
270 return 0;
271}
272
273RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
274
275static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
276 u16 *pcifunc)
277{
278 struct rvu_block *block;
279 struct rvu_hwinfo *hw;
280
281 hw = rvu->hw;
282 block = &hw->block[blkaddr];
283
284 if (lf < 0 || lf >= block->lf.max) {
285 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
286 block->lf.max - 1);
287 return false;
288 }
289
290 *pcifunc = block->fn_map[lf];
291 if (!*pcifunc) {
292 dev_warn(rvu->dev,
293 "This LF is not attached to any RVU PFFUNC\n");
294 return false;
295 }
296 return true;
297}
298
299static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
300{
301 char *buf;
302
303 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
304 if (!buf)
305 return;
306
307 if (!pfvf->aura_ctx) {
308 seq_puts(m, "Aura context is not initialized\n");
309 } else {
310 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
311 pfvf->aura_ctx->qsize);
312 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
313 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
314 }
315
316 if (!pfvf->pool_ctx) {
317 seq_puts(m, "Pool context is not initialized\n");
318 } else {
319 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
320 pfvf->pool_ctx->qsize);
321 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
322 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
323 }
324 kfree(buf);
325}
326
327
328
329
330static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
331 int blktype)
332{
333 void (*print_qsize)(struct seq_file *filp,
334 struct rvu_pfvf *pfvf) = NULL;
335 struct dentry *current_dir;
336 struct rvu_pfvf *pfvf;
337 struct rvu *rvu;
338 int qsize_id;
339 u16 pcifunc;
340 int blkaddr;
341
342 rvu = filp->private;
343 switch (blktype) {
344 case BLKTYPE_NPA:
345 qsize_id = rvu->rvu_dbg.npa_qsize_id;
346 print_qsize = print_npa_qsize;
347 break;
348
349 case BLKTYPE_NIX:
350 qsize_id = rvu->rvu_dbg.nix_qsize_id;
351 print_qsize = print_nix_qsize;
352 break;
353
354 default:
355 return -EINVAL;
356 }
357
358 if (blktype == BLKTYPE_NPA) {
359 blkaddr = BLKADDR_NPA;
360 } else {
361 current_dir = filp->file->f_path.dentry->d_parent;
362 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
363 BLKADDR_NIX1 : BLKADDR_NIX0);
364 }
365
366 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
367 return -EINVAL;
368
369 pfvf = rvu_get_pfvf(rvu, pcifunc);
370 print_qsize(filp, pfvf);
371
372 return 0;
373}
374
375static ssize_t rvu_dbg_qsize_write(struct file *filp,
376 const char __user *buffer, size_t count,
377 loff_t *ppos, int blktype)
378{
379 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
380 struct seq_file *seqfile = filp->private_data;
381 char *cmd_buf, *cmd_buf_tmp, *subtoken;
382 struct rvu *rvu = seqfile->private;
383 struct dentry *current_dir;
384 int blkaddr;
385 u16 pcifunc;
386 int ret, lf;
387
388 cmd_buf = memdup_user(buffer, count);
389 if (IS_ERR(cmd_buf))
390 return -ENOMEM;
391
392 cmd_buf[count] = '\0';
393
394 cmd_buf_tmp = strchr(cmd_buf, '\n');
395 if (cmd_buf_tmp) {
396 *cmd_buf_tmp = '\0';
397 count = cmd_buf_tmp - cmd_buf + 1;
398 }
399
400 cmd_buf_tmp = cmd_buf;
401 subtoken = strsep(&cmd_buf, " ");
402 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
403 if (cmd_buf)
404 ret = -EINVAL;
405
406 if (!strncmp(subtoken, "help", 4) || ret < 0) {
407 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
408 goto qsize_write_done;
409 }
410
411 if (blktype == BLKTYPE_NPA) {
412 blkaddr = BLKADDR_NPA;
413 } else {
414 current_dir = filp->f_path.dentry->d_parent;
415 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
416 BLKADDR_NIX1 : BLKADDR_NIX0);
417 }
418
419 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
420 ret = -EINVAL;
421 goto qsize_write_done;
422 }
423 if (blktype == BLKTYPE_NPA)
424 rvu->rvu_dbg.npa_qsize_id = lf;
425 else
426 rvu->rvu_dbg.nix_qsize_id = lf;
427
428qsize_write_done:
429 kfree(cmd_buf_tmp);
430 return ret ? ret : count;
431}
432
/* NPA-specific wrapper around the common qsize write handler */
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NPA);
}

/* NPA-specific wrapper around the common qsize show handler */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
447
448
/* Dump every field of an NPA aura context (from an AQ read response),
 * grouped by the 64-bit context word (W0..W6) it lives in.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
}
489
490
/* Dump every field of an NPA pool context (from an AQ read response),
 * grouped by the 64-bit context word (W0..W8) it lives in.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
}
531
532
/* Show handler shared by the NPA aura_ctx and pool_ctx debugfs files.
 * Reads the selected context(s) from hardware through the NPA admin
 * queue and pretty-prints them. Which LF/id (or "all") to dump was
 * stored earlier by the corresponding write handler.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Pick up the selection stored by the matching write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ request for the chosen context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps [0, max_id); otherwise just the single id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			   aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
612
613static int write_npa_ctx(struct rvu *rvu, bool all,
614 int npalf, int id, int ctype)
615{
616 struct rvu_pfvf *pfvf;
617 int max_id = 0;
618 u16 pcifunc;
619
620 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
621 return -EINVAL;
622
623 pfvf = rvu_get_pfvf(rvu, pcifunc);
624
625 if (ctype == NPA_AQ_CTYPE_AURA) {
626 if (!pfvf->aura_ctx) {
627 dev_warn(rvu->dev, "Aura context is not initialized\n");
628 return -EINVAL;
629 }
630 max_id = pfvf->aura_ctx->qsize;
631 } else if (ctype == NPA_AQ_CTYPE_POOL) {
632 if (!pfvf->pool_ctx) {
633 dev_warn(rvu->dev, "Pool context is not initialized\n");
634 return -EINVAL;
635 }
636 max_id = pfvf->pool_ctx->qsize;
637 }
638
639 if (id < 0 || id >= max_id) {
640 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
641 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
642 max_id - 1);
643 return -EINVAL;
644 }
645
646 switch (ctype) {
647 case NPA_AQ_CTYPE_AURA:
648 rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
649 rvu->rvu_dbg.npa_aura_ctx.id = id;
650 rvu->rvu_dbg.npa_aura_ctx.all = all;
651 break;
652
653 case NPA_AQ_CTYPE_POOL:
654 rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
655 rvu->rvu_dbg.npa_pool_ctx.id = id;
656 rvu->rvu_dbg.npa_pool_ctx.all = all;
657 break;
658 default:
659 return -EINVAL;
660 }
661 return 0;
662}
663
/* Parse "<npalf> [<id>|all]" from user space into *npalf, *id, *all.
 * cmd_buf must be at least *count + 1 bytes (the caller allocates
 * count + 1 so the NUL terminator fits). *count is trimmed to the
 * bytes actually consumed when a newline terminates the command.
 * Returns 0 on success, -EFAULT/-EINVAL/kstrtoint error otherwise.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* First token: the NPA LF index */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* Second (optional) token: a numeric id or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* Any remaining input means too many tokens were supplied */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
701
702static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
703 const char __user *buffer,
704 size_t count, loff_t *ppos, int ctype)
705{
706 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
707 "aura" : "pool";
708 struct seq_file *seqfp = filp->private_data;
709 struct rvu *rvu = seqfp->private;
710 int npalf, id = 0, ret;
711 bool all = false;
712
713 if ((*ppos != 0) || !count)
714 return -EINVAL;
715
716 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
717 if (!cmd_buf)
718 return count;
719 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
720 &npalf, &id, &all);
721 if (ret < 0) {
722 dev_info(rvu->dev,
723 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
724 ctype_string, ctype_string);
725 goto done;
726 } else {
727 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
728 }
729done:
730 kfree(cmd_buf);
731 return ret ? ret : count;
732}
733
/* Aura-context wrapper around the common NPA ctx write handler */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}

/* Aura-context wrapper around the common NPA ctx show handler */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
748
/* Pool-context wrapper around the common NPA ctx write handler */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}

/* Pool-context wrapper around the common NPA ctx show handler */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
763
764static void ndc_cache_stats(struct seq_file *s, int blk_addr,
765 int ctype, int transaction)
766{
767 u64 req, out_req, lat, cant_alloc;
768 struct nix_hw *nix_hw;
769 struct rvu *rvu;
770 int port;
771
772 if (blk_addr == BLKADDR_NDC_NPA0) {
773 rvu = s->private;
774 } else {
775 nix_hw = s->private;
776 rvu = nix_hw->rvu;
777 }
778
779 for (port = 0; port < NDC_MAX_PORT; port++) {
780 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
781 (port, ctype, transaction));
782 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
783 (port, ctype, transaction));
784 out_req = rvu_read64(rvu, blk_addr,
785 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
786 (port, ctype, transaction));
787 cant_alloc = rvu_read64(rvu, blk_addr,
788 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
789 (port, transaction));
790 seq_printf(s, "\nPort:%d\n", port);
791 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
792 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
793 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
794 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
795 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
796 }
797}
798
799static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
800{
801 seq_puts(s, "\n***** CACHE mode read stats *****\n");
802 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
803 seq_puts(s, "\n***** CACHE mode write stats *****\n");
804 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
805 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
806 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
807 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
808 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
809 return 0;
810}
811
/* Show NDC cache stats for the NPA NDC block */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
818
819static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
820{
821 struct nix_hw *nix_hw;
822 struct rvu *rvu;
823 int bank, max_bank;
824
825 if (blk_addr == BLKADDR_NDC_NPA0) {
826 rvu = s->private;
827 } else {
828 nix_hw = s->private;
829 rvu = nix_hw->rvu;
830 }
831
832 max_bank = NDC_MAX_BANK(rvu, blk_addr);
833 for (bank = 0; bank < max_bank; bank++) {
834 seq_printf(s, "BANK:%d\n", bank);
835 seq_printf(s, "\tHits:\t%lld\n",
836 (u64)rvu_read64(rvu, blk_addr,
837 NDC_AF_BANKX_HIT_PC(bank)));
838 seq_printf(s, "\tMiss:\t%lld\n",
839 (u64)rvu_read64(rvu, blk_addr,
840 NDC_AF_BANKX_MISS_PC(bank)));
841 }
842 return 0;
843}
844
845static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
846{
847 struct nix_hw *nix_hw = filp->private;
848 int blkaddr = 0;
849 int ndc_idx = 0;
850
851 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
852 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
853 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
854
855 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
856}
857
858RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
859
860static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
861{
862 struct nix_hw *nix_hw = filp->private;
863 int blkaddr = 0;
864 int ndc_idx = 0;
865
866 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
867 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
868 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
869
870 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
871}
872
873RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
874
/* Show per-bank hit/miss counters for the NPA NDC block */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
882
/* Show per-bank hit/miss counters for the RX NDC of this NIX block.
 * NOTE(review): ndc_idx is initialized to NPA0_U rather than an
 * NIX0_RX/NIX1_RX value; harmless today because ndc_blk_hits_miss_stats()
 * ignores its idx argument — confirm before relying on idx.
 */
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
897
/* Show per-bank hit/miss counters for the TX NDC of this NIX block.
 * NOTE(review): as with the RX variant, ndc_idx is NPA0_U but unused
 * by ndc_blk_hits_miss_stats().
 */
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
912
913
/* Dump every field of a NIX send-queue context (from an AQ read
 * response), grouped by the 64-bit context word (W0..W15) it lives in.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;

	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4..W8: SQB pointer chain maintained by hardware */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10..W15: packet/octet accounting */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
976
977
/* Dump every field of a NIX receive-queue context (from an AQ read
 * response), grouped by the 64-bit context word (W0..W10) it lives in.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* W3/W4: backpressure pass/drop levels */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6..W10: packet/octet accounting */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1038
1039
1040static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1041{
1042 struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1043
1044 seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1045
1046 seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1047 seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1048 cq_ctx->avg_con, cq_ctx->cint_idx);
1049 seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1050 cq_ctx->cq_err, cq_ctx->qint_idx);
1051 seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1052 cq_ctx->bpid, cq_ctx->bp_ena);
1053
1054 seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
1055 cq_ctx->update_time, cq_ctx->avg_level);
1056 seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
1057 cq_ctx->head, cq_ctx->tail);
1058
1059 seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
1060 cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1061 seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
1062 cq_ctx->qsize, cq_ctx->caching);
1063 seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1064 cq_ctx->substream, cq_ctx->ena);
1065 seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1066 cq_ctx->drop_ena, cq_ctx->drop);
1067 seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1068}
1069
/* seq_file show handler shared by the NIX sq_ctx/rq_ctx/cq_ctx debugfs
 * files.  Reads the selected queue context(s) from hardware through the
 * admin queue mailbox and pretty-prints each one.  Which LF and queue id
 * (or "all" queues) to dump was latched earlier by write_nix_queue_ctx().
 *
 * Returns 0 on success, -EINVAL for a bad ctype, invalid LF,
 * uninitialized context or a failed AQ read.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Pick up the LF/queue selection stored by the matching write handler */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the per-ctype queue count, name and pretty-printer */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	/* Issue one AQ READ per selected queue index and dump the response */
	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1158
1159static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1160 int id, int ctype, char *ctype_string,
1161 struct seq_file *m)
1162{
1163 struct nix_hw *nix_hw = m->private;
1164 struct rvu_pfvf *pfvf;
1165 int max_id = 0;
1166 u16 pcifunc;
1167
1168 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1169 return -EINVAL;
1170
1171 pfvf = rvu_get_pfvf(rvu, pcifunc);
1172
1173 if (ctype == NIX_AQ_CTYPE_SQ) {
1174 if (!pfvf->sq_ctx) {
1175 dev_warn(rvu->dev, "SQ context is not initialized\n");
1176 return -EINVAL;
1177 }
1178 max_id = pfvf->sq_ctx->qsize;
1179 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1180 if (!pfvf->rq_ctx) {
1181 dev_warn(rvu->dev, "RQ context is not initialized\n");
1182 return -EINVAL;
1183 }
1184 max_id = pfvf->rq_ctx->qsize;
1185 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1186 if (!pfvf->cq_ctx) {
1187 dev_warn(rvu->dev, "CQ context is not initialized\n");
1188 return -EINVAL;
1189 }
1190 max_id = pfvf->cq_ctx->qsize;
1191 }
1192
1193 if (id < 0 || id >= max_id) {
1194 dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1195 ctype_string, max_id - 1);
1196 return -EINVAL;
1197 }
1198 switch (ctype) {
1199 case NIX_AQ_CTYPE_CQ:
1200 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1201 rvu->rvu_dbg.nix_cq_ctx.id = id;
1202 rvu->rvu_dbg.nix_cq_ctx.all = all;
1203 break;
1204
1205 case NIX_AQ_CTYPE_SQ:
1206 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1207 rvu->rvu_dbg.nix_sq_ctx.id = id;
1208 rvu->rvu_dbg.nix_sq_ctx.all = all;
1209 break;
1210
1211 case NIX_AQ_CTYPE_RQ:
1212 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1213 rvu->rvu_dbg.nix_rq_ctx.id = id;
1214 rvu->rvu_dbg.nix_rq_ctx.all = all;
1215 break;
1216 default:
1217 return -EINVAL;
1218 }
1219 return 0;
1220}
1221
1222static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1223 const char __user *buffer,
1224 size_t count, loff_t *ppos,
1225 int ctype)
1226{
1227 struct seq_file *m = filp->private_data;
1228 struct nix_hw *nix_hw = m->private;
1229 struct rvu *rvu = nix_hw->rvu;
1230 char *cmd_buf, *ctype_string;
1231 int nixlf, id = 0, ret;
1232 bool all = false;
1233
1234 if ((*ppos != 0) || !count)
1235 return -EINVAL;
1236
1237 switch (ctype) {
1238 case NIX_AQ_CTYPE_SQ:
1239 ctype_string = "sq";
1240 break;
1241 case NIX_AQ_CTYPE_RQ:
1242 ctype_string = "rq";
1243 break;
1244 case NIX_AQ_CTYPE_CQ:
1245 ctype_string = "cq";
1246 break;
1247 default:
1248 return -EINVAL;
1249 }
1250
1251 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1252
1253 if (!cmd_buf)
1254 return count;
1255
1256 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1257 &nixlf, &id, &all);
1258 if (ret < 0) {
1259 dev_info(rvu->dev,
1260 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1261 ctype_string, ctype_string);
1262 goto done;
1263 } else {
1264 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1265 ctype_string, m);
1266 }
1267done:
1268 kfree(cmd_buf);
1269 return ret ? ret : count;
1270}
1271
/* debugfs "sq_ctx" write handler: selects which SQ context(s) to dump */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
}

/* debugfs "sq_ctx" read handler: dumps the previously selected SQ context(s) */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1286
/* debugfs "rq_ctx" write handler: selects which RQ context(s) to dump */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}

/* debugfs "rq_ctx" read handler: dumps the previously selected RQ context(s) */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1301
/* debugfs "cq_ctx" write handler: selects which CQ context(s) to dump */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
}

/* debugfs "cq_ctx" read handler: dumps the previously selected CQ context(s) */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1316
1317static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1318 unsigned long *bmap, char *qtype)
1319{
1320 char *buf;
1321
1322 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1323 if (!buf)
1324 return;
1325
1326 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1327 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1328 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1329 qtype, buf);
1330 kfree(buf);
1331}
1332
1333static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1334{
1335 if (!pfvf->cq_ctx)
1336 seq_puts(filp, "cq context is not initialized\n");
1337 else
1338 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1339 "cq");
1340
1341 if (!pfvf->rq_ctx)
1342 seq_puts(filp, "rq context is not initialized\n");
1343 else
1344 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1345 "rq");
1346
1347 if (!pfvf->sq_ctx)
1348 seq_puts(filp, "sq context is not initialized\n");
1349 else
1350 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1351 "sq");
1352}
1353
/* debugfs NIX "qsize" write handler: selects the NIX LF to report on */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* debugfs NIX "qsize" read handler: prints CQ/RQ/SQ sizes and bitmaps */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1368
1369static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1370{
1371 struct nix_hw *nix_hw;
1372
1373 if (!is_block_implemented(rvu->hw, blkaddr))
1374 return;
1375
1376 if (blkaddr == BLKADDR_NIX0) {
1377 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1378 nix_hw = &rvu->hw->nix[0];
1379 } else {
1380 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1381 rvu->rvu_dbg.root);
1382 nix_hw = &rvu->hw->nix[1];
1383 }
1384
1385 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1386 &rvu_dbg_nix_sq_ctx_fops);
1387 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1388 &rvu_dbg_nix_rq_ctx_fops);
1389 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1390 &rvu_dbg_nix_cq_ctx_fops);
1391 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1392 &rvu_dbg_nix_ndc_tx_cache_fops);
1393 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1394 &rvu_dbg_nix_ndc_rx_cache_fops);
1395 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1396 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1397 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1398 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1399 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1400 &rvu_dbg_nix_qsize_fops);
1401}
1402
1403static void rvu_dbg_npa_init(struct rvu *rvu)
1404{
1405 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1406
1407 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1408 &rvu_dbg_npa_qsize_fops);
1409 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1410 &rvu_dbg_npa_aura_ctx_fops);
1411 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1412 &rvu_dbg_npa_pool_ctx_fops);
1413 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1414 &rvu_dbg_npa_ndc_cache_fops);
1415 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
1416 &rvu_dbg_npa_ndc_hits_miss_fops);
1417}
1418
/* Read a cumulative (PF plus its VFs) NIX RX stat for a CGX LMAC and print
 * it on success.  Evaluates to the counter value and sets the caller's
 * local 'err' on read failure.
 * NOTE(review): 'cnt' is yielded even when the read failed, so callers
 * must check 'err' before trusting the result.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})

/* Same as PRINT_CGX_CUML_NIXRX_STATUS but for the NIX TX stats */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1438
1439static int cgx_print_stats(struct seq_file *s, int lmac_id)
1440{
1441 struct cgx_link_user_info linfo;
1442 void *cgxd = s->private;
1443 u64 ucast, mcast, bcast;
1444 int stat = 0, err = 0;
1445 u64 tx_stat, rx_stat;
1446 struct rvu *rvu;
1447
1448 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1449 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1450 if (!rvu)
1451 return -ENODEV;
1452
1453
1454 seq_puts(s, "\n=======Link Status======\n\n");
1455 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1456 if (err)
1457 seq_puts(s, "Failed to read link status\n");
1458 seq_printf(s, "\nLink is %s %d Mbps\n\n",
1459 linfo.link_up ? "UP" : "DOWN", linfo.speed);
1460
1461
1462 seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
1463 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1464 if (err)
1465 return err;
1466 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1467 if (err)
1468 return err;
1469 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1470 if (err)
1471 return err;
1472 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1473 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1474 if (err)
1475 return err;
1476 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1477 if (err)
1478 return err;
1479 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1480 if (err)
1481 return err;
1482
1483
1484 seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
1485 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1486 if (err)
1487 return err;
1488 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1489 if (err)
1490 return err;
1491 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1492 if (err)
1493 return err;
1494 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1495 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1496 if (err)
1497 return err;
1498 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
1499 if (err)
1500 return err;
1501
1502
1503 seq_puts(s, "\n=======CGX RX_STATS======\n\n");
1504 while (stat < CGX_RX_STATS_COUNT) {
1505 err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1506 if (err)
1507 return err;
1508 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
1509 stat++;
1510 }
1511
1512
1513 stat = 0;
1514 seq_puts(s, "\n=======CGX TX_STATS======\n\n");
1515 while (stat < CGX_TX_STATS_COUNT) {
1516 err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1517 if (err)
1518 return err;
1519 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
1520 stat++;
1521 }
1522
1523 return err;
1524}
1525
1526static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
1527{
1528 struct dentry *current_dir;
1529 int err, lmac_id;
1530 char *buf;
1531
1532 current_dir = filp->file->f_path.dentry->d_parent;
1533 buf = strrchr(current_dir->d_name.name, 'c');
1534 if (!buf)
1535 return -EINVAL;
1536
1537 err = kstrtoint(buf + 1, 10, &lmac_id);
1538 if (!err) {
1539 err = cgx_print_stats(filp, lmac_id);
1540 if (err)
1541 return err;
1542 }
1543 return err;
1544}
1545
1546RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
1547
1548static void rvu_dbg_cgx_init(struct rvu *rvu)
1549{
1550 int i, lmac_id;
1551 char dname[20];
1552 void *cgx;
1553
1554 rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
1555
1556 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
1557 cgx = rvu_cgx_pdata(i, rvu);
1558 if (!cgx)
1559 continue;
1560
1561 sprintf(dname, "cgx%d", i);
1562 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
1563 rvu->rvu_dbg.cgx_root);
1564 for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
1565
1566 sprintf(dname, "lmac%d", lmac_id);
1567 rvu->rvu_dbg.lmac =
1568 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
1569
1570 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
1571 cgx, &rvu_dbg_cgx_stat_fops);
1572 }
1573 }
1574}
1575
1576
1577static void rvu_print_npc_mcam_info(struct seq_file *s,
1578 u16 pcifunc, int blkaddr)
1579{
1580 struct rvu *rvu = s->private;
1581 int entry_acnt, entry_ecnt;
1582 int cntr_acnt, cntr_ecnt;
1583
1584
1585 if (!pcifunc)
1586 return;
1587 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
1588 &entry_acnt, &entry_ecnt);
1589 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
1590 &cntr_acnt, &cntr_ecnt);
1591 if (!entry_acnt && !cntr_acnt)
1592 return;
1593
1594 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1595 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
1596 rvu_get_pf(pcifunc));
1597 else
1598 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
1599 rvu_get_pf(pcifunc),
1600 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1601
1602 if (entry_acnt) {
1603 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
1604 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
1605 }
1606 if (cntr_acnt) {
1607 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
1608 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
1609 }
1610}
1611
/* debugfs "mcam_info" read handler: prints NPC MCAM key widths, entry and
 * counter totals, and the per-PF/VF allocation breakdown.
 * Returns 0 on success, -ENODEV if no NPC block is present.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");

	/* MCAM key size: bits 32..34 of the per-interface KEX config */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	/* mcam->lock protects the bitmap/counter bookkeeping read below */
	mutex_lock(&mcam->lock);

	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* Nothing allocated yet: skip the per-device breakdown */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* Number of VFs under this PF: bits 12..19 of its RVU config */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
1678
1679static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
1680 void *unused)
1681{
1682 struct rvu *rvu = filp->private;
1683 struct npc_mcam *mcam;
1684 int blkaddr;
1685
1686 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1687 if (blkaddr < 0)
1688 return -ENODEV;
1689
1690 mcam = &rvu->hw->mcam;
1691
1692 seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
1693 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
1694 rvu_read64(rvu, blkaddr,
1695 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
1696
1697 return 0;
1698}
1699
1700RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
1701
/* Print each match field (and its mask) programmed in an MCAM rule, one
 * line per feature bit set in rule->features.
 * NOTE(review): rule->features is cast to unsigned long * for the bitmap
 * walk — assumes a 64-bit unsigned long; verify if 32-bit builds matter.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			/* Packet fields are stored big-endian; convert for display */
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			break;
		}
	}
}
1764
1765static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
1766 struct rvu_npc_mcam_rule *rule)
1767{
1768 if (rule->intf == NIX_INTF_TX) {
1769 switch (rule->tx_action.op) {
1770 case NIX_TX_ACTIONOP_DROP:
1771 seq_puts(s, "\taction: Drop\n");
1772 break;
1773 case NIX_TX_ACTIONOP_UCAST_DEFAULT:
1774 seq_puts(s, "\taction: Unicast to default channel\n");
1775 break;
1776 case NIX_TX_ACTIONOP_UCAST_CHAN:
1777 seq_printf(s, "\taction: Unicast to channel %d\n",
1778 rule->tx_action.index);
1779 break;
1780 case NIX_TX_ACTIONOP_MCAST:
1781 seq_puts(s, "\taction: Multicast\n");
1782 break;
1783 case NIX_TX_ACTIONOP_DROP_VIOL:
1784 seq_puts(s, "\taction: Lockdown Violation Drop\n");
1785 break;
1786 default:
1787 break;
1788 };
1789 } else {
1790 switch (rule->rx_action.op) {
1791 case NIX_RX_ACTIONOP_DROP:
1792 seq_puts(s, "\taction: Drop\n");
1793 break;
1794 case NIX_RX_ACTIONOP_UCAST:
1795 seq_printf(s, "\taction: Direct to queue %d\n",
1796 rule->rx_action.index);
1797 break;
1798 case NIX_RX_ACTIONOP_RSS:
1799 seq_puts(s, "\taction: RSS\n");
1800 break;
1801 case NIX_RX_ACTIONOP_UCAST_IPSEC:
1802 seq_puts(s, "\taction: Unicast ipsec\n");
1803 break;
1804 case NIX_RX_ACTIONOP_MCAST:
1805 seq_puts(s, "\taction: Multicast\n");
1806 break;
1807 default:
1808 break;
1809 };
1810 }
1811}
1812
/* Map an NPC interface id to a human-readable name for debugfs output;
 * unrecognized ids yield "unknown".
 */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
1830
/* debugfs "mcam_rules" read handler: walks mcam->mcam_rules under the
 * mcam lock and prints owner, direction, match fields, action and (if
 * attached) the hit counter of every installed rule.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		/* Rule owner: PF bits, plus FUNC bits when installed by a VF */
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						   "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry a forwarding target PF/VF in the action */
		if (iter->intf == NIX_INTF_RX) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
		}

		rvu_dbg_npc_mcam_show_action(s, iter);
		seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
1893
1894static void rvu_dbg_npc_init(struct rvu *rvu)
1895{
1896 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
1897
1898 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
1899 &rvu_dbg_npc_mcam_info_fops);
1900 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
1901 &rvu_dbg_npc_mcam_rules_fops);
1902 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
1903 &rvu_dbg_npc_rx_miss_act_fops);
1904}
1905
1906
/* Print FREE/BUSY status bitmasks for all CPT engines of one type
 * (SE/IE/AE).  Engines are numbered globally: SEs first, then IEs, then
 * AEs, so [e_min, e_max) selects the requested type's slice.
 * NOTE(review): busy_sts/free_sts are u64 bitmaps indexed by i — assumes
 * no engine type has more than 64 engines; confirm against the HW spec.
 * Returns 0 on success, -ENODEV without a CPT block, -EINVAL on bad type.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct rvu *rvu = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	int blkaddr;
	u64 reg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
	if (blkaddr < 0)
		return -ENODEV;

	/* Engine counts per type are packed into CPT_AF_CONSTANTS1 */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	/* STS bit0 = busy, bit1 = free for each engine */
	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)
			busy_sts |= 1ULL << i;

		if (reg & 0x2)
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
1955
/* debugfs "cpt_ae_sts" read handler: asymmetric-engine status bitmaps */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);

/* debugfs "cpt_se_sts" read handler: symmetric-engine status bitmaps */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);

/* debugfs "cpt_ie_sts" read handler: IPsec-engine status bitmaps */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
1976
/* debugfs "cpt_engines_info" read handler: for every CPT engine (SE, IE
 * and AE combined) prints its group-enable mask, active info and control
 * registers.
 * Returns 0 on success, -ENODEV if no CPT block is present.
 */
static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
{
	struct rvu *rvu = filp->private;
	u16 max_ses, max_ies, max_aes;
	u32 e_max, e;
	int blkaddr;
	u64 reg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
	if (blkaddr < 0)
		return -ENODEV;

	/* Engine counts per type are packed into CPT_AF_CONSTANTS1 */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	e_max = max_ses + max_ies + max_aes;

	seq_puts(filp, "===========================================\n");
	for (e = 0; e < e_max; e++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
		seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
			   reg & 0xff);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
		seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
			   reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
		seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
			   reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2013
/* debugfs "cpt_lfs_info" read handler: prints the CTL/CTL2/PTR_CTL and
 * AF-side config registers for every CPT local function.
 * Returns 0 on success, -ENODEV if the CPT block or its LF bitmap is
 * absent (block not initialized).
 */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct rvu *rvu = filp->private;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr;
	u64 reg;
	u32 lf;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
	if (blkaddr < 0)
		return -ENODEV;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
		/* Per-LF config lives at block->lfcfg_reg + lf*stride */
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				 (lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2048
/* debugfs "cpt_err_info" read handler: dumps the CPT AF fault, parity,
 * RVU/RAS interrupt and execution-error registers.
 * Returns 0 on success, -ENODEV if no CPT block is present.
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct rvu *rvu = filp->private;
	u64 reg0, reg1;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
	if (blkaddr < 0)
		return -ENODEV;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2078
2079static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2080{
2081 struct rvu *rvu;
2082 int blkaddr;
2083 u64 reg;
2084
2085 rvu = filp->private;
2086 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
2087 if (blkaddr < 0)
2088 return -ENODEV;
2089
2090 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2091 seq_printf(filp, "CPT instruction requests %llu\n", reg);
2092 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2093 seq_printf(filp, "CPT instruction latency %llu\n", reg);
2094 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2095 seq_printf(filp, "CPT NCB read requests %llu\n", reg);
2096 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2097 seq_printf(filp, "CPT NCB read latency %llu\n", reg);
2098 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2099 seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
2100 reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2101 seq_printf(filp, "CPT active cycles pc %llu\n", reg);
2102 reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2103 seq_printf(filp, "CPT clock count pc %llu\n", reg);
2104
2105 return 0;
2106}
2107
2108RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2109
2110static void rvu_dbg_cpt_init(struct rvu *rvu)
2111{
2112 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
2113 return;
2114
2115 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2116
2117 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, rvu,
2118 &rvu_dbg_cpt_pc_fops);
2119 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, rvu,
2120 &rvu_dbg_cpt_ae_sts_fops);
2121 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, rvu,
2122 &rvu_dbg_cpt_se_sts_fops);
2123 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, rvu,
2124 &rvu_dbg_cpt_ie_sts_fops);
2125 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, rvu,
2126 &rvu_dbg_cpt_engines_info_fops);
2127 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, rvu,
2128 &rvu_dbg_cpt_lfs_info_fops);
2129 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, rvu,
2130 &rvu_dbg_cpt_err_info_fops);
2131}
2132
2133void rvu_dbg_init(struct rvu *rvu)
2134{
2135 rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
2136
2137 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2138 &rvu_dbg_rsrc_status_fops);
2139 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, rvu,
2140 &rvu_dbg_rvu_pf_cgx_map_fops);
2141
2142 rvu_dbg_npa_init(rvu);
2143 rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2144
2145 rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2146 rvu_dbg_cgx_init(rvu);
2147 rvu_dbg_npc_init(rvu);
2148 rvu_dbg_cpt_init(rvu);
2149}
2150
/* Remove the entire debugfs tree created by rvu_dbg_init(). Safe even if
 * the root was never created: debugfs_remove_recursive() ignores NULL and
 * error-pointer dentries.
 */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2155
2156#endif
2157