1
2
3
4
5
6#include "qla_def.h"
7
8#include <linux/debugfs.h>
9#include <linux/seq_file.h>
10
/* Root of the qla2xxx debugfs hierarchy, shared by all HBAs. */
static struct dentry *qla2x00_dfs_root;
/* Number of per-host directories under the root; root is torn down at zero. */
static atomic_t qla2x00_dfs_root_count;

/* Attribute IDs dispatched by the generic rport get/set helpers below. */
#define QLA_DFS_RPORT_DEVLOSS_TMO 1
15
16static int
17qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
18{
19 switch (attr_id) {
20 case QLA_DFS_RPORT_DEVLOSS_TMO:
21
22 if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
23 return -EIO;
24 *val = fp->nvme_remote_port->dev_loss_tmo;
25 break;
26 default:
27 return -EINVAL;
28 }
29 return 0;
30}
31
32static int
33qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
34{
35 switch (attr_id) {
36 case QLA_DFS_RPORT_DEVLOSS_TMO:
37
38 if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
39 return -EIO;
40#if (IS_ENABLED(CONFIG_NVME_FC))
41 return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
42 val);
43#else
44 return -EINVAL;
45#endif
46 default:
47 return -EINVAL;
48 }
49 return 0;
50}
51
/*
 * Generate a read/write debugfs attribute for a remote port: get/set
 * thunks that forward to qla_dfs_rport_get()/qla_dfs_rport_set() with
 * @_attr_id, plus the matching *_fops formatted as "%llu\n".
 */
#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \
static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \
{ \
	struct fc_port *fp = data; \
	return qla_dfs_rport_get(fp, _attr_id, val); \
} \
static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \
{ \
	struct fc_port *fp = data; \
	return qla_dfs_rport_set(fp, _attr_id, val); \
} \
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \
	qla_dfs_rport_##_attr##_get, \
	qla_dfs_rport_##_attr##_set, "%llu\n")
66
67
68
69
70
71
72
/*
 * Generate a read-only debugfs attribute whose value is the expression
 * @_get_val, evaluated with "fp" bound to the fc_port passed as @data.
 */
#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) \
static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \
{ \
	struct fc_port *fp = data; \
	*val = _get_val; \
	return 0; \
} \
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \
	qla_dfs_rport_field_##_attr##_get, \
	NULL, "%llu\n")

/* Read-only attribute backed by an arbitrary accessor expression. */
#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)

/* Read-only attribute backed directly by the fc_port member @_attr. */
#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
89
/* Writable dev_loss_tmo attribute (NVMe targets only; see create_rport). */
DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);

/* Read-only views of fc_port state, exposed under the per-rport dir. */
DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
DEFINE_QLA_DFS_RPORT_FIELD(flags);
DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
104
105void
106qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
107{
108 char wwn[32];
109
110#define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \
111 debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \
112 fp, &qla_dfs_rport_field_##_attr##_fops)
113
114 if (!vha->dfs_rport_root || fp->dfs_rport_dir)
115 return;
116
117 sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
118 fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
119 if (!fp->dfs_rport_dir)
120 return;
121 if (NVME_TARGET(vha->hw, fp))
122 debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
123 fp, &qla_dfs_rport_dev_loss_tmo_fops);
124
125 QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
126 QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
127 QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
128 QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
129 QLA_CREATE_RPORT_FIELD_ATTR(flags);
130 QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
131 QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
132 QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
133 QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
134 QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
135 QLA_CREATE_RPORT_FIELD_ATTR(port_id);
136 QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
137}
138
139void
140qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
141{
142 if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
143 return;
144 debugfs_remove_recursive(fp->dfs_rport_dir);
145 fp->dfs_rport_dir = NULL;
146}
147
148static int
149qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
150{
151 scsi_qla_host_t *vha = s->private;
152 struct qla_hw_data *ha = vha->hw;
153 unsigned long flags;
154 struct fc_port *sess = NULL;
155 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
156
157 seq_printf(s, "%s\n", vha->host_str);
158 if (tgt) {
159 seq_puts(s, "Port ID Port Name Handle\n");
160
161 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
162 list_for_each_entry(sess, &vha->vp_fcports, list)
163 seq_printf(s, "%02x:%02x:%02x %8phC %d\n",
164 sess->d_id.b.domain, sess->d_id.b.area,
165 sess->d_id.b.al_pa, sess->port_name,
166 sess->loop_id);
167 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
168 }
169
170 return 0;
171}
172
173DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess);
174
/*
 * Dump the firmware's view of the fabric: fetch the GID list through a
 * mailbox command into a DMA-coherent buffer, then query each entry's
 * port database and print port name / port ID / loop ID.
 */
static int
qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	struct gid_list_info *gid_list;
	dma_addr_t gid_list_dma;
	fc_port_t fc_port;
	char *id_iter;
	int rc, i;
	uint16_t entries, loop_id;

	seq_printf(s, "%s\n", vha->host_str);
	/* Firmware writes the GID list via DMA; buffer must be coherent. */
	gid_list = dma_alloc_coherent(&ha->pdev->dev,
	    qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_user, vha, 0x7018,
		    "DMA allocation failed for %u\n",
		    qla2x00_gid_list_size(ha));
		return 0;
	}

	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
	    &entries);
	if (rc != QLA_SUCCESS)
		goto out_free_id_list;

	id_iter = (char *)gid_list;

	seq_puts(s, "Port Name Port ID Loop ID\n");

	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid =
		    (struct gid_list_info *)id_iter;
		loop_id = le16_to_cpu(gid->loop_id);
		memset(&fc_port, 0, sizeof(fc_port_t));

		fc_port.loop_id = loop_id;

		/* NOTE(review): rc is ignored here; if the gpdb query fails
		 * the zeroed fc_port fields are printed — confirm this
		 * best-effort behavior is intentional. */
		rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
		seq_printf(s, "%8phC %02x%02x%02x %d\n",
		    fc_port.port_name, fc_port.d_id.b.domain,
		    fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
		    fc_port.loop_id);
		/* Entries are ha->gid_list_info_size bytes apart. */
		id_iter += ha->gid_list_info_size;
	}
out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);
230
/*
 * Report firmware resource counters (exchange/IOCB/VP/FCF limits and
 * current usage) retrieved via a mailbox query, plus the driver-side
 * IOCB usage estimate when IOCB limit enforcement is enabled.
 */
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	uint16_t mb[MAX_IOCB_MB_REG];
	int rc;
	struct qla_hw_data *ha = vha->hw;
	u16 iocbs_used, i;

	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
	if (rc != QLA_SUCCESS) {
		seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
	} else {
		/* Mailbox register layout per the resource-count command. */
		seq_puts(s, "FW Resource count\n\n");
		seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
		seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
		seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
		seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
		seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
		seq_printf(s, "MAX VP count[%d]\n", mb[11]);
		seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
		seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
		    mb[20]);
		seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
		    mb[21]);
		seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
		    mb[22]);
		seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
		    mb[23]);
	}

	if (ql2xenforce_iocb_limit) {
		/* Sum in-flight IOCBs over the base qpair and all extra qpairs. */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
		    iocbs_used, ha->base_qpair->fwres.iocbs_limit);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);
279
/*
 * Aggregate and print target-mode counters across all queue pairs,
 * followed by DIF statistics, initiator error counters and per-target
 * link-down counts.  Bails out early if the chip is down.
 */
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_qpair *qpair = vha->hw->base_qpair;
	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
	u16 i;
	fc_port_t *fcport = NULL;

	if (qla2x00_chip_is_down(vha))
		return 0;

	/* Seed the totals from the base queue pair... */
	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;

	/* ...then fold in every additional queue pair (map may be sparse). */
	for (i = 0; i < vha->hw->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
		qla_core_ret_sta_ctio +=
		    qpair->tgt_counters.qla_core_ret_sta_ctio;
		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
		num_alloc_iocb_failed +=
		    qpair->tgt_counters.num_alloc_iocb_failed;
		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
	}

	seq_puts(s, "Target Counters\n");
	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
	    qla_core_sbt_cmd);
	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
	    qla_core_ret_sta_ctio);
	seq_printf(s, "qla_core_ret_ctio = %lld\n",
	    qla_core_ret_ctio);
	seq_printf(s, "core_qla_que_buf = %lld\n",
	    core_qla_que_buf);
	seq_printf(s, "core_qla_snd_status = %lld\n",
	    core_qla_snd_status);
	seq_printf(s, "core_qla_free_cmd = %lld\n",
	    core_qla_free_cmd);
	seq_printf(s, "num alloc iocb failed = %lld\n",
	    num_alloc_iocb_failed);
	seq_printf(s, "num term exchange sent = %lld\n",
	    num_term_xchg_sent);
	seq_printf(s, "num Q full sent = %lld\n",
	    num_q_full_sent);

	/* DIF (protection information) statistics are host-wide. */
	seq_printf(s, "DIF Inp Bytes = %lld\n",
	    vha->qla_stats.qla_dif_stats.dif_input_bytes);
	seq_printf(s, "DIF Outp Bytes = %lld\n",
	    vha->qla_stats.qla_dif_stats.dif_output_bytes);
	seq_printf(s, "DIF Inp Req = %lld\n",
	    vha->qla_stats.qla_dif_stats.dif_input_requests);
	seq_printf(s, "DIF Outp Req = %lld\n",
	    vha->qla_stats.qla_dif_stats.dif_output_requests);
	seq_printf(s, "DIF Guard err = %d\n",
	    vha->qla_stats.qla_dif_stats.dif_guard_err);
	seq_printf(s, "DIF Ref tag err = %d\n",
	    vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
	seq_printf(s, "DIF App tag err = %d\n",
	    vha->qla_stats.qla_dif_stats.dif_app_tag_err);

	seq_puts(s, "\n");
	seq_puts(s, "Initiator Error Counters\n");
	seq_printf(s, "HW Error Count = %14lld\n",
	    vha->hw_err_cnt);
	seq_printf(s, "Link Down Count = %14lld\n",
	    vha->short_link_down_cnt);
	seq_printf(s, "Interface Err Count = %14lld\n",
	    vha->interface_err_cnt);
	seq_printf(s, "Cmd Timeout Count = %14lld\n",
	    vha->cmd_timeout_cnt);
	seq_printf(s, "Reset Count = %14lld\n",
	    vha->reset_cmd_err_cnt);
	seq_puts(s, "\n");

	/* Per-target link-down counts, for ports with a registered rport. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (!fcport->rport)
			continue;

		seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n",
		    fcport->rport->number, fcport->tgt_short_link_down_cnt);
	}
	seq_puts(s, "\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);
384
/*
 * Dump the FCE (Fibre Channel Event) trace buffer: write pointer, DMA
 * base, enable registers, then the raw buffer as 8 words per line
 * prefixed with the corresponding DMA address.  Serialized against
 * enable/disable by fce_mutex.
 */
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	uint32_t cnt;
	uint32_t *fce;
	uint64_t fce_start;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->fce_mutex);

	seq_puts(s, "FCE Trace Buffer\n");
	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
	seq_puts(s, "FCE Enable Registers\n");
	/* fce_mb[1] is intentionally skipped here (layout per fw interface). */
	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
	    ha->fce_mb[5], ha->fce_mb[6]);

	fce = (uint32_t *) ha->fce;
	fce_start = (unsigned long long) ha->fce_dma;
	/* Buffer holds fce_calc_size() bytes, printed as 32-bit words. */
	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
		if (cnt % 8 == 0)
			seq_printf(s, "\n%llx: ",
			    (unsigned long long)((cnt * 4) + fce_start));
		else
			seq_putc(s, ' ');
		seq_printf(s, "%08x", *fce++);
	}

	seq_puts(s, "\nEnd\n");

	mutex_unlock(&ha->fce_mutex);

	return 0;
}
421
422static int
423qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
424{
425 scsi_qla_host_t *vha = inode->i_private;
426 struct qla_hw_data *ha = vha->hw;
427 int rval;
428
429 if (!ha->flags.fce_enabled)
430 goto out;
431
432 mutex_lock(&ha->fce_mutex);
433
434
435 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
436 if (rval)
437 ql_dbg(ql_dbg_user, vha, 0x705c,
438 "DebugFS: Unable to disable FCE (%d).\n", rval);
439
440 ha->flags.fce_enabled = 0;
441
442 mutex_unlock(&ha->fce_mutex);
443out:
444 return single_open(file, qla2x00_dfs_fce_show, vha);
445}
446
447static int
448qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
449{
450 scsi_qla_host_t *vha = inode->i_private;
451 struct qla_hw_data *ha = vha->hw;
452 int rval;
453
454 if (ha->flags.fce_enabled)
455 goto out;
456
457 mutex_lock(&ha->fce_mutex);
458
459
460 ha->flags.fce_enabled = 1;
461 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
462 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
463 ha->fce_mb, &ha->fce_bufs);
464 if (rval) {
465 ql_dbg(ql_dbg_user, vha, 0x700d,
466 "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
467 ha->flags.fce_enabled = 0;
468 }
469
470 mutex_unlock(&ha->fce_mutex);
471out:
472 return single_release(inode, file);
473}
474
/* File operations for the "fce" node; open/release toggle FCE tracing. */
static const struct file_operations dfs_fce_ops = {
	.open		= qla2x00_dfs_fce_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= qla2x00_dfs_fce_release,
};
481
482static int
483qla_dfs_naqp_show(struct seq_file *s, void *unused)
484{
485 struct scsi_qla_host *vha = s->private;
486 struct qla_hw_data *ha = vha->hw;
487
488 seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
489 return 0;
490}
491
492static int
493qla_dfs_naqp_open(struct inode *inode, struct file *file)
494{
495 struct scsi_qla_host *vha = inode->i_private;
496
497 return single_open(file, qla_dfs_naqp_show, vha);
498}
499
500static ssize_t
501qla_dfs_naqp_write(struct file *file, const char __user *buffer,
502 size_t count, loff_t *pos)
503{
504 struct seq_file *s = file->private_data;
505 struct scsi_qla_host *vha = s->private;
506 struct qla_hw_data *ha = vha->hw;
507 char *buf;
508 int rc = 0;
509 unsigned long num_act_qp;
510
511 if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
512 pr_err("host%ld: this adapter does not support Multi Q.",
513 vha->host_no);
514 return -EINVAL;
515 }
516
517 if (!vha->flags.qpairs_available) {
518 pr_err("host%ld: Driver is not setup with Multi Q.",
519 vha->host_no);
520 return -EINVAL;
521 }
522 buf = memdup_user_nul(buffer, count);
523 if (IS_ERR(buf)) {
524 pr_err("host%ld: fail to copy user buffer.",
525 vha->host_no);
526 return PTR_ERR(buf);
527 }
528
529 num_act_qp = simple_strtoul(buf, NULL, 0);
530
531 if (num_act_qp >= vha->hw->max_qpairs) {
532 pr_err("User set invalid number of qpairs %lu. Max = %d",
533 num_act_qp, vha->hw->max_qpairs);
534 rc = -EINVAL;
535 goto out_free;
536 }
537
538 if (num_act_qp != ha->tgt.num_act_qpairs) {
539 ha->tgt.num_act_qpairs = num_act_qp;
540 qlt_clr_qp_table(vha);
541 }
542 rc = count;
543out_free:
544 kfree(buf);
545 return rc;
546}
547
/* File operations for the writable "naqp" (num active qpairs) node. */
static const struct file_operations dfs_naqp_ops = {
	.open		= qla_dfs_naqp_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= qla_dfs_naqp_write,
};
555
556
557int
558qla2x00_dfs_setup(scsi_qla_host_t *vha)
559{
560 struct qla_hw_data *ha = vha->hw;
561
562 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
563 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
564 goto out;
565 if (!ha->fce)
566 goto out;
567
568 if (qla2x00_dfs_root)
569 goto create_dir;
570
571 atomic_set(&qla2x00_dfs_root_count, 0);
572 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
573
574create_dir:
575 if (ha->dfs_dir)
576 goto create_nodes;
577
578 mutex_init(&ha->fce_mutex);
579 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
580
581 atomic_inc(&qla2x00_dfs_root_count);
582
583create_nodes:
584 ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
585 S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops);
586
587 ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
588 ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops);
589
590 ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
591 S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops);
592
593 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
594 &dfs_fce_ops);
595
596 ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
597 S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops);
598
599 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
600 ha->tgt.dfs_naqp = debugfs_create_file("naqp",
601 0400, ha->dfs_dir, vha, &dfs_naqp_ops);
602 if (!ha->tgt.dfs_naqp) {
603 ql_log(ql_log_warn, vha, 0xd011,
604 "Unable to create debugFS naqp node.\n");
605 goto out;
606 }
607 }
608 vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
609 if (!vha->dfs_rport_root) {
610 ql_log(ql_log_warn, vha, 0xd012,
611 "Unable to create debugFS rports node.\n");
612 goto out;
613 }
614out:
615 return 0;
616}
617
618int
619qla2x00_dfs_remove(scsi_qla_host_t *vha)
620{
621 struct qla_hw_data *ha = vha->hw;
622
623 if (ha->tgt.dfs_naqp) {
624 debugfs_remove(ha->tgt.dfs_naqp);
625 ha->tgt.dfs_naqp = NULL;
626 }
627
628 if (ha->tgt.dfs_tgt_sess) {
629 debugfs_remove(ha->tgt.dfs_tgt_sess);
630 ha->tgt.dfs_tgt_sess = NULL;
631 }
632
633 if (ha->tgt.dfs_tgt_port_database) {
634 debugfs_remove(ha->tgt.dfs_tgt_port_database);
635 ha->tgt.dfs_tgt_port_database = NULL;
636 }
637
638 if (ha->dfs_fw_resource_cnt) {
639 debugfs_remove(ha->dfs_fw_resource_cnt);
640 ha->dfs_fw_resource_cnt = NULL;
641 }
642
643 if (ha->dfs_tgt_counters) {
644 debugfs_remove(ha->dfs_tgt_counters);
645 ha->dfs_tgt_counters = NULL;
646 }
647
648 if (ha->dfs_fce) {
649 debugfs_remove(ha->dfs_fce);
650 ha->dfs_fce = NULL;
651 }
652
653 if (vha->dfs_rport_root) {
654 debugfs_remove_recursive(vha->dfs_rport_root);
655 vha->dfs_rport_root = NULL;
656 }
657
658 if (ha->dfs_dir) {
659 debugfs_remove(ha->dfs_dir);
660 ha->dfs_dir = NULL;
661 atomic_dec(&qla2x00_dfs_root_count);
662 }
663
664 if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
665 qla2x00_dfs_root) {
666 debugfs_remove(qla2x00_dfs_root);
667 qla2x00_dfs_root = NULL;
668 }
669
670 return 0;
671}
672