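/*
 * lpfc_attr.c - sysfs attribute (show/store) handlers for the Emulex
 * LightPulse Fibre Channel (lpfc) SCSI/NVMe host driver.
 */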
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/aer.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_attr.h"

#define LPFC_DEF_DEVLOSS_TMO	30
#define LPFC_MIN_DEVLOSS_TMO	1
#define LPFC_MAX_DEVLOSS_TMO	255

#define LPFC_DEF_MRQ_POST	512
#define LPFC_MIN_MRQ_POST	512
#define LPFC_MAX_MRQ_POST	2048

#define LPFC_REG_WRITE_KEY_SIZE	4
#define LPFC_REG_WRITE_KEY	"EMLX"

const char *const trunk_errmsg[] = {
	"",
	"link negotiated speed does not match existing"
		" trunk - link was \"low\" speed",
	"link negotiated speed does not match"
		" existing trunk - link was \"middle\" speed",
	"link negotiated speed does not match existing"
		" trunk - link was \"high\" speed",
	"Attached to non-trunking port - F_Port",
	"Attached to non-trunking port - N_Port",
	"FLOGI response timeout",
	"non-FLOGI frame received",
	"Invalid FLOGI response",
	"Trunking initialization protocol",
	"Trunk peer device mismatch",
};
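/**
 * lpfc_jedec_to_ascii - Convert a JEDEC-style hex value to an ASCII string
 * @incr: 32-bit integer to convert, one nibble per output character.
 * @hdw: caller-provided buffer of at least 9 bytes that receives the
 *       8-character string plus a NUL terminator.
 *
 * Each of the 8 nibbles of @incr is converted to '0'-'9' or 'a'-'f',
 * with the least-significant nibble ending up in the last character.
 **/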
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
	int i, j;
	for (i = 0; i < 8; i++) {
		j = (incr & 0xf);
		if (j <= 9)
			hdw[7 - i] = 0x30 + j;
		else
			hdw[7 - i] = 0x61 + j - 10;
		incr = (incr >> 4);
	}
	hdw[8] = 0;
	return;
}
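/**
 * lpfc_drvr_version_show - Return the lpfc driver description and version
 * @dev: class unused variable.
 * @attr: device attribute, not used.
 * @buf: on return contains the module description text.
 *
 * Returns: size of the formatted string.
 **/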
static ssize_t
lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}
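/**
 * lpfc_enable_fip_show - Return the current state of FIP support
 * @dev: class unused variable.
 * @attr: device attribute, not used.
 * @buf: on return contains "1" if FIP (FCoE Initialization Protocol) is
 *       supported by the adapter, otherwise "0".
 *
 * Returns: size of the formatted string.
 **/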
static ssize_t
lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	if (phba->hba_flag & HBA_FIP_SUPPORT)
		return scnprintf(buf, PAGE_SIZE, "1\n");
	else
		return scnprintf(buf, PAGE_SIZE, "0\n");
}

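/**
 * lpfc_nvme_info_show - Return NVME transport information for this host
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the NVME target or initiator state, the port
 *       identity (WWPN/WWNN/DID) and cumulative LS/FCP statistics.
 *
 * Returns: size of the formatted string.
 **/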
159static ssize_t
160lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
161 char *buf)
162{
163 struct Scsi_Host *shost = class_to_shost(dev);
164 struct lpfc_vport *vport = shost_priv(shost);
165 struct lpfc_hba *phba = vport->phba;
166 struct lpfc_nvmet_tgtport *tgtp;
167 struct nvme_fc_local_port *localport;
168 struct lpfc_nvme_lport *lport;
169 struct lpfc_nvme_rport *rport;
170 struct lpfc_nodelist *ndlp;
171 struct nvme_fc_remote_port *nrport;
172 struct lpfc_fc4_ctrl_stat *cstat;
173 uint64_t data1, data2, data3;
174 uint64_t totin, totout, tot;
175 char *statep;
176 int i;
177 int len = 0;
178 char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
179 unsigned long iflags = 0;
180
181 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
182 len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
183 return len;
184 }
185 if (phba->nvmet_support) {
186 if (!phba->targetport) {
187 len = scnprintf(buf, PAGE_SIZE,
188 "NVME Target: x%llx is not allocated\n",
189 wwn_to_u64(vport->fc_portname.u.wwn));
190 return len;
191 }
192
193 if (phba->targetport->port_id)
194 statep = "REGISTERED";
195 else
196 statep = "INIT";
197 scnprintf(tmp, sizeof(tmp),
198 "NVME Target Enabled State %s\n",
199 statep);
200 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
201 goto buffer_done;
202
203 scnprintf(tmp, sizeof(tmp),
204 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
205 "NVME Target: lpfc",
206 phba->brd_no,
207 wwn_to_u64(vport->fc_portname.u.wwn),
208 wwn_to_u64(vport->fc_nodename.u.wwn),
209 phba->targetport->port_id);
210 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
211 goto buffer_done;
212
213 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
214 >= PAGE_SIZE)
215 goto buffer_done;
216
217 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
218 scnprintf(tmp, sizeof(tmp),
219 "LS: Rcv %08x Drop %08x Abort %08x\n",
220 atomic_read(&tgtp->rcv_ls_req_in),
221 atomic_read(&tgtp->rcv_ls_req_drop),
222 atomic_read(&tgtp->xmt_ls_abort));
223 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
224 goto buffer_done;
225
226 if (atomic_read(&tgtp->rcv_ls_req_in) !=
227 atomic_read(&tgtp->rcv_ls_req_out)) {
228 scnprintf(tmp, sizeof(tmp),
229 "Rcv LS: in %08x != out %08x\n",
230 atomic_read(&tgtp->rcv_ls_req_in),
231 atomic_read(&tgtp->rcv_ls_req_out));
232 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
233 goto buffer_done;
234 }
235
236 scnprintf(tmp, sizeof(tmp),
237 "LS: Xmt %08x Drop %08x Cmpl %08x\n",
238 atomic_read(&tgtp->xmt_ls_rsp),
239 atomic_read(&tgtp->xmt_ls_drop),
240 atomic_read(&tgtp->xmt_ls_rsp_cmpl));
241 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
242 goto buffer_done;
243
244 scnprintf(tmp, sizeof(tmp),
245 "LS: RSP Abort %08x xb %08x Err %08x\n",
246 atomic_read(&tgtp->xmt_ls_rsp_aborted),
247 atomic_read(&tgtp->xmt_ls_rsp_xb_set),
248 atomic_read(&tgtp->xmt_ls_rsp_error));
249 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
250 goto buffer_done;
251
252 scnprintf(tmp, sizeof(tmp),
253 "FCP: Rcv %08x Defer %08x Release %08x "
254 "Drop %08x\n",
255 atomic_read(&tgtp->rcv_fcp_cmd_in),
256 atomic_read(&tgtp->rcv_fcp_cmd_defer),
257 atomic_read(&tgtp->xmt_fcp_release),
258 atomic_read(&tgtp->rcv_fcp_cmd_drop));
259 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
260 goto buffer_done;
261
262 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
263 atomic_read(&tgtp->rcv_fcp_cmd_out)) {
264 scnprintf(tmp, sizeof(tmp),
265 "Rcv FCP: in %08x != out %08x\n",
266 atomic_read(&tgtp->rcv_fcp_cmd_in),
267 atomic_read(&tgtp->rcv_fcp_cmd_out));
268 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
269 goto buffer_done;
270 }
271
272 scnprintf(tmp, sizeof(tmp),
273 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
274 "drop %08x\n",
275 atomic_read(&tgtp->xmt_fcp_read),
276 atomic_read(&tgtp->xmt_fcp_read_rsp),
277 atomic_read(&tgtp->xmt_fcp_write),
278 atomic_read(&tgtp->xmt_fcp_rsp),
279 atomic_read(&tgtp->xmt_fcp_drop));
280 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
281 goto buffer_done;
282
283 scnprintf(tmp, sizeof(tmp),
284 "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
285 atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
286 atomic_read(&tgtp->xmt_fcp_rsp_error),
287 atomic_read(&tgtp->xmt_fcp_rsp_drop));
288 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
289 goto buffer_done;
290
291 scnprintf(tmp, sizeof(tmp),
292 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
293 atomic_read(&tgtp->xmt_fcp_rsp_aborted),
294 atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
295 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
296 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
297 goto buffer_done;
298
299 scnprintf(tmp, sizeof(tmp),
300 "ABORT: Xmt %08x Cmpl %08x\n",
301 atomic_read(&tgtp->xmt_fcp_abort),
302 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
303 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
304 goto buffer_done;
305
306 scnprintf(tmp, sizeof(tmp),
307 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
308 atomic_read(&tgtp->xmt_abort_sol),
309 atomic_read(&tgtp->xmt_abort_unsol),
310 atomic_read(&tgtp->xmt_abort_rsp),
311 atomic_read(&tgtp->xmt_abort_rsp_error));
312 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
313 goto buffer_done;
314
315 scnprintf(tmp, sizeof(tmp),
316 "DELAY: ctx %08x fod %08x wqfull %08x\n",
317 atomic_read(&tgtp->defer_ctx),
318 atomic_read(&tgtp->defer_fod),
319 atomic_read(&tgtp->defer_wqfull));
320 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
321 goto buffer_done;
322
323
324 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
325 tot += atomic_read(&tgtp->xmt_fcp_release);
326 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
327
328 scnprintf(tmp, sizeof(tmp),
329 "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
330 "CTX Outstanding %08llx\n\n",
331 phba->sli4_hba.nvmet_xri_cnt,
332 phba->sli4_hba.nvmet_io_wait_cnt,
333 phba->sli4_hba.nvmet_io_wait_total,
334 tot);
335 strlcat(buf, tmp, PAGE_SIZE);
336 goto buffer_done;
337 }
338
339 localport = vport->localport;
340 if (!localport) {
341 len = scnprintf(buf, PAGE_SIZE,
342 "NVME Initiator x%llx is not allocated\n",
343 wwn_to_u64(vport->fc_portname.u.wwn));
344 return len;
345 }
346 lport = (struct lpfc_nvme_lport *)localport->private;
347 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
348 goto buffer_done;
349
350 rcu_read_lock();
351 scnprintf(tmp, sizeof(tmp),
352 "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
353 phba->brd_no,
354 phba->sli4_hba.max_cfg_param.max_xri,
355 phba->sli4_hba.io_xri_max,
356 lpfc_sli4_get_els_iocb_cnt(phba));
357 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
358 goto rcu_unlock_buf_done;
359
360
361 if (localport->port_id)
362 statep = "ONLINE";
363 else
364 statep = "UNKNOWN ";
365
366 scnprintf(tmp, sizeof(tmp),
367 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
368 "NVME LPORT lpfc",
369 phba->brd_no,
370 wwn_to_u64(vport->fc_portname.u.wwn),
371 wwn_to_u64(vport->fc_nodename.u.wwn),
372 localport->port_id, statep);
373 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
374 goto rcu_unlock_buf_done;
375
376 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
377 nrport = NULL;
378 spin_lock_irqsave(&vport->phba->hbalock, iflags);
379 rport = lpfc_ndlp_get_nrport(ndlp);
380 if (rport)
381 nrport = rport->remoteport;
382 spin_unlock_irqrestore(&vport->phba->hbalock, iflags);
383 if (!nrport)
384 continue;
385
386
387 switch (nrport->port_state) {
388 case FC_OBJSTATE_ONLINE:
389 statep = "ONLINE";
390 break;
391 case FC_OBJSTATE_UNKNOWN:
392 statep = "UNKNOWN ";
393 break;
394 default:
395 statep = "UNSUPPORTED";
396 break;
397 }
398
399
400 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
401 goto rcu_unlock_buf_done;
402 if (phba->brd_no >= 10) {
403 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
404 goto rcu_unlock_buf_done;
405 }
406
407 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
408 nrport->port_name);
409 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
410 goto rcu_unlock_buf_done;
411
412 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
413 nrport->node_name);
414 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
415 goto rcu_unlock_buf_done;
416
417 scnprintf(tmp, sizeof(tmp), "DID x%06x ",
418 nrport->port_id);
419 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
420 goto rcu_unlock_buf_done;
421
422
423 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
424 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
425 goto rcu_unlock_buf_done;
426 }
427 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
428 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
429 goto rcu_unlock_buf_done;
430 }
431 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
432 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
433 goto rcu_unlock_buf_done;
434 }
435 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
436 FC_PORT_ROLE_NVME_TARGET |
437 FC_PORT_ROLE_NVME_DISCOVERY)) {
438 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
439 nrport->port_role);
440 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
441 goto rcu_unlock_buf_done;
442 }
443
444 scnprintf(tmp, sizeof(tmp), "%s\n", statep);
445 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
446 goto rcu_unlock_buf_done;
447 }
448 rcu_read_unlock();
449
450 if (!lport)
451 goto buffer_done;
452
453 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
454 goto buffer_done;
455
456 scnprintf(tmp, sizeof(tmp),
457 "LS: Xmt %010x Cmpl %010x Abort %08x\n",
458 atomic_read(&lport->fc4NvmeLsRequests),
459 atomic_read(&lport->fc4NvmeLsCmpls),
460 atomic_read(&lport->xmt_ls_abort));
461 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
462 goto buffer_done;
463
464 scnprintf(tmp, sizeof(tmp),
465 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
466 atomic_read(&lport->xmt_ls_err),
467 atomic_read(&lport->cmpl_ls_xb),
468 atomic_read(&lport->cmpl_ls_err));
469 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
470 goto buffer_done;
471
472 totin = 0;
473 totout = 0;
474 for (i = 0; i < phba->cfg_hdw_queue; i++) {
475 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
476 tot = cstat->io_cmpls;
477 totin += tot;
478 data1 = cstat->input_requests;
479 data2 = cstat->output_requests;
480 data3 = cstat->control_requests;
481 totout += (data1 + data2 + data3);
482 }
483 scnprintf(tmp, sizeof(tmp),
484 "Total FCP Cmpl %016llx Issue %016llx "
485 "OutIO %016llx\n",
486 totin, totout, totout - totin);
487 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
488 goto buffer_done;
489
490 scnprintf(tmp, sizeof(tmp),
491 "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
492 "wqerr %08x err %08x\n",
493 atomic_read(&lport->xmt_fcp_abort),
494 atomic_read(&lport->xmt_fcp_noxri),
495 atomic_read(&lport->xmt_fcp_bad_ndlp),
496 atomic_read(&lport->xmt_fcp_qdepth),
497 atomic_read(&lport->xmt_fcp_err),
498 atomic_read(&lport->xmt_fcp_wqerr));
499 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
500 goto buffer_done;
501
502 scnprintf(tmp, sizeof(tmp),
503 "FCP CMPL: xb %08x Err %08x\n",
504 atomic_read(&lport->cmpl_fcp_xb),
505 atomic_read(&lport->cmpl_fcp_err));
506 strlcat(buf, tmp, PAGE_SIZE);
507
508
509 goto buffer_done;
510
511 rcu_unlock_buf_done:
512 rcu_read_unlock();
513
514 buffer_done:
515 len = strnlen(buf, PAGE_SIZE);
516
517 if (unlikely(len >= (PAGE_SIZE - 1))) {
518 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
519 "6314 Catching potential buffer "
520 "overflow > PAGE_SIZE = %lu bytes\n",
521 PAGE_SIZE);
522 strlcpy(buf + PAGE_SIZE - 1 -
523 strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
524 LPFC_NVME_INFO_MORE_STR,
525 strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
526 + 1);
527 }
528
529 return len;
530}
531
532static ssize_t
533lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
534 char *buf)
535{
536 struct Scsi_Host *shost = class_to_shost(dev);
537 struct lpfc_vport *vport = shost_priv(shost);
538 struct lpfc_hba *phba = vport->phba;
539 int len;
540 struct lpfc_fc4_ctrl_stat *cstat;
541 u64 data1, data2, data3;
542 u64 tot, totin, totout;
543 int i;
544 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
545
546 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
547 (phba->sli_rev != LPFC_SLI_REV4))
548 return 0;
549
550 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
551
552 totin = 0;
553 totout = 0;
554 for (i = 0; i < phba->cfg_hdw_queue; i++) {
555 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
556 tot = cstat->io_cmpls;
557 totin += tot;
558 data1 = cstat->input_requests;
559 data2 = cstat->output_requests;
560 data3 = cstat->control_requests;
561 totout += (data1 + data2 + data3);
562
563 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
564 "IO %016llx ", i, data1, data2, data3);
565 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
566 goto buffer_done;
567
568 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
569 tot, ((data1 + data2 + data3) - tot));
570 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
571 goto buffer_done;
572 }
573 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
574 "OutIO %016llx\n", totin, totout, totout - totin);
575 strlcat(buf, tmp, PAGE_SIZE);
576
577buffer_done:
578 len = strnlen(buf, PAGE_SIZE);
579
580 return len;
581}
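/**
 * lpfc_bg_info_show - Return the BlockGuard (T10-DIF) support state
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains "BlockGuard Enabled", "BlockGuard Not
 *       Supported" or "BlockGuard Disabled".
 *
 * Returns: size of the formatted string.
 **/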
static ssize_t
lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	if (phba->cfg_enable_bg) {
		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
			return scnprintf(buf, PAGE_SIZE,
					"BlockGuard Enabled\n");
		else
			return scnprintf(buf, PAGE_SIZE,
					"BlockGuard Not Supported\n");
	} else
		return scnprintf(buf, PAGE_SIZE,
					"BlockGuard Disabled\n");
}
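/**
 * lpfc_bg_guard_err_show - Return the BlockGuard guard tag error count
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the guard tag error count in decimal.
 *
 * Returns: size of the formatted string.
 **/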
static ssize_t
lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
			(unsigned long long)phba->bg_guard_err_cnt);
}
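/**
 * lpfc_bg_apptag_err_show - Return the BlockGuard application tag error count
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the application tag error count in decimal.
 *
 * Returns: size of the formatted string.
 **/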
static ssize_t
lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
			(unsigned long long)phba->bg_apptag_err_cnt);
}
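/**
 * lpfc_bg_reftag_err_show - Return the BlockGuard reference tag error count
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the reference tag error count in decimal.
 *
 * Returns: size of the formatted string.
 **/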
static ssize_t
lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
			(unsigned long long)phba->bg_reftag_err_cnt);
}
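/**
 * lpfc_info_show - Return some pci info about the host in ascii
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the formatted text from lpfc_info().
 *
 * Returns: size of the formatted string.
 **/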
static ssize_t
lpfc_info_show(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
}
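/**
 * lpfc_serialnum_show - Return the hba serial number in ascii
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the adapter serial number.
 *
 * Returns: size of the formatted string.
 **/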
static ssize_t
lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
}
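/**
 * lpfc_temp_sensor_show - Return the temperature sensor support flag
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains "1" if a temperature sensor is supported by
 *       the adapter, otherwise "0".
 *
 * Returns: size of the formatted string.
 **/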
static ssize_t
lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
}
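/**
 * lpfc_modeldesc_show - Return the model description of the hba
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the vpd model description.
 *
 * Returns: size of the formatted string.
 **/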
static ssize_t
lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
}
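/**
 * lpfc_modelname_show - Return the model name of the hba
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the vpd model name.
 *
 * Returns: size of the formatted string.
 **/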
static ssize_t
lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
}
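/**
 * lpfc_programtype_show - Return the program type of the hba
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the vpd program type.
 *
 * Returns: size of the formatted string.
 **/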
static ssize_t
lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
}
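/**
 * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the Menlo Maintenance sli flag.
 *
 * Returns: size of the formatted string.
 **/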
static ssize_t
lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%d\n",
		(phba->sli.sli_flag & LPFC_MENLO_MAINT));
}
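/**
 * lpfc_vportnum_show - Return the port number in ascii of the hba
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the adapter port identifier string.
 *
 * Returns: size of the formatted string.
 **/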
static ssize_t
lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
}
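/**
 * lpfc_fwrev_show - Return the firmware rev running in the hba
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the decoded firmware revision and the SLI
 *       interface level.
 *
 * Returns: size of the formatted string.
 **/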
static ssize_t
lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	uint32_t if_type;
	uint8_t sli_family;
	char fwrev[FW_REV_STR_SIZE];
	int len;

	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if_type = phba->sli4_hba.pc_sli4_params.if_type;
	sli_family = phba->sli4_hba.pc_sli4_params.sli_family;

	if (phba->sli_rev < LPFC_SLI_REV4)
		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
				fwrev, phba->sli_rev);
	else
		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
				fwrev, phba->sli_rev, if_type, sli_family);

	return len;
}
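/**
 * lpfc_hdw_show - Return the jedec information about the hba
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the BIU revision converted to an ascii string.
 *
 * Returns: size of the formatted string.
 **/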
static ssize_t
lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	char hdw[9];
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	lpfc_vpd_t *vp = &phba->vpd;

	lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
	return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
}
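/**
 * lpfc_option_rom_version_show - Return the adapter option ROM version
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the option ROM version string (or, for SLI4
 *       adapters, the decoded firmware revision).
 *
 * Returns: size of the formatted string.
 **/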
static ssize_t
lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	char fwrev[FW_REV_STR_SIZE];

	if (phba->sli_rev < LPFC_SLI_REV4)
		return scnprintf(buf, PAGE_SIZE, "%s\n",
				phba->OptionROMVersion);

	lpfc_decode_firmware_rev(phba, fwrev, 1);
	return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
}
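/**
 * lpfc_link_state_show - Return the link state of the port
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains text describing the state of the link and,
 *       for trunked if_type 6 ports, the state of each trunk link.
 *
 * Returns: size of the formatted string.
 **/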
static ssize_t
lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len = 0;

	switch (phba->link_state) {
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		if (phba->hba_flag & LINK_DISABLED)
			len += scnprintf(buf + len, PAGE_SIZE - len,
				"Link Down - User disabled\n");
		else
			len += scnprintf(buf + len, PAGE_SIZE - len,
				"Link Down\n");
		break;
	case LPFC_LINK_UP:
	case LPFC_CLEAR_LA:
	case LPFC_HBA_READY:
		len += scnprintf(buf + len, PAGE_SIZE - len, "Link Up - ");

		switch (vport->port_state) {
		case LPFC_LOCAL_CFG_LINK:
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"Configuring Link\n");
			break;
		case LPFC_FDISC:
		case LPFC_FLOGI:
		case LPFC_FABRIC_CFG_LINK:
		case LPFC_NS_REG:
		case LPFC_NS_QRY:
		case LPFC_BUILD_DISC_LIST:
		case LPFC_DISC_AUTH:
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"Discovery\n");
			break;
		case LPFC_VPORT_READY:
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"Ready\n");
			break;
		case LPFC_VPORT_FAILED:
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"Failed\n");
			break;
		case LPFC_VPORT_UNKNOWN:
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"Unknown\n");
			break;
		}
		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
			len += scnprintf(buf + len, PAGE_SIZE - len,
					" Menlo Maint Mode\n");
		else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			if (vport->fc_flag & FC_PUBLIC_LOOP)
				len += scnprintf(buf + len, PAGE_SIZE - len,
						" Public Loop\n");
			else
				len += scnprintf(buf + len, PAGE_SIZE - len,
						" Private Loop\n");
		} else {
			if (vport->fc_flag & FC_FABRIC)
				len += scnprintf(buf + len, PAGE_SIZE - len,
						" Fabric\n");
			else
				len += scnprintf(buf + len, PAGE_SIZE - len,
						" Point-2-Point\n");
		}
	}

	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    ((bf_get(lpfc_sli_intf_if_type,
	      &phba->sli4_hba.sli_intf) ==
	      LPFC_SLI_INTF_IF_TYPE_6))) {
		struct lpfc_trunk_link link = phba->trunk_link;

		if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
			len += scnprintf(buf + len, PAGE_SIZE - len,
				"Trunk port 0: Link %s %s\n",
				(link.link0.state == LPFC_LINK_UP) ?
				 "Up" : "Down. ",
				trunk_errmsg[link.link0.fault]);

		if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
			len += scnprintf(buf + len, PAGE_SIZE - len,
				"Trunk port 1: Link %s %s\n",
				(link.link1.state == LPFC_LINK_UP) ?
				 "Up" : "Down. ",
				trunk_errmsg[link.link1.fault]);

		if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
			len += scnprintf(buf + len, PAGE_SIZE - len,
				"Trunk port 2: Link %s %s\n",
				(link.link2.state == LPFC_LINK_UP) ?
				 "Up" : "Down. ",
				trunk_errmsg[link.link2.fault]);

		if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
			len += scnprintf(buf + len, PAGE_SIZE - len,
				"Trunk port 3: Link %s %s\n",
				(link.link3.state == LPFC_LINK_UP) ?
				 "Up" : "Down. ",
				trunk_errmsg[link.link3.fault]);
	}

	return len;
}
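/**
 * lpfc_sli4_protocol_show - Return the fc4 protocol of the link
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains "fc", "fcoe" or "unknown" depending on the
 *       link type reported by the SLI4 port.
 *
 * Returns: size of the formatted string.
 **/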
static ssize_t
lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return scnprintf(buf, PAGE_SIZE, "fc\n");

	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
			return scnprintf(buf, PAGE_SIZE, "fcoe\n");
		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
			return scnprintf(buf, PAGE_SIZE, "fc\n");
	}
	return scnprintf(buf, PAGE_SIZE, "unknown\n");
}
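/**
 * lpfc_oas_supported_show - Return whether Optimized Access Storage (OAS)
 *	is supported by this adapter
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains "1" if OAS is supported, otherwise "0".
 *
 * Returns: size of the formatted string.
 **/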
static ssize_t
lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			phba->sli4_hba.pc_sli4_params.oas_supported);
}
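/**
 * lpfc_link_state_store - Bring the link up or down via sysfs
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: "up" to initialize the link when it is down, "down" to bring an
 *       active link down.
 * @count: not used.
 *
 * Returns: number of bytes consumed on success, otherwise a negative
 * error value.
 **/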
static ssize_t
lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	int status = -EINVAL;

	if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
	    (phba->link_state == LPFC_LINK_DOWN))
		status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
	else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
		 (phba->link_state >= LPFC_LINK_UP))
		status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);

	if (status == 0)
		return strlen(buf);
	else
		return status;
}
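/**
 * lpfc_num_discovered_ports_show - Return the count of discovered ports
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the sum of the mapped and unmapped fc node
 *       counts for this vport.
 *
 * Returns: size of the formatted string.
 **/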
static ssize_t
lpfc_num_discovered_ports_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			vport->fc_map_cnt + vport->fc_unmap_cnt);
}
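/**
 * lpfc_issue_lip - Issue a Link Initialization (LIP) on the adapter port
 * @shost: Scsi_Host pointer.
 *
 * Description:
 * Brings the link down with a DOWN_LINK mailbox command and then
 * re-initializes it, the equivalent of issuing a LIP on the wire.
 *
 * Returns: 0 on success, -EPERM, -ENOMEM or -EIO on failure.
 **/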
static int
lpfc_issue_lip(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *pmboxq;
	int mbxstatus = MBXERR_ERROR;

	/*
	 * If the link is offline, disabled or blocked for management I/O,
	 * it does not make sense to allow issue_lip.
	 */
	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (phba->hba_flag & LINK_DISABLED) ||
	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
		return -EPERM;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmboxq)
		return -ENOMEM;

	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);

	if ((mbxstatus == MBX_SUCCESS) &&
	    (pmboxq->u.mb.mbxStatus == 0 ||
	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
			       phba->cfg_link_speed);
		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     phba->fc_ratov * 2);
		if ((mbxstatus == MBX_SUCCESS) &&
		    (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"2859 SLI authentication is required "
					"for INIT_LINK but has not been done "
					"yet\n");
	}

	lpfc_set_loopback_flag(phba);
	if (mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	if (mbxstatus == MBXERR_ERROR)
		return -EIO;

	return 0;
}
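/**
 * lpfc_emptyq_wait - Wait for a list to drain
 * @phba: pointer to the HBA structure.
 * @q: list head to poll.
 * @lock: spinlock protecting @q.
 *
 * Polls @q every 20ms, for roughly 5 seconds, waiting for it to empty.
 *
 * Returns: 1 if the queue emptied, 0 if I/O was still outstanding when
 * the wait timed out.
 **/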
int
lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
{
	int cnt = 0;

	spin_lock_irq(lock);
	while (!list_empty(q)) {
		spin_unlock_irq(lock);
		msleep(20);
		if (cnt++ > 250) {  /* 5 secs */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0466 Outstanding IO when "
					"bringing Adapter offline\n");
			return 0;
		}
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
	return 1;
}
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211static int
1212lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
1213{
1214 struct completion online_compl;
1215 struct lpfc_queue *qp = NULL;
1216 struct lpfc_sli_ring *pring;
1217 struct lpfc_sli *psli;
1218 int status = 0;
1219 int i;
1220 int rc;
1221
1222 init_completion(&online_compl);
1223 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1224 LPFC_EVT_OFFLINE_PREP);
1225 if (rc == 0)
1226 return -ENOMEM;
1227
1228 wait_for_completion(&online_compl);
1229
1230 if (status != 0)
1231 return -EIO;
1232
1233 psli = &phba->sli;
1234
1235
1236
1237
1238
1239
1240 spin_lock_irq(&phba->hbalock);
1241 if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
1242 psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
1243 } else {
1244 spin_unlock_irq(&phba->hbalock);
1245 goto skip_wait;
1246 }
1247 spin_unlock_irq(&phba->hbalock);
1248
1249
1250
1251
1252 if (phba->sli_rev != LPFC_SLI_REV4) {
1253 for (i = 0; i < psli->num_rings; i++) {
1254 pring = &psli->sli3_ring[i];
1255 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1256 &phba->hbalock))
1257 goto out;
1258 }
1259 } else {
1260 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1261 pring = qp->pring;
1262 if (!pring)
1263 continue;
1264 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1265 &pring->ring_lock))
1266 goto out;
1267 }
1268 }
1269out:
1270 spin_lock_irq(&phba->hbalock);
1271 psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
1272 spin_unlock_irq(&phba->hbalock);
1273
1274skip_wait:
1275 init_completion(&online_compl);
1276 rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
1277 if (rc == 0)
1278 return -ENOMEM;
1279
1280 wait_for_completion(&online_compl);
1281
1282 if (status != 0)
1283 return -EIO;
1284
1285 return 0;
1286}
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308static int
1309lpfc_reset_pci_bus(struct lpfc_hba *phba)
1310{
1311 struct pci_dev *pdev = phba->pcidev;
1312 struct Scsi_Host *shost = NULL;
1313 struct lpfc_hba *phba_other = NULL;
1314 struct pci_dev *ptr = NULL;
1315 int res;
1316
1317 if (phba->cfg_enable_hba_reset != 2)
1318 return -ENOTSUPP;
1319
1320 if (!pdev) {
1321 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1322 return -ENODEV;
1323 }
1324
1325 res = lpfc_check_pci_resettable(phba);
1326 if (res)
1327 return res;
1328
1329
1330 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1331
1332 shost = pci_get_drvdata(ptr);
1333 if (shost) {
1334 phba_other =
1335 ((struct lpfc_vport *)shost->hostdata)->phba;
1336 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1337 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1338 "8349 WWPN = 0x%02x%02x%02x%02x"
1339 "%02x%02x%02x%02x is not "
1340 "offline!\n",
1341 phba_other->wwpn[0],
1342 phba_other->wwpn[1],
1343 phba_other->wwpn[2],
1344 phba_other->wwpn[3],
1345 phba_other->wwpn[4],
1346 phba_other->wwpn[5],
1347 phba_other->wwpn[6],
1348 phba_other->wwpn[7]);
1349 return -EBUSY;
1350 }
1351 }
1352 }
1353
1354
1355 res = pci_reset_bus(pdev);
1356 if (res) {
1357 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1358 "8350 PCI reset bus failed: %d\n", res);
1359 }
1360
1361 return res;
1362}
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381int
1382lpfc_selective_reset(struct lpfc_hba *phba)
1383{
1384 struct completion online_compl;
1385 int status = 0;
1386 int rc;
1387
1388 if (!phba->cfg_enable_hba_reset)
1389 return -EACCES;
1390
1391 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
1392 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1393
1394 if (status != 0)
1395 return status;
1396 }
1397
1398 init_completion(&online_compl);
1399 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1400 LPFC_EVT_ONLINE);
1401 if (rc == 0)
1402 return -ENOMEM;
1403
1404 wait_for_completion(&online_compl);
1405
1406 if (status != 0)
1407 return -EIO;
1408
1409 return 0;
1410}
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433static ssize_t
1434lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
1435 const char *buf, size_t count)
1436{
1437 struct Scsi_Host *shost = class_to_shost(dev);
1438 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1439 struct lpfc_hba *phba = vport->phba;
1440 int status = -EINVAL;
1441
1442 if (!phba->cfg_enable_hba_reset)
1443 return -EACCES;
1444
1445 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
1446 status = phba->lpfc_selective_reset(phba);
1447
1448 if (status == 0)
1449 return strlen(buf);
1450 else
1451 return status;
1452}
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470int
1471lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
1472{
1473 struct lpfc_register portstat_reg = {0};
1474 int i;
1475
1476 msleep(100);
1477 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1478 &portstat_reg.word0);
1479
1480
1481 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
1482 !bf_get(lpfc_sliport_status_err, &portstat_reg))
1483 return -EPERM;
1484
1485
1486 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
1487 msleep(10);
1488 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1489 &portstat_reg.word0);
1490 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
1491 continue;
1492 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
1493 continue;
1494 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
1495 continue;
1496 break;
1497 }
1498
1499 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
1500 return 0;
1501 else
1502 return -EIO;
1503}
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516static ssize_t
1517lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1518{
1519 struct completion online_compl;
1520 struct pci_dev *pdev = phba->pcidev;
1521 uint32_t before_fc_flag;
1522 uint32_t sriov_nr_virtfn;
1523 uint32_t reg_val;
1524 int status = 0, rc = 0;
1525 int job_posted = 1, sriov_err;
1526
1527 if (!phba->cfg_enable_hba_reset)
1528 return -EACCES;
1529
1530 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1531 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1532 LPFC_SLI_INTF_IF_TYPE_2))
1533 return -EPERM;
1534
1535
1536 before_fc_flag = phba->pport->fc_flag;
1537 sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
1538
1539
1540 if (phba->cfg_sriov_nr_virtfn) {
1541 pci_disable_sriov(pdev);
1542 phba->cfg_sriov_nr_virtfn = 0;
1543 }
1544
1545 if (opcode == LPFC_FW_DUMP)
1546 phba->hba_flag |= HBA_FW_DUMP_OP;
1547
1548 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1549
1550 if (status != 0) {
1551 phba->hba_flag &= ~HBA_FW_DUMP_OP;
1552 return status;
1553 }
1554
1555
1556 msleep(100);
1557
1558 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
1559 LPFC_CTL_PDEV_CTL_OFFSET);
1560
1561 if (opcode == LPFC_FW_DUMP)
1562 reg_val |= LPFC_FW_DUMP_REQUEST;
1563 else if (opcode == LPFC_FW_RESET)
1564 reg_val |= LPFC_CTL_PDEV_CTL_FRST;
1565 else if (opcode == LPFC_DV_RESET)
1566 reg_val |= LPFC_CTL_PDEV_CTL_DRST;
1567
1568 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
1569 LPFC_CTL_PDEV_CTL_OFFSET);
1570
1571 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
1572
1573
1574 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1575
1576 if (rc == -EPERM) {
1577
1578 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1579 "3150 No privilege to perform the requested "
1580 "access: x%x\n", reg_val);
1581 } else if (rc == -EIO) {
1582
1583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1584 "3153 Fail to perform the requested "
1585 "access: x%x\n", reg_val);
1586 return rc;
1587 }
1588
1589
1590 if (before_fc_flag & FC_OFFLINE_MODE)
1591 goto out;
1592
1593 init_completion(&online_compl);
1594 job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
1595 LPFC_EVT_ONLINE);
1596 if (!job_posted)
1597 goto out;
1598
1599 wait_for_completion(&online_compl);
1600
1601out:
1602
1603 if (sriov_nr_virtfn) {
1604 sriov_err =
1605 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
1606 if (!sriov_err)
1607 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
1608 }
1609
1610
1611 if (!rc) {
1612 if (!job_posted)
1613 rc = -ENOMEM;
1614 else if (status)
1615 rc = -EIO;
1616 }
1617 return rc;
1618}
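/**
 * lpfc_nport_evt_cnt_show - Return the number of nport events
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the ascii number of nport events.
 *
 * Returns: size of the formatted string.
 **/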
static ssize_t
lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
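/**
 * lpfc_set_trunking - Enable or disable FC trunking (link aggregation)
 * @phba: pointer to the HBA structure.
 * @buff_out: "enable <0|2|4>" to request the given trunk width, or
 *            "disable" to turn trunking off.
 *
 * Issues the FC_SET_TRUNK_MODE mailbox command to the SLI4 port.
 *
 * Returns: 0 once the mailbox command has been issued (mailbox failures
 * are only logged), -EINVAL for an unrecognized request, -ENOMEM if no
 * mailbox memory is available.
 **/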
static int
lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
{
	LPFC_MBOXQ_t *mbox = NULL;
	unsigned long val = 0;
	char *pval = NULL;
	int rc = 0;

	if (!strncmp("enable", buff_out,
		     strlen("enable"))) {
		pval = buff_out + strlen("enable") + 1;
		rc = kstrtoul(pval, 0, &val);
		if (rc)
			return rc;
	} else if (!strncmp("disable", buff_out,
			    strlen("disable"))) {
		val = 0;
	} else {
		return -EINVAL;
	}

	switch (val) {
	case 0:
		val = 0x0;
		break;
	case 2:
		val = 0x1;
		break;
	case 4:
		val = 0x2;
		break;
	default:
		return -EINVAL;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"0070 Set trunk mode with val %ld\n", val);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
			 12, LPFC_SLI4_MBX_EMBED);

	bf_set(lpfc_mbx_set_trunk_mode,
	       &mbox->u.mqe.un.set_trunk_mode,
	       val);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"0071 Set trunk mode failed with status: %d\n",
				rc);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);

	return 0;
}
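/**
 * lpfc_board_mode_show - Return the current state of the board
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains "error", "warm start", "offline" or "online".
 *
 * Returns: size of the formatted string.
 **/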
static ssize_t
lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	char *state;

	if (phba->link_state == LPFC_HBA_ERROR)
		state = "error";
	else if (phba->link_state == LPFC_WARM_START)
		state = "warm start";
	else if (phba->link_state == LPFC_INIT_START)
		state = "offline";
	else
		state = "online";

	return scnprintf(buf, PAGE_SIZE, "%s\n", state);
}
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741static ssize_t
1742lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1743 const char *buf, size_t count)
1744{
1745 struct Scsi_Host *shost = class_to_shost(dev);
1746 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1747 struct lpfc_hba *phba = vport->phba;
1748 struct completion online_compl;
1749 char *board_mode_str = NULL;
1750 int status = 0;
1751 int rc;
1752
1753 if (!phba->cfg_enable_hba_reset) {
1754 status = -EACCES;
1755 goto board_mode_out;
1756 }
1757
1758 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1759 "3050 lpfc_board_mode set to %s\n", buf);
1760
1761 init_completion(&online_compl);
1762
1763 if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
1764 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1765 LPFC_EVT_ONLINE);
1766 if (rc == 0) {
1767 status = -ENOMEM;
1768 goto board_mode_out;
1769 }
1770 wait_for_completion(&online_compl);
1771 if (status)
1772 status = -EIO;
1773 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
1774 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1775 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
1776 if (phba->sli_rev == LPFC_SLI_REV4)
1777 status = -EINVAL;
1778 else
1779 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
1780 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
1781 if (phba->sli_rev == LPFC_SLI_REV4)
1782 status = -EINVAL;
1783 else
1784 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
1785 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
1786 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
1787 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
1788 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1789 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1790 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1791 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1792 == 0)
1793 status = lpfc_reset_pci_bus(phba);
1794 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1795 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1796 else
1797 status = -EINVAL;
1798
1799board_mode_out:
1800 if (!status)
1801 return strlen(buf);
1802 else {
1803 board_mode_str = strchr(buf, '\n');
1804 if (board_mode_str)
1805 *board_mode_str = '\0';
1806 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1807 "3097 Failed \"%s\", status(%d), "
1808 "fc_flag(x%x)\n",
1809 buf, status, phba->pport->fc_flag);
1810 return status;
1811 }
1812}
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832static int
1833lpfc_get_hba_info(struct lpfc_hba *phba,
1834 uint32_t *mxri, uint32_t *axri,
1835 uint32_t *mrpi, uint32_t *arpi,
1836 uint32_t *mvpi, uint32_t *avpi)
1837{
1838 struct lpfc_mbx_read_config *rd_config;
1839 LPFC_MBOXQ_t *pmboxq;
1840 MAILBOX_t *pmb;
1841 int rc = 0;
1842 uint32_t max_vpi;
1843
1844
1845
1846
1847
1848 if (phba->link_state < LPFC_LINK_DOWN ||
1849 !phba->mbox_mem_pool ||
1850 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
1851 return 0;
1852
1853 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1854 return 0;
1855
1856 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1857 if (!pmboxq)
1858 return 0;
1859 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1860
1861 pmb = &pmboxq->u.mb;
1862 pmb->mbxCommand = MBX_READ_CONFIG;
1863 pmb->mbxOwner = OWN_HOST;
1864 pmboxq->ctx_buf = NULL;
1865
1866 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1867 rc = MBX_NOT_FINISHED;
1868 else
1869 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1870
1871 if (rc != MBX_SUCCESS) {
1872 if (rc != MBX_TIMEOUT)
1873 mempool_free(pmboxq, phba->mbox_mem_pool);
1874 return 0;
1875 }
1876
1877 if (phba->sli_rev == LPFC_SLI_REV4) {
1878 rd_config = &pmboxq->u.mqe.un.rd_config;
1879 if (mrpi)
1880 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
1881 if (arpi)
1882 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
1883 phba->sli4_hba.max_cfg_param.rpi_used;
1884 if (mxri)
1885 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
1886 if (axri)
1887 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
1888 phba->sli4_hba.max_cfg_param.xri_used;
1889
1890
1891
1892
1893 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
1894 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
1895
1896
1897 if (max_vpi > LPFC_MAX_VPI)
1898 max_vpi = LPFC_MAX_VPI;
1899 if (mvpi)
1900 *mvpi = max_vpi;
1901 if (avpi)
1902 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
1903 } else {
1904 if (mrpi)
1905 *mrpi = pmb->un.varRdConfig.max_rpi;
1906 if (arpi)
1907 *arpi = pmb->un.varRdConfig.avail_rpi;
1908 if (mxri)
1909 *mxri = pmb->un.varRdConfig.max_xri;
1910 if (axri)
1911 *axri = pmb->un.varRdConfig.avail_xri;
1912 if (mvpi)
1913 *mvpi = pmb->un.varRdConfig.max_vpi;
1914 if (avpi) {
1915
1916 if (phba->link_state == LPFC_HBA_READY)
1917 *avpi = pmb->un.varRdConfig.avail_vpi;
1918 else
1919 *avpi = pmb->un.varRdConfig.max_vpi;
1920 }
1921 }
1922
1923 mempool_free(pmboxq, phba->mbox_mem_pool);
1924 return 1;
1925}
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941static ssize_t
1942lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
1943 char *buf)
1944{
1945 struct Scsi_Host *shost = class_to_shost(dev);
1946 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1947 struct lpfc_hba *phba = vport->phba;
1948 uint32_t cnt;
1949
1950 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
1951 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
1952 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1953}
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969static ssize_t
1970lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
1971 char *buf)
1972{
1973 struct Scsi_Host *shost = class_to_shost(dev);
1974 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1975 struct lpfc_hba *phba = vport->phba;
1976 uint32_t cnt, acnt;
1977
1978 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
1979 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
1980 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1981}
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997static ssize_t
1998lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
1999 char *buf)
2000{
2001 struct Scsi_Host *shost = class_to_shost(dev);
2002 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2003 struct lpfc_hba *phba = vport->phba;
2004 uint32_t cnt;
2005
2006 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
2007 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2008 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2009}
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025static ssize_t
2026lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
2027 char *buf)
2028{
2029 struct Scsi_Host *shost = class_to_shost(dev);
2030 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2031 struct lpfc_hba *phba = vport->phba;
2032 uint32_t cnt, acnt;
2033
2034 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
2035 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2036 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2037}
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053static ssize_t
2054lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
2055 char *buf)
2056{
2057 struct Scsi_Host *shost = class_to_shost(dev);
2058 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2059 struct lpfc_hba *phba = vport->phba;
2060 uint32_t cnt;
2061
2062 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
2063 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2064 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2065}
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081static ssize_t
2082lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
2083 char *buf)
2084{
2085 struct Scsi_Host *shost = class_to_shost(dev);
2086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2087 struct lpfc_hba *phba = vport->phba;
2088 uint32_t cnt, acnt;
2089
2090 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
2091 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2092 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2093}
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108static ssize_t
2109lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
2110 char *buf)
2111{
2112 struct Scsi_Host *shost = class_to_shost(dev);
2113 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2114 struct lpfc_hba *phba = vport->phba;
2115
2116 if (!(phba->max_vpi))
2117 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
2118 if (vport->port_type == LPFC_PHYSICAL_PORT)
2119 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
2120 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
2121}
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134static ssize_t
2135lpfc_poll_show(struct device *dev, struct device_attribute *attr,
2136 char *buf)
2137{
2138 struct Scsi_Host *shost = class_to_shost(dev);
2139 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2140 struct lpfc_hba *phba = vport->phba;
2141
2142 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
2143}
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159static ssize_t
2160lpfc_poll_store(struct device *dev, struct device_attribute *attr,
2161 const char *buf, size_t count)
2162{
2163 struct Scsi_Host *shost = class_to_shost(dev);
2164 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2165 struct lpfc_hba *phba = vport->phba;
2166 uint32_t creg_val;
2167 uint32_t old_val;
2168 int val=0;
2169
2170 if (!isdigit(buf[0]))
2171 return -EINVAL;
2172
2173 if (sscanf(buf, "%i", &val) != 1)
2174 return -EINVAL;
2175
2176 if ((val & 0x3) != val)
2177 return -EINVAL;
2178
2179 if (phba->sli_rev == LPFC_SLI_REV4)
2180 val = 0;
2181
2182 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2183 "3051 lpfc_poll changed from %d to %d\n",
2184 phba->cfg_poll, val);
2185
2186 spin_lock_irq(&phba->hbalock);
2187
2188 old_val = phba->cfg_poll;
2189
2190 if (val & ENABLE_FCP_RING_POLLING) {
2191 if ((val & DISABLE_FCP_RING_INT) &&
2192 !(old_val & DISABLE_FCP_RING_INT)) {
2193 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2194 spin_unlock_irq(&phba->hbalock);
2195 return -EINVAL;
2196 }
2197 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
2198 writel(creg_val, phba->HCregaddr);
2199 readl(phba->HCregaddr);
2200
2201 lpfc_poll_start_timer(phba);
2202 }
2203 } else if (val != 0x0) {
2204 spin_unlock_irq(&phba->hbalock);
2205 return -EINVAL;
2206 }
2207
2208 if (!(val & DISABLE_FCP_RING_INT) &&
2209 (old_val & DISABLE_FCP_RING_INT))
2210 {
2211 spin_unlock_irq(&phba->hbalock);
2212 del_timer(&phba->fcp_poll_timer);
2213 spin_lock_irq(&phba->hbalock);
2214 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2215 spin_unlock_irq(&phba->hbalock);
2216 return -EINVAL;
2217 }
2218 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2219 writel(creg_val, phba->HCregaddr);
2220 readl(phba->HCregaddr);
2221 }
2222
2223 phba->cfg_poll = val;
2224
2225 spin_unlock_irq(&phba->hbalock);
2226
2227 return strlen(buf);
2228}
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238static ssize_t
2239lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
2240 char *buf)
2241{
2242 struct Scsi_Host *shost = class_to_shost(dev);
2243 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2244 struct lpfc_hba *phba = vport->phba;
2245
2246 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
2247}
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257static ssize_t
2258lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
2259 char *buf)
2260{
2261 struct Scsi_Host *shost = class_to_shost(dev);
2262 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2263 struct lpfc_hba *phba = vport->phba;
2264
2265 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
2266}
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276static ssize_t
2277lpfc_dss_show(struct device *dev, struct device_attribute *attr,
2278 char *buf)
2279{
2280 struct Scsi_Host *shost = class_to_shost(dev);
2281 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2282 struct lpfc_hba *phba = vport->phba;
2283
2284 return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
2285 (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
2286 (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
2287 "" : "Not ");
2288}
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302static ssize_t
2303lpfc_sriov_hw_max_virtfn_show(struct device *dev,
2304 struct device_attribute *attr,
2305 char *buf)
2306{
2307 struct Scsi_Host *shost = class_to_shost(dev);
2308 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2309 struct lpfc_hba *phba = vport->phba;
2310 uint16_t max_nr_virtfn;
2311
2312 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
2313 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
2314}
2315
2316static inline bool lpfc_rangecheck(uint val, uint min, uint max)
2317{
2318 return val >= min && val <= max;
2319}
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335static ssize_t
2336lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
2337{
2338 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
2339 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2340 "3068 %s_enable_bbcr changed from %d to %d\n",
2341 LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
2342 phba->cfg_enable_bbcr = val;
2343 return 0;
2344 }
2345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2346 "0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
2347 LPFC_DRIVER_NAME, val);
2348 return -EINVAL;
2349}
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365#define lpfc_param_show(attr) \
2366static ssize_t \
2367lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2368 char *buf) \
2369{ \
2370 struct Scsi_Host *shost = class_to_shost(dev);\
2371 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2372 struct lpfc_hba *phba = vport->phba;\
2373 return scnprintf(buf, PAGE_SIZE, "%d\n",\
2374 phba->cfg_##attr);\
2375}
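/*
 * lpfc_param_hex_show: Macro that generates a sysfs "show" routine which
 * prints the matching phba->cfg_<attr> field as a hexadecimal value.
 */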
#define lpfc_param_hex_show(attr)	\
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
		   char *buf) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
	struct lpfc_hba *phba = vport->phba;\
	return scnprintf(buf, PAGE_SIZE, "%#x\n",\
			phba->cfg_##attr);\
}
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424#define lpfc_param_init(attr, default, minval, maxval) \
2425static int \
2426lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
2427{ \
2428 if (lpfc_rangecheck(val, minval, maxval)) {\
2429 phba->cfg_##attr = val;\
2430 return 0;\
2431 }\
2432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2433 "0449 lpfc_"#attr" attribute cannot be set to %d, "\
2434 "allowed range is ["#minval", "#maxval"]\n", val); \
2435 phba->cfg_##attr = default;\
2436 return -EINVAL;\
2437}
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
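/*
 * lpfc_param_set - Set a cfg attribute value
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_set.
 *
 * lpfc_##attr##_set: Sets an attribute value.
 * @phba: pointer to the adapter structure.
 * @val: integer attribute value.
 *
 * Validates the min and max values then sets the adapter config field
 * if in the valid range, logging the old and new values. Logs an error
 * message and does not set the parameter if val is invalid.
 *
 * Returns:
 * zero on success
 * -EINVAL if val is invalid
 */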
2459#define lpfc_param_set(attr, default, minval, maxval) \
2460static int \
2461lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
2462{ \
2463 if (lpfc_rangecheck(val, minval, maxval)) {\
2464 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2465 "3052 lpfc_" #attr " changed from %d to %d\n", \
2466 phba->cfg_##attr, val); \
2467 phba->cfg_##attr = val;\
2468 return 0;\
2469 }\
2470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2471 "0450 lpfc_"#attr" attribute cannot be set to %d, "\
2472 "allowed range is ["#minval", "#maxval"]\n", val); \
2473 return -EINVAL;\
2474}
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
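/*
 * lpfc_param_store - Set a cfg attribute value from sysfs
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_store.
 *
 * lpfc_##attr##_store: Set an attribute value.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: contains the attribute value in ascii.
 * @count: not used.
 *
 * Convert the ascii text number to an integer, then use the
 * lpfc_##attr##_set function to set the value.
 *
 * Returns:
 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
 * length of buffer upon success.
 */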
2497#define lpfc_param_store(attr) \
2498static ssize_t \
2499lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2500 const char *buf, size_t count) \
2501{ \
2502 struct Scsi_Host *shost = class_to_shost(dev);\
2503 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2504 struct lpfc_hba *phba = vport->phba;\
2505 uint val = 0;\
2506 if (!isdigit(buf[0]))\
2507 return -EINVAL;\
2508 if (sscanf(buf, "%i", &val) != 1)\
2509 return -EINVAL;\
2510 if (lpfc_##attr##_set(phba, val) == 0) \
2511 return strlen(buf);\
2512 else \
2513 return -EINVAL;\
2514}
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
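/*
 * lpfc_vport_param_show - Return a vport cfg attribute value in decimal
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_show.
 *
 * lpfc_##attr##_show: Return the decimal value of a vport's cfg_xxx field.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the attribute value in decimal.
 *
 * Returns: size of formatted string.
 */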
2530#define lpfc_vport_param_show(attr) \
2531static ssize_t \
2532lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2533 char *buf) \
2534{ \
2535 struct Scsi_Host *shost = class_to_shost(dev);\
2536 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2537 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
2538}
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
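/*
 * lpfc_vport_param_hex_show - Return a vport cfg attribute value in hex
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_show.
 *
 * lpfc_##attr##_show: Return the hex value of a vport's cfg_xxx field.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the attribute value in hexadecimal.
 *
 * Returns: size of formatted string.
 */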
2555#define lpfc_vport_param_hex_show(attr) \
2556static ssize_t \
2557lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2558 char *buf) \
2559{ \
2560 struct Scsi_Host *shost = class_to_shost(dev);\
2561 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2562 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
2563}
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
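/*
 * lpfc_vport_param_init - Initialize a vport cfg attribute
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_init. The macro also
 * takes a default argument, a minimum and a maximum argument.
 *
 * lpfc_##attr##_init: validates the min and max values then sets the
 * vport config field accordingly, or uses the default if val is out of
 * range and logs a kernel error message.
 *
 * Returns:
 * zero on success
 * -EINVAL if default used
 */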
2583#define lpfc_vport_param_init(attr, default, minval, maxval) \
2584static int \
2585lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
2586{ \
2587 if (lpfc_rangecheck(val, minval, maxval)) {\
2588 vport->cfg_##attr = val;\
2589 return 0;\
2590 }\
2591 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2592 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
2593 "allowed range is ["#minval", "#maxval"]\n", val); \
2594 vport->cfg_##attr = default;\
2595 return -EINVAL;\
2596}
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
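/*
 * lpfc_vport_param_set - Set a vport cfg attribute value
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_set.
 *
 * lpfc_##attr##_set: validates the min and max values then sets the
 * vport config field if in the valid range, logging the old and new
 * values. Logs an error message and does not set the parameter if val
 * is invalid.
 *
 * Returns:
 * zero on success
 * -EINVAL if val is invalid
 */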
2615#define lpfc_vport_param_set(attr, default, minval, maxval) \
2616static int \
2617lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
2618{ \
2619 if (lpfc_rangecheck(val, minval, maxval)) {\
2620 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2621 "3053 lpfc_" #attr \
2622 " changed from %d (x%x) to %d (x%x)\n", \
2623 vport->cfg_##attr, vport->cfg_##attr, \
2624 val, val); \
2625 vport->cfg_##attr = val;\
2626 return 0;\
2627 }\
2628 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2629 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
2630 "allowed range is ["#minval", "#maxval"]\n", val); \
2631 return -EINVAL;\
2632}
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
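/*
 * lpfc_vport_param_store - Set a vport cfg attribute value from sysfs
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_store.
 *
 * lpfc_##attr##_store: Set an attribute value.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: contains the attribute value in ascii.
 * @count: not used.
 *
 * Convert the ascii text number to an integer, then use the
 * lpfc_##attr##_set function to set the value.
 *
 * Returns:
 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
 * length of buffer upon success.
 */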
2651#define lpfc_vport_param_store(attr) \
2652static ssize_t \
2653lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2654 const char *buf, size_t count) \
2655{ \
2656 struct Scsi_Host *shost = class_to_shost(dev);\
2657 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2658 uint val = 0;\
2659 if (!isdigit(buf[0]))\
2660 return -EINVAL;\
2661 if (sscanf(buf, "%i", &val) != 1)\
2662 return -EINVAL;\
2663 if (lpfc_##attr##_set(vport, val) == 0) \
2664 return strlen(buf);\
2665 else \
2666 return -EINVAL;\
2667}
2668
2669
2670static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
2671static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
2672static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
2673static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
2674static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
2675static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
2676static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
2677static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
2678static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
2679static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
2680static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
2681static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
2682static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
2683static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
2684static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
2685 lpfc_link_state_store);
2686static DEVICE_ATTR(option_rom_version, S_IRUGO,
2687 lpfc_option_rom_version_show, NULL);
2688static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
2689 lpfc_num_discovered_ports_show, NULL);
2690static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
2691static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
2692static DEVICE_ATTR_RO(lpfc_drvr_version);
2693static DEVICE_ATTR_RO(lpfc_enable_fip);
2694static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
2695 lpfc_board_mode_show, lpfc_board_mode_store);
2696static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
2697static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
2698static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
2699static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
2700static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
2701static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
2702static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
2703static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
2704static DEVICE_ATTR_RO(lpfc_temp_sensor);
2705static DEVICE_ATTR_RO(lpfc_fips_level);
2706static DEVICE_ATTR_RO(lpfc_fips_rev);
2707static DEVICE_ATTR_RO(lpfc_dss);
2708static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
2709static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2710static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2711 NULL);
2712
2713static char *lpfc_soft_wwn_key = "C99G71SL8032A";
2714#define WWN_SZ 8
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
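/**
 * lpfc_wwn_set - Convert a string hexadecimal WWN value to the binary form
 * @buf: WWN string.
 * @cnt: Length of string.
 * @wwn: Array to receive the converted binary value.
 *
 * Description:
 * Accepts a 16-digit hexadecimal WWN, optionally prefixed with "x" or "0x"
 * and optionally terminated with a newline.
 *
 * Returns:
 * 0 on success, -EINVAL if the string is not a well-formed WWN.
 **/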
2725static ssize_t
2726lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2727{
2728 unsigned int i, j;
2729
2730
2731 if (buf[cnt-1] == '\n')
2732 cnt--;
2733
2734 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2735 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2736 return -EINVAL;
2737
2738 memset(wwn, 0, WWN_SZ);
2739
2740
2741 for (i = 0, j = 0; i < 16; i++) {
2742 if ((*buf >= 'a') && (*buf <= 'f'))
2743 j = ((j << 4) | ((*buf++ - 'a') + 10));
2744 else if ((*buf >= 'A') && (*buf <= 'F'))
2745 j = ((j << 4) | ((*buf++ - 'A') + 10));
2746 else if ((*buf >= '0') && (*buf <= '9'))
2747 j = ((j << 4) | (*buf++ - '0'));
2748 else
2749 return -EINVAL;
2750 if (i % 2) {
2751 wwn[i/2] = j & 0xff;
2752 j = 0;
2753 }
2754 }
2755 return 0;
2756}
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768static ssize_t
2769lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
2770 const char *buf, size_t count)
2771{
2772 struct Scsi_Host *shost = class_to_shost(dev);
2773 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2774 struct lpfc_hba *phba = vport->phba;
2775 unsigned int cnt = count;
2776 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
2777 u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
2792 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2793 "0051 "LPFC_DRIVER_NAME" soft wwpn can not"
2794 " be enabled: fawwpn is enabled\n");
2795 return -EINVAL;
2796 }
2797
2798
2799 if (buf[cnt-1] == '\n')
2800 cnt--;
2801
2802 if ((cnt != strlen(lpfc_soft_wwn_key)) ||
2803 (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
2804 return -EINVAL;
2805
2806 phba->soft_wwn_enable = 1;
2807
2808 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2809 "lpfc%d: soft_wwpn assignment has been enabled.\n",
2810 phba->brd_no);
2811 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2812 " The soft_wwpn feature is not supported by Broadcom.");
2813
2814 return count;
2815}
2816static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826static ssize_t
2827lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
2828 char *buf)
2829{
2830 struct Scsi_Host *shost = class_to_shost(dev);
2831 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2832 struct lpfc_hba *phba = vport->phba;
2833
2834 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2835 (unsigned long long)phba->cfg_soft_wwpn);
2836}
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851static ssize_t
2852lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2853 const char *buf, size_t count)
2854{
2855 struct Scsi_Host *shost = class_to_shost(dev);
2856 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2857 struct lpfc_hba *phba = vport->phba;
2858 struct completion online_compl;
2859 int stat1 = 0, stat2 = 0;
2860 unsigned int cnt = count;
2861 u8 wwpn[WWN_SZ];
2862 int rc;
2863
2864 if (!phba->cfg_enable_hba_reset)
2865 return -EACCES;
2866 spin_lock_irq(&phba->hbalock);
2867 if (phba->over_temp_state == HBA_OVER_TEMP) {
2868 spin_unlock_irq(&phba->hbalock);
2869 return -EACCES;
2870 }
2871 spin_unlock_irq(&phba->hbalock);
2872
2873 if (buf[cnt-1] == '\n')
2874 cnt--;
2875
2876 if (!phba->soft_wwn_enable)
2877 return -EINVAL;
2878
2879
2880 phba->soft_wwn_enable = 0;
2881
2882 rc = lpfc_wwn_set(buf, cnt, wwpn);
2883 if (rc) {
2884
2885 phba->soft_wwn_enable = 1;
2886 return rc;
2887 }
2888
2889 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
2890 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
2891 if (phba->cfg_soft_wwnn)
2892 fc_host_node_name(shost) = phba->cfg_soft_wwnn;
2893
2894 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2895 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
2896
2897 stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
2898 if (stat1)
2899 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2900 "0463 lpfc_soft_wwpn attribute set failed to "
2901 "reinit adapter - %d\n", stat1);
2902 init_completion(&online_compl);
2903 rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
2904 LPFC_EVT_ONLINE);
2905 if (rc == 0)
2906 return -ENOMEM;
2907
2908 wait_for_completion(&online_compl);
2909 if (stat2)
2910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2911 "0464 lpfc_soft_wwpn attribute set failed to "
2912 "reinit adapter - %d\n", stat2);
2913 return (stat1 || stat2) ? -EIO : count;
2914}
2915static DEVICE_ATTR_RW(lpfc_soft_wwpn);
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925static ssize_t
2926lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
2927 char *buf)
2928{
2929 struct Scsi_Host *shost = class_to_shost(dev);
2930 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2931 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2932 (unsigned long long)phba->cfg_soft_wwnn);
2933}
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945static ssize_t
2946lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2947 const char *buf, size_t count)
2948{
2949 struct Scsi_Host *shost = class_to_shost(dev);
2950 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2951 unsigned int cnt = count;
2952 u8 wwnn[WWN_SZ];
2953 int rc;
2954
2955
2956 if (buf[cnt-1] == '\n')
2957 cnt--;
2958
2959 if (!phba->soft_wwn_enable)
2960 return -EINVAL;
2961
2962 rc = lpfc_wwn_set(buf, cnt, wwnn);
2963 if (rc) {
2964
2965
2966
2967 return rc;
2968 }
2969
2970 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
2971
2972 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2973 "lpfc%d: soft_wwnn set. Value will take effect upon "
2974 "setting of the soft_wwpn\n", phba->brd_no);
2975
2976 return count;
2977}
2978static DEVICE_ATTR_RW(lpfc_soft_wwnn);
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990static ssize_t
2991lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
2992 char *buf)
2993{
2994 struct Scsi_Host *shost = class_to_shost(dev);
2995 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2996
2997 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2998 wwn_to_u64(phba->cfg_oas_tgt_wwpn));
2999}
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014static ssize_t
3015lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
3016 const char *buf, size_t count)
3017{
3018 struct Scsi_Host *shost = class_to_shost(dev);
3019 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3020 unsigned int cnt = count;
3021 uint8_t wwpn[WWN_SZ];
3022 int rc;
3023
3024 if (!phba->cfg_fof)
3025 return -EPERM;
3026
3027
3028 if (buf[cnt-1] == '\n')
3029 cnt--;
3030
3031 rc = lpfc_wwn_set(buf, cnt, wwpn);
3032 if (rc)
3033 return rc;
3034
3035 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3036 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3037 if (wwn_to_u64(wwpn) == 0)
3038 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
3039 else
3040 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
3041 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3042 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3043 return count;
3044}
3045static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
3046 lpfc_oas_tgt_show, lpfc_oas_tgt_store);
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058static ssize_t
3059lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
3060 char *buf)
3061{
3062 struct Scsi_Host *shost = class_to_shost(dev);
3063 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3064
3065 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
3066}
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081static ssize_t
3082lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
3083 const char *buf, size_t count)
3084{
3085 struct Scsi_Host *shost = class_to_shost(dev);
3086 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3087 unsigned int cnt = count;
3088 unsigned long val;
3089 int ret;
3090
3091 if (!phba->cfg_fof)
3092 return -EPERM;
3093
3094
3095 if (buf[cnt-1] == '\n')
3096 cnt--;
3097
3098 ret = kstrtoul(buf, 0, &val);
3099 if (ret || (val > 0x7f))
3100 return -EINVAL;
3101
3102 if (val)
3103 phba->cfg_oas_priority = (uint8_t)val;
3104 else
3105 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3106 return count;
3107}
3108static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
3109 lpfc_oas_priority_show, lpfc_oas_priority_store);
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121static ssize_t
3122lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
3123 char *buf)
3124{
3125 struct Scsi_Host *shost = class_to_shost(dev);
3126 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3127
3128 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3129 wwn_to_u64(phba->cfg_oas_vpt_wwpn));
3130}
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145static ssize_t
3146lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
3147 const char *buf, size_t count)
3148{
3149 struct Scsi_Host *shost = class_to_shost(dev);
3150 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3151 unsigned int cnt = count;
3152 uint8_t wwpn[WWN_SZ];
3153 int rc;
3154
3155 if (!phba->cfg_fof)
3156 return -EPERM;
3157
3158
3159 if (buf[cnt-1] == '\n')
3160 cnt--;
3161
3162 rc = lpfc_wwn_set(buf, cnt, wwpn);
3163 if (rc)
3164 return rc;
3165
3166 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3167 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3168 if (wwn_to_u64(wwpn) == 0)
3169 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
3170 else
3171 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
3172 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3173 if (phba->cfg_oas_priority == 0)
3174 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3175 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3176 return count;
3177}
3178static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
3179 lpfc_oas_vpt_show, lpfc_oas_vpt_store);
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192static ssize_t
3193lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
3194 char *buf)
3195{
3196 struct Scsi_Host *shost = class_to_shost(dev);
3197 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3198
3199 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
3200}
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216static ssize_t
3217lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
3218 const char *buf, size_t count)
3219{
3220 struct Scsi_Host *shost = class_to_shost(dev);
3221 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3222 int val = 0;
3223
3224 if (!phba->cfg_fof)
3225 return -EPERM;
3226
3227 if (!isdigit(buf[0]))
3228 return -EINVAL;
3229
3230 if (sscanf(buf, "%i", &val) != 1)
3231 return -EINVAL;
3232
3233 if ((val != 0) && (val != 1))
3234 return -EINVAL;
3235
3236 phba->cfg_oas_lun_state = val;
3237 return strlen(buf);
3238}
3239static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
3240 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253static ssize_t
3254lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
3255 char *buf)
3256{
3257 struct Scsi_Host *shost = class_to_shost(dev);
3258 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3259
3260 if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
3261 return -EFAULT;
3262
3263 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
3264}
3265static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
3266 lpfc_oas_lun_status_show, NULL);
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282static ssize_t
3283lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3284 uint8_t tgt_wwpn[], uint64_t lun,
3285 uint32_t oas_state, uint8_t pri)
3286{
3287
3288 int rc = 0;
3289
3290 if (!phba->cfg_fof)
3291 return -EPERM;
3292
3293 if (oas_state) {
3294 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3295 (struct lpfc_name *)tgt_wwpn,
3296 lun, pri))
3297 rc = -ENOMEM;
3298 } else {
3299 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3300 (struct lpfc_name *)tgt_wwpn, lun, pri);
3301 }
3302 return rc;
3303
3304}
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322static uint64_t
3323lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3324 uint8_t tgt_wwpn[], uint32_t *lun_status,
3325 uint32_t *lun_pri)
3326{
3327 uint64_t found_lun;
3328
3329 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
3330 return NOT_OAS_ENABLED_LUN;
3331 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
3332 phba->sli4_hba.oas_next_vpt_wwpn,
3333 (struct lpfc_name *)
3334 phba->sli4_hba.oas_next_tgt_wwpn,
3335 &phba->sli4_hba.oas_next_lun,
3336 (struct lpfc_name *)vpt_wwpn,
3337 (struct lpfc_name *)tgt_wwpn,
3338 &found_lun, lun_status, lun_pri))
3339 return found_lun;
3340 else
3341 return NOT_OAS_ENABLED_LUN;
3342}
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360static ssize_t
3361lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3362 uint8_t tgt_wwpn[], uint64_t lun,
3363 uint32_t oas_state, uint8_t pri)
3364{
3365
3366 int rc;
3367
3368 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
3369 oas_state, pri);
3370 return rc;
3371}
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387static ssize_t
3388lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
3389 char *buf)
3390{
3391 struct Scsi_Host *shost = class_to_shost(dev);
3392 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3393
3394 uint64_t oas_lun;
3395 int len = 0;
3396
3397 if (!phba->cfg_fof)
3398 return -EPERM;
3399
3400 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3401 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
3402 return -EFAULT;
3403
3404 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3405 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
3406 return -EFAULT;
3407
3408 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
3409 phba->cfg_oas_tgt_wwpn,
3410 &phba->cfg_oas_lun_status,
3411 &phba->cfg_oas_priority);
3412 if (oas_lun != NOT_OAS_ENABLED_LUN)
3413 phba->cfg_oas_flags |= OAS_LUN_VALID;
3414
3415 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
3416
3417 return len;
3418}
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435static ssize_t
3436lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
3437 const char *buf, size_t count)
3438{
3439 struct Scsi_Host *shost = class_to_shost(dev);
3440 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3441 uint64_t scsi_lun;
3442 uint32_t pri;
3443 ssize_t rc;
3444
3445 if (!phba->cfg_fof)
3446 return -EPERM;
3447
3448 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3449 return -EFAULT;
3450
3451 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3452 return -EFAULT;
3453
3454 if (!isdigit(buf[0]))
3455 return -EINVAL;
3456
3457 if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
3458 return -EINVAL;
3459
3460 pri = phba->cfg_oas_priority;
3461 if (pri == 0)
3462 pri = phba->cfg_XLanePriority;
3463
3464 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3465 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
3466 "priority 0x%x with oas state %d\n",
3467 wwn_to_u64(phba->cfg_oas_vpt_wwpn),
3468 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
3469 pri, phba->cfg_oas_lun_state);
3470
3471 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
3472 phba->cfg_oas_tgt_wwpn, scsi_lun,
3473 phba->cfg_oas_lun_state, pri);
3474 if (rc)
3475 return rc;
3476
3477 return count;
3478}
3479static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
3480 lpfc_oas_lun_show, lpfc_oas_lun_store);
3481
3482int lpfc_enable_nvmet_cnt;
3483unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
3484 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3485 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3486module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
3487MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as an NVME Target");
3488
3489static int lpfc_poll = 0;
3490module_param(lpfc_poll, int, S_IRUGO);
3491MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
3492 " 0 - none,"
3493 " 1 - poll with interrupts enabled"
3494 " 3 - poll and disable FCP ring interrupts");
3495
3496static DEVICE_ATTR_RW(lpfc_poll);
3497
3498int lpfc_no_hba_reset_cnt;
3499unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3500 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3501module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3502MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
3503
3504LPFC_ATTR(sli_mode, 0, 0, 3,
3505 "SLI mode selector:"
3506 " 0 - auto (SLI-3 if supported),"
3507 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
3508 " 3 - select SLI-3");
3509
3510LPFC_ATTR_R(enable_npiv, 1, 0, 1,
3511 "Enable NPIV functionality");
3512
3513LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
3514 "FCF Fast failover=1 Priority failover=2");
3515
3516
3517
3518
3519
3520
3521
3522LPFC_ATTR_R(enable_rrq, 2, 0, 2,
3523 "Enable RRQ functionality");
3524
3525
3526
3527
3528
3529
3530
3531
3532LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
3533 LPFC_DELAY_INIT_LINK_INDEFINITELY,
3534 "Suppress Link Up at initialization");
3535
3536
3537
3538
3539
3540
3541
3542
3543static ssize_t
3544lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3545{
3546 struct Scsi_Host *shost = class_to_shost(dev);
3547 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3548
3549 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
3550}
3551
3552static DEVICE_ATTR(iocb_hw, S_IRUGO,
3553 lpfc_iocb_hw_show, NULL);
3554static ssize_t
3555lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3556{
3557 struct Scsi_Host *shost = class_to_shost(dev);
3558 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3559 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3560
3561 return scnprintf(buf, PAGE_SIZE, "%d\n",
3562 pring ? pring->txq_max : 0);
3563}
3564
3565static DEVICE_ATTR(txq_hw, S_IRUGO,
3566 lpfc_txq_hw_show, NULL);
3567static ssize_t
3568lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
3569 char *buf)
3570{
3571 struct Scsi_Host *shost = class_to_shost(dev);
3572 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3573 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3574
3575 return scnprintf(buf, PAGE_SIZE, "%d\n",
3576 pring ? pring->txcmplq_max : 0);
3577}
3578
3579static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
3580 lpfc_txcmplq_hw_show, NULL);
3581
3582LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
3583 "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
3584
3585
3586
3587
3588
3589static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3590static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
3591module_param(lpfc_nodev_tmo, int, 0);
3592MODULE_PARM_DESC(lpfc_nodev_tmo,
3593 "Seconds driver will hold I/O waiting "
3594 "for a device to come back");
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604static ssize_t
3605lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
3606 char *buf)
3607{
3608 struct Scsi_Host *shost = class_to_shost(dev);
3609 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3610
3611 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
3612}
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
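/**
 * lpfc_nodev_tmo_init - Set the vport nodev tmo and devloss tmo values
 * @vport: lpfc vport structure pointer.
 * @val: contains the nodev timeout value.
 *
 * Description:
 * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
 * a kernel error message is printed and zero is returned.
 * Else if val is in range then nodev tmo and devloss tmo are set to val.
 * Otherwise nodev tmo is set to the default value.
 *
 * Returns:
 * zero if already set or if val is in range
 * -EINVAL val out of range
 **/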
3629static int
3630lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
3631{
3632 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
3633 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
3634 if (val != LPFC_DEF_DEVLOSS_TMO)
3635 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3636 "0407 Ignoring lpfc_nodev_tmo module "
3637 "parameter because lpfc_devloss_tmo "
3638 "is set.\n");
3639 return 0;
3640 }
3641
3642 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3643 vport->cfg_nodev_tmo = val;
3644 vport->cfg_devloss_tmo = val;
3645 return 0;
3646 }
3647 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3648 "0400 lpfc_nodev_tmo attribute cannot be set to"
3649 " %d, allowed range is [%d, %d]\n",
3650 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3651 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3652 return -EINVAL;
3653}
3654
3655
3656
3657
3658
3659
3660
3661
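/**
 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
 * @vport: lpfc vport structure pointer.
 *
 * Description:
 * Update all the ndlp's dev loss tmo with the vport devloss tmo value,
 * including any associated NVME remote ports.
 **/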
3662static void
3663lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3664{
3665 struct Scsi_Host *shost;
3666 struct lpfc_nodelist *ndlp;
3667#if (IS_ENABLED(CONFIG_NVME_FC))
3668 struct lpfc_nvme_rport *rport;
3669 struct nvme_fc_remote_port *remoteport = NULL;
3670#endif
3671
3672 shost = lpfc_shost_from_vport(vport);
3673 spin_lock_irq(shost->host_lock);
3674 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3675 if (!NLP_CHK_NODE_ACT(ndlp))
3676 continue;
3677 if (ndlp->rport)
3678 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3679#if (IS_ENABLED(CONFIG_NVME_FC))
3680 spin_lock(&vport->phba->hbalock);
3681 rport = lpfc_ndlp_get_nrport(ndlp);
3682 if (rport)
3683 remoteport = rport->remoteport;
3684 spin_unlock(&vport->phba->hbalock);
3685 if (remoteport)
3686			nvme_fc_set_remoteport_devloss(remoteport,
3687 vport->cfg_devloss_tmo);
3688#endif
3689 }
3690 spin_unlock_irq(shost->host_lock);
3691}
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
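/**
 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
 * @vport: lpfc vport structure pointer.
 * @val: contains the tmo value.
 *
 * Description:
 * If the devloss tmo is already set or the vport dev loss tmo has changed
 * then a kernel error message is printed and zero is returned.
 * Else if val is in range then nodev tmo and devloss tmo are set to val,
 * and the change is propagated to the transport and all remote ports.
 *
 * Returns:
 * zero if already set or if val is in range
 * -EINVAL val out of range
 **/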
3708static int
3709lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
3710{
3711 if (vport->dev_loss_tmo_changed ||
3712 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
3713 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3714 "0401 Ignoring change to lpfc_nodev_tmo "
3715 "because lpfc_devloss_tmo is set.\n");
3716 return 0;
3717 }
3718 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3719 vport->cfg_nodev_tmo = val;
3720 vport->cfg_devloss_tmo = val;
3721
3722
3723
3724
3725 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3726 lpfc_update_rport_devloss_tmo(vport);
3727 return 0;
3728 }
3729 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3730 "0403 lpfc_nodev_tmo attribute cannot be set to "
3731 "%d, allowed range is [%d, %d]\n",
3732 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3733 return -EINVAL;
3734}
3735
3736lpfc_vport_param_store(nodev_tmo)
3737
3738static DEVICE_ATTR_RW(lpfc_nodev_tmo);
3739
3740
3741
3742
3743
3744
3745module_param(lpfc_devloss_tmo, int, S_IRUGO);
3746MODULE_PARM_DESC(lpfc_devloss_tmo,
3747 "Seconds driver will hold I/O waiting "
3748 "for a device to come back");
3749lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
3750 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
3751lpfc_vport_param_show(devloss_tmo)
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767static int
3768lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3769{
3770 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3771 vport->cfg_nodev_tmo = val;
3772 vport->cfg_devloss_tmo = val;
3773 vport->dev_loss_tmo_changed = 1;
3774 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3775 lpfc_update_rport_devloss_tmo(vport);
3776 return 0;
3777 }
3778
3779 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3780 "0404 lpfc_devloss_tmo attribute cannot be set to "
3781 "%d, allowed range is [%d, %d]\n",
3782 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3783 return -EINVAL;
3784}
3785
3786lpfc_vport_param_store(devloss_tmo)
3787static DEVICE_ATTR_RW(lpfc_devloss_tmo);
3788
3789
3790
3791
3792
3793
3794
3795LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3796 "Enable suppress rsp feature is firmware supports it");
3797
3798
3799
3800
3801
3802
3803
3804
3805LPFC_ATTR_R(nvmet_mrq,
3806 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3807 "Specify number of RQ pairs for processing NVMET cmds");
3808
3809
3810
3811
3812
3813LPFC_ATTR_R(nvmet_mrq_post,
3814 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3815 LPFC_NVMET_RQE_DEF_COUNT,
3816 "Specify number of RQ buffers to initially post");
3817
3818
3819
3820
3821
3822
3823
3824LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
3825 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
3826 "Enable FC4 Protocol support - FCP / NVME");
3827
3828
3829
3830
3831
3832
3833
3834LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3835 "Verbose logging bit-mask");
3836
3837
3838
3839
3840
3841LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3842 "Deregister nameserver objects before LOGO");
3843
3844
3845
3846
3847
3848
3849
3850
3851LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
3852 "Max number of FCP commands we can queue to a specific LUN");
3853
3854
3855
3856
3857
3858static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
3859module_param(lpfc_tgt_queue_depth, uint, 0444);
3860MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
3861lpfc_vport_param_show(tgt_queue_depth);
3862lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
3863 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876static int
3877lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
3878{
3879 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3880 struct lpfc_nodelist *ndlp;
3881
3882 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
3883 return -EINVAL;
3884
3885 if (val == vport->cfg_tgt_queue_depth)
3886 return 0;
3887
3888 spin_lock_irq(shost->host_lock);
3889 vport->cfg_tgt_queue_depth = val;
3890
3891
3892 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3893 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3894
3895 spin_unlock_irq(shost->host_lock);
3896 return 0;
3897}
3898
3899lpfc_vport_param_store(tgt_queue_depth);
3900static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
3901
3902
3903
3904
3905
3906
3907
3908
3909LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
3910 "Max number of FCP commands we can queue to a lpfc HBA");
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
3922 "Allow peer ports on the same physical port to login to each "
3923 "other.");
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936static int lpfc_restrict_login = 1;
3937module_param(lpfc_restrict_login, int, S_IRUGO);
3938MODULE_PARM_DESC(lpfc_restrict_login,
3939 "Restrict virtual ports login to remote initiators.");
3940lpfc_vport_param_show(restrict_login);
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957static int
3958lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
3959{
3960 if (val < 0 || val > 1) {
3961 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3962 "0422 lpfc_restrict_login attribute cannot "
3963 "be set to %d, allowed range is [0, 1]\n",
3964 val);
3965 vport->cfg_restrict_login = 1;
3966 return -EINVAL;
3967 }
3968 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3969 vport->cfg_restrict_login = 0;
3970 return 0;
3971 }
3972 vport->cfg_restrict_login = val;
3973 return 0;
3974}
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992static int
3993lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
3994{
3995 if (val < 0 || val > 1) {
3996 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3997 "0425 lpfc_restrict_login attribute cannot "
3998 "be set to %d, allowed range is [0, 1]\n",
3999 val);
4000 vport->cfg_restrict_login = 1;
4001 return -EINVAL;
4002 }
4003 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
4004 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4005 "0468 lpfc_restrict_login must be 0 for "
4006 "Physical ports.\n");
4007 vport->cfg_restrict_login = 0;
4008 return 0;
4009 }
4010 vport->cfg_restrict_login = val;
4011 return 0;
4012}
4013lpfc_vport_param_store(restrict_login);
4014static DEVICE_ATTR_RW(lpfc_restrict_login);
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
4034 "Start scanning for devices from highest ALPA to lowest");
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047LPFC_ATTR(topology, 0, 0, 6,
4048 "Select Fibre Channel topology");
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066static ssize_t
4067lpfc_topology_store(struct device *dev, struct device_attribute *attr,
4068 const char *buf, size_t count)
4069{
4070 struct Scsi_Host *shost = class_to_shost(dev);
4071 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4072 struct lpfc_hba *phba = vport->phba;
4073 int val = 0;
4074 int nolip = 0;
4075 const char *val_buf = buf;
4076 int err;
4077 uint32_t prev_val;
4078
4079 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4080 nolip = 1;
4081 val_buf = &buf[strlen("nolip ")];
4082 }
4083
4084 if (!isdigit(val_buf[0]))
4085 return -EINVAL;
4086 if (sscanf(val_buf, "%i", &val) != 1)
4087 return -EINVAL;
4088
4089 if (val >= 0 && val <= 6) {
4090 prev_val = phba->cfg_topology;
4091 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
4092 val == 4) {
4093 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4094 "3113 Loop mode not supported at speed %d\n",
4095 val);
4096 return -EINVAL;
4097 }
4098 if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
4099 phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
4100 val == 4) {
4101 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4102 "3114 Loop mode not supported\n");
4103 return -EINVAL;
4104 }
4105 phba->cfg_topology = val;
4106 if (nolip)
4107 return strlen(buf);
4108
4109 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4110 "3054 lpfc_topology changed from %d to %d\n",
4111 prev_val, val);
4112 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
4113 phba->fc_topology_changed = 1;
4114 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4115 if (err) {
4116 phba->cfg_topology = prev_val;
4117 return -EINVAL;
4118 } else
4119 return strlen(buf);
4120 }
4121 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4122 "%d:0467 lpfc_topology attribute cannot be set to %d, "
4123 "allowed range is [0, 6]\n",
4124 phba->brd_no, val);
4125 return -EINVAL;
4126}
4127
4128lpfc_param_show(topology)
4129static DEVICE_ATTR_RW(lpfc_topology);
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142static ssize_t
4143lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
4144 char *buf)
4145{
4146 struct Scsi_Host *shost = class_to_shost(dev);
4147 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4148	if (vport->vport_flag & STATIC_VPORT)
4149		return scnprintf(buf, PAGE_SIZE, "1\n");
4150
4151	return scnprintf(buf, PAGE_SIZE, "0\n");
4154}
4155
4156
4157
4158
4159static DEVICE_ATTR_RO(lpfc_static_vport);
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180static ssize_t
4181lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
4182 const char *buf, size_t count)
4183{
4184 struct Scsi_Host *shost = class_to_shost(dev);
4185 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4186 struct lpfc_hba *phba = vport->phba;
4187#define LPFC_MAX_DATA_CTRL_LEN 1024
4188 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
4189 unsigned long i;
4190 char *str_ptr, *token;
4191 struct lpfc_vport **vports;
4192 struct Scsi_Host *v_shost;
4193 char *bucket_type_str, *base_str, *step_str;
4194 unsigned long base, step, bucket_type;
4195
4196 if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
4197 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
4198 return -EINVAL;
4199
4200 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
4201 str_ptr = &bucket_data[0];
4202
4203 token = strsep(&str_ptr, "\t ");
4204 if (!token)
4205 return -EINVAL;
4206
4207 bucket_type_str = strsep(&str_ptr, "\t ");
4208 if (!bucket_type_str)
4209 return -EINVAL;
4210
4211 if (!strncmp(bucket_type_str, "linear", strlen("linear")))
4212 bucket_type = LPFC_LINEAR_BUCKET;
4213 else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
4214 bucket_type = LPFC_POWER2_BUCKET;
4215 else
4216 return -EINVAL;
4217
4218 base_str = strsep(&str_ptr, "\t ");
4219 if (!base_str)
4220 return -EINVAL;
4221 base = simple_strtoul(base_str, NULL, 0);
4222
4223 step_str = strsep(&str_ptr, "\t ");
4224 if (!step_str)
4225 return -EINVAL;
4226 step = simple_strtoul(step_str, NULL, 0);
4227 if (!step)
4228 return -EINVAL;
4229
4230
4231 vports = lpfc_create_vport_work_array(phba);
4232 if (vports == NULL)
4233 return -ENOMEM;
4234
4235 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4236 v_shost = lpfc_shost_from_vport(vports[i]);
4237 spin_lock_irq(v_shost->host_lock);
4238
4239 vports[i]->stat_data_blocked = 1;
4240 if (vports[i]->stat_data_enabled)
4241 lpfc_vport_reset_stat_data(vports[i]);
4242 spin_unlock_irq(v_shost->host_lock);
4243 }
4244
4245
4246 phba->bucket_type = bucket_type;
4247 phba->bucket_base = base;
4248 phba->bucket_step = step;
4249
4250 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4251 v_shost = lpfc_shost_from_vport(vports[i]);
4252
4253
4254 spin_lock_irq(v_shost->host_lock);
4255 vports[i]->stat_data_blocked = 0;
4256 spin_unlock_irq(v_shost->host_lock);
4257 }
4258 lpfc_destroy_vport_work_array(phba, vports);
4259 return strlen(buf);
4260 }
4261
4262 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
4263 vports = lpfc_create_vport_work_array(phba);
4264 if (vports == NULL)
4265 return -ENOMEM;
4266
4267 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4268 v_shost = lpfc_shost_from_vport(vports[i]);
4269			spin_lock_irq(v_shost->host_lock);
4270			vports[i]->stat_data_blocked = 1;
4271			lpfc_free_bucket(vports[i]);
4272			vports[i]->stat_data_enabled = 0;
4273			vports[i]->stat_data_blocked = 0;
4274			spin_unlock_irq(v_shost->host_lock);
4275 }
4276 lpfc_destroy_vport_work_array(phba, vports);
4277 phba->bucket_type = LPFC_NO_BUCKET;
4278 phba->bucket_base = 0;
4279 phba->bucket_step = 0;
4280 return strlen(buf);
4281 }
4282
4283 if (!strncmp(buf, "start", strlen("start"))) {
4284
4285 if (phba->bucket_type == LPFC_NO_BUCKET)
4286 return -EINVAL;
4287 spin_lock_irq(shost->host_lock);
4288 if (vport->stat_data_enabled) {
4289 spin_unlock_irq(shost->host_lock);
4290 return strlen(buf);
4291 }
4292 lpfc_alloc_bucket(vport);
4293 vport->stat_data_enabled = 1;
4294 spin_unlock_irq(shost->host_lock);
4295 return strlen(buf);
4296 }
4297
4298 if (!strncmp(buf, "stop", strlen("stop"))) {
4299 spin_lock_irq(shost->host_lock);
4300 if (vport->stat_data_enabled == 0) {
4301 spin_unlock_irq(shost->host_lock);
4302 return strlen(buf);
4303 }
4304 lpfc_free_bucket(vport);
4305 vport->stat_data_enabled = 0;
4306 spin_unlock_irq(shost->host_lock);
4307 return strlen(buf);
4308 }
4309
4310 if (!strncmp(buf, "reset", strlen("reset"))) {
4311 if ((phba->bucket_type == LPFC_NO_BUCKET)
4312 || !vport->stat_data_enabled)
4313 return strlen(buf);
4314 spin_lock_irq(shost->host_lock);
4315 vport->stat_data_blocked = 1;
4316 lpfc_vport_reset_stat_data(vport);
4317 vport->stat_data_blocked = 0;
4318 spin_unlock_irq(shost->host_lock);
4319 return strlen(buf);
4320 }
4321 return -EINVAL;
4322}
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334static ssize_t
4335lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
4336 char *buf)
4337{
4338 struct Scsi_Host *shost = class_to_shost(dev);
4339 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4340 struct lpfc_hba *phba = vport->phba;
4341 int index = 0;
4342 int i;
4343 char *bucket_type;
4344 unsigned long bucket_value;
4345
4346 switch (phba->bucket_type) {
4347 case LPFC_LINEAR_BUCKET:
4348 bucket_type = "linear";
4349 break;
4350 case LPFC_POWER2_BUCKET:
4351 bucket_type = "power2";
4352 break;
4353 default:
4354 bucket_type = "No Bucket";
4355 break;
4356 }
4357
4358 sprintf(&buf[index], "Statistical Data enabled :%d, "
4359 "blocked :%d, Bucket type :%s, Bucket base :%d,"
4360 " Bucket step :%d\nLatency Ranges :",
4361 vport->stat_data_enabled, vport->stat_data_blocked,
4362 bucket_type, phba->bucket_base, phba->bucket_step);
4363 index = strlen(buf);
4364 if (phba->bucket_type != LPFC_NO_BUCKET) {
4365 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4366 if (phba->bucket_type == LPFC_LINEAR_BUCKET)
4367 bucket_value = phba->bucket_base +
4368 phba->bucket_step * i;
4369 else
4370 bucket_value = phba->bucket_base +
4371 (1 << i) * phba->bucket_step;
4372
4373 if (index + 10 > PAGE_SIZE)
4374 break;
4375 sprintf(&buf[index], "%08ld ", bucket_value);
4376 index = strlen(buf);
4377 }
4378 }
4379 sprintf(&buf[index], "\n");
4380 return strlen(buf);
4381}
4382
4383
4384
4385
4386static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
4397#define MAX_STAT_DATA_SIZE_PER_TARGET \
4398 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414static ssize_t
4415sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
4416 struct bin_attribute *bin_attr,
4417 char *buf, loff_t off, size_t count)
4418{
4419 struct device *dev = container_of(kobj, struct device,
4420 kobj);
4421 struct Scsi_Host *shost = class_to_shost(dev);
4422 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4423 struct lpfc_hba *phba = vport->phba;
4424 int i = 0, index = 0;
4425 unsigned long nport_index;
4426 struct lpfc_nodelist *ndlp = NULL;
4427 nport_index = (unsigned long)off /
4428 MAX_STAT_DATA_SIZE_PER_TARGET;
4429
4430 if (!vport->stat_data_enabled || vport->stat_data_blocked
4431 || (phba->bucket_type == LPFC_NO_BUCKET))
4432 return 0;
4433
4434 spin_lock_irq(shost->host_lock);
4435 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4436 if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
4437 continue;
4438
4439 if (nport_index > 0) {
4440 nport_index--;
4441 continue;
4442 }
4443
4444 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
4445 > count)
4446 break;
4447
4448 if (!ndlp->lat_data)
4449 continue;
4450
4451
4452 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
4453 ndlp->nlp_portname.u.wwn[0],
4454 ndlp->nlp_portname.u.wwn[1],
4455 ndlp->nlp_portname.u.wwn[2],
4456 ndlp->nlp_portname.u.wwn[3],
4457 ndlp->nlp_portname.u.wwn[4],
4458 ndlp->nlp_portname.u.wwn[5],
4459 ndlp->nlp_portname.u.wwn[6],
4460 ndlp->nlp_portname.u.wwn[7]);
4461
4462 index = strlen(buf);
4463
4464 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4465 sprintf(&buf[index], "%010u,",
4466 ndlp->lat_data[i].cmd_count);
4467 index = strlen(buf);
4468 }
4469 sprintf(&buf[index], "\n");
4470 index = strlen(buf);
4471 }
4472 spin_unlock_irq(shost->host_lock);
4473 return index;
4474}
4475
4476static struct bin_attribute sysfs_drvr_stat_data_attr = {
4477 .attr = {
4478 .name = "lpfc_drvr_stat_data",
4479 .mode = S_IRUSR,
4480 },
4481 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
4482 .read = sysfs_drvr_stat_data_read,
4483 .write = NULL,
4484};
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508static ssize_t
4509lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
4510 const char *buf, size_t count)
4511{
4512 struct Scsi_Host *shost = class_to_shost(dev);
4513 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4514 struct lpfc_hba *phba = vport->phba;
4515 int val = LPFC_USER_LINK_SPEED_AUTO;
4516 int nolip = 0;
4517 const char *val_buf = buf;
4518 int err;
4519 uint32_t prev_val, if_type;
4520
4521 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4522 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4523 phba->hba_flag & HBA_FORCED_LINK_SPEED)
4524 return -EPERM;
4525
4526 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4527 nolip = 1;
4528 val_buf = &buf[strlen("nolip ")];
4529 }
4530
4531 if (!isdigit(val_buf[0]))
4532 return -EINVAL;
4533 if (sscanf(val_buf, "%i", &val) != 1)
4534 return -EINVAL;
4535
4536 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4537 "3055 lpfc_link_speed changed from %d to %d %s\n",
4538 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
4539
4540 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
4541 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
4542 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
4543 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
4544 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
4545 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
4546 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
4547 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
4548 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4549 "2879 lpfc_link_speed attribute cannot be set "
4550 "to %d. Speed is not supported by this port.\n",
4551 val);
4552 return -EINVAL;
4553 }
4554 if (val >= LPFC_USER_LINK_SPEED_16G &&
4555 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4557 "3112 lpfc_link_speed attribute cannot be set "
4558 "to %d. Speed is not supported in loop mode.\n",
4559 val);
4560 return -EINVAL;
4561 }
4562
4563 switch (val) {
4564 case LPFC_USER_LINK_SPEED_AUTO:
4565 case LPFC_USER_LINK_SPEED_1G:
4566 case LPFC_USER_LINK_SPEED_2G:
4567 case LPFC_USER_LINK_SPEED_4G:
4568 case LPFC_USER_LINK_SPEED_8G:
4569 case LPFC_USER_LINK_SPEED_16G:
4570 case LPFC_USER_LINK_SPEED_32G:
4571 case LPFC_USER_LINK_SPEED_64G:
4572 prev_val = phba->cfg_link_speed;
4573 phba->cfg_link_speed = val;
4574 if (nolip)
4575 return strlen(buf);
4576
4577 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4578 if (err) {
4579 phba->cfg_link_speed = prev_val;
4580 return -EINVAL;
4581 }
4582 return strlen(buf);
4583 default:
4584 break;
4585 }
4586
4587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4588 "0469 lpfc_link_speed attribute cannot be set to %d, "
4589 "allowed values are [%s]\n",
4590 val, LPFC_LINK_SPEED_STRING);
4591 return -EINVAL;
4592
4593}
4594
4595static int lpfc_link_speed = 0;
4596module_param(lpfc_link_speed, int, S_IRUGO);
4597MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
4598lpfc_param_show(link_speed)
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616static int
4617lpfc_link_speed_init(struct lpfc_hba *phba, int val)
4618{
4619 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
4620 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4621 "3111 lpfc_link_speed of %d cannot "
4622 "support loop mode, setting topology to default.\n",
4623 val);
4624 phba->cfg_topology = 0;
4625 }
4626
4627 switch (val) {
4628 case LPFC_USER_LINK_SPEED_AUTO:
4629 case LPFC_USER_LINK_SPEED_1G:
4630 case LPFC_USER_LINK_SPEED_2G:
4631 case LPFC_USER_LINK_SPEED_4G:
4632 case LPFC_USER_LINK_SPEED_8G:
4633 case LPFC_USER_LINK_SPEED_16G:
4634 case LPFC_USER_LINK_SPEED_32G:
4635 case LPFC_USER_LINK_SPEED_64G:
4636 phba->cfg_link_speed = val;
4637 return 0;
4638 default:
4639 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4640 "0405 lpfc_link_speed attribute cannot "
4641 "be set to %d, allowed values are "
4642 "["LPFC_LINK_SPEED_STRING"]\n", val);
4643 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
4644 return -EINVAL;
4645 }
4646}
4647
4648static DEVICE_ATTR_RW(lpfc_link_speed);
4649
4650
4651
4652
4653
4654
4655
4656LPFC_ATTR(aer_support, 1, 0, 1,
4657 "Enable PCIe device AER support");
4658lpfc_param_show(aer_support)
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687static ssize_t
4688lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
4689 const char *buf, size_t count)
4690{
4691 struct Scsi_Host *shost = class_to_shost(dev);
4692 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4693 struct lpfc_hba *phba = vport->phba;
4694 int val = 0, rc = -EINVAL;
4695
4696 if (!isdigit(buf[0]))
4697 return -EINVAL;
4698 if (sscanf(buf, "%i", &val) != 1)
4699 return -EINVAL;
4700
4701 switch (val) {
4702 case 0:
4703 if (phba->hba_flag & HBA_AER_ENABLED) {
4704 rc = pci_disable_pcie_error_reporting(phba->pcidev);
4705 if (!rc) {
4706 spin_lock_irq(&phba->hbalock);
4707 phba->hba_flag &= ~HBA_AER_ENABLED;
4708 spin_unlock_irq(&phba->hbalock);
4709 phba->cfg_aer_support = 0;
4710 rc = strlen(buf);
4711 } else
4712 rc = -EPERM;
4713 } else {
4714 phba->cfg_aer_support = 0;
4715 rc = strlen(buf);
4716 }
4717 break;
4718 case 1:
4719 if (!(phba->hba_flag & HBA_AER_ENABLED)) {
4720 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4721 if (!rc) {
4722 spin_lock_irq(&phba->hbalock);
4723 phba->hba_flag |= HBA_AER_ENABLED;
4724 spin_unlock_irq(&phba->hbalock);
4725 phba->cfg_aer_support = 1;
4726 rc = strlen(buf);
4727 } else
4728 rc = -EPERM;
4729 } else {
4730 phba->cfg_aer_support = 1;
4731 rc = strlen(buf);
4732 }
4733 break;
4734 default:
4735 rc = -EINVAL;
4736 break;
4737 }
4738 return rc;
4739}
4740
4741static DEVICE_ATTR_RW(lpfc_aer_support);
4742
4743
4744
4745
4746
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762static ssize_t
4763lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
4764 const char *buf, size_t count)
4765{
4766 struct Scsi_Host *shost = class_to_shost(dev);
4767 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4768 struct lpfc_hba *phba = vport->phba;
4769 int val, rc = -1;
4770
4771 if (!isdigit(buf[0]))
4772 return -EINVAL;
4773 if (sscanf(buf, "%i", &val) != 1)
4774 return -EINVAL;
4775 if (val != 1)
4776 return -EINVAL;
4777
4778 if (phba->hba_flag & HBA_AER_ENABLED)
4779 rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
4780
4781 if (rc == 0)
4782 return strlen(buf);
4783 else
4784 return -EPERM;
4785}
4786
4787static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
4788 lpfc_aer_cleanup_state);
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829static ssize_t
4830lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4831 const char *buf, size_t count)
4832{
4833 struct Scsi_Host *shost = class_to_shost(dev);
4834 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4835 struct lpfc_hba *phba = vport->phba;
4836 struct pci_dev *pdev = phba->pcidev;
4837 int val = 0, rc = -EINVAL;
4838
4839
4840 if (!isdigit(buf[0]))
4841 return -EINVAL;
4842 if (sscanf(buf, "%i", &val) != 1)
4843 return -EINVAL;
4844 if (val < 0)
4845 return -EINVAL;
4846
4847
4848 if (val == 0) {
4849 if (phba->cfg_sriov_nr_virtfn > 0) {
4850 pci_disable_sriov(pdev);
4851 phba->cfg_sriov_nr_virtfn = 0;
4852 }
4853 return strlen(buf);
4854 }
4855
4856
4857 if (phba->cfg_sriov_nr_virtfn > 0) {
4858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4859 "3018 There are %d virtual functions "
4860 "enabled on physical function.\n",
4861 phba->cfg_sriov_nr_virtfn);
4862 return -EEXIST;
4863 }
4864
4865 if (val <= LPFC_MAX_VFN_PER_PFN)
4866 phba->cfg_sriov_nr_virtfn = val;
4867 else {
4868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4869 "3019 Enabling %d virtual functions is not "
4870 "allowed.\n", val);
4871 return -EINVAL;
4872 }
4873
4874 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
4875 if (rc) {
4876 phba->cfg_sriov_nr_virtfn = 0;
4877 rc = -EPERM;
4878 } else
4879 rc = strlen(buf);
4880
4881 return rc;
4882}
4883
4884LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
4885 "Enable PCIe device SR-IOV virtual fn");
4886
4887lpfc_param_show(sriov_nr_virtfn)
4888static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
4889
4890
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905static ssize_t
4906lpfc_request_firmware_upgrade_store(struct device *dev,
4907 struct device_attribute *attr,
4908 const char *buf, size_t count)
4909{
4910 struct Scsi_Host *shost = class_to_shost(dev);
4911 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4912 struct lpfc_hba *phba = vport->phba;
4913 int val = 0, rc = -EINVAL;
4914
4915
4916 if (!isdigit(buf[0]))
4917 return -EINVAL;
4918 if (sscanf(buf, "%i", &val) != 1)
4919 return -EINVAL;
4920 if (val != 1)
4921 return -EINVAL;
4922
4923 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
4924 if (rc)
4925 rc = -EPERM;
4926 else
4927 rc = strlen(buf);
4928 return rc;
4929}
4930
4931static int lpfc_req_fw_upgrade;
4932module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
4933MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
4934lpfc_param_show(request_firmware_upgrade)
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948static int
4949lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
4950{
4951 if (val >= 0 && val <= 1) {
4952 phba->cfg_request_firmware_upgrade = val;
4953 return 0;
4954 }
4955 return -EINVAL;
4956}
4957static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
4958 lpfc_request_firmware_upgrade_show,
4959 lpfc_request_firmware_upgrade_store);
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
4972
4973
4974
4975
4976
4977
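/**
 * lpfc_fcp_imax_store - Set the adapter's maximum FCP interrupt rate
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: string with the number of fast-path FCP interrupts per second.
 * @count: not used.
 *
 * Description:
 * Supported only on SLI-4 adapters. A value of zero re-enables automatic
 * EQ delay adjustment; a value in the valid range fixes the maximum number
 * of fast-path FCP interrupts per second and reprograms the EQ delay for
 * every interrupt channel.
 *
 * Returns:
 * length of the buf on success
 * -EINVAL if val is out of range or the adapter is not SLI-4
 **/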
4978static ssize_t
4979lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
4980 const char *buf, size_t count)
4981{
4982 struct Scsi_Host *shost = class_to_shost(dev);
4983 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4984 struct lpfc_hba *phba = vport->phba;
4985 struct lpfc_eq_intr_info *eqi;
4986 uint32_t usdelay;
4987 int val = 0, i;
4988
4989
4990 if (phba->sli_rev != LPFC_SLI_REV4)
4991 return -EINVAL;
4992
4993
4994 if (!isdigit(buf[0]))
4995 return -EINVAL;
4996 if (sscanf(buf, "%i", &val) != 1)
4997 return -EINVAL;
4998
4999
5000
5001
5002
5003
5004 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5005 return -EINVAL;
5006
5007 phba->cfg_auto_imax = (val) ? 0 : 1;
5008 if (phba->cfg_fcp_imax && !val) {
5009 queue_delayed_work(phba->wq, &phba->eq_delay_work,
5010 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5011
5012 for_each_present_cpu(i) {
5013 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5014 eqi->icnt = 0;
5015 }
5016 }
5017
5018 phba->cfg_fcp_imax = (uint32_t)val;
5019
5020 if (phba->cfg_fcp_imax)
5021 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5022 else
5023 usdelay = 0;
5024
5025 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5026 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5027 usdelay);
5028
5029 return strlen(buf);
5030}
5031
5032
5033
5034
5035
5036
5037
5038static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5039module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5040MODULE_PARM_DESC(lpfc_fcp_imax,
5041 "Set the maximum number of FCP interrupts per second per HBA");
5042lpfc_param_show(fcp_imax)
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
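/**
 * lpfc_fcp_imax_init - Validate and set the initial fcp_imax value
 * @phba: lpfc_hba pointer.
 * @val: interrupts per second, or 0 for automatic mode.
 *
 * Forces the value to 0 on non SLI-4 adapters.  Out-of-range values fall
 * back to LPFC_DEF_IMAX with a log message.  Always returns 0.
 **/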
5057static int
5058lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
5059{
5060 if (phba->sli_rev != LPFC_SLI_REV4) {
5061 phba->cfg_fcp_imax = 0;
5062 return 0;
5063 }
5064
5065 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
5066 (val == 0)) {
5067 phba->cfg_fcp_imax = val;
5068 return 0;
5069 }
5070
5071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5072 "3016 lpfc_fcp_imax: %d out of range, using default\n",
5073 val);
5074 phba->cfg_fcp_imax = LPFC_DEF_IMAX;
5075
5076 return 0;
5077}
5078
5079static DEVICE_ATTR_RW(lpfc_fcp_imax);
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
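/**
 * lpfc_cq_max_proc_limit_store - Set the max CQEs processed per CQ invocation
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: string containing the new limit.
 * @count: size of @buf.
 *
 * Only supported on SLI-4 adapters.  The new limit is applied to every CQ
 * that is a child of an EQ, capped at each queue's entry count.
 *
 * Returns the length of @buf on success, -EINVAL for bad input or non SLI-4
 * adapters, or -ERANGE if the value is outside the supported range.
 **/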
5097static ssize_t
5098lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
5099 const char *buf, size_t count)
5100{
5101 struct Scsi_Host *shost = class_to_shost(dev);
5102 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5103 struct lpfc_hba *phba = vport->phba;
5104 struct lpfc_queue *eq, *cq;
5105 unsigned long val;
5106 int i;
5107
5108
5109 if (phba->sli_rev != LPFC_SLI_REV4)
5110 return -EINVAL;
5111
5112
5113 if (!isdigit(buf[0]))
5114 return -EINVAL;
5115 if (kstrtoul(buf, 0, &val))
5116 return -EINVAL;
5117
5118 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
5119 return -ERANGE;
5120
5121 phba->cfg_cq_max_proc_limit = (uint32_t)val;
5122
5123
5124 for (i = 0; i < phba->cfg_irq_chann; i++) {
5125
5126 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
5127 if (!eq)
5128 continue;
5129
5130 list_for_each_entry(cq, &eq->child_list, list)
5131 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
5132 cq->entry_count);
5133 }
5134
5135 return strlen(buf);
5136}
5137
5138
5139
5140
5141
5142static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5143module_param(lpfc_cq_max_proc_limit, int, 0644);
5144MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
5145 "Set the maximum number CQEs processed in an iteration of "
5146 "CQ processing");
5147lpfc_param_show(cq_max_proc_limit)
5148
5149
5150
5151
5152
5153
5154LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
5155 LPFC_CQ_MIN_THRESHOLD_TO_POLL,
5156 LPFC_CQ_MAX_THRESHOLD_TO_POLL,
5157 "CQE Processing Threshold to enable Polling");
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
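/**
 * lpfc_cq_max_proc_limit_init - Validate and set the initial CQ processing limit
 * @phba: lpfc_hba pointer.
 * @val: requested limit from the module parameter.
 *
 * Defaults to LPFC_CQ_DEF_MAX_PROC_LIMIT and only honors @val on SLI-4
 * adapters when it falls within the supported range.  Always returns 0.
 **/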
5173static int
5174lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
5175{
5176 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5177
5178 if (phba->sli_rev != LPFC_SLI_REV4)
5179 return 0;
5180
5181 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
5182 phba->cfg_cq_max_proc_limit = val;
5183 return 0;
5184 }
5185
5186 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5187 "0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: "
5188 "%d out of range, using default\n",
5189 val);
5190
5191 return 0;
5192}
5193
5194static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
5195
5196
5197
5198
5199
5200
5201
5202
5203
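/**
 * lpfc_fcp_cpu_map_show - Display the CPU to EQ/hardware-queue/IRQ mapping
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: output buffer, at most PAGE_SIZE bytes.
 *
 * Returns nothing on non SLI-4 adapters or when MSI-X is not in use.
 * Otherwise walks the cpu_map table starting at curr_disp_cpu and emits one
 * line per possible CPU.  When the output approaches PAGE_SIZE the listing
 * stops with "more..." and a subsequent read continues where the previous
 * one left off.
 *
 * Returns the number of bytes written to @buf.
 **/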
5204static ssize_t
5205lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
5206 char *buf)
5207{
5208 struct Scsi_Host *shost = class_to_shost(dev);
5209 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5210 struct lpfc_hba *phba = vport->phba;
5211 struct lpfc_vector_map_info *cpup;
5212 int len = 0;
5213
5214 if ((phba->sli_rev != LPFC_SLI_REV4) ||
5215 (phba->intr_type != MSIX))
5216 return len;
5217
5218 switch (phba->cfg_fcp_cpu_map) {
5219 case 0:
5220 len += scnprintf(buf + len, PAGE_SIZE-len,
5221 "fcp_cpu_map: No mapping (%d)\n",
5222 phba->cfg_fcp_cpu_map);
5223 return len;
5224 case 1:
5225 len += scnprintf(buf + len, PAGE_SIZE-len,
5226 "fcp_cpu_map: HBA centric mapping (%d): "
5227 "%d of %d CPUs online from %d possible CPUs\n",
5228 phba->cfg_fcp_cpu_map, num_online_cpus(),
5229 num_present_cpus(),
5230 phba->sli4_hba.num_possible_cpu);
5231 break;
5232 }
5233
5234 while (phba->sli4_hba.curr_disp_cpu <
5235 phba->sli4_hba.num_possible_cpu) {
5236 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
5237
5238 if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5239 len += scnprintf(buf + len, PAGE_SIZE - len,
5240 "CPU %02d not present\n",
5241 phba->sli4_hba.curr_disp_cpu);
5242 else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
5243 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5244 len += scnprintf(
5245 buf + len, PAGE_SIZE - len,
5246 "CPU %02d hdwq None "
5247 "physid %d coreid %d ht %d ua %d\n",
5248 phba->sli4_hba.curr_disp_cpu,
5249 cpup->phys_id, cpup->core_id,
5250 (cpup->flag & LPFC_CPU_MAP_HYPER),
5251 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5252 else
5253 len += scnprintf(
5254 buf + len, PAGE_SIZE - len,
5255 "CPU %02d EQ %04d hdwq %04d "
5256 "physid %d coreid %d ht %d ua %d\n",
5257 phba->sli4_hba.curr_disp_cpu,
5258 cpup->eq, cpup->hdwq, cpup->phys_id,
5259 cpup->core_id,
5260 (cpup->flag & LPFC_CPU_MAP_HYPER),
5261 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5262 } else {
5263 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5264 len += scnprintf(
5265 buf + len, PAGE_SIZE - len,
5266 "CPU %02d hdwq None "
5267 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5268 phba->sli4_hba.curr_disp_cpu,
5269 cpup->phys_id,
5270 cpup->core_id,
5271 (cpup->flag & LPFC_CPU_MAP_HYPER),
5272 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5273 cpup->irq);
5274 else
5275 len += scnprintf(
5276 buf + len, PAGE_SIZE - len,
5277 "CPU %02d EQ %04d hdwq %04d "
5278 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5279 phba->sli4_hba.curr_disp_cpu,
5280 cpup->eq, cpup->hdwq, cpup->phys_id,
5281 cpup->core_id,
5282 (cpup->flag & LPFC_CPU_MAP_HYPER),
5283 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5284 cpup->irq);
5285 }
5286
5287 phba->sli4_hba.curr_disp_cpu++;
5288
5289
5290 if (phba->sli4_hba.curr_disp_cpu <
5291 phba->sli4_hba.num_possible_cpu &&
5292 (len >= (PAGE_SIZE - 64))) {
5293 len += scnprintf(buf + len,
5294 PAGE_SIZE - len, "more...\n");
5295 break;
5296 }
5297 }
5298
5299 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5300 phba->sli4_hba.curr_disp_cpu = 0;
5301
5302 return len;
5303}
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
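/**
 * lpfc_fcp_cpu_map_store - Reject attempts to change the CPU mapping
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: unused.
 * @count: unused.
 *
 * The mapping is fixed at driver load time, so writes always fail.
 * Returns -EINVAL.
 **/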
5315static ssize_t
5316lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5317 const char *buf, size_t count)
5318{
5319 int status = -EINVAL;
5320 return status;
5321}
5322
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5334module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5335MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5336 "Defines how to map CPUs to IRQ vectors per HBA");
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
5349
5350
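/**
 * lpfc_fcp_cpu_map_init - Validate and set the initial fcp_cpu_map value
 * @phba: lpfc_hba pointer.
 * @val: requested mapping mode from the module parameter.
 *
 * Forces the value to 0 on non SLI-4 adapters; out-of-range values fall back
 * to LPFC_HBA_CPU_MAP with a log message.  Always returns 0.
 **/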
5351static int
5352lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5353{
5354 if (phba->sli_rev != LPFC_SLI_REV4) {
5355 phba->cfg_fcp_cpu_map = 0;
5356 return 0;
5357 }
5358
5359 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5360 phba->cfg_fcp_cpu_map = val;
5361 return 0;
5362 }
5363
5364 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5365 "3326 lpfc_fcp_cpu_map: %d out of range, using "
5366 "default\n", val);
5367 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5368
5369 return 0;
5370}
5371
5372static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
5373
5374
5375
5376
5377
5378LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5379 "Select Fibre Channel class of service for FCP sequences");
5380
5381
5382
5383
5384
5385LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
5386 "Use ADISC on rediscovery to authenticate FCP devices");
5387
5388
5389
5390
5391
5392
5393LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5394 "First burst size for Targets that support first burst");
5395
5396
5397
5398
5399
5400
5401
5402
5403
5404
5405LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
5406 "NVME Target mode first burst size in 512B increments.");
5407
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
5420 "Enable First Burst feature on I and T functions.");
5421
5422
5423
5424
5425
5426
5427
5428
5429
5430LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
5431 "Use command completion time to control queue depth");
5432
5433lpfc_vport_param_show(max_scsicmpl_time);
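/**
 * lpfc_max_scsicmpl_time_set - Apply a new SCSI completion-time threshold
 * @vport: lpfc vport being updated.
 * @val: new threshold in milliseconds (0 to 60000).
 *
 * Stores the new value and resets the command queue depth of every active,
 * in-use node back to the configured target queue depth.
 *
 * Returns 0 on success or -EINVAL if @val is out of range.
 **/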
5434static int
5435lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
5436{
5437 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5438 struct lpfc_nodelist *ndlp, *next_ndlp;
5439
5440 if (val == vport->cfg_max_scsicmpl_time)
5441 return 0;
5442 if ((val < 0) || (val > 60000))
5443 return -EINVAL;
5444 vport->cfg_max_scsicmpl_time = val;
5445
5446 spin_lock_irq(shost->host_lock);
5447 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5448 if (!NLP_CHK_NODE_ACT(ndlp))
5449 continue;
5450 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
5451 continue;
5452 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
5453 }
5454 spin_unlock_irq(shost->host_lock);
5455 return 0;
5456}
5457lpfc_vport_param_store(max_scsicmpl_time);
5458static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
5459
5460
5461
5462
5463
5464LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
5465
5466
5467
5468
5469
5470LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485
5486
5487
5488LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5489 LPFC_FCP_SCHED_BY_HDWQ,
5490 LPFC_FCP_SCHED_BY_CPU,
5491 "Determine scheduling algorithm for "
5492 "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5493
5494
5495
5496
5497
5498
5499
5500
5501LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5502 LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5503 "Determine algorithm NameServer queries after RSCN "
5504 "[0] - GID_FT, [1] - GID_PT");
5505
5506
5507
5508
5509
5510
5511
5512LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5513 "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5524 "interrupt response is generated");
5525
5526LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5527 "interrupt response is generated");
5528
5529
5530
5531
5532
5533
5534LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5535 "SLI rings to spread IOCB entries across");
5536
5537
5538
5539
5540
5541
5542LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5543 255, "Identifies RCTL for additional ring configuration");
5544
5545
5546
5547
5548
5549
5550LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5551 255, "Identifies TYPE for additional ring configuration");
5552
5553
5554
5555
5556
5557
5558
5559
5560LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5561
5562
5563
5564
5565
5566
5567
5568
5569
5570
5571
5572
5573LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5574
5575
5576
5577
5578
5579LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
5580 "during discovery");
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
5591
5592
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602
5603
5604LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
5605
5606
5607
5608
5609
5610LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
5611 "Milliseconds driver will wait between polling FCP ring");
5612
5613
5614
5615
5616
5617LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
5618 "Maximum time to wait for task management commands to complete");
5619
5620
5621
5622
5623
5624
5625
5626
5627LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
5628 "MSI-X (2), if possible");
5629
5630
5631
5632
5633
5634
5635
5636
5637
5638LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
5639 "Use OAS bit on NVME IOs");
5640
5641
5642
5643
5644
5645
5646
5647
5648
5649
5650LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5651 "Embed NVME Command in WQE");
5652
5653
5654
5655
5656
5657
5658
5659
5660
5661
5662LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5663 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5664 "Set the number of SCSI Queues advertised");
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680LPFC_ATTR_R(hdw_queue,
5681 LPFC_HBA_HDWQ_DEF,
5682 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5683 "Set the number of I/O Hardware Queues");
5684
5685
5686
5687
5688
5689
5690
5691
5692
5693
5694
5695
5696LPFC_ATTR_R(irq_chann,
5697 LPFC_HBA_HDWQ_DEF,
5698 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5699 "Set the number of I/O IRQ Channels");
5700
5701
5702
5703
5704
5705
5706
5707
5708LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
5709
5710
5711
5712
5713
5714
5715
5716LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
5717
5718
5719
5720
5721
5722
5723
5724LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
5725
5726
5727
5728
5729
5730
5731LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
5732
5733
5734
5735
5736
5737
5738
5739LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
5740
5741
5742
5743
5744
5745
5746
5747
5748
5749
5750
5751
5752
5753
5754
5755
5756LPFC_ATTR(prot_mask,
5757 (SHOST_DIF_TYPE1_PROTECTION |
5758 SHOST_DIX_TYPE0_PROTECTION |
5759 SHOST_DIX_TYPE1_PROTECTION),
5760 0,
5761 (SHOST_DIF_TYPE1_PROTECTION |
5762 SHOST_DIX_TYPE0_PROTECTION |
5763 SHOST_DIX_TYPE1_PROTECTION),
5764 "T10-DIF host protection capabilities mask");
5765
5766
5767
5768
5769
5770
5771
5772
5773
5774LPFC_ATTR(prot_guard,
5775 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
5776 "T10-DIF host protection guard type");
5777
5778
5779
5780
5781
5782
5783
5784
5785
5786
5787
5788
5789
5790
5791
5792LPFC_ATTR(delay_discovery, 0, 0, 1,
5793 "Delay NPort discovery when Clean Address bit is cleared.");
5794
5795
5796
5797
5798
5799
5800
5801
5802
5803
5804static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
5805module_param(lpfc_sg_seg_cnt, uint, 0444);
5806MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
5807
5808
5809
5810
5811
5812
5813
5814
5815
5816
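/**
 * lpfc_sg_seg_cnt_show - Display the configured scatter-gather limits
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: output buffer, at most PAGE_SIZE bytes.
 *
 * Reports the SGL DMA buffer size, the total number of SGEs, and the
 * configured, SCSI, and NVME segment counts.
 *
 * Returns the number of bytes written to @buf.
 **/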
5817static ssize_t
5818lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
5819 char *buf)
5820{
5821 struct Scsi_Host *shost = class_to_shost(dev);
5822 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5823 struct lpfc_hba *phba = vport->phba;
5824 int len;
5825
5826 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
5827 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
5828
5829 len += scnprintf(buf + len, PAGE_SIZE - len, "Cfg: %d SCSI: %d NVME: %d\n",
5830 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
5831 phba->cfg_nvme_seg_cnt);
5832 return len;
5833}
5834
5835static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
5836
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
5850
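/**
 * lpfc_sg_seg_cnt_init - Validate and set the initial sg_seg_cnt value
 * @phba: lpfc_hba pointer.
 * @val: requested segment count from the module parameter.
 *
 * Returns 0 if @val is within [LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT];
 * otherwise logs an error, falls back to LPFC_DEFAULT_SG_SEG_CNT, and
 * returns -EINVAL.
 **/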
5851static int
5852lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
5853{
5854 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
5855 phba->cfg_sg_seg_cnt = val;
5856 return 0;
5857 }
5858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5859 "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
5860 "be set to %d, allowed range is [%d, %d]\n",
5861 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
5862 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
5863 return -EINVAL;
5864}
5865
5866
5867
5868
5869
5870
5871
5872LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
5873
5874
5875
5876
5877
5878
5879
5880LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
5881
5882
5883
5884
5885
5886
5887
5888LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
5889
5890
5891
5892
5893
5894
5895
5896
5897LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
5898
5899
5900
5901
5902
5903
5904
5905LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
5906
5907
5908
5909
5910
5911
5912
5913LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
5914
5915struct device_attribute *lpfc_hba_attrs[] = {
5916 &dev_attr_nvme_info,
5917 &dev_attr_scsi_stat,
5918 &dev_attr_bg_info,
5919 &dev_attr_bg_guard_err,
5920 &dev_attr_bg_apptag_err,
5921 &dev_attr_bg_reftag_err,
5922 &dev_attr_info,
5923 &dev_attr_serialnum,
5924 &dev_attr_modeldesc,
5925 &dev_attr_modelname,
5926 &dev_attr_programtype,
5927 &dev_attr_portnum,
5928 &dev_attr_fwrev,
5929 &dev_attr_hdw,
5930 &dev_attr_option_rom_version,
5931 &dev_attr_link_state,
5932 &dev_attr_num_discovered_ports,
5933 &dev_attr_menlo_mgmt_mode,
5934 &dev_attr_lpfc_drvr_version,
5935 &dev_attr_lpfc_enable_fip,
5936 &dev_attr_lpfc_temp_sensor,
5937 &dev_attr_lpfc_log_verbose,
5938 &dev_attr_lpfc_lun_queue_depth,
5939 &dev_attr_lpfc_tgt_queue_depth,
5940 &dev_attr_lpfc_hba_queue_depth,
5941 &dev_attr_lpfc_peer_port_login,
5942 &dev_attr_lpfc_nodev_tmo,
5943 &dev_attr_lpfc_devloss_tmo,
5944 &dev_attr_lpfc_enable_fc4_type,
5945 &dev_attr_lpfc_fcp_class,
5946 &dev_attr_lpfc_use_adisc,
5947 &dev_attr_lpfc_first_burst_size,
5948 &dev_attr_lpfc_ack0,
5949 &dev_attr_lpfc_xri_rebalancing,
5950 &dev_attr_lpfc_topology,
5951 &dev_attr_lpfc_scan_down,
5952 &dev_attr_lpfc_link_speed,
5953 &dev_attr_lpfc_fcp_io_sched,
5954 &dev_attr_lpfc_ns_query,
5955 &dev_attr_lpfc_fcp2_no_tgt_reset,
5956 &dev_attr_lpfc_cr_delay,
5957 &dev_attr_lpfc_cr_count,
5958 &dev_attr_lpfc_multi_ring_support,
5959 &dev_attr_lpfc_multi_ring_rctl,
5960 &dev_attr_lpfc_multi_ring_type,
5961 &dev_attr_lpfc_fdmi_on,
5962 &dev_attr_lpfc_enable_SmartSAN,
5963 &dev_attr_lpfc_max_luns,
5964 &dev_attr_lpfc_enable_npiv,
5965 &dev_attr_lpfc_fcf_failover_policy,
5966 &dev_attr_lpfc_enable_rrq,
5967 &dev_attr_nport_evt_cnt,
5968 &dev_attr_board_mode,
5969 &dev_attr_max_vpi,
5970 &dev_attr_used_vpi,
5971 &dev_attr_max_rpi,
5972 &dev_attr_used_rpi,
5973 &dev_attr_max_xri,
5974 &dev_attr_used_xri,
5975 &dev_attr_npiv_info,
5976 &dev_attr_issue_reset,
5977 &dev_attr_lpfc_poll,
5978 &dev_attr_lpfc_poll_tmo,
5979 &dev_attr_lpfc_task_mgmt_tmo,
5980 &dev_attr_lpfc_use_msi,
5981 &dev_attr_lpfc_nvme_oas,
5982 &dev_attr_lpfc_nvme_embed_cmd,
5983 &dev_attr_lpfc_fcp_imax,
5984 &dev_attr_lpfc_cq_poll_threshold,
5985 &dev_attr_lpfc_cq_max_proc_limit,
5986 &dev_attr_lpfc_fcp_cpu_map,
5987 &dev_attr_lpfc_fcp_mq_threshold,
5988 &dev_attr_lpfc_hdw_queue,
5989 &dev_attr_lpfc_irq_chann,
5990 &dev_attr_lpfc_suppress_rsp,
5991 &dev_attr_lpfc_nvmet_mrq,
5992 &dev_attr_lpfc_nvmet_mrq_post,
5993 &dev_attr_lpfc_nvme_enable_fb,
5994 &dev_attr_lpfc_nvmet_fb_size,
5995 &dev_attr_lpfc_enable_bg,
5996 &dev_attr_lpfc_soft_wwnn,
5997 &dev_attr_lpfc_soft_wwpn,
5998 &dev_attr_lpfc_soft_wwn_enable,
5999 &dev_attr_lpfc_enable_hba_reset,
6000 &dev_attr_lpfc_enable_hba_heartbeat,
6001 &dev_attr_lpfc_EnableXLane,
6002 &dev_attr_lpfc_XLanePriority,
6003 &dev_attr_lpfc_xlane_lun,
6004 &dev_attr_lpfc_xlane_tgt,
6005 &dev_attr_lpfc_xlane_vpt,
6006 &dev_attr_lpfc_xlane_lun_state,
6007 &dev_attr_lpfc_xlane_lun_status,
6008 &dev_attr_lpfc_xlane_priority,
6009 &dev_attr_lpfc_sg_seg_cnt,
6010 &dev_attr_lpfc_max_scsicmpl_time,
6011 &dev_attr_lpfc_stat_data_ctrl,
6012 &dev_attr_lpfc_aer_support,
6013 &dev_attr_lpfc_aer_state_cleanup,
6014 &dev_attr_lpfc_sriov_nr_virtfn,
6015 &dev_attr_lpfc_req_fw_upgrade,
6016 &dev_attr_lpfc_suppress_link_up,
6017 &dev_attr_lpfc_iocb_cnt,
6018 &dev_attr_iocb_hw,
6019 &dev_attr_txq_hw,
6020 &dev_attr_txcmplq_hw,
6021 &dev_attr_lpfc_fips_level,
6022 &dev_attr_lpfc_fips_rev,
6023 &dev_attr_lpfc_dss,
6024 &dev_attr_lpfc_sriov_hw_max_virtfn,
6025 &dev_attr_protocol,
6026 &dev_attr_lpfc_xlane_supported,
6027 &dev_attr_lpfc_enable_mds_diags,
6028 &dev_attr_lpfc_ras_fwlog_buffsize,
6029 &dev_attr_lpfc_ras_fwlog_level,
6030 &dev_attr_lpfc_ras_fwlog_func,
6031 &dev_attr_lpfc_enable_bbcr,
6032 &dev_attr_lpfc_enable_dpp,
6033 NULL,
6034};
6035
6036struct device_attribute *lpfc_vport_attrs[] = {
6037 &dev_attr_info,
6038 &dev_attr_link_state,
6039 &dev_attr_num_discovered_ports,
6040 &dev_attr_lpfc_drvr_version,
6041 &dev_attr_lpfc_log_verbose,
6042 &dev_attr_lpfc_lun_queue_depth,
6043 &dev_attr_lpfc_tgt_queue_depth,
6044 &dev_attr_lpfc_nodev_tmo,
6045 &dev_attr_lpfc_devloss_tmo,
6046 &dev_attr_lpfc_hba_queue_depth,
6047 &dev_attr_lpfc_peer_port_login,
6048 &dev_attr_lpfc_restrict_login,
6049 &dev_attr_lpfc_fcp_class,
6050 &dev_attr_lpfc_use_adisc,
6051 &dev_attr_lpfc_first_burst_size,
6052 &dev_attr_lpfc_max_luns,
6053 &dev_attr_nport_evt_cnt,
6054 &dev_attr_npiv_info,
6055 &dev_attr_lpfc_enable_da_id,
6056 &dev_attr_lpfc_max_scsicmpl_time,
6057 &dev_attr_lpfc_stat_data_ctrl,
6058 &dev_attr_lpfc_static_vport,
6059 &dev_attr_lpfc_fips_level,
6060 &dev_attr_lpfc_fips_rev,
6061 NULL,
6062};
6063
6064
6065
6066
6067
6068
6069
6070
6071
6072
6073
6074
6075
6076
6077
6078
6079
6080
6081
6082
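/**
 * sysfs_ctlreg_write - Write 32-bit words to the HBA control register space
 * @filp: open sysfs file, not used.
 * @kobj: kernel object for the Scsi_host this attribute hangs off.
 * @bin_attr: binary attribute, not used.
 * @buf: LPFC_REG_WRITE_KEY ("EMLX") followed by the 32-bit words to write.
 * @off: byte offset into the FF register area; must be 4-byte aligned.
 * @count: total bytes in @buf including the key; must be 4-byte aligned.
 *
 * Writes are only allowed on SLI-3 adapters that are offline, and only when
 * the buffer starts with the register-write key.
 *
 * Returns @count on success, 0 if only the key was supplied, -EPERM on SLI-4
 * or online adapters, -ERANGE for out-of-range offsets, or -EINVAL for
 * misaligned or unkeyed requests.
 **/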
6083static ssize_t
6084sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
6085 struct bin_attribute *bin_attr,
6086 char *buf, loff_t off, size_t count)
6087{
6088 size_t buf_off;
6089 struct device *dev = container_of(kobj, struct device, kobj);
6090 struct Scsi_Host *shost = class_to_shost(dev);
6091 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6092 struct lpfc_hba *phba = vport->phba;
6093
6094 if (phba->sli_rev >= LPFC_SLI_REV4)
6095 return -EPERM;
6096
6097 if ((off + count) > FF_REG_AREA_SIZE)
6098 return -ERANGE;
6099
6100 if (count <= LPFC_REG_WRITE_KEY_SIZE)
6101 return 0;
6102
6103 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6104 return -EINVAL;
6105
6106
6107 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
6108 return -EINVAL;
6109
6110 if (!(vport->fc_flag & FC_OFFLINE_MODE))
6111 return -EPERM;
6112
6113 spin_lock_irq(&phba->hbalock);
6114 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
6115 buf_off += sizeof(uint32_t))
6116 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
6117 phba->ctrl_regs_memmap_p + off + buf_off);
6118
6119 spin_unlock_irq(&phba->hbalock);
6120
6121 return count;
6122}
6123
6124
6125
6126
6127
6128
6129
6130
6131
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
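/**
 * sysfs_ctlreg_read - Read 32-bit words from the HBA control register space
 * @filp: open sysfs file, not used.
 * @kobj: kernel object for the Scsi_host this attribute hangs off.
 * @bin_attr: binary attribute, not used.
 * @buf: destination buffer for the register contents.
 * @off: byte offset into the FF register area; must be 4-byte aligned.
 * @count: number of bytes to read; must be 4-byte aligned.
 *
 * Reads are truncated at the end of the FF register area and are only
 * supported on SLI-3 adapters.
 *
 * Returns the number of bytes copied into @buf, -EPERM on SLI-4 adapters,
 * -ERANGE if @off is past the register area, or -EINVAL for misaligned
 * requests.
 **/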
6142static ssize_t
6143sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
6144 struct bin_attribute *bin_attr,
6145 char *buf, loff_t off, size_t count)
6146{
6147 size_t buf_off;
6148 uint32_t *tmp_ptr;
6149 struct device *dev = container_of(kobj, struct device, kobj);
6150 struct Scsi_Host *shost = class_to_shost(dev);
6151 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6152 struct lpfc_hba *phba = vport->phba;
6153
6154 if (phba->sli_rev >= LPFC_SLI_REV4)
6155 return -EPERM;
6156
6157 if (off > FF_REG_AREA_SIZE)
6158 return -ERANGE;
6159
6160 if ((off + count) > FF_REG_AREA_SIZE)
6161 count = FF_REG_AREA_SIZE - off;
6162
6163 if (count == 0) return 0;
6164
6165 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6166 return -EINVAL;
6167
6168 spin_lock_irq(&phba->hbalock);
6169
6170 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
6171 tmp_ptr = (uint32_t *)(buf + buf_off);
6172 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
6173 }
6174
6175 spin_unlock_irq(&phba->hbalock);
6176
6177 return count;
6178}
6179
6180static struct bin_attribute sysfs_ctlreg_attr = {
6181 .attr = {
6182 .name = "ctlreg",
6183 .mode = S_IRUSR | S_IWUSR,
6184 },
6185 .size = 256,
6186 .read = sysfs_ctlreg_read,
6187 .write = sysfs_ctlreg_write,
6188};
6189
6190
6191
6192
6193
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203
6204
6205
6206static ssize_t
6207sysfs_mbox_write(struct file *filp, struct kobject *kobj,
6208 struct bin_attribute *bin_attr,
6209 char *buf, loff_t off, size_t count)
6210{
6211 return -EPERM;
6212}
6213
6214
6215
6216
6217
6218
6219
6220
6221
6222
6223
6224
6225
6226
6227
6228
6229
6230static ssize_t
6231sysfs_mbox_read(struct file *filp, struct kobject *kobj,
6232 struct bin_attribute *bin_attr,
6233 char *buf, loff_t off, size_t count)
6234{
6235 return -EPERM;
6236}
6237
6238static struct bin_attribute sysfs_mbox_attr = {
6239 .attr = {
6240 .name = "mbox",
6241 .mode = S_IRUSR | S_IWUSR,
6242 },
6243 .size = MAILBOX_SYSFS_MAX,
6244 .read = sysfs_mbox_read,
6245 .write = sysfs_mbox_write,
6246};
6247
6248
6249
6250
6251
6252
6253
6254
6255
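/**
 * lpfc_alloc_sysfs_attr - Create the driver's sysfs binary attributes
 * @vport: lpfc vport whose Scsi_host receives the attributes.
 *
 * Creates the drvr_stat_data attribute for every port; physical ports also
 * get the ctlreg and mbox attributes.  On failure, attributes that were
 * already created are removed again.
 *
 * Returns 0 on success or a sysfs error code on failure.
 **/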
6256int
6257lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
6258{
6259 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6260 int error;
6261
6262 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6263 &sysfs_drvr_stat_data_attr);
6264
6265
6266 if (error || vport->port_type == LPFC_NPIV_PORT)
6267 goto out;
6268
6269 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6270 &sysfs_ctlreg_attr);
6271 if (error)
6272 goto out_remove_stat_attr;
6273
6274 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6275 &sysfs_mbox_attr);
6276 if (error)
6277 goto out_remove_ctlreg_attr;
6278
6279 return 0;
6280out_remove_ctlreg_attr:
6281 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6282out_remove_stat_attr:
6283 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6284 &sysfs_drvr_stat_data_attr);
6285out:
6286 return error;
6287}
6288
6289
6290
6291
6292
6293void
6294lpfc_free_sysfs_attr(struct lpfc_vport *vport)
6295{
6296 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6297 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6298 &sysfs_drvr_stat_data_attr);
6299
6300 if (vport->port_type == LPFC_NPIV_PORT)
6301 return;
6302 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
6303 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6304}
6305
6306
6307
6308
6309
6310
6311
6312
6313
6314static void
6315lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
6316{
6317 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6318
6319 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
6320 sizeof(fc_host_symbolic_name(shost)));
6321}
6322
6323
6324
6325
6326
6327static void
6328lpfc_get_host_port_id(struct Scsi_Host *shost)
6329{
6330 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6331
6332
6333 fc_host_port_id(shost) = vport->fc_myDID;
6334}
6335
6336
6337
6338
6339
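/**
 * lpfc_get_host_port_type - Report the FC port type to the transport class
 * @shost: Scsi_host being queried.
 *
 * NPIV ports report FC_PORTTYPE_NPIV.  Otherwise the type is derived from
 * the link state, topology, and fabric flags (NL/L-port for loop, N-port
 * for fabric, point-to-point otherwise), or UNKNOWN when the link is down.
 **/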
6340static void
6341lpfc_get_host_port_type(struct Scsi_Host *shost)
6342{
6343 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6344 struct lpfc_hba *phba = vport->phba;
6345
6346 spin_lock_irq(shost->host_lock);
6347
6348 if (vport->port_type == LPFC_NPIV_PORT) {
6349 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
6350 } else if (lpfc_is_link_up(phba)) {
6351 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6352 if (vport->fc_flag & FC_PUBLIC_LOOP)
6353 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
6354 else
6355 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
6356 } else {
6357 if (vport->fc_flag & FC_FABRIC)
6358 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
6359 else
6360 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
6361 }
6362 } else
6363 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
6364
6365 spin_unlock_irq(shost->host_lock);
6366}
6367
6368
6369
6370
6371
6372static void
6373lpfc_get_host_port_state(struct Scsi_Host *shost)
6374{
6375 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6376 struct lpfc_hba *phba = vport->phba;
6377
6378 spin_lock_irq(shost->host_lock);
6379
6380 if (vport->fc_flag & FC_OFFLINE_MODE)
6381 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
6382 else {
6383 switch (phba->link_state) {
6384 case LPFC_LINK_UNKNOWN:
6385 case LPFC_LINK_DOWN:
6386 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
6387 break;
6388 case LPFC_LINK_UP:
6389 case LPFC_CLEAR_LA:
6390 case LPFC_HBA_READY:
6391
6392 if (vport->port_state < LPFC_VPORT_READY)
6393 fc_host_port_state(shost) =
6394 FC_PORTSTATE_BYPASSED;
6395 else
6396 fc_host_port_state(shost) =
6397 FC_PORTSTATE_ONLINE;
6398 break;
6399 case LPFC_HBA_ERROR:
6400 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
6401 break;
6402 default:
6403 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
6404 break;
6405 }
6406 }
6407
6408 spin_unlock_irq(shost->host_lock);
6409}
6410
6411
6412
6413
6414
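/**
 * lpfc_get_host_speed - Report the negotiated link speed to the transport class
 * @shost: Scsi_host being queried.
 *
 * Translates the adapter's fc_linkspeed into an FC_PORTSPEED_* value, using
 * the native FC table for FC links and the async-event table for FCoE
 * links; reports UNKNOWN when the link is down.
 **/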
6415static void
6416lpfc_get_host_speed(struct Scsi_Host *shost)
6417{
6418 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6419 struct lpfc_hba *phba = vport->phba;
6420
6421 spin_lock_irq(shost->host_lock);
6422
6423 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
6424 switch (phba->fc_linkspeed) {
6425 case LPFC_LINK_SPEED_1GHZ:
6426 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6427 break;
6428 case LPFC_LINK_SPEED_2GHZ:
6429 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
6430 break;
6431 case LPFC_LINK_SPEED_4GHZ:
6432 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
6433 break;
6434 case LPFC_LINK_SPEED_8GHZ:
6435 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
6436 break;
6437 case LPFC_LINK_SPEED_10GHZ:
6438 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6439 break;
6440 case LPFC_LINK_SPEED_16GHZ:
6441 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
6442 break;
6443 case LPFC_LINK_SPEED_32GHZ:
6444 fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
6445 break;
6446 case LPFC_LINK_SPEED_64GHZ:
6447 fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6448 break;
6449 case LPFC_LINK_SPEED_128GHZ:
6450 fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6451 break;
6452 default:
6453 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6454 break;
6455 }
6456 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
6457 switch (phba->fc_linkspeed) {
6458 case LPFC_ASYNC_LINK_SPEED_10GBPS:
6459 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6460 break;
6461 case LPFC_ASYNC_LINK_SPEED_25GBPS:
6462 fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
6463 break;
6464 case LPFC_ASYNC_LINK_SPEED_40GBPS:
6465 fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
6466 break;
6467 case LPFC_ASYNC_LINK_SPEED_100GBPS:
6468 fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
6469 break;
6470 default:
6471 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6472 break;
6473 }
6474 } else
6475 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6476
6477 spin_unlock_irq(shost->host_lock);
6478}
6479
6480
6481
6482
6483
6484static void
6485lpfc_get_host_fabric_name(struct Scsi_Host *shost)
6486{
6487 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6488 struct lpfc_hba *phba = vport->phba;
6489 u64 node_name;
6490
6491 spin_lock_irq(shost->host_lock);
6492
6493 if ((vport->port_state > LPFC_FLOGI) &&
6494 ((vport->fc_flag & FC_FABRIC) ||
6495 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
6496 (vport->fc_flag & FC_PUBLIC_LOOP))))
6497 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
6498 else
6499
6500 node_name = 0;
6501
6502 spin_unlock_irq(shost->host_lock);
6503
6504 fc_host_fabric_name(shost) = node_name;
6505}
6506
6507
6508
6509
6510
6511
6512
6513
6514
6515
6516
6517
6518
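/**
 * lpfc_get_stats - Return FC host statistics gathered from the adapter
 * @shost: Scsi_host being queried.
 *
 * Issues MBX_READ_STATUS and MBX_READ_LNK_STAT mailbox commands (polled when
 * the port is offline) and fills in the fc_host_statistics structure,
 * subtracting the offsets recorded at the last statistics reset.
 *
 * Returns a pointer to the HBA's link_stats, or NULL if the adapter state
 * does not allow mailbox commands or a mailbox command fails.
 **/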
6519static struct fc_host_statistics *
6520lpfc_get_stats(struct Scsi_Host *shost)
6521{
6522 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6523 struct lpfc_hba *phba = vport->phba;
6524 struct lpfc_sli *psli = &phba->sli;
6525 struct fc_host_statistics *hs = &phba->link_stats;
6526 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6527 LPFC_MBOXQ_t *pmboxq;
6528 MAILBOX_t *pmb;
6529 int rc = 0;
6530
6531
6532
6533
6534
6535 if (phba->link_state < LPFC_LINK_DOWN ||
6536 !phba->mbox_mem_pool ||
6537 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
6538 return NULL;
6539
6540 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6541 return NULL;
6542
6543 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6544 if (!pmboxq)
6545 return NULL;
6546 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6547
6548 pmb = &pmboxq->u.mb;
6549 pmb->mbxCommand = MBX_READ_STATUS;
6550 pmb->mbxOwner = OWN_HOST;
6551 pmboxq->ctx_buf = NULL;
6552 pmboxq->vport = vport;
6553
6554 if (vport->fc_flag & FC_OFFLINE_MODE)
6555 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6556 else
6557 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6558
6559 if (rc != MBX_SUCCESS) {
6560 if (rc != MBX_TIMEOUT)
6561 mempool_free(pmboxq, phba->mbox_mem_pool);
6562 return NULL;
6563 }
6564
6565 memset(hs, 0, sizeof (struct fc_host_statistics));
6566
6567 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
6568
6569
6570
6571
6572 hs->tx_words = (uint64_t)
6573 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
6574 * (uint64_t)256);
6575 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
6576 hs->rx_words = (uint64_t)
6577 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
6578 * (uint64_t)256);
6579
6580 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6581 pmb->mbxCommand = MBX_READ_LNK_STAT;
6582 pmb->mbxOwner = OWN_HOST;
6583 pmboxq->ctx_buf = NULL;
6584 pmboxq->vport = vport;
6585
6586 if (vport->fc_flag & FC_OFFLINE_MODE)
6587 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6588 else
6589 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6590
6591 if (rc != MBX_SUCCESS) {
6592 if (rc != MBX_TIMEOUT)
6593 mempool_free(pmboxq, phba->mbox_mem_pool);
6594 return NULL;
6595 }
6596
6597 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6598 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6599 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6600 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6601 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6602 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6603 hs->error_frames = pmb->un.varRdLnk.crcCnt;
6604
6605 hs->link_failure_count -= lso->link_failure_count;
6606 hs->loss_of_sync_count -= lso->loss_of_sync_count;
6607 hs->loss_of_signal_count -= lso->loss_of_signal_count;
6608 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
6609 hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
6610 hs->invalid_crc_count -= lso->invalid_crc_count;
6611 hs->error_frames -= lso->error_frames;
6612
6613 if (phba->hba_flag & HBA_FCOE_MODE) {
6614 hs->lip_count = -1;
6615 hs->nos_count = (phba->link_events >> 1);
6616 hs->nos_count -= lso->link_events;
6617 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6618 hs->lip_count = (phba->fc_eventTag >> 1);
6619 hs->lip_count -= lso->link_events;
6620 hs->nos_count = -1;
6621 } else {
6622 hs->lip_count = -1;
6623 hs->nos_count = (phba->fc_eventTag >> 1);
6624 hs->nos_count -= lso->link_events;
6625 }
6626
6627 hs->dumped_frames = -1;
6628
6629 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
6630
6631 mempool_free(pmboxq, phba->mbox_mem_pool);
6632
6633 return hs;
6634}
6635
6636
6637
6638
6639
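/**
 * lpfc_reset_stats - Record current counters as the new statistics baseline
 * @shost: Scsi_host whose statistics are being reset.
 *
 * Re-reads the status and link statistics from the adapter and stores them
 * in lnk_stat_offsets so that subsequent lpfc_get_stats() calls report
 * values relative to this point in time.
 **/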
6640static void
6641lpfc_reset_stats(struct Scsi_Host *shost)
6642{
6643 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6644 struct lpfc_hba *phba = vport->phba;
6645 struct lpfc_sli *psli = &phba->sli;
6646 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6647 LPFC_MBOXQ_t *pmboxq;
6648 MAILBOX_t *pmb;
6649 int rc = 0;
6650
6651 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6652 return;
6653
6654 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6655 if (!pmboxq)
6656 return;
6657 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6658
6659 pmb = &pmboxq->u.mb;
6660 pmb->mbxCommand = MBX_READ_STATUS;
6661 pmb->mbxOwner = OWN_HOST;
6662 pmb->un.varWords[0] = 0x1;
6663 pmboxq->ctx_buf = NULL;
6664 pmboxq->vport = vport;
6665
6666 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6667 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
6668 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6669 else
6670 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6671
6672 if (rc != MBX_SUCCESS) {
6673 if (rc != MBX_TIMEOUT)
6674 mempool_free(pmboxq, phba->mbox_mem_pool);
6675 return;
6676 }
6677
6678 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6679 pmb->mbxCommand = MBX_READ_LNK_STAT;
6680 pmb->mbxOwner = OWN_HOST;
6681 pmboxq->ctx_buf = NULL;
6682 pmboxq->vport = vport;
6683
6684 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6685 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
6686 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6687 else
6688 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6689
6690 if (rc != MBX_SUCCESS) {
6691 if (rc != MBX_TIMEOUT)
6692 mempool_free(pmboxq, phba->mbox_mem_pool);
6693 return;
6694 }
6695
6696 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6697 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6698 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6699 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6700 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6701 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6702 lso->error_frames = pmb->un.varRdLnk.crcCnt;
6703 if (phba->hba_flag & HBA_FCOE_MODE)
6704 lso->link_events = (phba->link_events >> 1);
6705 else
6706 lso->link_events = (phba->fc_eventTag >> 1);
6707
6708 psli->stats_start = ktime_get_seconds();
6709
6710 mempool_free(pmboxq, phba->mbox_mem_pool);
6711
6712 return;
6713}
6714
6715
6716
6717
6718
6719
6720
6721
6722
6723
6724
6725
6726
6727
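/**
 * lpfc_get_node_by_target - Find the mapped node for a SCSI target
 * @starget: SCSI target being looked up.
 *
 * Searches the vport's fc_nodes list for an active node in the MAPPED state
 * whose SCSI id matches the target id.
 *
 * Returns the matching lpfc_nodelist, or NULL if none is found.
 **/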
6728static struct lpfc_nodelist *
6729lpfc_get_node_by_target(struct scsi_target *starget)
6730{
6731 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
6732 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6733 struct lpfc_nodelist *ndlp;
6734
6735 spin_lock_irq(shost->host_lock);
6736
6737 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6738 if (NLP_CHK_NODE_ACT(ndlp) &&
6739 ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
6740 starget->id == ndlp->nlp_sid) {
6741 spin_unlock_irq(shost->host_lock);
6742 return ndlp;
6743 }
6744 }
6745 spin_unlock_irq(shost->host_lock);
6746 return NULL;
6747}
6748
6749
6750
6751
6752
6753static void
6754lpfc_get_starget_port_id(struct scsi_target *starget)
6755{
6756 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
6757
6758 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
6759}
6760
6761
6762
6763
6764
6765
6766
6767static void
6768lpfc_get_starget_node_name(struct scsi_target *starget)
6769{
6770 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
6771
6772 fc_starget_node_name(starget) =
6773 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
6774}
6775
6776
6777
6778
6779
6780
6781
6782static void
6783lpfc_get_starget_port_name(struct scsi_target *starget)
6784{
6785 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
6786
6787 fc_starget_port_name(starget) =
6788 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
6789}
6790
6791
6792
6793
6794
6795
6796
6797
6798
6799
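/**
 * lpfc_set_rport_loss_tmo - Set the remote port's dev_loss_tmo value
 * @rport: FC remote port being updated.
 * @timeout: new timeout in seconds; a value of 0 is clamped to 1.
 **/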
6800static void
6801lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
6802{
6803 if (timeout)
6804 rport->dev_loss_tmo = timeout;
6805 else
6806 rport->dev_loss_tmo = 1;
6807}
6808
6809
6810
6811
6812
6813
6814
6815
6816
6817
6818
6819
6820
6821#define lpfc_rport_show_function(field, format_string, sz, cast) \
6822static ssize_t \
6823lpfc_show_rport_##field (struct device *dev, \
6824 struct device_attribute *attr, \
6825 char *buf) \
6826{ \
6827 struct fc_rport *rport = transport_class_to_rport(dev); \
6828 struct lpfc_rport_data *rdata = rport->hostdata; \
6829 return scnprintf(buf, sz, format_string, \
6830 (rdata->target) ? cast rdata->target->field : 0); \
6831}
6832
6833#define lpfc_rport_rd_attr(field, format_string, sz) \
6834 lpfc_rport_show_function(field, format_string, sz, ) \
6835static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
6836
6837
6838
6839
6840
6841
6842
6843
6844
6845
6846static void
6847lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
6848{
6849 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
6850
6851 if (vport->port_state == LPFC_VPORT_READY)
6852 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
6853}
6854
6855
6856
6857
6858
6859
6860
6861
6862
6863
6864static void
6865lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
6866{
6867 phba->cfg_log_verbose = verbose;
6868}
6869
6870struct fc_function_template lpfc_transport_functions = {
6871
6872 .show_host_node_name = 1,
6873 .show_host_port_name = 1,
6874 .show_host_supported_classes = 1,
6875 .show_host_supported_fc4s = 1,
6876 .show_host_supported_speeds = 1,
6877 .show_host_maxframe_size = 1,
6878
6879 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
6880 .show_host_symbolic_name = 1,
6881
6882
6883 .get_host_port_id = lpfc_get_host_port_id,
6884 .show_host_port_id = 1,
6885
6886 .get_host_port_type = lpfc_get_host_port_type,
6887 .show_host_port_type = 1,
6888
6889 .get_host_port_state = lpfc_get_host_port_state,
6890 .show_host_port_state = 1,
6891
6892
6893 .show_host_active_fc4s = 1,
6894
6895 .get_host_speed = lpfc_get_host_speed,
6896 .show_host_speed = 1,
6897
6898 .get_host_fabric_name = lpfc_get_host_fabric_name,
6899 .show_host_fabric_name = 1,
6900
6901
6902
6903
6904
6905
6906 .get_fc_host_stats = lpfc_get_stats,
6907 .reset_fc_host_stats = lpfc_reset_stats,
6908
6909 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
6910 .show_rport_maxframe_size = 1,
6911 .show_rport_supported_classes = 1,
6912
6913 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
6914 .show_rport_dev_loss_tmo = 1,
6915
6916 .get_starget_port_id = lpfc_get_starget_port_id,
6917 .show_starget_port_id = 1,
6918
6919 .get_starget_node_name = lpfc_get_starget_node_name,
6920 .show_starget_node_name = 1,
6921
6922 .get_starget_port_name = lpfc_get_starget_port_name,
6923 .show_starget_port_name = 1,
6924
6925 .issue_fc_host_lip = lpfc_issue_lip,
6926 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
6927 .terminate_rport_io = lpfc_terminate_rport_io,
6928
6929 .dd_fcvport_size = sizeof(struct lpfc_vport *),
6930
6931 .vport_disable = lpfc_vport_disable,
6932
6933 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
6934
6935 .bsg_request = lpfc_bsg_request,
6936 .bsg_timeout = lpfc_bsg_timeout,
6937};
6938
6939struct fc_function_template lpfc_vport_transport_functions = {
6940
6941 .show_host_node_name = 1,
6942 .show_host_port_name = 1,
6943 .show_host_supported_classes = 1,
6944 .show_host_supported_fc4s = 1,
6945 .show_host_supported_speeds = 1,
6946 .show_host_maxframe_size = 1,
6947
6948 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
6949 .show_host_symbolic_name = 1,
6950
6951
6952 .get_host_port_id = lpfc_get_host_port_id,
6953 .show_host_port_id = 1,
6954
6955 .get_host_port_type = lpfc_get_host_port_type,
6956 .show_host_port_type = 1,
6957
6958 .get_host_port_state = lpfc_get_host_port_state,
6959 .show_host_port_state = 1,
6960
6961
6962 .show_host_active_fc4s = 1,
6963
6964 .get_host_speed = lpfc_get_host_speed,
6965 .show_host_speed = 1,
6966
6967 .get_host_fabric_name = lpfc_get_host_fabric_name,
6968 .show_host_fabric_name = 1,
6969
6970
6971
6972
6973
6974
6975 .get_fc_host_stats = lpfc_get_stats,
6976 .reset_fc_host_stats = lpfc_reset_stats,
6977
6978 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
6979 .show_rport_maxframe_size = 1,
6980 .show_rport_supported_classes = 1,
6981
6982 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
6983 .show_rport_dev_loss_tmo = 1,
6984
6985 .get_starget_port_id = lpfc_get_starget_port_id,
6986 .show_starget_port_id = 1,
6987
6988 .get_starget_node_name = lpfc_get_starget_node_name,
6989 .show_starget_node_name = 1,
6990
6991 .get_starget_port_name = lpfc_get_starget_port_name,
6992 .show_starget_port_name = 1,
6993
6994 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
6995 .terminate_rport_io = lpfc_terminate_rport_io,
6996
6997 .vport_disable = lpfc_vport_disable,
6998
6999 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7000};
7001
7002
7003
7004
7005
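/**
 * lpfc_get_cfgparam - Initialize HBA configuration from module parameters
 * @phba: lpfc_hba being initialized.
 *
 * Runs each HBA-scoped *_init() helper against its module parameter and then
 * applies SLI-rev specific overrides: non SLI-4 adapters have NVME (the FC4
 * type is forced to FCP only), BBCR, XRI rebalancing, and ExpressLane
 * disabled.  Hardware queue and IRQ channel counts left at 0 default to the
 * number of present CPUs, and the IRQ channel count is capped at the
 * hardware queue count.
 **/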
7006void
7007lpfc_get_cfgparam(struct lpfc_hba *phba)
7008{
7009 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7010 lpfc_ns_query_init(phba, lpfc_ns_query);
7011 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
7012 lpfc_cr_delay_init(phba, lpfc_cr_delay);
7013 lpfc_cr_count_init(phba, lpfc_cr_count);
7014 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
7015 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
7016 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
7017 lpfc_ack0_init(phba, lpfc_ack0);
7018 lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
7019 lpfc_topology_init(phba, lpfc_topology);
7020 lpfc_link_speed_init(phba, lpfc_link_speed);
7021 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
7022 lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
7023 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
7024 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
7025 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
7026 lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
7027 lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
7028 lpfc_use_msi_init(phba, lpfc_use_msi);
7029 lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
7030 lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
7031 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7032 lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7033 lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
7034 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
7035 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
7036 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
7037
7038 lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
7039 if (phba->sli_rev != LPFC_SLI_REV4)
7040 phba->cfg_EnableXLane = 0;
7041 lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
7042
7043 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
7044 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
7045 phba->cfg_oas_lun_state = 0;
7046 phba->cfg_oas_lun_status = 0;
7047 phba->cfg_oas_flags = 0;
7048 phba->cfg_oas_priority = 0;
7049 lpfc_enable_bg_init(phba, lpfc_enable_bg);
7050 lpfc_prot_mask_init(phba, lpfc_prot_mask);
7051 lpfc_prot_guard_init(phba, lpfc_prot_guard);
7052 if (phba->sli_rev == LPFC_SLI_REV4)
7053 phba->cfg_poll = 0;
7054 else
7055 phba->cfg_poll = lpfc_poll;
7056
7057 if (phba->cfg_enable_bg)
7058 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7059
7060 lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
7061
7062 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
7063 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
7064 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
7065
7066
7067 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7068 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7069 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7070 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7071 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7072 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
7073 lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
7074
7075 if (phba->sli_rev != LPFC_SLI_REV4) {
7076
7077 phba->nvmet_support = 0;
7078 phba->cfg_nvmet_mrq = 0;
7079 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
7080 phba->cfg_enable_bbcr = 0;
7081 phba->cfg_xri_rebalancing = 0;
7082 } else {
7083
7084 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
7085 phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
7086 }
7087
7088 phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
7089
7090 phba->cfg_enable_pbde = 0;
7091
7092
7093 if (phba->cfg_hdw_queue == 0)
7094 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7095 if (phba->cfg_irq_chann == 0)
7096 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7097 if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
7098 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7099
7100 phba->cfg_soft_wwnn = 0L;
7101 phba->cfg_soft_wwpn = 0L;
7102 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
7103 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
7104 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
7105 lpfc_aer_support_init(phba, lpfc_aer_support);
7106 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
7107 lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
7108 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
7109 lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
7110 lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
7111 lpfc_sli_mode_init(phba, lpfc_sli_mode);
7112 phba->cfg_enable_dss = 1;
7113 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7114 lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7115 lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7116 lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7117
7118
7119
7120
7121
7122
7123 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7124 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
7125 phba->cfg_iocb_cnt = 5;
7126 }
7127
7128 return;
7129}
7130
7131
7132
7133
7134
7135
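/**
 * lpfc_nvme_mod_param_dep - Resolve NVME-related parameter dependencies
 * @phba: lpfc_hba whose configuration is being finalized.
 *
 * Clamps the hardware queue and IRQ channel counts to the number of present
 * CPUs.  When NVME target mode is enabled, FCP is turned off, the first
 * burst size is bounded by LPFC_NVMET_FB_SZ_MAX, and the number of MRQs is
 * sized against (and capped at) the IRQ channel count; otherwise the NVME
 * target settings are cleared.
 **/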
7136void
7137lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
7138{
7139 if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
7140 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7141 if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu)
7142 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7143 if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
7144 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7145
7146 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
7147 phba->nvmet_support) {
7148 phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
7149
7150 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
7151 "6013 %s x%x fb_size x%x, fb_max x%x\n",
7152 "NVME Target PRLI ACC enable_fb ",
7153 phba->cfg_nvme_enable_fb,
7154 phba->cfg_nvmet_fb_size,
7155 LPFC_NVMET_FB_SZ_MAX);
7156
7157 if (phba->cfg_nvme_enable_fb == 0)
7158 phba->cfg_nvmet_fb_size = 0;
7159 else {
7160 if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
7161 phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
7162 }
7163
7164 if (!phba->cfg_nvmet_mrq)
7165 phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
7166
7167
7168 if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
7169 phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
7170 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
7171 "6018 Adjust lpfc_nvmet_mrq to %d\n",
7172 phba->cfg_nvmet_mrq);
7173 }
7174 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7175 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7176
7177 } else {
7178
7179 phba->nvmet_support = 0;
7180 phba->cfg_nvmet_mrq = 0;
7181 phba->cfg_nvmet_fb_size = 0;
7182 }
7183}
7184
7185
7186
7187
7188
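/**
 * lpfc_get_vport_cfgparam - Initialize vport configuration from module parameters
 * @vport: lpfc vport being initialized.
 *
 * Runs each vport-scoped *_init() helper against its module parameter.
 **/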
7189void
7190lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
7191{
7192 lpfc_log_verbose_init(vport, lpfc_log_verbose);
7193 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
7194 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
7195 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
7196 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
7197 lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
7198 lpfc_restrict_login_init(vport, lpfc_restrict_login);
7199 lpfc_fcp_class_init(vport, lpfc_fcp_class);
7200 lpfc_use_adisc_init(vport, lpfc_use_adisc);
7201 lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
7202 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
7203 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
7204 lpfc_max_luns_init(vport, lpfc_max_luns);
7205 lpfc_scan_down_init(vport, lpfc_scan_down);
7206 lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
7207 return;
7208}
7209