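/*
 * Emulex LightPulse Fibre Channel (lpfc) host bus adapter driver:
 * HBA initialization, heartbeat management, and error-attention handling.
 */
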
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;

static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
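
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked prior to issuing the CONFIG_PORT mailbox command.
 * It issues READ_NVPARM and READ_REV mailbox commands to collect the adapter
 * revision and the Vital Product Data (VPD).
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/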
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/*
	 * If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
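
/**
 * lpfc_config_async_cmpl - mbox completion handler for config async event
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the asynchronous-event-enable mailbox command.
 * A successful completion indicates the adapter supports temperature
 * sensor events.
 **/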
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
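
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the dump memory mailbox command used to retrieve
 * the adapter Option ROM version; decodes the version word into
 * phba->OptionROMVersion.
 **/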
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
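
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Reconciles the vport's WWNN/WWPN with the service parameters read from
 * the adapter, honoring soft WWN settings and fabric-assigned WWPNs.
 **/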
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If a soft name exists, update the service params with it */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
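
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked after the CONFIG_PORT mailbox command completes successfully.
 * Reads the service parameters and adapter configuration, sets up the
 * driver timers, enables host interrupts, and optionally initializes the
 * link.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/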
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
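
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * Issues the INIT_LINK mailbox command using the configured topology.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/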
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
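
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * Validates the configured link speed against the adapter's capabilities
 * and issues the INIT_LINK mailbox command to bring up the link.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/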
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
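
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * Issues the DOWN_LINK mailbox command to bring the FC link down.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/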
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
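
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * Disables host interrupts (SLI-3) and cleans up discovery resources on
 * all vports before the HBA is reset.
 *
 * Return codes
 *   0 - success.
 **/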
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
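
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free iocbs
 * @phba: pointer to lpfc HBA data structure.
 *
 * Frees all the driver's unprocessed slow-path completion-queue events:
 * releases response iocbs and puts receive buffers back to their pool.
 **/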
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
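
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * Frees the receive buffers that were preposted to the HBA, either from
 * the HBQs or from the ELS ring's postbufq.
 **/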
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
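
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * Removes all pending iocbs from the txcmplq of every ring and cancels
 * them with IOERR_SLI_ABORTED; nothing on the txcmplq can complete once
 * the HBA has been reset.
 **/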
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/*
			 * At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
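
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * SLI-3 variant of the post-reset cleanup: frees preposted buffers and
 * cleans the txcmplq.
 *
 * Return codes
 *   0 - success.
 **/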
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
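
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * SLI-4 variant of the post-reset cleanup: reclaims aborted ELS sgls and
 * aborted SCSI/NVMe IO buffers, and frees pending slow-path events.
 *
 * Return codes
 *   number of IO buffers reclaimed from the abort lists.
 **/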
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
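
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * Routes the call to the SLI-3 or SLI-4 specific handler via the
 * lpfc_hba_down_post function pointer.
 **/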
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
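
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * Posts a WORKER_HB_TMO event to the worker thread (if not already posted)
 * and wakes it up to process the heartbeat timeout.
 **/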
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
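
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * Marks the RRQ active flag and wakes the worker thread, unless the
 * driver is unloading.
 **/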
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
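
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Clears the heartbeat-in-progress flags, frees the mailbox, and rearms
 * the heartbeat timer if the HBA is still online.
 **/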
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}
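
/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the delayed work structure.
 *
 * Samples per-CPU idle time and switches each IO completion queue between
 * LPFC_QUEUE_WORK and LPFC_IRQ_POLL mode based on how busy the CPU has
 * been over the last sampling period.
 **/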
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
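/**
 * lpfc_hb_eq_delay_work - Auto EQ delay management
 * @work: pointer to the delayed work structure.
 *
 * Periodically recomputes the interrupt coalescing delay for each EQ from
 * its recent interrupt count and applies the new delay to the hardware.
 **/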
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
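
/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each hardware queue, adjusts the private-pool XRI count and the
 * high watermark.
 **/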
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}
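
/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If no heartbeat mailbox command is outstanding, allocates and issues one
 * and sets HBA_HBEAT_INP.
 *
 * Return codes
 *   0 - success or a heartbeat is already in progress.
 *   -ENOMEM / -ENXIO - allocation or issue failure.
 **/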
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}
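
/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to fire a heartbeat
 * @phba: pointer to lpfc hba data structure.
 *
 * When the heartbeat mailbox is not automatically enabled, sets
 * HBA_HBEAT_TMO so the next heartbeat timeout forces a MBX_HEARTBEAT.
 **/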
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}
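
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Called by the worker thread when the heartbeat timer fires. Frees idle
 * ELS buffers, checks for recent IO completions, and issues or tracks the
 * heartbeat mailbox command, rearming the timer with the proper interval.
 **/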
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
				jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0459 Adapter heartbeat still outstanding: "
				"last compl time was %d ms.\n",
				jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
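
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the SLI-3 HBA offline and resets the board after an error
 * attention, leaving the port in LPFC_HBA_ERROR state.
 **/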
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
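
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the SLI-4 HBA offline after an error attention: flushes the IO
 * rings and performs post-down cleanup.
 **/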
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
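
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread to handle a HBA deferred error attention:
 * aborts outstanding FCP IO, takes the port offline, and waits for the
 * ER1 bit to clear before the port can be restarted.
 **/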
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
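
/**
 * lpfc_board_errevt_to_mgmt - Send board error event to management application
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts an FC_REG_BOARD_EVENT vendor event to notify user space of a port
 * internal error.
 **/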
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}
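
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread to handle a HBA host attention error
 * attention: attempts link re-establishment on HS_FFER6/HS_FFER8, handles
 * critical temperature events, and otherwise takes the port offline.
 **/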
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
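
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * Performs a PCI function reset for port recovery: takes the port offline,
 * restarts the board, re-enables interrupts, and brings the port online.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/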
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, thus we should clear LPFC_SLI_ACTIVE flag and
	 * complete any outstanding mailbox command.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
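
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread to handle a HBA host attention error
 * attention on an SLI-4 port: decodes the port status and error registers,
 * attempts port recovery where possible, and reports a dump event to user
 * space when recovery fails.
 **/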
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli4_offline_eratt(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3152 Unrecoverable error\n");
		phba->link_state = LPFC_HBA_ERROR;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
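
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * Routes the call to the SLI-3 or SLI-4 specific error attention handler
 * via the lpfc_handle_eratt function pointer.
 **/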
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
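
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread to handle a HBA host attention link
 * event: issues a READ_TOPOLOGY mailbox command and clears the link
 * attention bit, or marks the link down on failure.
 **/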
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
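
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * Walks the VPD descriptors and extracts the serial number, model name,
 * model description, program type, and port information.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL.
 *   1 - success.
 **/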
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] = vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] = vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						if ((phba->sli_rev == LPFC_SLI_REV4) &&
						    (phba->sli4_hba.pport_name_sta ==
						     LPFC_SLI4_PPNAME_GET)) {
							j++;
							index++;
						} else
							phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					if ((phba->sli_rev != LPFC_SLI_REV4) ||
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_NON))
						phba->Port[j] = 0;
					continue;
				}
				else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}
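
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * Derives the HBA model name and description from the PCI device ID.
 * If the passed-in strings are already populated, they are left untouched.
 **/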
2382static void
2383lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2384{
2385 lpfc_vpd_t *vp;
2386 uint16_t dev_id = phba->pcidev->device;
2387 int max_speed;
2388 int GE = 0;
2389 int oneConnect = 0;
2390 struct {
2391 char *name;
2392 char *bus;
2393 char *function;
2394 } m = {"<Unknown>", "", ""};
2395
2396 if (mdp && mdp[0] != '\0'
2397 && descp && descp[0] != '\0')
2398 return;
2399
2400 if (phba->lmt & LMT_64Gb)
2401 max_speed = 64;
2402 else if (phba->lmt & LMT_32Gb)
2403 max_speed = 32;
2404 else if (phba->lmt & LMT_16Gb)
2405 max_speed = 16;
2406 else if (phba->lmt & LMT_10Gb)
2407 max_speed = 10;
2408 else if (phba->lmt & LMT_8Gb)
2409 max_speed = 8;
2410 else if (phba->lmt & LMT_4Gb)
2411 max_speed = 4;
2412 else if (phba->lmt & LMT_2Gb)
2413 max_speed = 2;
2414 else if (phba->lmt & LMT_1Gb)
2415 max_speed = 1;
2416 else
2417 max_speed = 0;
2418
2419 vp = &phba->vpd;
2420
2421 switch (dev_id) {
2422 case PCI_DEVICE_ID_FIREFLY:
2423 m = (typeof(m)){"LP6000", "PCI",
2424 "Obsolete, Unsupported Fibre Channel Adapter"};
2425 break;
2426 case PCI_DEVICE_ID_SUPERFLY:
2427 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2428 m = (typeof(m)){"LP7000", "PCI", ""};
2429 else
2430 m = (typeof(m)){"LP7000E", "PCI", ""};
2431 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2432 break;
2433 case PCI_DEVICE_ID_DRAGONFLY:
2434 m = (typeof(m)){"LP8000", "PCI",
2435 "Obsolete, Unsupported Fibre Channel Adapter"};
2436 break;
2437 case PCI_DEVICE_ID_CENTAUR:
2438 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2439 m = (typeof(m)){"LP9002", "PCI", ""};
2440 else
2441 m = (typeof(m)){"LP9000", "PCI", ""};
2442 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2443 break;
2444 case PCI_DEVICE_ID_RFLY:
2445 m = (typeof(m)){"LP952", "PCI",
2446 "Obsolete, Unsupported Fibre Channel Adapter"};
2447 break;
2448 case PCI_DEVICE_ID_PEGASUS:
2449 m = (typeof(m)){"LP9802", "PCI-X",
2450 "Obsolete, Unsupported Fibre Channel Adapter"};
2451 break;
2452 case PCI_DEVICE_ID_THOR:
2453 m = (typeof(m)){"LP10000", "PCI-X",
2454 "Obsolete, Unsupported Fibre Channel Adapter"};
2455 break;
2456 case PCI_DEVICE_ID_VIPER:
2457 m = (typeof(m)){"LPX1000", "PCI-X",
2458 "Obsolete, Unsupported Fibre Channel Adapter"};
2459 break;
2460 case PCI_DEVICE_ID_PFLY:
2461 m = (typeof(m)){"LP982", "PCI-X",
2462 "Obsolete, Unsupported Fibre Channel Adapter"};
2463 break;
2464 case PCI_DEVICE_ID_TFLY:
2465 m = (typeof(m)){"LP1050", "PCI-X",
2466 "Obsolete, Unsupported Fibre Channel Adapter"};
2467 break;
2468 case PCI_DEVICE_ID_HELIOS:
2469 m = (typeof(m)){"LP11000", "PCI-X2",
2470 "Obsolete, Unsupported Fibre Channel Adapter"};
2471 break;
2472 case PCI_DEVICE_ID_HELIOS_SCSP:
2473 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2474 "Obsolete, Unsupported Fibre Channel Adapter"};
2475 break;
2476 case PCI_DEVICE_ID_HELIOS_DCSP:
2477 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2478 "Obsolete, Unsupported Fibre Channel Adapter"};
2479 break;
2480 case PCI_DEVICE_ID_NEPTUNE:
2481 m = (typeof(m)){"LPe1000", "PCIe",
2482 "Obsolete, Unsupported Fibre Channel Adapter"};
2483 break;
2484 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2485 m = (typeof(m)){"LPe1000-SP", "PCIe",
2486 "Obsolete, Unsupported Fibre Channel Adapter"};
2487 break;
2488 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2489 m = (typeof(m)){"LPe1002-SP", "PCIe",
2490 "Obsolete, Unsupported Fibre Channel Adapter"};
2491 break;
2492 case PCI_DEVICE_ID_BMID:
2493 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2494 break;
2495 case PCI_DEVICE_ID_BSMB:
2496 m = (typeof(m)){"LP111", "PCI-X2",
2497 "Obsolete, Unsupported Fibre Channel Adapter"};
2498 break;
	case PCI_DEVICE_ID_ZEPHYR:
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
2505 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2506 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2507 GE = 1;
2508 break;
2509 case PCI_DEVICE_ID_ZMID:
2510 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2511 break;
2512 case PCI_DEVICE_ID_ZSMB:
2513 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2514 break;
2515 case PCI_DEVICE_ID_LP101:
2516 m = (typeof(m)){"LP101", "PCI-X",
2517 "Obsolete, Unsupported Fibre Channel Adapter"};
2518 break;
2519 case PCI_DEVICE_ID_LP10000S:
2520 m = (typeof(m)){"LP10000-S", "PCI",
2521 "Obsolete, Unsupported Fibre Channel Adapter"};
2522 break;
2523 case PCI_DEVICE_ID_LP11000S:
2524 m = (typeof(m)){"LP11000-S", "PCI-X2",
2525 "Obsolete, Unsupported Fibre Channel Adapter"};
2526 break;
2527 case PCI_DEVICE_ID_LPE11000S:
2528 m = (typeof(m)){"LPe11000-S", "PCIe",
2529 "Obsolete, Unsupported Fibre Channel Adapter"};
2530 break;
2531 case PCI_DEVICE_ID_SAT:
2532 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2533 break;
2534 case PCI_DEVICE_ID_SAT_MID:
2535 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2536 break;
2537 case PCI_DEVICE_ID_SAT_SMB:
2538 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2539 break;
2540 case PCI_DEVICE_ID_SAT_DCSP:
2541 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2542 break;
2543 case PCI_DEVICE_ID_SAT_SCSP:
2544 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2545 break;
2546 case PCI_DEVICE_ID_SAT_S:
2547 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2548 break;
2549 case PCI_DEVICE_ID_HORNET:
2550 m = (typeof(m)){"LP21000", "PCIe",
2551 "Obsolete, Unsupported FCoE Adapter"};
2552 GE = 1;
2553 break;
	case PCI_DEVICE_ID_PROTEUS_VF:
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
2562 case PCI_DEVICE_ID_PROTEUS_S:
2563 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2564 "Obsolete, Unsupported Fibre Channel Adapter"};
2565 break;
2566 case PCI_DEVICE_ID_TIGERSHARK:
2567 oneConnect = 1;
2568 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2569 break;
2570 case PCI_DEVICE_ID_TOMCAT:
2571 oneConnect = 1;
2572 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2573 break;
2574 case PCI_DEVICE_ID_FALCON:
2575 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2576 "EmulexSecure Fibre"};
2577 break;
2578 case PCI_DEVICE_ID_BALIUS:
2579 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2580 "Obsolete, Unsupported Fibre Channel Adapter"};
2581 break;
2582 case PCI_DEVICE_ID_LANCER_FC:
2583 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2584 break;
2585 case PCI_DEVICE_ID_LANCER_FC_VF:
2586 m = (typeof(m)){"LPe16000", "PCIe",
2587 "Obsolete, Unsupported Fibre Channel Adapter"};
2588 break;
2589 case PCI_DEVICE_ID_LANCER_FCOE:
2590 oneConnect = 1;
2591 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2592 break;
2593 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2594 oneConnect = 1;
2595 m = (typeof(m)){"OCe15100", "PCIe",
2596 "Obsolete, Unsupported FCoE"};
2597 break;
2598 case PCI_DEVICE_ID_LANCER_G6_FC:
2599 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2600 break;
2601 case PCI_DEVICE_ID_LANCER_G7_FC:
2602 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2603 break;
2604 case PCI_DEVICE_ID_LANCER_G7P_FC:
2605 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2606 break;
2607 case PCI_DEVICE_ID_SKYHAWK:
2608 case PCI_DEVICE_ID_SKYHAWK_VF:
2609 oneConnect = 1;
2610 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2611 break;
2612 default:
2613 m = (typeof(m)){"Unknown", "", ""};
2614 break;
2615 }
2616
2617 if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);

	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
2623 if (descp && descp[0] == '\0') {
2624 if (oneConnect)
2625 snprintf(descp, 255,
2626 "Emulex OneConnect %s, %s Initiator %s",
2627 m.name, m.function,
2628 phba->Port);
2629 else if (max_speed == 0)
2630 snprintf(descp, 255,
2631 "Emulex %s %s %s",
2632 m.name, m.bus, m.function);
2633 else
2634 snprintf(descp, 255,
2635 "Emulex %s %d%s %s %s",
2636 m.name, max_speed, (GE) ? "GE" : "Gb",
2637 m.bus, m.function);
2638 }
2639}
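
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/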
2653int
2654lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2655{
2656 IOCB_t *icmd;
2657 struct lpfc_iocbq *iocb;
2658 struct lpfc_dmabuf *mp1, *mp2;
2659
2660 cnt += pring->missbufcnt;

	/* While there are buffers to post */
2663 while (cnt > 0) {
		/* Allocate buffer for command iocb */
2665 iocb = lpfc_sli_get_iocbq(phba);
2666 if (iocb == NULL) {
2667 pring->missbufcnt = cnt;
2668 return cnt;
2669 }
2670 icmd = &iocb->iocb;
2671

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2675 if (mp1)
2676 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2677 if (!mp1 || !mp1->virt) {
2678 kfree(mp1);
2679 lpfc_sli_release_iocbq(phba, iocb);
2680 pring->missbufcnt = cnt;
2681 return cnt;
2682 }
2683
2684 INIT_LIST_HEAD(&mp1->list);
2685
2686 if (cnt > 1) {
			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2688 if (mp2)
2689 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2690 &mp2->phys);
2691 if (!mp2 || !mp2->virt) {
2692 kfree(mp2);
2693 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2694 kfree(mp1);
2695 lpfc_sli_release_iocbq(phba, iocb);
2696 pring->missbufcnt = cnt;
2697 return cnt;
2698 }
2699
2700 INIT_LIST_HEAD(&mp2->list);
2701 } else {
2702 mp2 = NULL;
2703 }
2704
2705 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2706 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2707 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2708 icmd->ulpBdeCount = 1;
2709 cnt--;
2710 if (mp2) {
2711 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2712 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2713 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2714 cnt--;
2715 icmd->ulpBdeCount = 2;
2716 }
2717
2718 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2719 icmd->ulpLe = 1;
2720
2721 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2722 IOCB_ERROR) {
2723 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2724 kfree(mp1);
2725 cnt++;
2726 if (mp2) {
2727 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2728 kfree(mp2);
2729 cnt++;
2730 }
2731 lpfc_sli_release_iocbq(phba, iocb);
2732 pring->missbufcnt = cnt;
2733 return cnt;
2734 }
2735 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2736 if (mp2)
2737 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2738 }
2739 pring->missbufcnt = 0;
2740 return 0;
2741}
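
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * number of initial IOCB buffers is specified by LPFC_BUF_RING0; the FCP
 * ring needs no receive buffers. SLI3 only.
 *
 * Return codes
 *   0 - success (currently always success)
 **/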
2754static int
2755lpfc_post_rcv_buf(struct lpfc_hba *phba)
2756{
2757 struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
2760 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

2763 return 0;
2764}
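
/* Rotate a 32-bit value left by N bits (the SHA-1 circular left shift) */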
2766#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
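
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/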
2775static void
lpfc_sha_init(uint32_t *HashResultPointer)
2777{
2778 HashResultPointer[0] = 0x67452301;
2779 HashResultPointer[1] = 0xEFCDAB89;
2780 HashResultPointer[2] = 0x98BADCFE;
2781 HashResultPointer[3] = 0x10325476;
2782 HashResultPointer[4] = 0xC3D2E1F0;
2783}
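
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to an working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are putting back to the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
 **/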
2795static void
lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2797{
2798 int t;
2799 uint32_t TEMP;
2800 uint32_t A, B, C, D, E;
2801 t = 16;
2802 do {
		HashWorkingPointer[t] =
			S(1,
			  HashWorkingPointer[t - 3] ^
			  HashWorkingPointer[t - 8] ^
			  HashWorkingPointer[t - 14] ^
			  HashWorkingPointer[t - 16]);
2808 } while (++t <= 79);
2809 t = 0;
2810 A = HashResultPointer[0];
2811 B = HashResultPointer[1];
2812 C = HashResultPointer[2];
2813 D = HashResultPointer[3];
2814 E = HashResultPointer[4];
2815
2816 do {
2817 if (t < 20) {
2818 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2819 } else if (t < 40) {
2820 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2821 } else if (t < 60) {
2822 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2823 } else {
2824 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2825 }
2826 TEMP += S(5, A) + E + HashWorkingPointer[t];
2827 E = D;
2828 D = C;
2829 C = S(30, B);
2830 B = A;
2831 A = TEMP;
2832 } while (++t <= 79);
2833
2834 HashResultPointer[0] += A;
2835 HashResultPointer[1] += B;
2836 HashResultPointer[2] += C;
2837 HashResultPointer[3] += D;
2838 HashResultPointer[4] += E;
2839
2840}
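
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/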
2852static void
lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2854{
2855 *HashWorking = (*RandomChallenge ^ *HashWorking);
2856}
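
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/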
2865void
2866lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2867{
2868 int t;
2869 uint32_t *HashWorking;
2870 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2871
2872 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2873 if (!HashWorking)
2874 return;
2875
2876 HashWorking[0] = HashWorking[78] = *pwwnn++;
2877 HashWorking[1] = HashWorking[79] = *pwwnn;
2878
2879 for (t = 0; t < 7; t++)
2880 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2881
2882 lpfc_sha_init(hbainit);
2883 lpfc_sha_iterate(hbainit, HashWorking);
2884 kfree(HashWorking);
2885}
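
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * any ndlps which is found in the mode of being deleted should not be put
 * back onto the state machine. It's all purged.
 **/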
2896void
2897lpfc_cleanup(struct lpfc_vport *vport)
2898{
2899 struct lpfc_hba *phba = vport->phba;
2900 struct lpfc_nodelist *ndlp, *next_ndlp;
2901 int i = 0;
2902
2903 if (phba->link_state > LPFC_LINK_DOWN)
2904 lpfc_port_link_failure(vport);
2905
2906
2907 if (lpfc_is_vmid_enabled(phba))
2908 lpfc_vmid_vport_cleanup(vport);
2909
2910 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2911 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2912 ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
2914 lpfc_nlp_put(ndlp);
2915 continue;
2916 }
2917
2918 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2919 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2920 lpfc_nlp_put(ndlp);
2921 continue;
2922 }

		/* Fabric Ports not in UNMAPPED state are cleaned up in the
		 * DEVICE_RM event.
		 */
2927 if (ndlp->nlp_type & NLP_FABRIC &&
2928 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
2929 lpfc_disc_state_machine(vport, ndlp, NULL,
2930 NLP_EVT_DEVICE_RECOVERY);
2931
2932 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2933 lpfc_disc_state_machine(vport, ndlp, NULL,
2934 NLP_EVT_DEVICE_RM);
2935 }

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
2941 while (!list_empty(&vport->fc_nodes)) {
2942 if (i++ > 3000) {
2943 lpfc_printf_vlog(vport, KERN_ERR,
2944 LOG_TRACE_EVENT,
2945 "0233 Nodelist not empty\n");
2946 list_for_each_entry_safe(ndlp, next_ndlp,
2947 &vport->fc_nodes, nlp_listp) {
2948 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2949 LOG_TRACE_EVENT,
2950 "0282 did:x%x ndlp:x%px "
2951 "refcnt:%d xflags x%x nflag x%x\n",
2952 ndlp->nlp_DID, (void *)ndlp,
2953 kref_read(&ndlp->kref),
2954 ndlp->fc4_xpt_flags,
2955 ndlp->nlp_flag);
2956 }
2957 break;
2958 }
2959
2960
2961 msleep(10);
2962 }
2963 lpfc_cleanup_vports_rrqs(vport, NULL);
2964}
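
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/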
2974void
2975lpfc_stop_vport_timers(struct lpfc_vport *vport)
2976{
2977 del_timer_sync(&vport->els_tmofunc);
2978 del_timer_sync(&vport->delayed_disc_tmo);
2979 lpfc_can_disctmo(vport);
2980 return;
2981}
2982
2990void
2991__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2992{
	/* Clear pending FCF rediscovery wait flag */
2994 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
2997 del_timer(&phba->fcf.redisc_wait);
2998}
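
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/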
3009void
3010lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3011{
3012 spin_lock_irq(&phba->hbalock);
3013 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
3015 spin_unlock_irq(&phba->hbalock);
3016 return;
3017 }
3018 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
3020 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3021 spin_unlock_irq(&phba->hbalock);
3022}
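
/**
 * lpfc_cmf_stop - Stop CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link goes down or if CMF mode is turned OFF.
 * It cancels the CMF interval timer, zeroes all the per-cpu congestion
 * statistic counters, and resumes any blocked requests.
 **/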
3032void
3033lpfc_cmf_stop(struct lpfc_hba *phba)
3034{
3035 int cpu;
3036 struct lpfc_cgn_stat *cgs;

	/* We only do something if CMF is enabled */
3039 if (!phba->sli4_hba.pc_sli4_params.cmf)
3040 return;
3041
3042 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3043 "6221 Stop CMF / Cancel Timer\n");

	/* Cancel the CMF timer */
3046 hrtimer_cancel(&phba->cmf_timer);

	/* Zero CMF counters */
3049 atomic_set(&phba->cmf_busy, 0);
3050 for_each_present_cpu(cpu) {
3051 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3052 atomic64_set(&cgs->total_bytes, 0);
3053 atomic64_set(&cgs->rcv_bytes, 0);
3054 atomic_set(&cgs->rx_io_cnt, 0);
3055 atomic64_set(&cgs->rx_latency, 0);
3056 }
3057 atomic_set(&phba->cmf_bw_wait, 0);
3058
3059
3060 queue_work(phba->wq, &phba->unblock_request_work);
3061}
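
/* Convert the port's link speed to a maximum line rate in bytes per
 * second (the divide by 10 treats each byte as 10 bits on the wire).
 */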
3063static inline uint64_t
3064lpfc_get_max_line_rate(struct lpfc_hba *phba)
3065{
3066 uint64_t rate = lpfc_sli_port_speed_get(phba);
3067
	return (rate * 1024 * 1024) / 10;
3069}
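
/**
 * lpfc_cmf_signal_init - Initialize CMF signaling with the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine recalculates the CMF interval byte budget from the current
 * link speed and issues a CMF_SYNC_WQE so the port and the driver agree
 * on the signaling parameters.
 **/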
3071void
3072lpfc_cmf_signal_init(struct lpfc_hba *phba)
3073{
3074 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3075 "6223 Signal CMF init\n");
3076
3077
3078 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3079 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3080 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3081 phba->cmf_interval_rate, 1000);
3082 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3083
3084
3085 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3086}
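
/**
 * lpfc_cmf_start - Start CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when CMF processing is to be (re)started. It reinitializes
 * the congestion buffer, zeroes the per-cpu CMF counters, and starts the
 * CMF interval timer.
 **/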
3095void
3096lpfc_cmf_start(struct lpfc_hba *phba)
3097{
3098 struct lpfc_cgn_stat *cgs;
3099 int cpu;

	/* We only do something if CMF is enabled */
3102 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3103 phba->cmf_active_mode == LPFC_CFG_OFF)
3104 return;

	/* Reinitialize congestion buffer info */
3107 lpfc_init_congestion_buf(phba);
3108
3109 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3110 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3111 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3112 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3113
3114 atomic_set(&phba->cmf_busy, 0);
3115 for_each_present_cpu(cpu) {
3116 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3117 atomic64_set(&cgs->total_bytes, 0);
3118 atomic64_set(&cgs->rcv_bytes, 0);
3119 atomic_set(&cgs->rx_io_cnt, 0);
3120 atomic64_set(&cgs->rx_latency, 0);
3121 }
3122 phba->cmf_latency.tv_sec = 0;
3123 phba->cmf_latency.tv_nsec = 0;
3124
3125 lpfc_cmf_signal_init(phba);
3126
3127 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3128 "6222 Start CMF / Timer\n");
3129
3130 phba->cmf_timer_cnt = 0;
3131 hrtimer_start(&phba->cmf_timer,
3132 ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3133 HRTIMER_MODE_REL);
3134
3135 ktime_get_real_ts64(&phba->cmf_latency);
3136
3137 atomic_set(&phba->cmf_bw_wait, 0);
3138 atomic_set(&phba->cmf_stop_io, 0);
3139}
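
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/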
3148void
3149lpfc_stop_hba_timers(struct lpfc_hba *phba)
3150{
3151 if (phba->pport)
3152 lpfc_stop_vport_timers(phba->pport);
3153 cancel_delayed_work_sync(&phba->eq_delay_work);
3154 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3155 del_timer_sync(&phba->sli.mbox_tmo);
3156 del_timer_sync(&phba->fabric_block_timer);
3157 del_timer_sync(&phba->eratt_poll);
3158 del_timer_sync(&phba->hb_tmofunc);
3159 if (phba->sli_rev == LPFC_SLI_REV4) {
3160 del_timer_sync(&phba->rrq_tmr);
3161 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3162 }
3163 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3164
3165 switch (phba->pci_dev_grp) {
3166 case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
3168 del_timer_sync(&phba->fcp_poll_timer);
3169 break;
3170 case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
3172 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3173 break;
3174 default:
3175 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3176 "0297 Invalid device group (x%x)\n",
3177 phba->pci_dev_grp);
3178 break;
3179 }
3180 return;
3181}
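
/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox no wait action.
 *
 * This routine marks a HBA's management interface as blocked. Once the
 * HBA's management interface is marked as blocked, all the user space
 * access to the HBA, whether they are from sysfs interface or libdfc
 * interface will all be blocked. The HBA is set to block the management
 * interface when the driver prepares the HBA interface for online or
 * offline.
 **/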
3194static void
3195lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3196{
3197 unsigned long iflag;
3198 uint8_t actcmd = MBX_HEARTBEAT;
3199 unsigned long timeout;
3200
3201 spin_lock_irqsave(&phba->hbalock, iflag);
3202 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3203 spin_unlock_irqrestore(&phba->hbalock, iflag);
3204 if (mbx_action == LPFC_MBX_NO_WAIT)
3205 return;
3206 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3207 spin_lock_irqsave(&phba->hbalock, iflag);
3208 if (phba->sli.mbox_active) {
3209 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
3213 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3214 phba->sli.mbox_active) * 1000) + jiffies;
3215 }
3216 spin_unlock_irqrestore(&phba->hbalock, iflag);
3217
3218
3219 while (phba->sli.mbox_active) {
3220
3221 msleep(2);
3222 if (time_after(jiffies, timeout)) {
3223 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3224 "2813 Mgmt IO is Blocked %x "
3225 "- mbox cmd %x still active\n",
3226 phba->sli.sli_flag, actcmd);
3227 break;
3228 }
3229 }
3230}
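
/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fixup the temporary rpi assignments.
 **/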
3240void
3241lpfc_sli4_node_prep(struct lpfc_hba *phba)
3242{
3243 struct lpfc_nodelist *ndlp, *next_ndlp;
3244 struct lpfc_vport **vports;
3245 int i, rpi;
3246
3247 if (phba->sli_rev != LPFC_SLI_REV4)
3248 return;
3249
3250 vports = lpfc_create_vport_work_array(phba);
3251 if (vports == NULL)
3252 return;
3253
3254 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3255 if (vports[i]->load_flag & FC_UNLOADING)
3256 continue;
3257
3258 list_for_each_entry_safe(ndlp, next_ndlp,
3259 &vports[i]->fc_nodes,
3260 nlp_listp) {
3261 rpi = lpfc_sli4_alloc_rpi(phba);
3262 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3263
3264 continue;
3265 }
3266 ndlp->nlp_rpi = rpi;
3267 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3268 LOG_NODE | LOG_DISCOVERY,
3269 "0009 Assign RPI x%x to ndlp x%px "
3270 "DID:x%06x flg:x%x\n",
3271 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3272 ndlp->nlp_flag);
3273 }
3274 }
3275 lpfc_destroy_vport_work_array(phba, vports);
3276}
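
/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 * to expedite pool. Mark them as expedite.
 **/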
3285static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3286{
3287 struct lpfc_sli4_hdw_queue *qp;
3288 struct lpfc_io_buf *lpfc_ncmd;
3289 struct lpfc_io_buf *lpfc_ncmd_next;
3290 struct lpfc_epd_pool *epd_pool;
3291 unsigned long iflag;
3292
3293 epd_pool = &phba->epd_pool;
3294 qp = &phba->sli4_hba.hdwq[0];
3295
3296 spin_lock_init(&epd_pool->lock);
3297 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3298 spin_lock(&epd_pool->lock);
3299 INIT_LIST_HEAD(&epd_pool->list);
3300 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3301 &qp->lpfc_io_buf_list_put, list) {
3302 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3303 lpfc_ncmd->expedite = true;
3304 qp->put_io_bufs--;
3305 epd_pool->count++;
3306 if (epd_pool->count >= XRI_BATCH)
3307 break;
3308 }
3309 spin_unlock(&epd_pool->lock);
3310 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3311}
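
/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
 * of HWQ 0. Clear the mark.
 **/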
3320static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3321{
3322 struct lpfc_sli4_hdw_queue *qp;
3323 struct lpfc_io_buf *lpfc_ncmd;
3324 struct lpfc_io_buf *lpfc_ncmd_next;
3325 struct lpfc_epd_pool *epd_pool;
3326 unsigned long iflag;
3327
3328 epd_pool = &phba->epd_pool;
3329 qp = &phba->sli4_hba.hdwq[0];
3330
3331 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3332 spin_lock(&epd_pool->lock);
3333 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3334 &epd_pool->list, list) {
3335 list_move_tail(&lpfc_ncmd->list,
3336 &qp->lpfc_io_buf_list_put);
		lpfc_ncmd->expedite = false;
3338 qp->put_io_bufs++;
3339 epd_pool->count--;
3340 }
3341 spin_unlock(&epd_pool->lock);
3342 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3343}
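
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes public and private XRI pools per HWQ and then
 * moves XRIs from lpfc_io_buf_list_put to the public pool. High and low
 * watermarks for the private pool are also initialized.
 **/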
3353void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3354{
3355 u32 i, j;
3356 u32 hwq_count;
3357 u32 count_per_hwq;
3358 struct lpfc_io_buf *lpfc_ncmd;
3359 struct lpfc_io_buf *lpfc_ncmd_next;
3360 unsigned long iflag;
3361 struct lpfc_sli4_hdw_queue *qp;
3362 struct lpfc_multixri_pool *multixri_pool;
3363 struct lpfc_pbl_pool *pbl_pool;
3364 struct lpfc_pvt_pool *pvt_pool;
3365
3366 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3367 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3368 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3369 phba->sli4_hba.io_xri_cnt);
3370
3371 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3372 lpfc_create_expedite_pool(phba);
3373
3374 hwq_count = phba->cfg_hdw_queue;
3375 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3376
3377 for (i = 0; i < hwq_count; i++) {
3378 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3379
3380 if (!multixri_pool) {
3381 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3382 "1238 Failed to allocate memory for "
3383 "multixri_pool\n");
3384
3385 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3386 lpfc_destroy_expedite_pool(phba);
3387
3388 j = 0;
3389 while (j < i) {
3390 qp = &phba->sli4_hba.hdwq[j];
3391 kfree(qp->p_multixri_pool);
3392 j++;
3393 }
3394 phba->cfg_xri_rebalancing = 0;
3395 return;
3396 }
3397
3398 qp = &phba->sli4_hba.hdwq[i];
3399 qp->p_multixri_pool = multixri_pool;
3400
3401 multixri_pool->xri_limit = count_per_hwq;
3402 multixri_pool->rrb_next_hwqid = i;
3403
3404
3405 pbl_pool = &multixri_pool->pbl_pool;
3406 spin_lock_init(&pbl_pool->lock);
3407 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3408 spin_lock(&pbl_pool->lock);
3409 INIT_LIST_HEAD(&pbl_pool->list);
3410 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3411 &qp->lpfc_io_buf_list_put, list) {
3412 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3413 qp->put_io_bufs--;
3414 pbl_pool->count++;
3415 }
3416 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3417 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3418 pbl_pool->count, i);
3419 spin_unlock(&pbl_pool->lock);
3420 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3421
3422
3423 pvt_pool = &multixri_pool->pvt_pool;
3424 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3425 pvt_pool->low_watermark = XRI_BATCH;
3426 spin_lock_init(&pvt_pool->lock);
3427 spin_lock_irqsave(&pvt_pool->lock, iflag);
3428 INIT_LIST_HEAD(&pvt_pool->list);
3429 pvt_pool->count = 0;
3430 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3431 }
3432}
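
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the public and private pools back to
 * lpfc_io_buf_list_put and frees the per HWQ pool structures.
 **/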
3440static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3441{
3442 u32 i;
3443 u32 hwq_count;
3444 struct lpfc_io_buf *lpfc_ncmd;
3445 struct lpfc_io_buf *lpfc_ncmd_next;
3446 unsigned long iflag;
3447 struct lpfc_sli4_hdw_queue *qp;
3448 struct lpfc_multixri_pool *multixri_pool;
3449 struct lpfc_pbl_pool *pbl_pool;
3450 struct lpfc_pvt_pool *pvt_pool;
3451
3452 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3453 lpfc_destroy_expedite_pool(phba);
3454
3455 if (!(phba->pport->load_flag & FC_UNLOADING))
3456 lpfc_sli_flush_io_rings(phba);
3457
3458 hwq_count = phba->cfg_hdw_queue;
3459
3460 for (i = 0; i < hwq_count; i++) {
3461 qp = &phba->sli4_hba.hdwq[i];
3462 multixri_pool = qp->p_multixri_pool;
3463 if (!multixri_pool)
3464 continue;
3465
3466 qp->p_multixri_pool = NULL;
3467
3468 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3469
3470
3471 pbl_pool = &multixri_pool->pbl_pool;
3472 spin_lock(&pbl_pool->lock);
3473
3474 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3475 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3476 pbl_pool->count, i);
3477
3478 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3479 &pbl_pool->list, list) {
3480 list_move_tail(&lpfc_ncmd->list,
3481 &qp->lpfc_io_buf_list_put);
3482 qp->put_io_bufs++;
3483 pbl_pool->count--;
3484 }
3485
3486 INIT_LIST_HEAD(&pbl_pool->list);
3487 pbl_pool->count = 0;
3488
3489 spin_unlock(&pbl_pool->lock);
3490
3491
3492 pvt_pool = &multixri_pool->pvt_pool;
3493 spin_lock(&pvt_pool->lock);
3494
3495 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3496 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3497 pvt_pool->count, i);
3498
3499 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3500 &pvt_pool->list, list) {
3501 list_move_tail(&lpfc_ncmd->list,
3502 &qp->lpfc_io_buf_list_put);
3503 qp->put_io_bufs++;
3504 pvt_pool->count--;
3505 }
3506
3507 INIT_LIST_HEAD(&pvt_pool->list);
3508 pvt_pool->count = 0;
3509
3510 spin_unlock(&pvt_pool->lock);
3511 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3512
3513 kfree(multixri_pool);
3514 }
3515}
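
/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the HBA initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/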
3529int
3530lpfc_online(struct lpfc_hba *phba)
3531{
3532 struct lpfc_vport *vport;
3533 struct lpfc_vport **vports;
3534 int i, error = 0;
3535 bool vpis_cleared = false;
3536
3537 if (!phba)
3538 return 0;
3539 vport = phba->pport;
3540
3541 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3542 return 0;
3543
3544 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3545 "0458 Bring Adapter online\n");
3546
3547 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3548
3549 if (phba->sli_rev == LPFC_SLI_REV4) {
3550 if (lpfc_sli4_hba_setup(phba)) {
3551 lpfc_unblock_mgmt_io(phba);
3552 return 1;
3553 }
3554 spin_lock_irq(&phba->hbalock);
3555 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3556 vpis_cleared = true;
3557 spin_unlock_irq(&phba->hbalock);

		/* Reestablish the local initiator port.
		 * The offline process destroyed the previous lport.
		 */
3562 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3563 !phba->nvmet_support) {
3564 error = lpfc_nvme_create_localport(phba->pport);
3565 if (error)
3566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3567 "6132 NVME restore reg failed "
3568 "on nvmei error x%x\n", error);
3569 }
3570 } else {
3571 lpfc_sli_queue_init(phba);
3572 if (lpfc_sli_hba_setup(phba)) {
3573 lpfc_unblock_mgmt_io(phba);
3574 return 1;
3575 }
3576 }
3577
3578 vports = lpfc_create_vport_work_array(phba);
3579 if (vports != NULL) {
3580 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3581 struct Scsi_Host *shost;
3582 shost = lpfc_shost_from_vport(vports[i]);
3583 spin_lock_irq(shost->host_lock);
3584 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3585 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3586 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3587 if (phba->sli_rev == LPFC_SLI_REV4) {
3588 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3589 if ((vpis_cleared) &&
3590 (vports[i]->port_type !=
3591 LPFC_PHYSICAL_PORT))
3592 vports[i]->vpi = 0;
3593 }
3594 spin_unlock_irq(shost->host_lock);
3595 }
3596 }
3597 lpfc_destroy_vport_work_array(phba, vports);
3598
3599 if (phba->cfg_xri_rebalancing)
3600 lpfc_create_multixri_pools(phba);
3601
3602 lpfc_cpuhp_add(phba);
3603
3604 lpfc_unblock_mgmt_io(phba);
3605 return 0;
3606}
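
/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all the user space
 * access to the HBA, whether they are from sysfs interface or libdfc
 * interface will be allowed. The HBA is set to block the management
 * interface when the driver prepares the HBA interface for online or
 * offline and then set to unblock the management interface afterwards.
 **/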
3619void
lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3621{
3622 unsigned long iflag;
3623
3624 spin_lock_irqsave(&phba->hbalock, iflag);
3625 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3626 spin_unlock_irqrestore(&phba->hbalock, iflag);
3627}
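
/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It performs
 * unregistration login to all the nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
 **/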
3638void
3639lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3640{
3641 struct lpfc_vport *vport = phba->pport;
3642 struct lpfc_nodelist *ndlp, *next_ndlp;
3643 struct lpfc_vport **vports;
3644 struct Scsi_Host *shost;
3645 int i;
3646
3647 if (vport->fc_flag & FC_OFFLINE_MODE)
3648 return;
3649
3650 lpfc_block_mgmt_io(phba, mbx_action);
3651
3652 lpfc_linkdown(phba);
3653
3654
3655 vports = lpfc_create_vport_work_array(phba);
3656 if (vports != NULL) {
3657 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3658 if (vports[i]->load_flag & FC_UNLOADING)
3659 continue;
3660 shost = lpfc_shost_from_vport(vports[i]);
3661 spin_lock_irq(shost->host_lock);
3662 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3663 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3664 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3665 spin_unlock_irq(shost->host_lock);
3666
3667 shost = lpfc_shost_from_vport(vports[i]);
3668 list_for_each_entry_safe(ndlp, next_ndlp,
3669 &vports[i]->fc_nodes,
3670 nlp_listp) {
3671
3672 spin_lock_irq(&ndlp->lock);
3673 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3674 spin_unlock_irq(&ndlp->lock);
3675
3676 lpfc_unreg_rpi(vports[i], ndlp);

				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
3682 if (phba->sli_rev == LPFC_SLI_REV4) {
3683 lpfc_printf_vlog(vports[i], KERN_INFO,
3684 LOG_NODE | LOG_DISCOVERY,
3685 "0011 Free RPI x%x on "
3686 "ndlp: x%px did x%x\n",
3687 ndlp->nlp_rpi, ndlp,
3688 ndlp->nlp_DID);
3689 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3690 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3691 }
3692
3693 if (ndlp->nlp_type & NLP_FABRIC) {
3694 lpfc_disc_state_machine(vports[i], ndlp,
3695 NULL, NLP_EVT_DEVICE_RECOVERY);

					/* Remove the node only when both
					 * transports have finished
					 * unregistering it.
					 */
3702 if (!(ndlp->fc4_xpt_flags &
3703 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3704 lpfc_disc_state_machine
3705 (vports[i], ndlp,
3706 NULL,
3707 NLP_EVT_DEVICE_RM);
3708 }
3709 }
3710 }
3711 }
3712 lpfc_destroy_vport_work_array(phba, vports);
3713
3714 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3715
3716 if (phba->wq)
3717 flush_workqueue(phba->wq);
3718}
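
/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/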
3728void
3729lpfc_offline(struct lpfc_hba *phba)
3730{
3731 struct Scsi_Host *shost;
3732 struct lpfc_vport **vports;
3733 int i;
3734
3735 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3736 return;

	/* stop port and all timers associated with this hba */
3739 lpfc_stop_port(phba);

	/* Tear down the local and target port registrations. The
	 * nvme transports need to cleanup.
	 */
3744 lpfc_nvmet_destroy_targetport(phba);
3745 lpfc_nvme_destroy_localport(phba->pport);
3746
3747 vports = lpfc_create_vport_work_array(phba);
3748 if (vports != NULL)
3749 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3750 lpfc_stop_vport_timers(vports[i]);
3751 lpfc_destroy_vport_work_array(phba, vports);
3752 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3753 "0460 Bring Adapter offline\n");

	/* Bring down the SLI Layer and cleanup. The HBA is offline now */
3756 lpfc_sli_hba_down(phba);
3757 spin_lock_irq(&phba->hbalock);
3758 phba->work_ha = 0;
3759 spin_unlock_irq(&phba->hbalock);
3760 vports = lpfc_create_vport_work_array(phba);
3761 if (vports != NULL)
3762 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3763 shost = lpfc_shost_from_vport(vports[i]);
3764 spin_lock_irq(shost->host_lock);
3765 vports[i]->work_port_events = 0;
3766 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3767 spin_unlock_irq(shost->host_lock);
3768 }
3769 lpfc_destroy_vport_work_array(phba, vports);

	/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
	 * in hba_unset
	 */
3773 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3774 __lpfc_cpuhp_remove(phba);
3775
3776 if (phba->cfg_xri_rebalancing)
3777 lpfc_destroy_multixri_pools(phba);
3778}
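
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/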
3788static void
3789lpfc_scsi_free(struct lpfc_hba *phba)
3790{
3791 struct lpfc_io_buf *sb, *sb_next;
3792
3793 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3794 return;
3795
3796 spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

3800 spin_lock(&phba->scsi_buf_list_put_lock);
3801 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3802 list) {
3803 list_del(&sb->list);
3804 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3805 sb->dma_handle);
3806 kfree(sb);
3807 phba->total_scsi_bufs--;
3808 }
3809 spin_unlock(&phba->scsi_buf_list_put_lock);
3810
3811 spin_lock(&phba->scsi_buf_list_get_lock);
3812 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3813 list) {
3814 list_del(&sb->list);
3815 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3816 sb->dma_handle);
3817 kfree(sb);
3818 phba->total_scsi_bufs--;
3819 }
3820 spin_unlock(&phba->scsi_buf_list_get_lock);
3821 spin_unlock_irq(&phba->hbalock);
3822}
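
/**
 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the IO buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/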
3832void
3833lpfc_io_free(struct lpfc_hba *phba)
3834{
3835 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3836 struct lpfc_sli4_hdw_queue *qp;
3837 int idx;
3838
3839 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3840 qp = &phba->sli4_hba.hdwq[idx];
3841
3842 spin_lock(&qp->io_buf_list_put_lock);
3843 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3844 &qp->lpfc_io_buf_list_put,
3845 list) {
3846 list_del(&lpfc_ncmd->list);
3847 qp->put_io_bufs--;
3848 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3849 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3850 if (phba->cfg_xpsgl && !phba->nvmet_support)
3851 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3852 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3853 kfree(lpfc_ncmd);
3854 qp->total_io_bufs--;
3855 }
3856 spin_unlock(&qp->io_buf_list_put_lock);
3857
3858 spin_lock(&qp->io_buf_list_get_lock);
3859 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3860 &qp->lpfc_io_buf_list_get,
3861 list) {
3862 list_del(&lpfc_ncmd->list);
3863 qp->get_io_bufs--;
3864 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3865 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3866 if (phba->cfg_xpsgl && !phba->nvmet_support)
3867 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3868 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3869 kfree(lpfc_ncmd);
3870 qp->total_io_bufs--;
3871 }
3872 spin_unlock(&qp->io_buf_list_get_lock);
3873 }
3874}
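
/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to updates the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/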
3888int
3889lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3890{
3891 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3892 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3893 LIST_HEAD(els_sgl_list);
3894 int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
3899 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3900
3901 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3902
3903 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3904 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3905 "3157 ELS xri-sgl count increased from "
3906 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3907 els_xri_cnt);
3908
3909 for (i = 0; i < xri_cnt; i++) {
3910 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3911 GFP_KERNEL);
3912 if (sglq_entry == NULL) {
3913 lpfc_printf_log(phba, KERN_ERR,
3914 LOG_TRACE_EVENT,
3915 "2562 Failure to allocate an "
3916 "ELS sgl entry:%d\n", i);
3917 rc = -ENOMEM;
3918 goto out_free_mem;
3919 }
3920 sglq_entry->buff_type = GEN_BUFF_TYPE;
3921 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3922 &sglq_entry->phys);
3923 if (sglq_entry->virt == NULL) {
3924 kfree(sglq_entry);
3925 lpfc_printf_log(phba, KERN_ERR,
3926 LOG_TRACE_EVENT,
3927 "2563 Failure to allocate an "
3928 "ELS mbuf:%d\n", i);
3929 rc = -ENOMEM;
3930 goto out_free_mem;
3931 }
3932 sglq_entry->sgl = sglq_entry->virt;
3933 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3934 sglq_entry->state = SGL_FREED;
3935 list_add_tail(&sglq_entry->list, &els_sgl_list);
3936 }
3937 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3938 list_splice_init(&els_sgl_list,
3939 &phba->sli4_hba.lpfc_els_sgl_list);
3940 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3941 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3942
3943 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3944 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3945 "3158 ELS xri-sgl count decreased from "
3946 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3947 els_xri_cnt);
3948 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3949 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3950 &els_sgl_list);
3951
3952 for (i = 0; i < xri_cnt; i++) {
3953 list_remove_head(&els_sgl_list,
3954 sglq_entry, struct lpfc_sglq, list);
3955 if (sglq_entry) {
3956 __lpfc_mbuf_free(phba, sglq_entry->virt,
3957 sglq_entry->phys);
3958 kfree(sglq_entry);
3959 }
3960 }
3961 list_splice_init(&els_sgl_list,
3962 &phba->sli4_hba.lpfc_els_sgl_list);
3963 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3964 } else
3965 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3966 "3163 ELS xri-sgl count unchanged: %d\n",
3967 els_xri_cnt);
3968 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3969
3970
3971 sglq_entry = NULL;
3972 sglq_entry_next = NULL;
3973 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3974 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3975 lxri = lpfc_sli4_next_xritag(phba);
3976 if (lxri == NO_XRI) {
3977 lpfc_printf_log(phba, KERN_ERR,
3978 LOG_TRACE_EVENT,
3979 "2400 Failed to allocate xri for "
3980 "ELS sgl\n");
3981 rc = -ENOMEM;
3982 goto out_free_mem;
3983 }
3984 sglq_entry->sli4_lxritag = lxri;
3985 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3986 }
3987 return 0;
3988
3989out_free_mem:
3990 lpfc_free_els_sgl_list(phba);
3991 return rc;
3992}
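
/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the size of the current allocated nvmet
 * sgl list, and then goes through all sgls to update the physical XRIs
 * assigned due to port function reset. During port initialization, the
 * current nvmet sgl list is 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/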
4006int
4007lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4008{
4009 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4010 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4011 uint16_t nvmet_xri_cnt;
4012 LIST_HEAD(nvmet_sgl_list);
4013 int rc;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
4018 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4021 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4022 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4023
4024 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4025 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4026 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4027 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4028
4029 for (i = 0; i < xri_cnt; i++) {
4030 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4031 GFP_KERNEL);
4032 if (sglq_entry == NULL) {
4033 lpfc_printf_log(phba, KERN_ERR,
4034 LOG_TRACE_EVENT,
4035 "6303 Failure to allocate an "
4036 "NVMET sgl entry:%d\n", i);
4037 rc = -ENOMEM;
4038 goto out_free_mem;
4039 }
4040 sglq_entry->buff_type = NVMET_BUFF_TYPE;
4041 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4042 &sglq_entry->phys);
4043 if (sglq_entry->virt == NULL) {
4044 kfree(sglq_entry);
4045 lpfc_printf_log(phba, KERN_ERR,
4046 LOG_TRACE_EVENT,
4047 "6304 Failure to allocate an "
4048 "NVMET buf:%d\n", i);
4049 rc = -ENOMEM;
4050 goto out_free_mem;
4051 }
4052 sglq_entry->sgl = sglq_entry->virt;
4053 memset(sglq_entry->sgl, 0,
4054 phba->cfg_sg_dma_buf_size);
4055 sglq_entry->state = SGL_FREED;
4056 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4057 }
4058 spin_lock_irq(&phba->hbalock);
4059 spin_lock(&phba->sli4_hba.sgl_list_lock);
4060 list_splice_init(&nvmet_sgl_list,
4061 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4062 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4063 spin_unlock_irq(&phba->hbalock);
4064 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4065
4066 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4067 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4068 "6305 NVMET xri-sgl count decreased from "
4069 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4070 nvmet_xri_cnt);
4071 spin_lock_irq(&phba->hbalock);
4072 spin_lock(&phba->sli4_hba.sgl_list_lock);
4073 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4074 &nvmet_sgl_list);
4075
4076 for (i = 0; i < xri_cnt; i++) {
4077 list_remove_head(&nvmet_sgl_list,
4078 sglq_entry, struct lpfc_sglq, list);
4079 if (sglq_entry) {
4080 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4081 sglq_entry->phys);
4082 kfree(sglq_entry);
4083 }
4084 }
4085 list_splice_init(&nvmet_sgl_list,
4086 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4087 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4088 spin_unlock_irq(&phba->hbalock);
4089 } else
4090 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4091 "6306 NVMET xri-sgl count unchanged: %d\n",
4092 nvmet_xri_cnt);
4093 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4094
4095
4096 sglq_entry = NULL;
4097 sglq_entry_next = NULL;
4098 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4099 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4100 lxri = lpfc_sli4_next_xritag(phba);
4101 if (lxri == NO_XRI) {
4102 lpfc_printf_log(phba, KERN_ERR,
4103 LOG_TRACE_EVENT,
4104 "6307 Failed to allocate xri for "
4105 "NVMET sgl\n");
4106 rc = -ENOMEM;
4107 goto out_free_mem;
4108 }
4109 sglq_entry->sli4_lxritag = lxri;
4110 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4111 }
4112 return 0;
4113
4114out_free_mem:
4115 lpfc_free_nvmet_sgl_list(phba);
4116 return rc;
4117}
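
/* Collect all outstanding IO buffers from every hardware queue's get and
 * put lists onto @cbuf, sorted in ascending XRI order so a subsequent
 * SGL block post sees a sequential XRI range. Returns the number of
 * buffers collected.
 */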
4119int
4120lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4121{
4122 LIST_HEAD(blist);
4123 struct lpfc_sli4_hdw_queue *qp;
4124 struct lpfc_io_buf *lpfc_cmd;
4125 struct lpfc_io_buf *iobufp, *prev_iobufp;
4126 int idx, cnt, xri, inserted;
4127
4128 cnt = 0;
4129 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4130 qp = &phba->sli4_hba.hdwq[idx];
4131 spin_lock_irq(&qp->io_buf_list_get_lock);
4132 spin_lock(&qp->io_buf_list_put_lock);
4133
4134
4135 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4136 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4137 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4138 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4139 cnt += qp->get_io_bufs + qp->put_io_bufs;
4140 qp->get_io_bufs = 0;
4141 qp->put_io_bufs = 0;
4142 qp->total_io_bufs = 0;
4143 spin_unlock(&qp->io_buf_list_put_lock);
4144 spin_unlock_irq(&qp->io_buf_list_get_lock);
4145 }

	/*
	 * Take IO buffers off blist and put on cbuf sorted by XRI.
	 * This is because POST_SGL takes a sequential range of XRIs
	 * to post to the firmware.
	 */
4152 for (idx = 0; idx < cnt; idx++) {
4153 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4154 if (!lpfc_cmd)
4155 return cnt;
4156 if (idx == 0) {
4157 list_add_tail(&lpfc_cmd->list, cbuf);
4158 continue;
4159 }
4160 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4161 inserted = 0;
4162 prev_iobufp = NULL;
4163 list_for_each_entry(iobufp, cbuf, list) {
4164 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4165 if (prev_iobufp)
4166 list_add(&lpfc_cmd->list,
4167 &prev_iobufp->list);
4168 else
4169 list_add(&lpfc_cmd->list, cbuf);
4170 inserted = 1;
4171 break;
4172 }
4173 prev_iobufp = iobufp;
4174 }
4175 if (!inserted)
4176 list_add_tail(&lpfc_cmd->list, cbuf);
4177 }
4178 return cnt;
4179}
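
/* Redistribute the IO buffers on @cbuf round-robin back across the
 * hardware queues' put lists. Returns the number of buffers moved.
 */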
4181int
4182lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4183{
4184 struct lpfc_sli4_hdw_queue *qp;
4185 struct lpfc_io_buf *lpfc_cmd;
4186 int idx, cnt;
4187
4188 qp = phba->sli4_hba.hdwq;
4189 cnt = 0;
4190 while (!list_empty(cbuf)) {
4191 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4192 list_remove_head(cbuf, lpfc_cmd,
4193 struct lpfc_io_buf, list);
4194 if (!lpfc_cmd)
4195 return cnt;
4196 cnt++;
4197 qp = &phba->sli4_hba.hdwq[idx];
4198 lpfc_cmd->hdwq_no = idx;
4199 lpfc_cmd->hdwq = qp;
4200 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4201 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4202 spin_lock(&qp->io_buf_list_put_lock);
4203 list_add_tail(&lpfc_cmd->list,
4204 &qp->lpfc_io_buf_list_put);
4205 qp->put_io_bufs++;
4206 qp->total_io_bufs++;
4207 spin_unlock(&qp->io_buf_list_put_lock);
4208 }
4209 }
4210 return cnt;
4211}
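
/**
 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the size of the current allocated io
 * sgl list, and then goes through all sgls to update the physical XRIs
 * assigned due to port function reset. During port initialization, the
 * current io sgl list is 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/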
4225int
4226lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4227{
4228 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4229 uint16_t i, lxri, els_xri_cnt;
4230 uint16_t io_xri_cnt, io_xri_max;
4231 LIST_HEAD(io_sgl_list);
4232 int rc, cnt;

	/*
	 * update on pci function's allocated io xri-sgls
	 */
4239 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4240 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4241 phba->sli4_hba.io_xri_max = io_xri_max;
4242
4243 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4244 "6074 Current allocated XRI sgl count:%d, "
4245 "maximum XRI count:%d\n",
4246 phba->sli4_hba.io_xri_cnt,
4247 phba->sli4_hba.io_xri_max);
4248
4249 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4250
4251 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4252
4253 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4254 phba->sli4_hba.io_xri_max;
4255
4256 for (i = 0; i < io_xri_cnt; i++) {
4257 list_remove_head(&io_sgl_list, lpfc_ncmd,
4258 struct lpfc_io_buf, list);
4259 if (lpfc_ncmd) {
4260 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4261 lpfc_ncmd->data,
4262 lpfc_ncmd->dma_handle);
4263 kfree(lpfc_ncmd);
4264 }
4265 }
4266 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4267 }
4268
4269
4270 lpfc_ncmd = NULL;
4271 lpfc_ncmd_next = NULL;
4272 phba->sli4_hba.io_xri_cnt = cnt;
4273 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4274 &io_sgl_list, list) {
4275 lxri = lpfc_sli4_next_xritag(phba);
4276 if (lxri == NO_XRI) {
4277 lpfc_printf_log(phba, KERN_ERR,
4278 LOG_TRACE_EVENT,
4279 "6075 Failed to allocate xri for "
4280 "nvme buffer\n");
4281 rc = -ENOMEM;
4282 goto out_free_mem;
4283 }
4284 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4285 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4286 }
4287 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4288 return 0;
4289
4290out_free_mem:
4291 lpfc_io_free(phba);
4292 return rc;
4293}
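
/**
 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
 * @phba: pointer to lpfc hba data structure.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates io buffers for a device with SLI-4 interface spec;
 * each io buffer contains all the necessary information needed to initiate
 * an I/O. After allocating up to @num_to_alloc IO buffers and putting them
 * on a list, it posts them to the port by using an SGL block post.
 *
 * Return codes
 *   int - number of IO buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/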
4309int
4310lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4311{
4312 struct lpfc_io_buf *lpfc_ncmd;
4313 struct lpfc_iocbq *pwqeq;
4314 uint16_t iotag, lxri = 0;
4315 int bcnt, num_posted;
4316 LIST_HEAD(prep_nblist);
4317 LIST_HEAD(post_nblist);
4318 LIST_HEAD(nvme_nblist);
4319
4320 phba->sli4_hba.io_xri_cnt = 0;
4321 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4322 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4323 if (!lpfc_ncmd)
4324 break;

		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
4330 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4331 GFP_KERNEL,
4332 &lpfc_ncmd->dma_handle);
4333 if (!lpfc_ncmd->data) {
4334 kfree(lpfc_ncmd);
4335 break;
4336 }
4337
4338 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4339 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4340 } else {
			/*
			 * 4K Page alignment is CRITICAL to BlockGuard, double
			 * check to be sure.
			 */
4345 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4346 (((unsigned long)(lpfc_ncmd->data) &
4347 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4348 lpfc_printf_log(phba, KERN_ERR,
4349 LOG_TRACE_EVENT,
4350 "3369 Memory alignment err: "
4351 "addr=%lx\n",
4352 (unsigned long)lpfc_ncmd->data);
4353 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4354 lpfc_ncmd->data,
4355 lpfc_ncmd->dma_handle);
4356 kfree(lpfc_ncmd);
4357 break;
4358 }
4359 }
4360
4361 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4362
4363 lxri = lpfc_sli4_next_xritag(phba);
4364 if (lxri == NO_XRI) {
4365 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4366 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4367 kfree(lpfc_ncmd);
4368 break;
4369 }
4370 pwqeq = &lpfc_ncmd->cur_iocbq;
4371
4372
4373 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4374 if (iotag == 0) {
4375 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4376 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4377 kfree(lpfc_ncmd);
4378 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4379 "6121 Failed to allocate IOTAG for"
4380 " XRI:0x%x\n", lxri);
4381 lpfc_sli4_free_xri(phba, lxri);
4382 break;
4383 }
4384 pwqeq->sli4_lxritag = lxri;
4385 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4386 pwqeq->context1 = lpfc_ncmd;
4387
4388
4389 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4390 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4391 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4392 spin_lock_init(&lpfc_ncmd->buf_lock);
4393
4394
4395 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4396 phba->sli4_hba.io_xri_cnt++;
4397 }
4398 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4399 "6114 Allocate %d out of %d requested new NVME "
4400 "buffers\n", bcnt, num_to_alloc);
4401
4402
4403 if (!list_empty(&post_nblist))
4404 num_posted = lpfc_sli4_post_io_sgl_list(
4405 phba, &post_nblist, bcnt);
4406 else
4407 num_posted = 0;
4408
4409 return num_posted;
4410}
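
/* Read the adapter's WWPN with a READ_NV mailbox command issued in
 * polling mode. Returns (uint64_t)-1 on any mailbox failure.
 */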
4412static uint64_t
4413lpfc_get_wwpn(struct lpfc_hba *phba)
4414{
4415 uint64_t wwn;
4416 int rc;
4417 LPFC_MBOXQ_t *mboxq;
4418 MAILBOX_t *mb;
4419
4420 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4421 GFP_KERNEL);
4422 if (!mboxq)
4423 return (uint64_t)-1;
4424
4425
4426 lpfc_read_nv(phba, mboxq);
4427 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4428 if (rc != MBX_SUCCESS) {
4429 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4430 "6019 Mailbox failed , mbxCmd x%x "
4431 "READ_NV, mbxStatus x%x\n",
4432 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4433 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4434 mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t)-1;
4436 }
4437 mb = &mboxq->u.mb;
4438 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4439
4440 mempool_free(mboxq, phba->mbox_mem_pool);
4441 if (phba->sli_rev == LPFC_SLI_REV4)
4442 return be64_to_cpu(wwn);
4443 else
4444 return rol64(wwn, 32);
4445}
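
/**
 * lpfc_vmid_res_alloc - Allocates resources for VMID
 * @phba: pointer to lpfc hba data structure.
 * @vport: pointer to vport data structure
 *
 * This routine allocates the resources needed for the VMID.
 *
 * Return codes
 *	0 on Success
 *	Non-0 on Failure
 */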
4458static int
4459lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4460{
4461
4462 if (phba->sli_rev == LPFC_SLI_REV3) {
4463 phba->cfg_vmid_app_header = 0;
4464 phba->cfg_vmid_priority_tagging = 0;
4465 }
4466
4467 if (lpfc_is_vmid_enabled(phba)) {
4468 vport->vmid =
4469 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4470 GFP_KERNEL);
4471 if (!vport->vmid)
4472 return -ENOMEM;
4473
4474 rwlock_init(&vport->vmid_lock);
4475
4476
4477 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4478 vport->vmid_inactivity_timeout =
4479 phba->cfg_vmid_inactivity_timeout;
4480 vport->max_vmid = phba->cfg_max_vmid;
4481 vport->cur_vmid_cnt = 0;
4482
4483 vport->vmid_priority_range = bitmap_zalloc
4484 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4485
4486 if (!vport->vmid_priority_range) {
4487 kfree(vport->vmid);
4488 return -ENOMEM;
4489 }
4490
4491 hash_init(vport->hash_table);
4492 }
4493 return 0;
4494}
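
/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/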
4512struct lpfc_vport *
4513lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4514{
4515 struct lpfc_vport *vport;
4516 struct Scsi_Host *shost = NULL;
4517 struct scsi_host_template *template;
4518 int error = 0;
4519 int i;
4520 uint64_t wwn;
4521 bool use_no_reset_hba = false;
4522 int rc;
4523
4524 if (lpfc_no_hba_reset_cnt) {
4525 if (phba->sli_rev < LPFC_SLI_REV4 &&
4526 dev == &phba->pcidev->dev) {
4527
4528 lpfc_sli_brdrestart(phba);
4529 rc = lpfc_sli_chipset_init(phba);
4530 if (rc)
4531 return NULL;
4532 }
4533 wwn = lpfc_get_wwpn(phba);
4534 }
4535
4536 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4537 if (wwn == lpfc_no_hba_reset[i]) {
4538 lpfc_printf_log(phba, KERN_ERR,
4539 LOG_TRACE_EVENT,
4540 "6020 Setting use_no_reset port=%llx\n",
4541 wwn);
4542 use_no_reset_hba = true;
4543 break;
4544 }
4545 }
4546
4547
4548 if (dev == &phba->pcidev->dev) {
4549 template = &phba->port_template;
4550
4551 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4552
4553 memcpy(template, &lpfc_template, sizeof(*template));
4554
4555 if (use_no_reset_hba)
4556
4557 template->eh_host_reset_handler = NULL;
4558
4559
4560 memcpy(&phba->vport_template, &lpfc_template,
4561 sizeof(*template));
4562 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4563 phba->vport_template.eh_bus_reset_handler = NULL;
4564 phba->vport_template.eh_host_reset_handler = NULL;
4565 phba->vport_template.vendor_id = 0;
4566
4567
4568 if (phba->sli_rev == LPFC_SLI_REV4) {
4569 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4570 phba->vport_template.sg_tablesize =
4571 phba->cfg_scsi_seg_cnt;
4572 } else {
4573 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4574 phba->vport_template.sg_tablesize =
4575 phba->cfg_sg_seg_cnt;
4576 }
4577
4578 } else {
4579
4580 memcpy(template, &lpfc_template_nvme,
4581 sizeof(*template));
4582 }
4583 } else {
4584 template = &phba->vport_template;
4585 }
4586
4587 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4588 if (!shost)
4589 goto out;
4590
4591 vport = (struct lpfc_vport *) shost->hostdata;
4592 vport->phba = phba;
4593 vport->load_flag |= FC_LOADING;
4594 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4595 vport->fc_rscn_flush = 0;
4596 lpfc_get_vport_cfgparam(vport);
4597
4598
4599 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4600
4601 shost->unique_id = instance;
4602 shost->max_id = LPFC_MAX_TARGET;
4603 shost->max_lun = vport->cfg_max_luns;
4604 shost->this_id = -1;
4605 shost->max_cmd_len = 16;
4606
4607 if (phba->sli_rev == LPFC_SLI_REV4) {
4608 if (!phba->cfg_fcp_mq_threshold ||
4609 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4610 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4611
4612 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4613 phba->cfg_fcp_mq_threshold);
4614
4615 shost->dma_boundary =
4616 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4617
4618 if (phba->cfg_xpsgl && !phba->nvmet_support)
4619 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4620 else
4621 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4622 } else
		/* SLI-3 has a single hardware queue */
4626 shost->nr_hw_queues = 1;
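
	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */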
4633 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4634 if (dev != &phba->pcidev->dev) {
4635 shost->transportt = lpfc_vport_transport_template;
4636 vport->port_type = LPFC_NPIV_PORT;
4637 } else {
4638 shost->transportt = lpfc_transport_template;
4639 vport->port_type = LPFC_PHYSICAL_PORT;
4640 }
4641
4642 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4643 "9081 CreatePort TMPLATE type %x TBLsize %d "
4644 "SEGcnt %d/%d\n",
4645 vport->port_type, shost->sg_tablesize,
4646 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4647
4648
4649 rc = lpfc_vmid_res_alloc(phba, vport);
4650
4651 if (rc)
4652 goto out;
4653
4654
4655 INIT_LIST_HEAD(&vport->fc_nodes);
4656 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4657 spin_lock_init(&vport->work_port_lock);
4658
4659 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4660
4661 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4662
4663 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4664
4665 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4666 lpfc_setup_bg(phba, shost);
4667
4668 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4669 if (error)
4670 goto out_put_shost;
4671
4672 spin_lock_irq(&phba->port_list_lock);
4673 list_add_tail(&vport->listentry, &phba->port_list);
4674 spin_unlock_irq(&phba->port_list_lock);
4675 return vport;
4676
4677out_put_shost:
4678 kfree(vport->vmid);
4679 bitmap_free(vport->vmid_priority_range);
4680 scsi_host_put(shost);
4681out:
4682 return NULL;
4683}
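
/**
 * destroy_port - destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/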
4692void
4693destroy_port(struct lpfc_vport *vport)
4694{
4695 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4696 struct lpfc_hba *phba = vport->phba;
4697
4698 lpfc_debugfs_terminate(vport);
4699 fc_remove_host(shost);
4700 scsi_remove_host(shost);
4701
4702 spin_lock_irq(&phba->port_list_lock);
4703 list_del_init(&vport->listentry);
4704 spin_unlock_irq(&phba->port_list_lock);
4705
4706 lpfc_cleanup(vport);
4707 return;
4708}

/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from the lpfc_hba_index pool.
 * It uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}
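
/*
 * Illustrative sketch (not upstream driver code): idr_alloc() with
 * end == 0 means "no upper bound", so the pool hands out 0, 1, 2, ... as
 * hosts are probed.  A teardown path is expected to return the ID with
 * the matching call, roughly:
 *
 *	int instance = lpfc_get_instance();
 *	if (instance >= 0) {
 *		...
 *		idr_remove(&lpfc_hba_index, instance);	// on teardown
 *	}
 */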

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan is done.
 *
 * Return codes:
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
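
/*
 * Illustrative sketch (not part of this file): the async-scan path in the
 * SCSI midlayer drives this hook with the elapsed scan time, polling
 * until the driver reports completion, roughly:
 *
 *	unsigned long start = jiffies;
 *
 *	while (!shost->hostt->scan_finished(shost, jiffies - start))
 *		msleep(10);
 *
 * so the 15s/30s checks above bound how long host initialization can be
 * held up by a slow or down link.
 */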

static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	fc_host_supported_speeds(shost) = 0;
	/*
	 * Avoid reporting supported link speed for FCoE as it can't be
	 * controlled via FCoE.
	 */
	if (phba->hba_flag & HBA_FCOE_MODE)
		return;

	if (phba->lmt & LMT_256Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
	if (phba->lmt & LMT_128Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
	if (phba->lmt & LMT_64Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
	if (phba->lmt & LMT_32Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
}
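
/*
 * Illustrative alternative (hypothetical, not upstream code): the
 * bit-by-bit mapping above could also be expressed as a lookup table,
 * which keeps the speed list in one place as new link rates appear:
 *
 *	static const struct {
 *		uint32_t lmt;
 *		u32 portspeed;
 *	} speed_map[] = {
 *		{ LMT_256Gb, FC_PORTSPEED_256GBIT },
 *		{ LMT_128Gb, FC_PORTSPEED_128GBIT },
 *		...
 *		{ LMT_1Gb, FC_PORTSPEED_1GBIT },
 *	};
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(speed_map); i++)
 *		if (phba->lmt & speed_map[i].lmt)
 *			fc_host_supported_speeds(shost) |=
 *				speed_map[i].portspeed;
 */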

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	lpfc_host_supported_speeds_set(shost);

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
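
/*
 * Illustrative note (not upstream code): wwn_to_u64() simply repacks the
 * eight big-endian WWN bytes from the service parameters into a host
 * integer, equivalent to:
 *
 *	u64 wwn_to_u64(u8 wwn[8])
 *	{
 *		return get_unaligned_be64(wwn);
 *	}
 *
 * so a WWN of 10:00:00:90:fa:aa:bb:cc is reported as 0x10000090faaabbcc.
 */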

/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
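
/*
 * Illustrative sketch (generic PCI MMIO idiom, not lpfc-specific): writes
 * over PCI are posted, so a writel() may still be sitting in a bridge
 * buffer when the CPU moves on.  Reading a register back forces the
 * posted write out to the device first:
 *
 *	writel(0, regaddr);	// disable interrupts (may be buffered)
 *	(void) readl(regaddr);	// read-back forces the write to complete
 *
 * which is why each writel() above is paired with a readl() flush.
 */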

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
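
/*
 * Illustrative sketch (hypothetical setup code, not copied from this
 * file): the jump-table entry used above is bound once, at probe time, to
 * whichever SLI revision the HBA speaks, along the lines of:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		phba->lpfc_stop_port = lpfc_stop_port_s4;
 *	else
 *		phba->lpfc_stop_port = lpfc_stop_port_s3;
 *
 * so common code can call lpfc_stop_port() without SLI-revision checks.
 */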

/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to
 * complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));

	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when the wait for FCF table rediscovery times
 * out. If new FCF records were discovered during the wait period, a new
 * FCF event is added to the FCOE async event list and the worker thread
 * is woken up to process it.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
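
/*
 * Illustrative sketch (hypothetical names, not lpfc code): the routine
 * pair above follows the usual kernel timer protocol.  The arming side
 * sets a "pending" flag under a lock; the handler takes the same lock and
 * consumes the flag, so a timer that was cancelled or re-armed never
 * delivers a stale event:
 *
 *	timer_setup(&obj->timer, handler, 0);		// once, at init
 *	obj->flags |= PENDING;				// under obj->lock
 *	mod_timer(&obj->timer, jiffies + delay);	// (re)arm
 *
 *	static void handler(struct timer_list *t)
 *	{
 *		struct obj *obj = from_timer(obj, t, timer);
 *
 *		spin_lock_irq(&obj->lock);
 *		if (!(obj->flags & PENDING)) {		// raced with cancel
 *			spin_unlock_irq(&obj->lock);
 *			return;
 *		}
 *		obj->flags &= ~PENDING;
 *		spin_unlock_irq(&obj->lock);
 *		// ... deliver the event ...
 *	}
 */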

/**
 * lpfc_vmid_poll - VMID timeout detection
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when there has been no I/O from a VM for the
 * configured amount of time. When that is detected, the VMID has to be
 * deregistered from the switch and all the local resources freed. The
 * VMID will be reassigned to the VM once it starts issuing I/O again.
 **/
static void
lpfc_vmid_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
	u32 wake_up = 0;

	/* check if there is a need to issue a QFPA */
	if (phba->pport->vmid_priority_tagging) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
	}

	/* Is the vmid inactivity timer expired? */
	if (phba->pport->vmid_inactivity_timeout ||
	    phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
	}

	if (wake_up)
		lpfc_worker_wake_up(phba);

	/* restart the timer for the next iteration */
	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
							LPFC_VMID_TIMER));
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
	case LPFC_ASYNC_LINK_FAULT_LR_LRR:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0398 Unknown link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		break;
	}
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type. The SLI4 link
 * attention type is from the SLI4 async link completion queue entry.
 *
 * Return: Link-attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}
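
/*
 * Illustrative note (macro paraphrased from lpfc_hw4.h): the bf_get()
 * used above is a token-pasting accessor over the SHIFT/MASK/WORD triples
 * that describe each ACQE bit-field, roughly:
 *
 *	#define bf_get(name, ptr) \
 *		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
 *
 * so bf_get(lpfc_acqe_link_status, acqe_link) extracts the link-status
 * field without any open-coded shifting at the call sites.
 */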

/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;