1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/idr.h>
28#include <linux/interrupt.h>
29#include <linux/module.h>
30#include <linux/kthread.h>
31#include <linux/pci.h>
32#include <linux/spinlock.h>
33#include <linux/ctype.h>
34#include <linux/aer.h>
35#include <linux/slab.h>
36#include <linux/firmware.h>
37#include <linux/miscdevice.h>
38#include <linux/percpu.h>
39#include <linux/msi.h>
40#include <linux/irq.h>
41#include <linux/bitops.h>
42
43#include <scsi/scsi.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_transport_fc.h>
47#include <scsi/scsi_tcq.h>
48#include <scsi/fc/fc_fs.h>
49
50#include <linux/nvme-fc-driver.h>
51
52#include "lpfc_hw4.h"
53#include "lpfc_hw.h"
54#include "lpfc_sli.h"
55#include "lpfc_sli4.h"
56#include "lpfc_nl.h"
57#include "lpfc_disc.h"
58#include "lpfc.h"
59#include "lpfc_scsi.h"
60#include "lpfc_nvme.h"
61#include "lpfc_nvmet.h"
62#include "lpfc_logmsg.h"
63#include "lpfc_crtn.h"
64#include "lpfc_vport.h"
65#include "lpfc_version.h"
66#include "lpfc_ids.h"
67
/* NOTE(review): shared dump buffers — presumably used by the BlockGuard
 * debug capture code elsewhere in the driver; confirm against lpfc_scsi.c.
 */
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Count of CPUs recorded at init time (assumption — set elsewhere; verify) */
static uint32_t lpfc_present_cpu;

/* Forward declarations for routines local to this file */
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

/* SCSI transport templates for physical and virtual ports */
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
/* IDR allocator handing out unique HBA instance numbers */
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues the pre-CONFIG_PORT mailbox sequence: an optional READ_NVPARM with
 * the license key (light-pulse HBAs only), READ_REV to collect revision
 * information into phba->vpd, and a DUMP_MEM loop to pull the adapter's
 * Vital Product Data, which is then parsed via lpfc_parse_vpd().
 *
 * Return codes:
 *   0 - success (a missing/unreadable VPD region is not treated as fatal)
 *   -ENOMEM - could not allocate a mailbox from the pool
 *   -ERESTART - a mailbox command failed; caller should reset and retry
 *   -EINVAL - SLI-3 requested but firmware gave no SLI-3 response
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		/* Byte-swap the license key exactly once per driver load */
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		/* READ_NVPARM carries the license key in rsvd3 */
		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		/* Cache the adapter's node and port names from NVPARM */
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all SLI-3 option bits except a previously detected
	 * BlockGuard enable, which must survive re-initialization.
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ_REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * A clear rr bit means the firmware did not fill in the revision
	 * fields; the data below would be garbage, so restart instead.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* SLI-3 was requested but firmware returned no SLI-3 response */
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save revision information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/*
	 * Feature level below 9 requires the vport-teardown work-around
	 * (NOTE(review): exact semantics defined by the firmware spec;
	 * confirm against the LPFC_SLI3_VPORT_TEARDOWN users).
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof (phba->RandomData));

	/* Pull the VPD region from the adapter, one DUMP_MEM at a time */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* A zero word count means the dump is finished (or the
		 * mailbox failed above); either way, stop the loop.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		/* Clamp the copy so it never runs past the VPD buffer */
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
272
273
274
275
276
277
278
279
280
281
282
283static void
284lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
285{
286 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
287 phba->temp_sensor_support = 1;
288 else
289 phba->temp_sensor_support = 0;
290 mempool_free(pmboxq, phba->mbox_mem_pool);
291 return;
292}
293
294
295
296
297
298
299
300
301
302
303
/**
 * lpfc_dump_wakeup_param_cmpl - Completion handler for the wakeup-param dump
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Decodes the Option ROM version from mailbox word 7 and formats it into
 * phba->OptionROMVersion. The mailbox is returned to the pool on all paths.
 */
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* Distribution-type characters indexed by prg->dist (0..3) */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	/* View prog_id_word through the prog_id bit-field layout; the
	 * assignment below fills it before any field is read.
	 */
	prg = (struct prog_id *) &prog_id_word;

	/* Mailbox word 7 carries the raw Option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	/* dist==3 with num==0 uses the short form with no suffix */
	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
337
338
339
340
341
342
343
344
345
346
/**
 * lpfc_update_vport_wwn - Reconcile the vport's WWNN/WWPN with new sparams
 * @vport: pointer to lpfc vport data structure.
 *
 * Merges the service parameters just read from the adapter (fc_sparam) with
 * the vport's cached names (fc_nodename/fc_portname), honoring any
 * user-configured soft WWNN/WWPN and the fabric-assigned WWPN (FA-WWPN)
 * vendor key. Whichever side "wins" is copied over the other so both stay
 * in sync.
 */
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* A configured soft name overrides the adapter-provided one */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * Adopt the sparam node name when no name is cached yet or a soft
	 * WWNN is in effect; otherwise push the cached name into the sparams.
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * An established WWPN that differs from the incoming sparam WWPN is
	 * flagged as a parameter change for later handling.
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		/* Adopt the sparam WWPN and re-derive FAWWPN_SET from the
		 * vendor key present in this set of service parameters.
		 */
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}
395
396
397
398
399
400
401
402
403
404
405
406
407
408
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Runs the post-CONFIG_PORT bring-up: reads service parameters and updates
 * the vport/fc_host names, derives a serial number if none exists, issues
 * READ_CONFIG to clamp queue depths, enables host interrupts, starts the
 * ELS/heartbeat/error-attention timers, brings the link up or down as
 * configured, and issues the async-event-enable and Option-ROM-version
 * mailboxes (completed asynchronously by their cmpl handlers).
 *
 * Return codes:
 *   0 - success
 *   -ENOMEM - mailbox pool allocation failure
 *   -EIO - a mailbox command or register read failed
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear any recorded over-temperature condition on (re)init */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Read the port's service parameters (READ_SPARM) */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		/* Grab the DMA buffer before the mailbox is freed */
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* Copy the returned service parameters and release the DMA buffer */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Publish the (possibly updated) names to the fc_host attributes */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/*
	 * If no serial number is known, synthesize one from the node name's
	 * IEEE bytes: each nibble maps to '0'-'9' or 'a'-'f'.
	 */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Clamp the HBA queue depth to the adapter's XRI limit */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Clamp the LUN queue depth to max_xri / 8 */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	/* Cache the link-speed mask reported by READ_CONFIG */
	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Hold IOCB events on configured extra/FCP rings until ready */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers (HBQ-based SLI-3 posts these elsewhere) */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/* Route attention conditions to messages when in MSI-X mode */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Reset the error-attention "handled" marker */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable the appropriate host interrupt conditions */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	/* Leave ring-0 interrupts off when FCP ring polling is configured */
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ELS ring timeout (twice the resource allocation timeout) */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Start the heartbeat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Start the error-attention polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		/* Link was administratively disabled; force it down */
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
					KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}

	/* This mailbox is freed by lpfc_config_async_cmpl on completion */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option ROM version; freed by lpfc_dump_wakeup_param_cmpl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688static int
689lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
690{
691 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
692}
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
/**
 * lpfc_hba_init_link_fc_topology - Initialize the FC link with a topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired FC topology to pass to INIT_LINK.
 * @flag: mailbox command issue mode - either MBX_NOWAIT or MBX_POLL
 *
 * Validates the configured link speed against the adapter's link-speed mask
 * (falling back to auto-negotiation if unsupported), then issues the
 * INIT_LINK mailbox command. On failure with SLI-3 or earlier, interrupts
 * are masked and pending attentions cleared before reporting the error.
 *
 * Return codes:
 *   0 - success
 *   -ENOMEM - mailbox pool allocation failure
 *   -EIO - INIT_LINK mailbox command failed
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	/*
	 * Reset to auto-negotiation when the requested speed is out of
	 * range or not present in the adapter's link-speed mask (lmt).
	 */
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		/* MBX_BUSY with MBX_NOWAIT means the SLI layer owns pmb */
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793static int
794lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
795{
796 LPFC_MBOXQ_t *pmb;
797 int rc;
798
799 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
800 if (!pmb) {
801 phba->link_state = LPFC_HBA_ERROR;
802 return -ENOMEM;
803 }
804
805 lpfc_printf_log(phba,
806 KERN_ERR, LOG_INIT,
807 "0491 Adapter Link is disabled.\n");
808 lpfc_down_link(phba, pmb);
809 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
810 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
811 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
812 lpfc_printf_log(phba,
813 KERN_ERR, LOG_INIT,
814 "2522 Adapter failed to issue DOWN_LINK"
815 " mbox command rc 0x%x\n", rc);
816
817 mempool_free(pmb, phba->mbox_mem_pool);
818 return -EIO;
819 }
820 if (flag == MBX_POLL)
821 mempool_free(pmb, phba->mbox_mem_pool);
822
823 return 0;
824}
825
826
827
828
829
830
831
832
833
834
835
836
837int
838lpfc_hba_down_prep(struct lpfc_hba *phba)
839{
840 struct lpfc_vport **vports;
841 int i;
842
843 if (phba->sli_rev <= LPFC_SLI_REV3) {
844
845 writel(0, phba->HCregaddr);
846 readl(phba->HCregaddr);
847 }
848
849 if (phba->pport->load_flag & FC_UNLOADING)
850 lpfc_cleanup_discovery_resources(phba->pport);
851 else {
852 vports = lpfc_create_vport_work_array(phba);
853 if (vports != NULL)
854 for (i = 0; i <= phba->max_vports &&
855 vports[i] != NULL; i++)
856 lpfc_cleanup_discovery_resources(vports[i]);
857 lpfc_destroy_vport_work_array(phba, vports);
858 }
859 return 0;
860}
861
862
863
864
865
866
867
868
869
870
871
872
873
874
/**
 * lpfc_sli4_free_sp_events - Free pending SLI4 slow-path queue events
 * @phba: pointer to lpfc HBA data structure.
 *
 * Clears the HBA_SP_QUEUE_EVT flag, then drains the slow-path queue event
 * list, releasing whatever object each event is embedded in: the response
 * iocb for WQE completions, or the DMA buffer for receive-queue events.
 */
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Detach one event at a time under the hba lock */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Free the containing object based on the CQE code */
		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			/* last case - falls out of the switch */
		}
	}
}
907
908
909
910
911
912
913
914
915
916
917
918
919static void
920lpfc_hba_free_post_buf(struct lpfc_hba *phba)
921{
922 struct lpfc_sli *psli = &phba->sli;
923 struct lpfc_sli_ring *pring;
924 struct lpfc_dmabuf *mp, *next_mp;
925 LIST_HEAD(buflist);
926 int count;
927
928 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
929 lpfc_sli_hbqbuf_free_all(phba);
930 else {
931
932 pring = &psli->sli3_ring[LPFC_ELS_RING];
933 spin_lock_irq(&phba->hbalock);
934 list_splice_init(&pring->postbufq, &buflist);
935 spin_unlock_irq(&phba->hbalock);
936
937 count = 0;
938 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
939 list_del(&mp->list);
940 count++;
941 lpfc_mbuf_free(phba, mp->virt, mp->phys);
942 kfree(mp);
943 }
944
945 spin_lock_irq(&phba->hbalock);
946 pring->postbufq_cnt -= count;
947 spin_unlock_irq(&phba->hbalock);
948 }
949}
950
951
952
953
954
955
956
957
958
959
960
/**
 * lpfc_hba_clean_txcmplq - Flush all txcmpl queues after an HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * Moves every pending iocb off the txcmpl queues (per SLI-3 ring, or per
 * SLI-4 work queue) under the appropriate lock, drives outstanding aborts
 * via lpfc_sli_abort_iocb_ring(), and completes the collected iocbs with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.
 */
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		/* SLI-3: walk the fixed set of rings under hbalock */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/*
			 * Move the pending completion iocbs off this ring
			 * so they can be cancelled after the aborts below.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	/* SLI-4: walk each work queue's pring under its own ring_lock */
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		/* Clear the on-txcmplq marker before splicing the list */
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
/**
 * lpfc_hba_down_post_s3 - SLI-3 cleanup after bringing the HBA down
 * @phba: pointer to lpfc HBA data structure.
 *
 * Releases posted receive buffers and then flushes the txcmpl queues.
 *
 * Return: always 0.
 */
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);

	lpfc_hba_clean_txcmplq(phba);

	return 0;
}
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
/**
 * lpfc_hba_down_post_s4 - SLI-4 cleanup after bringing the HBA down
 * @phba: pointer to lpfc HBA data structure.
 *
 * Frees all HBQ buffers, flushes the txcmpl queues, returns aborted ELS
 * sgls to the free list, recycles aborted SCSI (and, when NVMe is enabled,
 * NVMe) IO buffers back to their hardware queues' put lists, reposts
 * aborted NVMET receive contexts, and frees pending slow-path queue events.
 *
 * Return: number of aborted SCSI/NVMe IO buffers recycled.
 */
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/*
	 * Hold hbalock (irq-disabled) across all the list work below; the
	 * per-list spinlocks nest inside it.
	 */
	spin_lock_irq(&phba->hbalock);

	/* Mark aborted ELS sgls freed and return them to the free list */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/*
	 * For each hardware queue, move aborted IO buffers onto the
	 * queue's put list after clearing their command state.
	 */
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_scsi_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_scsi_buf_list,
				 &aborts);
		/* Reset each buffer before returning it to the pool */
		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_scsi_buf_list_lock);

		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Same recycle pass for the NVMe abort list */
			spin_lock(&qp->abts_nvme_buf_list_lock);
			list_splice_init(&qp->lpfc_abts_nvme_buf_list,
					 &nvme_aborts);
			list_for_each_entry_safe(psb, psb_next, &nvme_aborts,
						 list) {
				psb->pCmd = NULL;
				psb->status = IOSTAT_SUCCESS;
				cnt++;
			}
			spin_lock(&qp->io_buf_list_put_lock);
			qp->put_io_bufs += qp->abts_nvme_io_bufs;
			qp->abts_nvme_io_bufs = 0;
			list_splice_init(&nvme_aborts,
					 &qp->lpfc_io_buf_list_put);
			spin_unlock(&qp->io_buf_list_put_lock);
			spin_unlock(&qp->abts_nvme_buf_list_lock);

		}
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Clear abort state and repost aborted NVMET contexts */
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148int
1149lpfc_hba_down_post(struct lpfc_hba *phba)
1150{
1151 return (*phba->lpfc_hba_down_post)(phba);
1152}
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166static void
1167lpfc_hb_timeout(struct timer_list *t)
1168{
1169 struct lpfc_hba *phba;
1170 uint32_t tmo_posted;
1171 unsigned long iflag;
1172
1173 phba = from_timer(phba, t, hb_tmofunc);
1174
1175
1176 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1177 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1178 if (!tmo_posted)
1179 phba->pport->work_port_events |= WORKER_HB_TMO;
1180 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1181
1182
1183 if (!tmo_posted)
1184 lpfc_worker_wake_up(phba);
1185 return;
1186}
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200static void
1201lpfc_rrq_timeout(struct timer_list *t)
1202{
1203 struct lpfc_hba *phba;
1204 unsigned long iflag;
1205
1206 phba = from_timer(phba, t, rrq_tmr);
1207 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1208 if (!(phba->pport->load_flag & FC_UNLOADING))
1209 phba->hba_flag |= HBA_RRQ_ACTIVE;
1210 else
1211 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1212 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1213
1214 if (!(phba->pport->load_flag & FC_UNLOADING))
1215 lpfc_worker_wake_up(phba);
1216}
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234static void
1235lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1236{
1237 unsigned long drvr_flag;
1238
1239 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1240 phba->hb_outstanding = 0;
1241 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1242
1243
1244 mempool_free(pmboxq, phba->mbox_mem_pool);
1245 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1246 !(phba->link_state == LPFC_HBA_ERROR) &&
1247 !(phba->pport->load_flag & FC_UNLOADING))
1248 mod_timer(&phba->hb_tmofunc,
1249 jiffies +
1250 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1251 return;
1252}
1253
1254static void
1255lpfc_hb_eq_delay_work(struct work_struct *work)
1256{
1257 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1258 struct lpfc_hba, eq_delay_work);
1259 struct lpfc_eq_intr_info *eqi, *eqi_new;
1260 struct lpfc_queue *eq, *eq_next;
1261 unsigned char *eqcnt = NULL;
1262 uint32_t usdelay;
1263 int i;
1264
1265 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1266 return;
1267
1268 if (phba->link_state == LPFC_HBA_ERROR ||
1269 phba->pport->fc_flag & FC_OFFLINE_MODE)
1270 goto requeue;
1271
1272 eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
1273 GFP_KERNEL);
1274 if (!eqcnt)
1275 goto requeue;
1276
1277
1278 for (i = 0; i < phba->cfg_irq_chann; i++) {
1279
1280 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1281 if (eq && eqcnt[eq->last_cpu] < 2)
1282 eqcnt[eq->last_cpu]++;
1283 continue;
1284 }
1285
1286 for_each_present_cpu(i) {
1287 if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2)
1288 continue;
1289
1290 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1291
1292 usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
1293 LPFC_EQ_DELAY_STEP;
1294 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1295 usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1296
1297 eqi->icnt = 0;
1298
1299 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1300 if (eq->last_cpu != i) {
1301 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1302 eq->last_cpu);
1303 list_move_tail(&eq->cpu_list, &eqi_new->list);
1304 continue;
1305 }
1306 if (usdelay != eq->q_mode)
1307 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1308 usdelay);
1309 }
1310 }
1311
1312 kfree(eqcnt);
1313
1314requeue:
1315 queue_delayed_work(phba->wq, &phba->eq_delay_work,
1316 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1317}
1318
1319
1320
1321
1322
1323
1324
1325
1326static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1327{
1328 u32 i;
1329 u32 hwq_count;
1330
1331 hwq_count = phba->cfg_hdw_queue;
1332 for (i = 0; i < hwq_count; i++) {
1333
1334 lpfc_adjust_pvt_pool_count(phba, i);
1335
1336
1337 lpfc_adjust_high_watermark(phba, i);
1338
1339#ifdef LPFC_MXP_STAT
1340
1341 lpfc_snapshot_mxp(phba, i);
1342#endif
1343 }
1344}
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Worker-context handler for the heartbeat timer.  It rebalances the
 * multi-XRI pools (when cfg_xri_rebalancing is set), runs per-vport
 * sequence-timeout and FDMI checks, frees ELS buffers that have sat
 * unchanged since the last pass, and - when cfg_enable_hba_heartbeat is
 * set and no mailbox traffic has completed recently - issues a
 * heartbeat mailbox command.  In every path it re-arms hb_tmofunc with
 * either the normal interval or the (longer) mailbox timeout.
 */
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_num_disc_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Nothing to do if the HBA is in error, unloading, or offline */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	/* If a mailbox completed within the last heartbeat interval the
	 * link is demonstrably alive; just re-arm the timer and return.
	 */
	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	/* If the ELS buffer count has not changed since the previous pass,
	 * the buffers are stale: splice them off under the lock and free
	 * them outside it.
	 */
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			/* Only issue the mailbox when none is active and
			 * the queue is empty, so we don't contend with
			 * normal mailbox traffic.
			 */
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or the timer popped while the cmd was
			 * still active.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}
1493
1494
1495
1496
1497
1498
1499
1500
/**
 * lpfc_offline_eratt - Bring the port offline after an error attention (SLI-3)
 * @phba: pointer to lpfc hba data structure.
 *
 * Clears LPFC_SLI_ACTIVE, takes the port offline, resets the board
 * (reset barrier, brdreset, down-post, wait for mailbox-ready), then
 * unblocks management I/O and leaves link_state at LPFC_HBA_ERROR.
 * The sequence/lock ordering below is deliberate; do not reorder.
 */
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
1522
1523
1524
1525
1526
1527
1528
1529
/**
 * lpfc_sli4_offline_eratt - Bring the port offline after an error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-4 counterpart of lpfc_offline_eratt(): marks the HBA in error
 * under the hba lock, then takes the port offline, posts the down
 * handling, and unblocks management I/O.  No board reset is attempted.
 */
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
/**
 * lpfc_handle_deferred_eratt - Handle a deferred error attention (SLI-3)
 * @phba: pointer to lpfc hba data structure.
 *
 * Aborts outstanding FCP I/O, takes the port offline, then polls the
 * host-status register while HS_FFER1 is set, waiting for the deferred
 * error to resolve.  On exit the DEFER_ERATT flag is cleared and the
 * latest status words are captured from SLIM so the caller's normal
 * error-attention path can process what remains.
 */
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline we cannot talk to the card;
	 * just drop the deferred-error flag and bail out.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR ;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to ptrotect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
1621
1622static void
1623lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1624{
1625 struct lpfc_board_event_header board_event;
1626 struct Scsi_Host *shost;
1627
1628 board_event.event_type = FC_REG_BOARD_EVENT;
1629 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1630 shost = lpfc_shost_from_vport(phba->pport);
1631 fc_host_post_vendor_event(shost, fc_get_event_number(),
1632 sizeof(board_event),
1633 (char *) &board_event,
1634 LPFC_NL_VENDOR_ID);
1635}
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
/**
 * lpfc_handle_eratt_s3 - SLI-3 error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Triage for SLI-3 hardware errors reported in work_hs:
 *  - HS_FFER6/HS_FFER8: abort FCP I/O, offline, restart and re-online;
 *  - HS_CRIT_TEMP: post a temperature event and take the port offline;
 *  - anything else: log, post a dump event, and take the port offline.
 * Does nothing when the PCI channel is offline or HBA reset is disabled.
 */
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline we cannot talk to the card;
	 * just clear the deferred-error flag and return.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		* Firmware stops when it triggled erratt with HS_FFER6.
		* That could cause the I/Os dropped by the firmware.
		* Error iocb (I/O) on txcmplq and let the SCSI layer
		* retry it after re-establishing link.
		*/
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
/**
 * lpfc_sli4_port_sta_fn_reset - Attempt a function-level port recovery
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: mailbox shutdown action (LPFC_MBX_WAIT / LPFC_MBX_NO_WAIT).
 * @en_rn_msg: log the "Reset Needed" recovery message when true.
 *
 * For if_type >= 2 ports, waits for the status register to indicate a
 * reset is possible, then offlines the port, disables interrupts,
 * restarts the board, re-enables interrupts in the previous mode, and
 * brings the port back online.
 *
 * Return: 0 on successful recovery; -EIO if interrupts could not be
 * re-enabled; otherwise the non-zero status of the failing step.
 */
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt/perform port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
	lpfc_offline_prep(phba, mbx_action);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
1822
1823
1824
1825
1826
1827
1828
1829
/**
 * lpfc_handle_eratt_s4 - SLI-4 error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Triage for SLI-4 hardware errors, keyed on the interface type:
 *  - if_type 0: read the unrecoverable-error registers; if the UE is
 *    flagged recoverable, poll the port semaphore for the recoverable
 *    state and then for PORT_READY before attempting a function reset;
 *    otherwise take the port offline.
 *  - if_type 2/6: read the status/error registers, handle the
 *    overtemperature bit, classify firmware-restart / forced-dump /
 *    provisioning cases, then attempt a function reset (unless HBA
 *    reset is disabled) and offline the port on failure.
 * Finally a dump event is reported to the management application.
 */
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline we cannot talk to the card;
	 * offline the port and return.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3166 pci channel is offline\n");
		lpfc_sli4_offline_eratt(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci device unplugged */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7623 Checking UE recoverable");

		/* Poll (1s per iteration) for the recoverable-UE state */
		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/*Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE: wait (up to 20s) for PORT_READY, then
		 * perform the function reset.
		 */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba,
						KERN_ERR, LOG_INIT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		lpfc_sli4_offline_eratt(phba);
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci device unplugged */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline Data: x%x x%x\n",
				reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023void
2024lpfc_handle_eratt(struct lpfc_hba *phba)
2025{
2026 (*phba->lpfc_handle_eratt)(phba);
2027}
2028
2029
2030
2031
2032
2033
2034
2035
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Worker-thread handler for a link attention: allocates a mailbox and
 * DMA buffer, flushes all outstanding ELS commands, and issues a
 * READ_TOPOLOGY mailbox command (completion: lpfc_mbx_cmpl_read_topology).
 * On success the HA_LATT attention bit is cleared.  On any failure the
 * goto chain unwinds the allocations, re-enables link attention
 * interrupts, forces a link-down, and marks the HBA in error.
 */
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130int
2131lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2132{
2133 uint8_t lenlo, lenhi;
2134 int Length;
2135 int i, j;
2136 int finished = 0;
2137 int index = 0;
2138
2139 if (!vpd)
2140 return 0;
2141
2142
2143 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2144 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2145 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2146 (uint32_t) vpd[3]);
2147 while (!finished && (index < (len - 4))) {
2148 switch (vpd[index]) {
2149 case 0x82:
2150 case 0x91:
2151 index += 1;
2152 lenlo = vpd[index];
2153 index += 1;
2154 lenhi = vpd[index];
2155 index += 1;
2156 i = ((((unsigned short)lenhi) << 8) + lenlo);
2157 index += i;
2158 break;
2159 case 0x90:
2160 index += 1;
2161 lenlo = vpd[index];
2162 index += 1;
2163 lenhi = vpd[index];
2164 index += 1;
2165 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2166 if (Length > len - index)
2167 Length = len - index;
2168 while (Length > 0) {
2169
2170 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2171 index += 2;
2172 i = vpd[index];
2173 index += 1;
2174 j = 0;
2175 Length -= (3+i);
2176 while(i--) {
2177 phba->SerialNumber[j++] = vpd[index++];
2178 if (j == 31)
2179 break;
2180 }
2181 phba->SerialNumber[j] = 0;
2182 continue;
2183 }
2184 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2185 phba->vpd_flag |= VPD_MODEL_DESC;
2186 index += 2;
2187 i = vpd[index];
2188 index += 1;
2189 j = 0;
2190 Length -= (3+i);
2191 while(i--) {
2192 phba->ModelDesc[j++] = vpd[index++];
2193 if (j == 255)
2194 break;
2195 }
2196 phba->ModelDesc[j] = 0;
2197 continue;
2198 }
2199 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2200 phba->vpd_flag |= VPD_MODEL_NAME;
2201 index += 2;
2202 i = vpd[index];
2203 index += 1;
2204 j = 0;
2205 Length -= (3+i);
2206 while(i--) {
2207 phba->ModelName[j++] = vpd[index++];
2208 if (j == 79)
2209 break;
2210 }
2211 phba->ModelName[j] = 0;
2212 continue;
2213 }
2214 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2215 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2216 index += 2;
2217 i = vpd[index];
2218 index += 1;
2219 j = 0;
2220 Length -= (3+i);
2221 while(i--) {
2222 phba->ProgramType[j++] = vpd[index++];
2223 if (j == 255)
2224 break;
2225 }
2226 phba->ProgramType[j] = 0;
2227 continue;
2228 }
2229 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2230 phba->vpd_flag |= VPD_PORT;
2231 index += 2;
2232 i = vpd[index];
2233 index += 1;
2234 j = 0;
2235 Length -= (3+i);
2236 while(i--) {
2237 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2238 (phba->sli4_hba.pport_name_sta ==
2239 LPFC_SLI4_PPNAME_GET)) {
2240 j++;
2241 index++;
2242 } else
2243 phba->Port[j++] = vpd[index++];
2244 if (j == 19)
2245 break;
2246 }
2247 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2248 (phba->sli4_hba.pport_name_sta ==
2249 LPFC_SLI4_PPNAME_NON))
2250 phba->Port[j] = 0;
2251 continue;
2252 }
2253 else {
2254 index += 2;
2255 i = vpd[index];
2256 index += 1;
2257 index += i;
2258 Length -= (3 + i);
2259 }
2260 }
2261 finished = 0;
2262 break;
2263 case 0x78:
2264 finished = 1;
2265 break;
2266 default:
2267 index ++;
2268 break;
2269 }
2270 }
2271
2272 return(1);
2273}
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: buffer for the model name (filled only if currently empty).
 * @descp: buffer for the model description (filled only if currently empty).
 *
 * Maps the PCI device id to a model-name/bus/function triple, derives the
 * maximum link speed from phba->lmt, and formats the results into @mdp
 * (up to 79 chars) and @descp (up to 255 chars).  OneConnect (FCoE)
 * devices get a distinct description format; GE marks 10GE FCoE parts.
 * Returns immediately when both buffers are already populated.
 */
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe",
				"Obsolete, Unsupported FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7_FC:
		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * Posts @cnt (plus any previously missed buffers in pring->missbufcnt)
 * receive buffers to @pring using QUE_RING_BUF64 IOCBs, packing up to
 * two mbuf descriptors per IOCB.  On any allocation or issue failure
 * the partially built resources are released and the remaining count is
 * recorded in pring->missbufcnt for a later retry.
 *
 * Return: 0 when all buffers were posted, otherwise the number of
 * IOCB buffers not yet posted (also stored in pring->missbufcnt).
 */
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for  command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656static int
2657lpfc_post_rcv_buf(struct lpfc_hba *phba)
2658{
2659 struct lpfc_sli *psli = &phba->sli;
2660
2661
2662 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2663
2664
2665 return 0;
2666}
2667
/* S(N, V): rotate the 32-bit value V left by N bits (SHA-1 helper). */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2669
2670
2671
2672
2673
2674
2675
2676
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array of 5 unsigned 32-bit integers.
 *
 * Loads the standard SHA-1 initialization vector into the hash result
 * array, ready for lpfc_sha_iterate().
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	static const uint32_t sha1_iv[5] = {
		0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
	};
	int i;

	for (i = 0; i < 5; i++)
		HashResultPointer[i] = sha1_iv[i];
}
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to the 5-word running hash state (seeded by
 *                     lpfc_sha_init()), updated in place.
 * @HashWorkingPointer: pointer to an 80-word working schedule; words 0-15
 *                      must be filled by the caller, words 16-79 are
 *                      derived here.
 *
 * Performs one 80-round SHA-1 style compression over the working schedule
 * and accumulates the result into @HashResultPointer. Relies on the S()
 * rotate-left macro defined above.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	/* Expand words 16..79 of the message schedule */
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
								     8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	/* 80 rounds; the round function and constant change every 20 rounds */
	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	/* Fold this block's state back into the running result */
	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
/**
 * lpfc_challenge_key - Fold a challenge word into a working hash word
 * @RandomChallenge: pointer to the challenge word.
 * @HashWorking: pointer to the working hash word, XORed in place.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking ^= *RandomChallenge;
}
2759
2760
2761
2762
2763
2764
2765
2766
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to a 5-word array that receives the computed hash.
 *
 * Computes a challenge/response hash over the adapter WWNN: an 80-word
 * working array is seeded with the two WWNN words at both ends ([0]/[78]
 * and [1]/[79]), its first seven words are XORed with phba->RandomData,
 * and the result is produced via lpfc_sha_init()/lpfc_sha_iterate().
 * Silently returns without writing @hbainit if the working buffer cannot
 * be allocated.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	/* 80 zero-initialized words for the SHA message schedule */
	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	/* Mix the adapter's random challenge into the first 7 words */
	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Cleans up the vport's node list: releases every ndlp reference the vport
 * holds, runs the discovery state machine to remove remaining nodes, then
 * busy-waits (10 ms steps, up to 3000 iterations, ~30 s) for fc_nodes to
 * drain before cleaning the vport's RRQs. Takes phba->ndlp_lock and sleeps,
 * so it must be called from process context with no spinlocks held.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			/* Inactive node: re-enable it into UNUSED state so it
			 * can be flagged for free and its reference dropped.
			 */
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp is already being released elsewhere */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* take care of nodes in unused state before the state
		 * machine taking action.
		 */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					     NLP_EVT_DEVICE_RM);
	}

	/* At this point, ALL ndlp's should be gone. Wait for the list to
	 * empty; dump any stragglers after ~30 seconds and give up.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
					&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						kref_read(&ndlp->kref));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889void
2890lpfc_stop_vport_timers(struct lpfc_vport *vport)
2891{
2892 del_timer_sync(&vport->els_tmofunc);
2893 del_timer_sync(&vport->delayed_disc_tmo);
2894 lpfc_can_disctmo(vport);
2895 return;
2896}
2897
2898
2899
2900
2901
2902
2903
2904
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Clears the FCF_REDISC_PEND flag and deletes the FCF rediscovery wait
 * timer. Lock-free variant: the caller must already hold the hbalock
 * (see lpfc_sli4_stop_fcf_redisc_wait_timer() for the locked wrapper).
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924void
2925lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2926{
2927 spin_lock_irq(&phba->hbalock);
2928 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2929
2930 spin_unlock_irq(&phba->hbalock);
2931 return;
2932 }
2933 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2934
2935 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2936 spin_unlock_irq(&phba->hbalock);
2937}
2938
2939
2940
2941
2942
2943
2944
2945
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the physical port's vport timers, the eq_delay work, and the
 * HBA-wide timers (mailbox timeout, fabric block, error-attention poll,
 * heartbeat; plus the RRQ timer on SLI4), followed by the device-group
 * specific timer: the FCP poll timer for LightPulse (SLI3) devices or the
 * FCF rediscovery wait timer for OneConnect (SLI4) devices.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	if (phba->pport)
		lpfc_stop_vport_timers(phba->pport);
	cancel_delayed_work_sync(&phba->eq_delay_work);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_NO_WAIT to return immediately after setting the
 *              block flag, LPFC_MBX_WAIT to also wait for any active
 *              mailbox command to complete.
 *
 * Sets LPFC_BLOCK_MGMT_IO so incoming management requests are rejected.
 * With LPFC_MBX_WAIT it then polls in 2 ms steps for the active mailbox
 * command to finish, bounded by that command's own timeout (or the default
 * LPFC_MBOX_TMO when none is active), and logs an error if the command is
 * still active when the timeout expires.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	/* Block all management I/O first */
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2813 Mgmt IO is Blocked %x "
				"- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
3027
3028
3029
3030
3031
3032
3033
3034
3035
/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI4 only: walks every vport that is not unloading and allocates a fresh
 * RPI for each active ndlp on its node list. A node whose RPI allocation
 * fails is marked inactive (NLP_CLR_NODE_ACT) under ndlp_lock and skipped.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i, rpi;
	unsigned long flags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (vports[i]->load_flag & FC_UNLOADING)
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vports[i]->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			rpi = lpfc_sli4_alloc_rpi(phba);
			if (rpi == LPFC_RPI_ALLOC_ERROR) {
				/* No RPI available: deactivate the node */
				spin_lock_irqsave(&phba->ndlp_lock, flags);
				NLP_CLR_NODE_ACT(ndlp);
				spin_unlock_irqrestore(&phba->ndlp_lock, flags);
				continue;
			}
			ndlp->nlp_rpi = rpi;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
					 "0009 rpi:%x DID:%x "
					 "flg:%x map:%x %p\n", ndlp->nlp_rpi,
					 ndlp->nlp_DID, ndlp->nlp_flag,
					 ndlp->nlp_usg_map, ndlp);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
3077
3078
3079
3080
3081
3082
3083
3084
/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Moves up to XRI_BATCH io buffers from the put list of hardware queue 0
 * into the expedite pool, marking each buffer's expedite flag. Initializes
 * the expedite pool lock and list. Lock order: hwq put-list lock first,
 * then the pool lock (lpfc_destroy_expedite_pool() uses the same order).
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		/* Stop once the pool is seeded with one batch */
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
3112
3113
3114
3115
3116
3117
3118
3119
3120static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3121{
3122 struct lpfc_sli4_hdw_queue *qp;
3123 struct lpfc_io_buf *lpfc_ncmd;
3124 struct lpfc_io_buf *lpfc_ncmd_next;
3125 struct lpfc_epd_pool *epd_pool;
3126 unsigned long iflag;
3127
3128 epd_pool = &phba->epd_pool;
3129 qp = &phba->sli4_hba.hdwq[0];
3130
3131 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3132 spin_lock(&epd_pool->lock);
3133 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3134 &epd_pool->list, list) {
3135 list_move_tail(&lpfc_ncmd->list,
3136 &qp->lpfc_io_buf_list_put);
3137 lpfc_ncmd->flags = false;
3138 qp->put_io_bufs++;
3139 epd_pool->count--;
3140 }
3141 spin_unlock(&epd_pool->lock);
3142 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3143}
3144
3145
3146
3147
3148
3149
3150
3151
3152
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * Creates one multi-XRI pool (public pbl_pool plus private pvt_pool) per
 * hardware queue, splitting the common io XRIs evenly across queues. All
 * buffers on each queue's put list are moved into its public pool. When
 * NVMe is enabled, an expedite pool is created first. On allocation
 * failure, pools created so far (and the expedite pool) are freed and XRI
 * rebalancing (cfg_xri_rebalancing) is disabled.
 **/
void lpfc_create_multixri_pools(struct lpfc_hba *phba)
{
	u32 i, j;
	u32 hwq_count;
	u32 count_per_hwq;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
			phba->sli4_hba.io_xri_cnt);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_create_expedite_pool(phba);

	hwq_count = phba->cfg_hdw_queue;
	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;

	for (i = 0; i < hwq_count; i++) {
		multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);

		if (!multixri_pool) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"1238 Failed to allocate memory for "
					"multixri_pool\n");

			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
				lpfc_destroy_expedite_pool(phba);

			/* Unwind pools created on earlier iterations */
			j = 0;
			while (j < i) {
				qp = &phba->sli4_hba.hdwq[j];
				kfree(qp->p_multixri_pool);
				j++;
			}
			phba->cfg_xri_rebalancing = 0;
			return;
		}

		qp = &phba->sli4_hba.hdwq[i];
		qp->p_multixri_pool = multixri_pool;

		multixri_pool->xri_limit = count_per_hwq;
		multixri_pool->rrb_next_hwqid = i;

		/* Initialize public free xri pool with all put-list bufs */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock_init(&pbl_pool->lock);
		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
		spin_lock(&pbl_pool->lock);
		INIT_LIST_HEAD(&pbl_pool->list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put, list) {
			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
			qp->put_io_bufs--;
			pbl_pool->count++;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
				pbl_pool->count, i);
		spin_unlock(&pbl_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		/* Initialize private free xri pool (starts empty) */
		pvt_pool = &multixri_pool->pvt_pool;
		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
		pvt_pool->low_watermark = XRI_BATCH;
		spin_lock_init(&pvt_pool->lock);
		spin_lock_irqsave(&pvt_pool->lock, iflag);
		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
	}
}
3233
3234
3235
3236
3237
3238
3239
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * Tears down the per-hardware-queue multi-XRI pools: destroys the expedite
 * pool (when NVMe is enabled), flushes the FCP (and NVMe) rings unless the
 * port is unloading, then moves every buffer from each queue's public and
 * private pools back to that queue's put list and frees the pool memory.
 **/
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_destroy_expedite_pool(phba);

	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_sli_flush_fcp_rings(phba);

		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
			lpfc_sli_flush_nvme_rings(phba);
	}

	hwq_count = phba->cfg_hdw_queue;

	for (i = 0; i < hwq_count; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		multixri_pool = qp->p_multixri_pool;
		if (!multixri_pool)
			continue;

		qp->p_multixri_pool = NULL;

		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock(&pbl_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
				pbl_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pbl_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pbl_pool->count--;
		}

		INIT_LIST_HEAD(&pbl_pool->list);
		pbl_pool->count = 0;

		spin_unlock(&pbl_pool->lock);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		spin_lock(&pvt_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
				pvt_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pvt_pool->count--;
		}

		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;

		spin_unlock(&pvt_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		kfree(multixri_pool);
	}
}
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * Brings an offline HBA online: blocks management I/O, runs the SLI3 or
 * SLI4 HBA setup path (re-creating the NVMe localport on SLI4 initiators),
 * clears FC_OFFLINE_MODE on all vports (re-flagging VPI registration needs
 * as appropriate), re-creates the multi-XRI pools if XRI rebalancing is
 * enabled, and unblocks management I/O.
 *
 * Return codes
 *   0 - successful (or already online / no phba)
 *   1 - failed (HBA setup failed)
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i, error = 0;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);

		/* Reestablish the local initiator port.
		 * The offline process destroyed the previous lport.
		 */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
				!phba->nvmet_support) {
			error = lpfc_nvme_create_localport(phba->pport);
			if (error)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6132 NVME restore reg failed "
					"on nvmei error x%x\n", error);
		}
	} else {
		lpfc_sli_queue_init(phba);
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				/* VPIs were wiped by HBA reset: non-physical
				 * ports must start from vpi 0 again.
				 */
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->cfg_xri_rebalancing)
		lpfc_create_multixri_pools(phba);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421void
3422lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3423{
3424 unsigned long iflag;
3425
3426 spin_lock_irqsave(&phba->hbalock, iflag);
3427 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3428 spin_unlock_irqrestore(&phba->hbalock, iflag);
3429}
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439void
3440lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3441{
3442 struct lpfc_vport *vport = phba->pport;
3443 struct lpfc_nodelist *ndlp, *next_ndlp;
3444 struct lpfc_vport **vports;
3445 struct Scsi_Host *shost;
3446 int i;
3447
3448 if (vport->fc_flag & FC_OFFLINE_MODE)
3449 return;
3450
3451 lpfc_block_mgmt_io(phba, mbx_action);
3452
3453 lpfc_linkdown(phba);
3454
3455
3456 vports = lpfc_create_vport_work_array(phba);
3457 if (vports != NULL) {
3458 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3459 if (vports[i]->load_flag & FC_UNLOADING)
3460 continue;
3461 shost = lpfc_shost_from_vport(vports[i]);
3462 spin_lock_irq(shost->host_lock);
3463 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3464 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3465 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3466 spin_unlock_irq(shost->host_lock);
3467
3468 shost = lpfc_shost_from_vport(vports[i]);
3469 list_for_each_entry_safe(ndlp, next_ndlp,
3470 &vports[i]->fc_nodes,
3471 nlp_listp) {
3472 if (!NLP_CHK_NODE_ACT(ndlp))
3473 continue;
3474 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3475 continue;
3476 if (ndlp->nlp_type & NLP_FABRIC) {
3477 lpfc_disc_state_machine(vports[i], ndlp,
3478 NULL, NLP_EVT_DEVICE_RECOVERY);
3479 lpfc_disc_state_machine(vports[i], ndlp,
3480 NULL, NLP_EVT_DEVICE_RM);
3481 }
3482 spin_lock_irq(shost->host_lock);
3483 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3484 spin_unlock_irq(shost->host_lock);
3485
3486
3487
3488
3489
3490 if (phba->sli_rev == LPFC_SLI_REV4) {
3491 lpfc_printf_vlog(ndlp->vport,
3492 KERN_INFO, LOG_NODE,
3493 "0011 lpfc_offline: "
3494 "ndlp:x%p did %x "
3495 "usgmap:x%x rpi:%x\n",
3496 ndlp, ndlp->nlp_DID,
3497 ndlp->nlp_usg_map,
3498 ndlp->nlp_rpi);
3499
3500 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3501 }
3502 lpfc_unreg_rpi(vports[i], ndlp);
3503 }
3504 }
3505 }
3506 lpfc_destroy_vport_work_array(phba, vports);
3507
3508 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3509
3510 if (phba->wq)
3511 flush_workqueue(phba->wq);
3512}
3513
3514
3515
3516
3517
3518
3519
3520
3521
/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the HBA offline: stops the port hardware, destroys the NVMe
 * target/local ports, stops all vport timers, brings the SLI layer down,
 * clears pending work, sets FC_OFFLINE_MODE on every vport, and destroys
 * the multi-XRI pools if XRI rebalancing is enabled. A no-op when the
 * port is already offline.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);

	/* Tear down the local and target port registrations.  The
	 * nvme transports need to cleanup.
	 */
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(phba->pport);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");

	/* Bring down the SLI Layer and cleanup.  The HBA is offline
	 * now.
	 */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);
}
3568
3569
3570
3571
3572
3573
3574
3575
3576
/**
 * lpfc_scsi_free - Free all the SCSI buffers allocated to a HBA (SLI3 path)
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks both the put and get SCSI buffer free lists, releasing each
 * buffer's DMA region back to the sg DMA pool and freeing the wrapper
 * struct, while keeping total_scsi_bufs accurate. A no-op when FCP is
 * not enabled. Holds hbalock across both per-list locks.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *sb, *sb_next;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);
	spin_unlock_irq(&phba->hbalock);
}
3612
3613
3614
3615
3616
3617
3618
3619
3620
/**
 * lpfc_io_free - Free all the IO buffers allocated to a HBA (SLI4 path)
 * @phba: pointer to lpfc hba data structure.
 *
 * For every hardware queue, walks the put and get IO buffer free lists,
 * releasing each buffer's DMA region back to the sg DMA pool and freeing
 * the wrapper struct, keeping the per-queue put/get/total counters
 * accurate.
 **/
void
lpfc_io_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	struct lpfc_sli4_hdw_queue *qp;
	int idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		/* Release all the lpfc_nvme_bufs maintained by this host. */
		spin_lock(&qp->io_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->put_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_put_lock);

		spin_lock(&qp->io_buf_list_get_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_get,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->get_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_get_lock);
	}
}
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Resizes the ELS sgl list to match the post-reset ELS IOCB count:
 * allocates additional sgl entries (with mbufs) when the count grew, or
 * frees surplus entries when it shrank, then remaps every remaining entry
 * to a freshly allocated XRI. On failure the entire ELS sgl list is freed.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 *   -ENOMEM - sgl entry, mbuf, or XRI allocation failed
 **/
int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	LIST_HEAD(els_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		/* splice the allocated entries onto the driver's sgl list */
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrinked */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				__lpfc_mbuf_free(phba, sglq_entry->virt,
						 sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	return rc;
}
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Resizes the NVMET sgl list to match the post-reset XRI split (max_xri
 * minus the ELS IOCB count): allocates additional sgl entries (with NVMET
 * buffers) when the count grew, or frees surplus entries when it shrank,
 * then remaps every remaining entry to a freshly allocated XRI. On
 * failure the entire NVMET sgl list is freed.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 *   -ENOMEM - sgl entry, buffer, or XRI allocation failed
 **/
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);
	int rc;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		/* allocate the additional nvmet sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6303 Failure to allocate an "
						"NVMET sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
		}
		/* splice the allocated entries onto the driver's sgl list */
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
		/* release extra nvmet sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&nvmet_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
						    sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

	/* update xris to nvmet sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6307 Failed to allocate xri for "
					"NVMET sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_nvmet_sgl_list(phba);
	return rc;
}
3899
/**
 * lpfc_io_buf_flush - Drain all hdwq IO buffers into one sorted list
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list head that receives the collected buffers.
 *
 * Empties the get and put free lists of every hardware queue into a
 * temporary list (zeroing the per-queue counters), then inserts each
 * buffer into @cbuf in ascending sli4_xritag order via insertion sort.
 *
 * Return: the number of buffers collected.
 **/
int
lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
{
	LIST_HEAD(blist);
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_io_buf *iobufp, *prev_iobufp;
	int idx, cnt, xri, inserted;

	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		spin_lock_irq(&qp->io_buf_list_get_lock);
		spin_lock(&qp->io_buf_list_put_lock);

		/* Take everything off the get and put lists */
		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
		list_splice(&qp->lpfc_io_buf_list_put, &blist);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
		cnt += qp->get_io_bufs + qp->put_io_bufs;
		qp->get_io_bufs = 0;
		qp->put_io_bufs = 0;
		qp->total_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock_irq(&qp->io_buf_list_get_lock);
	}

	/*
	 * Take IO buffers off blist and put on cbuf sorted by XRI.
	 * This is because POST_SGL takes a sequential range of XRIs
	 * to post to the firmware.
	 */
	for (idx = 0; idx < cnt; idx++) {
		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
		if (!lpfc_cmd)
			return cnt;
		if (idx == 0) {
			list_add_tail(&lpfc_cmd->list, cbuf);
			continue;
		}
		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
		inserted = 0;
		prev_iobufp = NULL;
		/* Insertion sort: find the first entry with a larger XRI */
		list_for_each_entry(iobufp, cbuf, list) {
			if (xri < iobufp->cur_iocbq.sli4_xritag) {
				if (prev_iobufp)
					list_add(&lpfc_cmd->list,
						 &prev_iobufp->list);
				else
					list_add(&lpfc_cmd->list, cbuf);
				inserted = 1;
				break;
			}
			prev_iobufp = iobufp;
		}
		if (!inserted)
			list_add_tail(&lpfc_cmd->list, cbuf);
	}
	return cnt;
}
3961
/**
 * lpfc_io_buf_replenish - Redistribute IO buffers across hardware queues
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list of IO buffers to redistribute (emptied by this routine).
 *
 * Round-robins the buffers on @cbuf over all hardware queues, placing
 * each buffer on a queue's put list and updating the queue counters
 * under the put-list lock.
 *
 * Return: number of buffers distributed.
 */
int
lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	int idx, cnt;

	qp = phba->sli4_hba.hdwq;
	cnt = 0;
	while (!list_empty(cbuf)) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			list_remove_head(cbuf, lpfc_cmd,
					 struct lpfc_io_buf, list);
			if (!lpfc_cmd)
				return cnt;
			cnt++;
			/* Bind the buffer to hardware queue idx */
			qp = &phba->sli4_hba.hdwq[idx];
			lpfc_cmd->hdwq_no = idx;
			lpfc_cmd->hdwq = qp;
			/* Clear any stale completion handlers */
			lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
			lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
			spin_lock(&qp->io_buf_list_put_lock);
			list_add_tail(&lpfc_cmd->list,
				      &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			qp->total_io_bufs++;
			spin_unlock(&qp->io_buf_list_put_lock);
		}
	}
	return cnt;
}
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
/**
 * lpfc_sli4_io_sgl_update - Update IO xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Recomputes the number of XRIs available for IO (max_xri minus the
 * XRIs reserved for ELS), releases surplus IO buffers if the currently
 * allocated count exceeds the new maximum, assigns a fresh XRI to every
 * remaining buffer, and redistributes them across the hardware queues.
 *
 * Return: 0 on success; -ENOMEM if an XRI cannot be allocated, in which
 * case all IO buffers are freed via lpfc_io_free().
 */
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc, cnt;

	/*
	 * Update the IO xri-sgl count: whatever is left after the
	 * ELS reservation belongs to SCSI/NVME IO.
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.io_xri_max = io_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated XRI sgl count:%d, "
			"maximum XRI count:%d\n",
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max);

	/* Collect all IO buffers off the hardware queues, sorted by XRI */
	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
		/* max IO xri shrunk below the allocated IO xri count */
		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
					phba->sli4_hba.io_xri_max;
		/* release the surplus buffers and their DMA memory */
		for (i = 0; i < io_xri_cnt; i++) {
			list_remove_head(&io_sgl_list, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}

	/* update xris associated to remaining allocated nvme buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	phba->sli4_hba.io_xri_cnt = cnt;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &io_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	/* hand the re-tagged buffers back to the hardware queues */
	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	lpfc_io_free(phba);
	return rc;
}
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089int
4090lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4091{
4092 struct lpfc_io_buf *lpfc_ncmd;
4093 struct lpfc_iocbq *pwqeq;
4094 uint16_t iotag, lxri = 0;
4095 int bcnt, num_posted;
4096 LIST_HEAD(prep_nblist);
4097 LIST_HEAD(post_nblist);
4098 LIST_HEAD(nvme_nblist);
4099
4100
4101 if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
4102 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4103 "6426 Common buffer size %zd exceeds %d\n",
4104 sizeof(struct lpfc_io_buf),
4105 LPFC_COMMON_IO_BUF_SZ);
4106 return 0;
4107 }
4108
4109 phba->sli4_hba.io_xri_cnt = 0;
4110 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4111 lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
4112 if (!lpfc_ncmd)
4113 break;
4114
4115
4116
4117
4118
4119 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4120 GFP_KERNEL,
4121 &lpfc_ncmd->dma_handle);
4122 if (!lpfc_ncmd->data) {
4123 kfree(lpfc_ncmd);
4124 break;
4125 }
4126
4127
4128
4129
4130
4131 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4132 (((unsigned long)(lpfc_ncmd->data) &
4133 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4134 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4135 "3369 Memory alignment err: addr=%lx\n",
4136 (unsigned long)lpfc_ncmd->data);
4137 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4138 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4139 kfree(lpfc_ncmd);
4140 break;
4141 }
4142
4143 lxri = lpfc_sli4_next_xritag(phba);
4144 if (lxri == NO_XRI) {
4145 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4146 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4147 kfree(lpfc_ncmd);
4148 break;
4149 }
4150 pwqeq = &lpfc_ncmd->cur_iocbq;
4151
4152
4153 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4154 if (iotag == 0) {
4155 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4156 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4157 kfree(lpfc_ncmd);
4158 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
4159 "6121 Failed to allocate IOTAG for"
4160 " XRI:0x%x\n", lxri);
4161 lpfc_sli4_free_xri(phba, lxri);
4162 break;
4163 }
4164 pwqeq->sli4_lxritag = lxri;
4165 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4166 pwqeq->context1 = lpfc_ncmd;
4167
4168
4169 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4170 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4171 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4172 spin_lock_init(&lpfc_ncmd->buf_lock);
4173
4174
4175 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4176 phba->sli4_hba.io_xri_cnt++;
4177 }
4178 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4179 "6114 Allocate %d out of %d requested new NVME "
4180 "buffers\n", bcnt, num_to_alloc);
4181
4182
4183 if (!list_empty(&post_nblist))
4184 num_posted = lpfc_sli4_post_io_sgl_list(
4185 phba, &post_nblist, bcnt);
4186 else
4187 num_posted = 0;
4188
4189 return num_posted;
4190}
4191
/**
 * lpfc_get_wwpn - Read the HBA's WWPN from non-volatile parameters
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a READ_NV mailbox command in polling mode and extracts the
 * port name. On SLI-4 the raw value is converted from big endian; on
 * earlier revisions the two 32-bit halves are swapped (rol64 by 32).
 *
 * Return: the WWPN, or (uint64_t)-1 on allocation or mailbox failure.
 */
static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;

	/* First get WWN of HBA instance */
	lpfc_read_nv(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6019 Mailbox failed , mbxCmd x%x "
				"READ_NV, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t) -1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
	/* wwn is WWPN of HBA instance */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		return be64_to_cpu(wwn);
	else
		return rol64(wwn, 32);
}
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID for this port.
 * @dev: pointer to the device data structure.
 *
 * Allocates a Scsi_Host — choosing the host template from the enabled
 * FC4 types and the "no HBA reset" WWPN module-parameter list — then
 * initializes the embedded lpfc_vport, its timers and node lists,
 * registers the host with the SCSI midlayer, and links the vport onto
 * the HBA's port list.
 *
 * Return: pointer to the created vport, or NULL on any failure.
 */
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost = NULL;
	int error = 0;
	int i;
	uint64_t wwn;
	bool use_no_reset_hba = false;
	int rc;

	if (lpfc_no_hba_reset_cnt) {
		if (phba->sli_rev < LPFC_SLI_REV4 &&
		    dev == &phba->pcidev->dev) {
			/* Reset the port first so READ_NV is reliable */
			lpfc_sli_brdrestart(phba);
			rc = lpfc_sli_chipset_init(phba);
			if (rc)
				return NULL;
		}
		wwn = lpfc_get_wwpn(phba);
	}

	/* NOTE: wwn is only read when lpfc_no_hba_reset_cnt != 0, i.e.
	 * after it was assigned in the block above.
	 */
	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
		if (wwn == lpfc_no_hba_reset[i]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6020 Setting use_no_reset port=%llx\n",
					wwn);
			use_no_reset_hba = true;
			break;
		}
	}

	/* Pick the host template: FCP ports get a full (or no-reset)
	 * template, NVME-only ports get the NVME template.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		if (dev != &phba->pcidev->dev) {
			shost = scsi_host_alloc(&lpfc_vport_template,
						sizeof(struct lpfc_vport));
		} else {
			if (!use_no_reset_hba)
				shost = scsi_host_alloc(&lpfc_template,
						sizeof(struct lpfc_vport));
			else
				shost = scsi_host_alloc(&lpfc_template_no_hr,
						sizeof(struct lpfc_vport));
		}
	} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		shost = scsi_host_alloc(&lpfc_template_nvme,
					sizeof(struct lpfc_vport));
	}
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;
	lpfc_get_vport_cfgparam(vport);

	/* Adjust value in vport */
	vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;

	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Clamp the MQ threshold to the hardware queue count */
		if (!phba->cfg_fcp_mq_threshold ||
		    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
			phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;

		shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
					    phba->cfg_fcp_mq_threshold);

		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
	} else
		/* SLI-3 has a single hardware queue */
		shost->nr_hw_queues = 1;

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);

	timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);

	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);

	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->port_list_lock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->port_list_lock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}
4370
4371
4372
4373
4374
4375
4376
4377
/**
 * destroy_port - destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * Tears down a vport in the reverse of creation order: debugfs first,
 * then FC transport and SCSI midlayer deregistration, removal from the
 * HBA's port list, and finally node/resource cleanup via lpfc_cleanup().
 */
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_cleanup(vport);
	return;
}
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406int
4407lpfc_get_instance(void)
4408{
4409 int ret;
4410
4411 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4412 return ret < 0 ? -1 : ret;
4413}
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * Polled by the SCSI midlayer during host scan. Reports the scan done
 * (non-zero) when discovery has quiesced, or forces completion early on
 * unload, a 30-second overall timeout, or a 15-second link-down timeout.
 *
 * Return: 0 to keep scanning, 1 when the scan should complete.
 */
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		/* Hard cap: never hold the scan longer than 30 seconds */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		/* Link has been down 15s — give up waiting for it */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	/* Keep scanning while discovery is still in progress */
	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
4473
4474static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4475{
4476 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4477 struct lpfc_hba *phba = vport->phba;
4478
4479 fc_host_supported_speeds(shost) = 0;
4480 if (phba->lmt & LMT_128Gb)
4481 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4482 if (phba->lmt & LMT_64Gb)
4483 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4484 if (phba->lmt & LMT_32Gb)
4485 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4486 if (phba->lmt & LMT_16Gb)
4487 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4488 if (phba->lmt & LMT_10Gb)
4489 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4490 if (phba->lmt & LMT_8Gb)
4491 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4492 if (phba->lmt & LMT_4Gb)
4493 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4494 if (phba->lmt & LMT_2Gb)
4495 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4496 if (phba->lmt & LMT_1Gb)
4497 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4498}
4499
4500
4501
4502
4503
4504
4505
4506
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes
 * @shost: pointer to SCSI host data structure.
 *
 * Populates the FC transport attributes of the host (node/port names,
 * supported classes and speeds, FC-4 type bitmaps, max frame size,
 * devloss timeout, max NPIV vports) and clears the vport's FC_LOADING
 * flag under the host lock.
 */
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
	 */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	/* FC-4 TYPEs bitmap: bytes 2 and 7 are set — presumably the FCP
	 * and CT type bits; confirm against the FC-GS FC-4 TYPEs encoding.
	 */
	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	lpfc_host_supported_speeds_set(shost);

	/* Max frame size from the BB receive field of the service params */
	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
4546
4547
4548
4549
4550
4551
4552
4553
4554
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Quiesces an SLI-3 port: disables interrupt generation, clears all
 * pending host-attention bits, then stops the HBA timers and pending
 * port work events. Each MMIO write is followed by a readl() to flush
 * the posted write.
 */
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
4569
4570
4571
4572
4573
4574
4575
4576
4577
/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Quiesces an SLI-4 port: stops the HBA timers, clears pending port
 * work events (the physical port may not exist yet, hence the check),
 * and marks interrupts disabled.
 */
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
4587
4588
4589
4590
4591
4592
4593
4594
/**
 * lpfc_stop_port - Wrapper to stop the device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Dispatches to the SLI-revision-specific stop routine installed in
 * phba->lpfc_stop_port, then drains the HBA workqueue if one exists.
 */
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
4603
4604
4605
4606
4607
4608
4609
/**
 * lpfc_fcf_redisc_wait_start_timer - Start the FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Arms the FCF rediscovery quiescence timer and, under the hba lock,
 * clears the FCF available/scan-done state and marks rediscovery as
 * pending so the timer handler knows the wait is active.
 */
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: timer context (embedded in phba->fcf.redisc_wait).
 *
 * Fires when the FCF rediscovery quiescence period expires. If the
 * pending flag is still set, converts it to a rediscovery event flag
 * and wakes the worker thread to perform the rediscovery; otherwise the
 * wait was cancelled and nothing is done.
 */
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
4656
4657
4658
4659
4660
4661
4662
4663
4664static void
4665lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4666 struct lpfc_acqe_link *acqe_link)
4667{
4668 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4669 case LPFC_ASYNC_LINK_FAULT_NONE:
4670 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4671 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4672 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4673 break;
4674 default:
4675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4676 "0398 Unknown link fault code: x%x\n",
4677 bf_get(lpfc_acqe_link_fault, acqe_link));
4678 break;
4679 }
4680}
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692static uint8_t
4693lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4694 struct lpfc_acqe_link *acqe_link)
4695{
4696 uint8_t att_type;
4697
4698 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4699 case LPFC_ASYNC_LINK_STATUS_DOWN:
4700 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4701 att_type = LPFC_ATT_LINK_DOWN;
4702 break;
4703 case LPFC_ASYNC_LINK_STATUS_UP:
4704
4705 att_type = LPFC_ATT_RESERVED;
4706 break;
4707 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4708 att_type = LPFC_ATT_LINK_UP;
4709 break;
4710 default:
4711 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4712 "0399 Invalid link attention type: x%x\n",
4713 bf_get(lpfc_acqe_link_status, acqe_link));
4714 att_type = LPFC_ATT_RESERVED;
4715 break;
4716 }
4717 return att_type;
4718}
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728uint32_t
4729lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4730{
4731 uint32_t link_speed;
4732
4733 if (!lpfc_is_link_up(phba))
4734 return 0;
4735
4736 if (phba->sli_rev <= LPFC_SLI_REV3) {
4737 switch (phba->fc_linkspeed) {
4738 case LPFC_LINK_SPEED_1GHZ:
4739 link_speed = 1000;
4740 break;
4741 case LPFC_LINK_SPEED_2GHZ:
4742 link_speed = 2000;
4743 break;
4744 case LPFC_LINK_SPEED_4GHZ:
4745 link_speed = 4000;
4746 break;
4747 case LPFC_LINK_SPEED_8GHZ:
4748 link_speed = 8000;
4749 break;
4750 case LPFC_LINK_SPEED_10GHZ:
4751 link_speed = 10000;
4752 break;
4753 case LPFC_LINK_SPEED_16GHZ:
4754 link_speed = 16000;
4755 break;
4756 default:
4757 link_speed = 0;
4758 }
4759 } else {
4760 if (phba->sli4_hba.link_state.logical_speed)
4761 link_speed =
4762 phba->sli4_hba.link_state.logical_speed;
4763 else
4764 link_speed = phba->sli4_hba.link_state.speed;
4765 }
4766 return link_speed;
4767}
4768
4769
4770
4771
4772
4773
4774
4775
4776
4777
4778
4779
4780static uint32_t
4781lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4782 uint8_t speed_code)
4783{
4784 uint32_t port_speed;
4785
4786 switch (evt_code) {
4787 case LPFC_TRAILER_CODE_LINK:
4788 switch (speed_code) {
4789 case LPFC_ASYNC_LINK_SPEED_ZERO:
4790 port_speed = 0;
4791 break;
4792 case LPFC_ASYNC_LINK_SPEED_10MBPS:
4793 port_speed = 10;
4794 break;
4795 case LPFC_ASYNC_LINK_SPEED_100MBPS:
4796 port_speed = 100;
4797 break;
4798 case LPFC_ASYNC_LINK_SPEED_1GBPS:
4799 port_speed = 1000;
4800 break;
4801 case LPFC_ASYNC_LINK_SPEED_10GBPS:
4802 port_speed = 10000;
4803 break;
4804 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4805 port_speed = 20000;
4806 break;
4807 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4808 port_speed = 25000;
4809 break;
4810 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4811 port_speed = 40000;
4812 break;
4813 default:
4814 port_speed = 0;
4815 }
4816 break;
4817 case LPFC_TRAILER_CODE_FC:
4818 switch (speed_code) {
4819 case LPFC_FC_LA_SPEED_UNKNOWN:
4820 port_speed = 0;
4821 break;
4822 case LPFC_FC_LA_SPEED_1G:
4823 port_speed = 1000;
4824 break;
4825 case LPFC_FC_LA_SPEED_2G:
4826 port_speed = 2000;
4827 break;
4828 case LPFC_FC_LA_SPEED_4G:
4829 port_speed = 4000;
4830 break;
4831 case LPFC_FC_LA_SPEED_8G:
4832 port_speed = 8000;
4833 break;
4834 case LPFC_FC_LA_SPEED_10G:
4835 port_speed = 10000;
4836 break;
4837 case LPFC_FC_LA_SPEED_16G:
4838 port_speed = 16000;
4839 break;
4840 case LPFC_FC_LA_SPEED_32G:
4841 port_speed = 32000;
4842 break;
4843 case LPFC_FC_LA_SPEED_64G:
4844 port_speed = 64000;
4845 break;
4846 case LPFC_FC_LA_SPEED_128G:
4847 port_speed = 128000;
4848 break;
4849 default:
4850 port_speed = 0;
4851 }
4852 break;
4853 default:
4854 port_speed = 0;
4855 }
4856 return port_speed;
4857}
4858
4859
4860
4861
4862
4863
4864
4865
4866static void
4867lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4868 struct lpfc_acqe_link *acqe_link)
4869{
4870 struct lpfc_dmabuf *mp;
4871 LPFC_MBOXQ_t *pmb;
4872 MAILBOX_t *mb;
4873 struct lpfc_mbx_read_top *la;
4874 uint8_t att_type;
4875 int rc;
4876
4877 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
4878 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
4879 return;
4880 phba->fcoe_eventtag = acqe_link->event_tag;
4881 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4882 if (!pmb) {
4883 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4884 "0395 The mboxq allocation failed\n");
4885 return;
4886 }
4887 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4888 if (!mp) {
4889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4890 "0396 The lpfc_dmabuf allocation failed\n");
4891 goto out_free_pmb;
4892 }
4893 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4894 if (!mp->virt) {
4895 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4896 "0397 The mbuf allocation failed\n");
4897 goto out_free_dmabuf;
4898 }
4899
4900
4901 lpfc_els_flush_all_cmd(phba);
4902
4903
4904 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4905
4906
4907 phba->sli.slistat.link_event++;
4908
4909
4910 lpfc_read_topology(phba, pmb, mp);
4911 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4912 pmb->vport = phba->pport;
4913
4914
4915 phba->sli4_hba.link_state.speed =
4916 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4917 bf_get(lpfc_acqe_link_speed, acqe_link));
4918 phba->sli4_hba.link_state.duplex =
4919 bf_get(lpfc_acqe_link_duplex, acqe_link);
4920 phba->sli4_hba.link_state.status =
4921 bf_get(lpfc_acqe_link_status, acqe_link);
4922 phba->sli4_hba.link_state.type =
4923 bf_get(lpfc_acqe_link_type, acqe_link);
4924 phba->sli4_hba.link_state.number =
4925 bf_get(lpfc_acqe_link_number, acqe_link);
4926 phba->sli4_hba.link_state.fault =
4927 bf_get(lpfc_acqe_link_fault, acqe_link);
4928 phba->sli4_hba.link_state.logical_speed =
4929 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4930
4931 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4932 "2900 Async FC/FCoE Link event - Speed:%dGBit "
4933 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4934 "Logical speed:%dMbps Fault:%d\n",
4935 phba->sli4_hba.link_state.speed,
4936 phba->sli4_hba.link_state.topology,
4937 phba->sli4_hba.link_state.status,
4938 phba->sli4_hba.link_state.type,
4939 phba->sli4_hba.link_state.number,
4940 phba->sli4_hba.link_state.logical_speed,
4941 phba->sli4_hba.link_state.fault);
4942
4943
4944
4945
4946 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4947 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4948 if (rc == MBX_NOT_FINISHED)
4949 goto out_free_dmabuf;
4950 return;
4951 }
4952
4953
4954
4955
4956
4957
4958 mb = &pmb->u.mb;
4959 mb->mbxStatus = MBX_SUCCESS;
4960
4961
4962 lpfc_sli4_parse_latt_fault(phba, acqe_link);
4963
4964
4965 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4966 la->eventTag = acqe_link->event_tag;
4967 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4968 bf_set(lpfc_mbx_read_top_link_spd, la,
4969 (bf_get(lpfc_acqe_link_speed, acqe_link)));
4970
4971
4972 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4973 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4974 bf_set(lpfc_mbx_read_top_il, la, 0);
4975 bf_set(lpfc_mbx_read_top_pb, la, 0);
4976 bf_set(lpfc_mbx_read_top_fa, la, 0);
4977 bf_set(lpfc_mbx_read_top_mm, la, 0);
4978
4979
4980 lpfc_mbx_cmpl_read_topology(phba, pmb);
4981
4982 return;
4983
4984out_free_dmabuf:
4985 kfree(mp);
4986out_free_pmb:
4987 mempool_free(pmb, phba->mbox_mem_pool);
4988}
4989
4990
4991
4992
4993
4994
4995
4996
4997
4998
4999
5000
5001
5002static uint8_t
5003lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5004{
5005 uint8_t port_speed;
5006
5007 switch (speed_code) {
5008 case LPFC_FC_LA_SPEED_1G:
5009 port_speed = LPFC_LINK_SPEED_1GHZ;
5010 break;
5011 case LPFC_FC_LA_SPEED_2G:
5012 port_speed = LPFC_LINK_SPEED_2GHZ;
5013 break;
5014 case LPFC_FC_LA_SPEED_4G:
5015 port_speed = LPFC_LINK_SPEED_4GHZ;
5016 break;
5017 case LPFC_FC_LA_SPEED_8G:
5018 port_speed = LPFC_LINK_SPEED_8GHZ;
5019 break;
5020 case LPFC_FC_LA_SPEED_16G:
5021 port_speed = LPFC_LINK_SPEED_16GHZ;
5022 break;
5023 case LPFC_FC_LA_SPEED_32G:
5024 port_speed = LPFC_LINK_SPEED_32GHZ;
5025 break;
5026 case LPFC_FC_LA_SPEED_64G:
5027 port_speed = LPFC_LINK_SPEED_64GHZ;
5028 break;
5029 case LPFC_FC_LA_SPEED_128G:
5030 port_speed = LPFC_LINK_SPEED_128GHZ;
5031 break;
5032 case LPFC_FC_LA_SPEED_256G:
5033 port_speed = LPFC_LINK_SPEED_256GHZ;
5034 break;
5035 default:
5036 port_speed = 0;
5037 break;
5038 }
5039
5040 return port_speed;
5041}
5042
/*
 * trunk_link_status(__idx) - printable link state of trunk port __idx:
 * "Link up"/"Link down" from the cached per-port state when the port is
 * configured for trunking, "NA" otherwise. Expands against acqe_fc and
 * phba in the caller's scope.
 */
#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"

/*
 * trunk_port_fault(__idx) - "YES"/"NO" fault indication for a configured
 * trunk port, "NA" when the port is not configured. Expands against
 * acqe_fc and port_fault in the caller's scope.
 */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
5051
/**
 * lpfc_update_trunk_link_status - Update the HBA's cached trunk link state
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the FC link-attention async completion queue entry.
 *
 * Decodes a trunking link-attention ACQE: records the aggregate physical
 * and logical speeds, updates the up/down state and fault code of each
 * configured trunk port, and logs the result (including per-port fault
 * indications when any port reports a fault).
 */
static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
				phba,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	/* Update per-port state/fault for each configured trunk port */
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
			/*
			 * Fault codes above 0xA are not in the
			 * trunk_errmsg table; flag them for a driver
			 * update rather than indexing out of bounds.
			 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129static void
5130lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5131{
5132 struct lpfc_dmabuf *mp;
5133 LPFC_MBOXQ_t *pmb;
5134 MAILBOX_t *mb;
5135 struct lpfc_mbx_read_top *la;
5136 int rc;
5137
5138 if (bf_get(lpfc_trailer_type, acqe_fc) !=
5139 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
5140 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5141 "2895 Non FC link Event detected.(%d)\n",
5142 bf_get(lpfc_trailer_type, acqe_fc));
5143 return;
5144 }
5145
5146 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5147 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5148 lpfc_update_trunk_link_status(phba, acqe_fc);
5149 return;
5150 }
5151
5152
5153 phba->sli4_hba.link_state.speed =
5154 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5155 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5156 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5157 phba->sli4_hba.link_state.topology =
5158 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5159 phba->sli4_hba.link_state.status =
5160 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5161 phba->sli4_hba.link_state.type =
5162 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5163 phba->sli4_hba.link_state.number =
5164 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5165 phba->sli4_hba.link_state.fault =
5166 bf_get(lpfc_acqe_link_fault, acqe_fc);
5167
5168 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5169 LPFC_FC_LA_TYPE_LINK_DOWN)
5170 phba->sli4_hba.link_state.logical_speed = 0;
5171 else if (!phba->sli4_hba.conf_trunk)
5172 phba->sli4_hba.link_state.logical_speed =
5173 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5174
5175 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5176 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5177 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5178 "%dMbps Fault:%d\n",
5179 phba->sli4_hba.link_state.speed,
5180 phba->sli4_hba.link_state.topology,
5181 phba->sli4_hba.link_state.status,
5182 phba->sli4_hba.link_state.type,
5183 phba->sli4_hba.link_state.number,
5184 phba->sli4_hba.link_state.logical_speed,
5185 phba->sli4_hba.link_state.fault);
5186 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5187 if (!pmb) {
5188 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5189 "2897 The mboxq allocation failed\n");
5190 return;
5191 }
5192 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5193 if (!mp) {
5194 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5195 "2898 The lpfc_dmabuf allocation failed\n");
5196 goto out_free_pmb;
5197 }
5198 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5199 if (!mp->virt) {
5200 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5201 "2899 The mbuf allocation failed\n");
5202 goto out_free_dmabuf;
5203 }
5204
5205
5206 lpfc_els_flush_all_cmd(phba);
5207
5208
5209 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5210
5211
5212 phba->sli.slistat.link_event++;
5213
5214
5215 lpfc_read_topology(phba, pmb, mp);
5216 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5217 pmb->vport = phba->pport;
5218
5219 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
5220 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5221
5222 switch (phba->sli4_hba.link_state.status) {
5223 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5224 phba->link_flag |= LS_MDS_LINK_DOWN;
5225 break;
5226 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5227 phba->link_flag |= LS_MDS_LOOPBACK;
5228 break;
5229 default:
5230 break;
5231 }
5232
5233
5234 mb = &pmb->u.mb;
5235 mb->mbxStatus = MBX_SUCCESS;
5236
5237
5238 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
5239
5240
5241 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5242 la->eventTag = acqe_fc->event_tag;
5243
5244 if (phba->sli4_hba.link_state.status ==
5245 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5246 bf_set(lpfc_mbx_read_top_att_type, la,
5247 LPFC_FC_LA_TYPE_UNEXP_WWPN);
5248 } else {
5249 bf_set(lpfc_mbx_read_top_att_type, la,
5250 LPFC_FC_LA_TYPE_LINK_DOWN);
5251 }
5252
5253 lpfc_mbx_cmpl_read_topology(phba, pmb);
5254
5255 return;
5256 }
5257
5258 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5259 if (rc == MBX_NOT_FINISHED)
5260 goto out_free_dmabuf;
5261 return;
5262
5263out_free_dmabuf:
5264 kfree(mp);
5265out_free_pmb:
5266 mempool_free(pmb, phba->mbox_mem_pool);
5267}
5268
5269
5270
5271
5272
5273
5274
5275
/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events. It decodes
 * the trailer type from the ACQE and dispatches on it: temperature
 * threshold events are forwarded to the FC transport as vendor events,
 * misconfigured-optics events update the cached optic state and refresh
 * the supported-speeds mask, and remote D_Port test events are logged.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int rc, i;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d\n",
			acqe_sli->event_data1, acqe_sli->event_data2,
			evt_type);

	/* Port name is reported per adapter port; '?' if not yet known */
	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?';

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		/* Remember the transgression for later SFP diagnostics */
		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port from the event data */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state has not changed */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				evt_type);
		break;
	}
}
5452
5453
5454
5455
5456
5457
5458
5459
5460
5461
5462
5463static struct lpfc_nodelist *
5464lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5465{
5466 struct lpfc_nodelist *ndlp;
5467 struct Scsi_Host *shost;
5468 struct lpfc_hba *phba;
5469
5470 if (!vport)
5471 return NULL;
5472 phba = vport->phba;
5473 if (!phba)
5474 return NULL;
5475 ndlp = lpfc_findnode_did(vport, Fabric_DID);
5476 if (!ndlp) {
5477
5478 ndlp = lpfc_nlp_init(vport, Fabric_DID);
5479 if (!ndlp)
5480 return 0;
5481
5482 ndlp->nlp_type |= NLP_FABRIC;
5483
5484 lpfc_enqueue_node(vport, ndlp);
5485 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5486
5487 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5488 if (!ndlp)
5489 return 0;
5490 }
5491 if ((phba->pport->port_state < LPFC_FLOGI) &&
5492 (phba->pport->port_state != LPFC_VPORT_FAILED))
5493 return NULL;
5494
5495 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5496 && (vport->port_state != LPFC_VPORT_FAILED))
5497 return NULL;
5498 shost = lpfc_shost_from_vport(vport);
5499 if (!shost)
5500 return NULL;
5501 lpfc_linkdown_port(vport);
5502 lpfc_cleanup_pending_mbox(vport);
5503 spin_lock_irq(shost->host_lock);
5504 vport->fc_flag |= FC_VPORT_CVL_RCVD;
5505 spin_unlock_irq(shost->host_lock);
5506
5507 return ndlp;
5508}
5509
5510
5511
5512
5513
5514
5515
5516
5517static void
5518lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5519{
5520 struct lpfc_vport **vports;
5521 int i;
5522
5523 vports = lpfc_create_vport_work_array(phba);
5524 if (vports)
5525 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5526 lpfc_sli4_perform_vport_cvl(vports[i]);
5527 lpfc_destroy_vport_work_array(phba, vports);
5528}
5529
5530
5531
5532
5533
5534
5535
5536
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FIP completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE FIP events:
 * new/modified FCF records (may trigger an FCF table scan), FCF table
 * full notifications, FCF dead events (fast FCF failover), and Clear
 * Virtual Link events (per-vport CVL handling and possible failover).
 * Several sections are guarded by phba->hbalock because the FCF state
 * flags are shared with the FCF discovery state machine.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * the FCF roundrobin failover eligibility bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If FCF has been in discovered state, do nothing */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2548 FCF Table full count 0x%x tag 0x%x\n",
			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
			acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2549 FCF (x%x) disconnected from network, "
			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
					active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * the current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
5781
5782
5783
5784
5785
5786
5787
5788
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine records the event tag and logs that SLI4 DCBX async
 * events are not yet handled by the driver; no further processing
 * is performed.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
5798
5799
5800
5801
5802
5803
5804
5805
5806
5807
5808static void
5809lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5810 struct lpfc_acqe_grp5 *acqe_grp5)
5811{
5812 uint16_t prev_ll_spd;
5813
5814 phba->fc_eventTag = acqe_grp5->event_tag;
5815 phba->fcoe_eventtag = acqe_grp5->event_tag;
5816 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5817 phba->sli4_hba.link_state.logical_speed =
5818 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5819 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5820 "2789 GRP5 Async Event: Updating logical link speed "
5821 "from %dMbps to %dMbps\n", prev_ll_spd,
5822 phba->sli4_hba.link_state.logical_speed);
5823}
5824
5825
5826
5827
5828
5829
5830
5831
5832void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5833{
5834 struct lpfc_cq_event *cq_event;
5835
5836
5837 spin_lock_irq(&phba->hbalock);
5838 phba->hba_flag &= ~ASYNC_EVENT;
5839 spin_unlock_irq(&phba->hbalock);
5840
5841 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5842
5843 spin_lock_irq(&phba->hbalock);
5844 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5845 cq_event, struct lpfc_cq_event, list);
5846 spin_unlock_irq(&phba->hbalock);
5847
5848 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5849 case LPFC_TRAILER_CODE_LINK:
5850 lpfc_sli4_async_link_evt(phba,
5851 &cq_event->cqe.acqe_link);
5852 break;
5853 case LPFC_TRAILER_CODE_FCOE:
5854 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5855 break;
5856 case LPFC_TRAILER_CODE_DCBX:
5857 lpfc_sli4_async_dcbx_evt(phba,
5858 &cq_event->cqe.acqe_dcbx);
5859 break;
5860 case LPFC_TRAILER_CODE_GRP5:
5861 lpfc_sli4_async_grp5_evt(phba,
5862 &cq_event->cqe.acqe_grp5);
5863 break;
5864 case LPFC_TRAILER_CODE_FC:
5865 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5866 break;
5867 case LPFC_TRAILER_CODE_SLI:
5868 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5869 break;
5870 default:
5871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5872 "1804 Invalid asynchrous event code: "
5873 "x%x\n", bf_get(lpfc_trailer_code,
5874 &cq_event->cqe.mcqe_cmpl));
5875 break;
5876 }
5877
5878 lpfc_sli4_cq_event_release(phba, cq_event);
5879 }
5880}
5881
5882
5883
5884
5885
5886
5887
5888
5889void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5890{
5891 int rc;
5892
5893 spin_lock_irq(&phba->hbalock);
5894
5895 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5896
5897 phba->fcf.failover_rec.flag = 0;
5898
5899 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5900 spin_unlock_irq(&phba->hbalock);
5901
5902
5903 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5904 "2777 Start post-quiescent FCF table scan\n");
5905 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5906 if (rc)
5907 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5908 "2747 Issue FCF scan read FCF mailbox "
5909 "command failed 0x%x\n", rc);
5910}
5911
5912
5913
5914
5915
5916
5917
5918
5919
5920
5921
5922int
5923lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5924{
5925 int rc;
5926
5927
5928 phba->pci_dev_grp = dev_grp;
5929
5930
5931 if (dev_grp == LPFC_PCI_DEV_OC)
5932 phba->sli_rev = LPFC_SLI_REV4;
5933
5934
5935 rc = lpfc_init_api_table_setup(phba, dev_grp);
5936 if (rc)
5937 return -ENODEV;
5938
5939 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5940 if (rc)
5941 return -ENODEV;
5942
5943 rc = lpfc_sli_api_table_setup(phba, dev_grp);
5944 if (rc)
5945 return -ENODEV;
5946
5947 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
5948 if (rc)
5949 return -ENODEV;
5950
5951 return 0;
5952}
5953
5954
5955
5956
5957
5958
5959
5960
5961
5962static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5963{
5964 switch (intr_mode) {
5965 case 0:
5966 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5967 "0470 Enable INTx interrupt mode.\n");
5968 break;
5969 case 1:
5970 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5971 "0481 Enabled MSI interrupt mode.\n");
5972 break;
5973 case 2:
5974 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5975 "0480 Enabled MSI-X interrupt mode.\n");
5976 break;
5977 default:
5978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5979 "0482 Illegal interrupt mode.\n");
5980 break;
5981 }
5982 return;
5983}
5984
5985
5986
5987
5988
5989
5990
5991
5992
5993
5994
5995
5996static int
5997lpfc_enable_pci_dev(struct lpfc_hba *phba)
5998{
5999 struct pci_dev *pdev;
6000
6001
6002 if (!phba->pcidev)
6003 goto out_error;
6004 else
6005 pdev = phba->pcidev;
6006
6007 if (pci_enable_device_mem(pdev))
6008 goto out_error;
6009
6010 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
6011 goto out_disable_device;
6012
6013 pci_set_master(pdev);
6014 pci_try_set_mwi(pdev);
6015 pci_save_state(pdev);
6016
6017
6018 if (pci_is_pcie(pdev))
6019 pdev->needs_freset = 1;
6020
6021 return 0;
6022
6023out_disable_device:
6024 pci_disable_device(pdev);
6025out_error:
6026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6027 "1401 Failed to enable pci device\n");
6028 return -ENODEV;
6029}
6030
6031
6032
6033
6034
6035
6036
6037
6038static void
6039lpfc_disable_pci_dev(struct lpfc_hba *phba)
6040{
6041 struct pci_dev *pdev;
6042
6043
6044 if (!phba->pcidev)
6045 return;
6046 else
6047 pdev = phba->pcidev;
6048
6049 pci_release_mem_regions(pdev);
6050 pci_disable_device(pdev);
6051
6052 return;
6053}
6054
6055
6056
6057
6058
6059
6060
6061
6062
6063
6064void
6065lpfc_reset_hba(struct lpfc_hba *phba)
6066{
6067
6068 if (!phba->cfg_enable_hba_reset) {
6069 phba->link_state = LPFC_HBA_ERROR;
6070 return;
6071 }
6072 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
6073 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6074 else
6075 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
6076 lpfc_offline(phba);
6077 lpfc_sli_brdrestart(phba);
6078 lpfc_online(phba);
6079 lpfc_unblock_mgmt_io(phba);
6080}
6081
6082
6083
6084
6085
6086
6087
6088
6089
6090
6091
6092uint16_t
6093lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6094{
6095 struct pci_dev *pdev = phba->pcidev;
6096 uint16_t nr_virtfn;
6097 int pos;
6098
6099 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6100 if (pos == 0)
6101 return 0;
6102
6103 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6104 return nr_virtfn;
6105}
6106
6107
6108
6109
6110
6111
6112
6113
6114
6115
6116
6117
6118int
6119lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6120{
6121 struct pci_dev *pdev = phba->pcidev;
6122 uint16_t max_nr_vfn;
6123 int rc;
6124
6125 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6126 if (nr_vfn > max_nr_vfn) {
6127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6128 "3057 Requested vfs (%d) greater than "
6129 "supported vfs (%d)", nr_vfn, max_nr_vfn);
6130 return -EINVAL;
6131 }
6132
6133 rc = pci_enable_sriov(pdev, nr_vfn);
6134 if (rc) {
6135 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6136 "2806 Failed to enable sriov on this device "
6137 "with vfn number nr_vf:%d, rc:%d\n",
6138 nr_vfn, rc);
6139 } else
6140 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6141 "2807 Successful enable sriov on this device "
6142 "with vfn number nr_vf:%d\n", nr_vfn);
6143 return rc;
6144}
6145
6146
6147
6148
6149
6150
6151
6152
6153
6154
6155
6156
/**
 * lpfc_setup_driver_resource_phase1 - Phase1 driver resource setup
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets up the driver internal resources common to all HBA
 * device types (SLI-3 and SLI-4 alike): spinlocks, list heads, wait
 * queues, and the timers shared by both code paths.
 *
 * Return codes
 *      0 - successful (always, currently)
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	return 0;
}
6221
6222
6223
6224
6225
6226
6227
6228
6229
6230
6231
6232
/**
 * lpfc_sli_driver_resource_setup - Setup driver resources for SLI-3 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets up the driver internal resources specific to
 * SLI-3 HBA devices: the FCP polling timer, host-attention masks,
 * SLI ring array, scatter-gather sizing (with or without BlockGuard),
 * memory pools, and optional SR-IOV enablement.
 *
 * Return codes
 *	0 - successful
 *	-ENODEV - phase-1 setup failed
 *	-ENOMEM - ring array or memory pool allocation failed
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Sice we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accomidate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}
6358
6359
6360
6361
6362
6363
6364
6365
/**
 * lpfc_sli_driver_resource_unset - Unset SLI-3 driver internal resources
 * @phba: pointer to lpfc hba data structure.
 *
 * Release all driver memory set up for an SLI-3 HBA device.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated with this HBA */
	lpfc_mem_free_all(phba);
}
6374
6375
6376
6377
6378
6379
6380
6381
6382
6383
6384
6385
6386static int
6387lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6388{
6389 LPFC_MBOXQ_t *mboxq;
6390 MAILBOX_t *mb;
6391 int rc, i, max_buf_size;
6392 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
6393 struct lpfc_mqe *mqe;
6394 int longs;
6395 int extra;
6396 uint64_t wwn;
6397 u32 if_type;
6398 u32 if_fam;
6399
6400 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6401 phba->sli4_hba.num_possible_cpu = num_possible_cpus();
6402 phba->sli4_hba.curr_disp_cpu = 0;
6403
6404
6405 lpfc_get_cfgparam(phba);
6406
6407
6408 rc = lpfc_setup_driver_resource_phase1(phba);
6409 if (rc)
6410 return -ENODEV;
6411
6412
6413 rc = lpfc_sli4_post_status_check(phba);
6414 if (rc)
6415 return -ENODEV;
6416
6417
6418
6419
6420
6421 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
6422
6423
6424 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
6425
6426
6427
6428
6429
6430 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6431 sizeof(struct lpfc_mbox_ext_buf_ctx));
6432 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6433
6434 phba->max_vpi = LPFC_MAX_VPI;
6435
6436
6437 phba->max_vports = 0;
6438
6439
6440 phba->valid_vlan = 0;
6441 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6442 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6443 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6444
6445
6446
6447
6448
6449
6450
6451
6452
6453
6454
6455 extra = 2;
6456 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6457 extra++;
6458
6459
6460
6461
6462
6463
6464 max_buf_size = (2 * SLI4_PAGE_SIZE);
6465
6466
6467
6468
6469
6470 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6471
6472
6473
6474
6475
6476
6477
6478
6479
6480 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6481 sizeof(struct fcp_rsp) + max_buf_size;
6482
6483
6484 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6485
6486
6487
6488
6489
6490 if (phba->cfg_enable_bg &&
6491 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6492 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6493 else
6494 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6495
6496 } else {
6497
6498
6499
6500
6501
6502 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6503 sizeof(struct fcp_rsp) +
6504 ((phba->cfg_sg_seg_cnt + extra) *
6505 sizeof(struct sli4_sge));
6506
6507
6508 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6509 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6510
6511
6512
6513
6514
6515 }
6516
6517
6518 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6519 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6520 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6521 "6300 Reducing NVME sg segment "
6522 "cnt to %d\n",
6523 LPFC_MAX_NVME_SEG_CNT);
6524 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6525 } else
6526 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6527 }
6528
6529
6530 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6531 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6532 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
6533
6534 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
6535 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6536 else
6537 phba->cfg_sg_dma_buf_size =
6538 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6539
6540 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6541 "9087 sg_seg_cnt:%d dmabuf_size:%d "
6542 "total:%d scsi:%d nvme:%d\n",
6543 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6544 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6545 phba->cfg_nvme_seg_cnt);
6546
6547
6548 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6549 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6550 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6551
6552
6553
6554
6555 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6556
6557 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
6558 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
6559 }
6560
6561 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6562
6563 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
6564 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6565 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6566 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6567 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
6568 }
6569
6570
6571 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6572 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6573
6574
6575
6576
6577
6578
6579 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6580
6581 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6582
6583 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6584
6585 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
6586
6587 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6588
6589 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6590
6591
6592 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6593 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6594 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6595 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6596
6597
6598
6599
6600 INIT_LIST_HEAD(&phba->sli.mboxq);
6601 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6602
6603
6604 phba->sli4_hba.lnk_info.optic_state = 0xff;
6605
6606
6607 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6608 if (rc)
6609 return -ENOMEM;
6610
6611
6612 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
6613 LPFC_SLI_INTF_IF_TYPE_2) {
6614 rc = lpfc_pci_function_reset(phba);
6615 if (unlikely(rc)) {
6616 rc = -ENODEV;
6617 goto out_free_mem;
6618 }
6619 phba->temp_sensor_support = 1;
6620 }
6621
6622
6623 rc = lpfc_create_bootstrap_mbox(phba);
6624 if (unlikely(rc))
6625 goto out_free_mem;
6626
6627
6628 rc = lpfc_setup_endian_order(phba);
6629 if (unlikely(rc))
6630 goto out_free_bsmbx;
6631
6632
6633 rc = lpfc_sli4_read_config(phba);
6634 if (unlikely(rc))
6635 goto out_free_bsmbx;
6636 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6637 if (unlikely(rc))
6638 goto out_free_bsmbx;
6639
6640
6641 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6642 LPFC_SLI_INTF_IF_TYPE_0) {
6643 rc = lpfc_pci_function_reset(phba);
6644 if (unlikely(rc))
6645 goto out_free_bsmbx;
6646 }
6647
6648 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6649 GFP_KERNEL);
6650 if (!mboxq) {
6651 rc = -ENOMEM;
6652 goto out_free_bsmbx;
6653 }
6654
6655
6656 phba->nvmet_support = 0;
6657 if (lpfc_enable_nvmet_cnt) {
6658
6659
6660 lpfc_read_nv(phba, mboxq);
6661 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6662 if (rc != MBX_SUCCESS) {
6663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6664 "6016 Mailbox failed , mbxCmd x%x "
6665 "READ_NV, mbxStatus x%x\n",
6666 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6667 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6668 mempool_free(mboxq, phba->mbox_mem_pool);
6669 rc = -EIO;
6670 goto out_free_bsmbx;
6671 }
6672 mb = &mboxq->u.mb;
6673 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6674 sizeof(uint64_t));
6675 wwn = cpu_to_be64(wwn);
6676 phba->sli4_hba.wwnn.u.name = wwn;
6677 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6678 sizeof(uint64_t));
6679
6680 wwn = cpu_to_be64(wwn);
6681 phba->sli4_hba.wwpn.u.name = wwn;
6682
6683
6684 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6685 if (wwn == lpfc_enable_nvmet[i]) {
6686#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
6687 if (lpfc_nvmet_mem_alloc(phba))
6688 break;
6689
6690 phba->nvmet_support = 1;
6691
6692 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6693 "6017 NVME Target %016llx\n",
6694 wwn);
6695#else
6696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6697 "6021 Can't enable NVME Target."
6698 " NVME_TARGET_FC infrastructure"
6699 " is not in kernel\n");
6700#endif
6701
6702 phba->cfg_xri_rebalancing = 0;
6703 break;
6704 }
6705 }
6706 }
6707
6708 lpfc_nvme_mod_param_dep(phba);
6709
6710
6711 lpfc_supported_pages(mboxq);
6712 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6713 if (!rc) {
6714 mqe = &mboxq->u.mqe;
6715 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6716 LPFC_MAX_SUPPORTED_PAGES);
6717 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6718 switch (pn_page[i]) {
6719 case LPFC_SLI4_PARAMETERS:
6720 phba->sli4_hba.pc_sli4_params.supported = 1;
6721 break;
6722 default:
6723 break;
6724 }
6725 }
6726
6727 if (phba->sli4_hba.pc_sli4_params.supported)
6728 rc = lpfc_pc_sli4_params_get(phba, mboxq);
6729 if (rc) {
6730 mempool_free(mboxq, phba->mbox_mem_pool);
6731 rc = -EIO;
6732 goto out_free_bsmbx;
6733 }
6734 }
6735
6736
6737
6738
6739
6740
6741 rc = lpfc_get_sli4_parameters(phba, mboxq);
6742 if (rc) {
6743 if_type = bf_get(lpfc_sli_intf_if_type,
6744 &phba->sli4_hba.sli_intf);
6745 if_fam = bf_get(lpfc_sli_intf_sli_family,
6746 &phba->sli4_hba.sli_intf);
6747 if (phba->sli4_hba.extents_in_use &&
6748 phba->sli4_hba.rpi_hdrs_in_use) {
6749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6750 "2999 Unsupported SLI4 Parameters "
6751 "Extents and RPI headers enabled.\n");
6752 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6753 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
6754 mempool_free(mboxq, phba->mbox_mem_pool);
6755 rc = -EIO;
6756 goto out_free_bsmbx;
6757 }
6758 }
6759 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6760 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6761 mempool_free(mboxq, phba->mbox_mem_pool);
6762 rc = -EIO;
6763 goto out_free_bsmbx;
6764 }
6765 }
6766
6767 mempool_free(mboxq, phba->mbox_mem_pool);
6768
6769
6770 lpfc_sli4_oas_verify(phba);
6771
6772
6773 lpfc_sli4_ras_init(phba);
6774
6775
6776 rc = lpfc_sli4_queue_verify(phba);
6777 if (rc)
6778 goto out_free_bsmbx;
6779
6780
6781 rc = lpfc_sli4_cq_event_pool_create(phba);
6782 if (rc)
6783 goto out_free_bsmbx;
6784
6785
6786 lpfc_init_sgl_list(phba);
6787
6788
6789 rc = lpfc_init_active_sgl_array(phba);
6790 if (rc) {
6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6792 "1430 Failed to initialize sgl list.\n");
6793 goto out_destroy_cq_event_pool;
6794 }
6795 rc = lpfc_sli4_init_rpi_hdrs(phba);
6796 if (rc) {
6797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6798 "1432 Failed to initialize rpi headers.\n");
6799 goto out_free_active_sgl;
6800 }
6801
6802
6803 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6804 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
6805 GFP_KERNEL);
6806 if (!phba->fcf.fcf_rr_bmask) {
6807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6808 "2759 Failed allocate memory for FCF round "
6809 "robin failover bmask\n");
6810 rc = -ENOMEM;
6811 goto out_remove_rpi_hdrs;
6812 }
6813
6814 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
6815 sizeof(struct lpfc_hba_eq_hdl),
6816 GFP_KERNEL);
6817 if (!phba->sli4_hba.hba_eq_hdl) {
6818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6819 "2572 Failed allocate memory for "
6820 "fast-path per-EQ handle array\n");
6821 rc = -ENOMEM;
6822 goto out_free_fcf_rr_bmask;
6823 }
6824
6825 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
6826 sizeof(struct lpfc_vector_map_info),
6827 GFP_KERNEL);
6828 if (!phba->sli4_hba.cpu_map) {
6829 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6830 "3327 Failed allocate memory for msi-x "
6831 "interrupt vector mapping\n");
6832 rc = -ENOMEM;
6833 goto out_free_hba_eq_hdl;
6834 }
6835
6836 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
6837 if (!phba->sli4_hba.eq_info) {
6838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6839 "3321 Failed allocation for per_cpu stats\n");
6840 rc = -ENOMEM;
6841 goto out_free_hba_cpu_map;
6842 }
6843
6844
6845
6846
6847 if (phba->cfg_sriov_nr_virtfn > 0) {
6848 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6849 phba->cfg_sriov_nr_virtfn);
6850 if (rc) {
6851 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6852 "3020 Requested number of SR-IOV "
6853 "virtual functions (%d) is not "
6854 "supported\n",
6855 phba->cfg_sriov_nr_virtfn);
6856 phba->cfg_sriov_nr_virtfn = 0;
6857 }
6858 }
6859
6860 return 0;
6861
6862out_free_hba_cpu_map:
6863 kfree(phba->sli4_hba.cpu_map);
6864out_free_hba_eq_hdl:
6865 kfree(phba->sli4_hba.hba_eq_hdl);
6866out_free_fcf_rr_bmask:
6867 kfree(phba->fcf.fcf_rr_bmask);
6868out_remove_rpi_hdrs:
6869 lpfc_sli4_remove_rpi_hdrs(phba);
6870out_free_active_sgl:
6871 lpfc_free_active_sgl(phba);
6872out_destroy_cq_event_pool:
6873 lpfc_sli4_cq_event_pool_destroy(phba);
6874out_free_bsmbx:
6875 lpfc_destroy_bootstrap_mbox(phba);
6876out_free_mem:
6877 lpfc_mem_free(phba);
6878 return rc;
6879}
6880
6881
6882
6883
6884
6885
6886
6887
/**
 * lpfc_sli4_driver_resource_unset - Unset driver internal resources, SLI-4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases the driver-internal resources set up for an SLI-4 HBA. This is the
 * inverse of the SLI-4 resource setup path; teardown proceeds roughly in
 * reverse order of allocation: per-CPU stats, CPU affinity map, per-EQ handle
 * array, RPI headers/RPIs, the FCF round-robin bitmask, SGL lists, the CQ
 * event pool, SLI-4 resource identifiers, the bootstrap mailbox, the driver
 * memory pools, and finally any remaining FCF connection-record entries.
 */
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Free per-CPU EQ interrupt statistics */
	free_percpu(phba->sli4_hba.eq_info);

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_possible_cpu = 0;
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Free memory allocated for fast-path per-EQ handle array */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers and the posted rpis */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS and NVMET sgl lists and the active sgl pointer array */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Free the completion-queue event pool (release queued events first) */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers (rpi/xri/vfi/vpi) back to the port */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx (bootstrap mailbox) region */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free memory allocated for driver mbuf/mbox pools */
	lpfc_mem_free_all(phba);

	/* Free the current connect table, one record at a time */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}
6938
6939
6940
6941
6942
6943
6944
6945
6946
6947
6948
6949int
6950lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6951{
6952 phba->lpfc_hba_init_link = lpfc_hba_init_link;
6953 phba->lpfc_hba_down_link = lpfc_hba_down_link;
6954 phba->lpfc_selective_reset = lpfc_selective_reset;
6955 switch (dev_grp) {
6956 case LPFC_PCI_DEV_LP:
6957 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
6958 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
6959 phba->lpfc_stop_port = lpfc_stop_port_s3;
6960 break;
6961 case LPFC_PCI_DEV_OC:
6962 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
6963 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
6964 phba->lpfc_stop_port = lpfc_stop_port_s4;
6965 break;
6966 default:
6967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6968 "1431 Invalid HBA PCI-device group: 0x%x\n",
6969 dev_grp);
6970 return -ENODEV;
6971 break;
6972 }
6973 return 0;
6974}
6975
6976
6977
6978
6979
6980
6981
6982
6983
6984
6985
6986
6987static int
6988lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
6989{
6990 int error;
6991
6992
6993 phba->worker_thread = kthread_run(lpfc_do_work, phba,
6994 "lpfc_worker_%d", phba->brd_no);
6995 if (IS_ERR(phba->worker_thread)) {
6996 error = PTR_ERR(phba->worker_thread);
6997 return error;
6998 }
6999
7000
7001 if (phba->sli_rev == LPFC_SLI_REV4)
7002 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7003 else
7004 phba->wq = NULL;
7005
7006 return 0;
7007}
7008
7009
7010
7011
7012
7013
7014
7015
7016
/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * Undoes phase-2 setup: drains and destroys the driver workqueue (if one
 * was allocated, i.e. SLI-4), then stops the worker kernel thread. The
 * workqueue is flushed before destruction so no deferred work runs after
 * this point.
 */
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	if (phba->worker_thread)
		kthread_stop(phba->worker_thread);
}
7030
7031
7032
7033
7034
7035
7036
7037void
7038lpfc_free_iocb_list(struct lpfc_hba *phba)
7039{
7040 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7041
7042 spin_lock_irq(&phba->hbalock);
7043 list_for_each_entry_safe(iocbq_entry, iocbq_next,
7044 &phba->lpfc_iocb_list, list) {
7045 list_del(&iocbq_entry->list);
7046 kfree(iocbq_entry);
7047 phba->total_iocbq_bufs--;
7048 }
7049 spin_unlock_irq(&phba->hbalock);
7050
7051 return;
7052}
7053
7054
7055
7056
7057
7058
7059
7060
7061
7062
7063
7064
7065int
7066lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7067{
7068 struct lpfc_iocbq *iocbq_entry = NULL;
7069 uint16_t iotag;
7070 int i;
7071
7072
7073 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7074 for (i = 0; i < iocb_count; i++) {
7075 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7076 if (iocbq_entry == NULL) {
7077 printk(KERN_ERR "%s: only allocated %d iocbs of "
7078 "expected %d count. Unloading driver.\n",
7079 __func__, i, LPFC_IOCB_LIST_CNT);
7080 goto out_free_iocbq;
7081 }
7082
7083 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7084 if (iotag == 0) {
7085 kfree(iocbq_entry);
7086 printk(KERN_ERR "%s: failed to allocate IOTAG. "
7087 "Unloading driver.\n", __func__);
7088 goto out_free_iocbq;
7089 }
7090 iocbq_entry->sli4_lxritag = NO_XRI;
7091 iocbq_entry->sli4_xritag = NO_XRI;
7092
7093 spin_lock_irq(&phba->hbalock);
7094 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7095 phba->total_iocbq_bufs++;
7096 spin_unlock_irq(&phba->hbalock);
7097 }
7098
7099 return 0;
7100
7101out_free_iocbq:
7102 lpfc_free_iocb_list(phba);
7103
7104 return -ENOMEM;
7105}
7106
7107
7108
7109
7110
7111
7112
7113
7114void
7115lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7116{
7117 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7118
7119 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7120 list_del(&sglq_entry->list);
7121 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7122 kfree(sglq_entry);
7123 }
7124}
7125
7126
7127
7128
7129
7130
7131
/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * Detaches the entire ELS sgl list onto a local list head while holding the
 * hbalock with the sgl_list_lock nested inside (that lock order must be
 * preserved), then frees the entries outside the locks via
 * lpfc_free_sgl_list().
 */
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}
7147
7148
7149
7150
7151
7152
7153
/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * Detaches the entire NVMET sgl list onto a local list head while holding
 * the hbalock with the sgl_list_lock nested inside (lock order must be
 * preserved), then frees each entry's DMA buffer and structure outside the
 * locks. Unlike the ELS path, NVMET buffers go back through
 * lpfc_nvmet_buf_free() rather than the mbuf pool.
 */
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}
7180
7181
7182
7183
7184
7185
7186
7187
7188static int
7189lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7190{
7191 int size;
7192 size = sizeof(struct lpfc_sglq *);
7193 size *= phba->sli4_hba.max_cfg_param.max_xri;
7194
7195 phba->sli4_hba.lpfc_sglq_active_list =
7196 kzalloc(size, GFP_KERNEL);
7197 if (!phba->sli4_hba.lpfc_sglq_active_list)
7198 return -ENOMEM;
7199 return 0;
7200}
7201
7202
7203
7204
7205
7206
7207
7208
7209
/**
 * lpfc_free_active_sgl - Release the buf of active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the sglq-pointer tracking array allocated by
 * lpfc_init_active_sgl_array().
 */
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}
7215
7216
7217
7218
7219
7220
7221
7222
7223
/**
 * lpfc_init_sgl_list - Initialize sgl list data structures.
 * @phba: pointer to lpfc hba data structure.
 *
 * Initializes the driver's sgl list heads (ELS, aborted-ELS, NVMET,
 * aborted-NVMET-context) and zeroes the sgl counters. The actual sgl
 * allocation happens later, once the port reports its XRI resources.
 */
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.io_xri_cnt = 0;
}
7239
7240
7241
7242
7243
7244
7245
7246
7247
7248
7249
7250
7251
7252
7253
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * Initializes the rpi header list and, when the port requires host-supplied
 * rpi header templates (rpi_hdrs_in_use), creates and posts the first rpi
 * header region. Posting rpi headers is incompatible with extent-based
 * resource provisioning, so extents_in_use is rejected.
 *
 * Return codes
 *	0 - successful (or rpi headers not in use)
 *	-EIO - extents in use, rpi headers cannot be posted
 *	-ENODEV - rpi header creation/posting failed
 */
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}
7276
7277
7278
7279
7280
7281
7282
7283
7284
7285
7286
7287
7288
7289
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates one rpi header template region: a naturally-aligned
 * LPFC_HDR_TEMPLATE_SIZE coherent DMA buffer plus the lpfc_rpi_hdr tracking
 * structure, covering the next LPFC_RPI_HDR_COUNT rpis starting at the
 * port's current next_rpi. The new header is appended to the driver's rpi
 * header list under the hbalock.
 *
 * Returns the new rpi_hdr on success, or NULL when rpi headers are not in
 * use, extents are in use, the rpi range is exhausted, allocation fails, or
 * the DMA buffer does not meet the alignment requirement.
 */
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	/* Port requires the template region to be naturally aligned */
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
7369
7370
7371
7372
7373
7374
7375
7376
7377
7378
7379void
7380lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7381{
7382 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7383
7384 if (!phba->sli4_hba.rpi_hdrs_in_use)
7385 goto exit;
7386
7387 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7388 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7389 list_del(&rpi_hdr->list);
7390 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7391 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7392 kfree(rpi_hdr->dmabuf);
7393 kfree(rpi_hdr);
7394 }
7395 exit:
7396
7397 phba->sli4_hba.next_rpi = 0;
7398}
7399
7400
7401
7402
7403
7404
7405
7406
7407
7408
7409
7410
7411
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * Allocates and minimally initializes the per-HBA data structure: records
 * the PCI device, assigns a board number via lpfc_get_instance(), sets the
 * error-attention poll interval, and initializes the CT event lock/list.
 *
 * Return codes
 *      pointer to @phba - successful
 *      NULL - error
 */
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to pci device */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	/* NOTE(review): the < 0 check assumes brd_no is a signed type and
	 * lpfc_get_instance() reports failure as negative — verify against
	 * the lpfc_hba definition. */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}
7440
7441
7442
7443
7444
7445
7446
7447
/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases the per-HBA data structure: the SLI-4 hardware queue array (when
 * applicable), the board number in the instance idr, the SLI-3 ring array,
 * and finally the hba structure itself.
 */
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		kfree(phba->sli4_hba.hdwq);

	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
	return;
}
7464
7465
7466
7467
7468
7469
7470
7471
7472
7473
7474
7475
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * Initializes the default FC timeout values, creates the physical port
 * (which creates the SCSI host), and performs port-level follow-up: NVMET
 * txrdy DMA pool setup when the adapter runs in NVME target mode, debugfs
 * initialization, drvdata registration, and FDMI/SmartSAN mask defaults.
 *
 * Return codes
 *      0 - successful
 *      -ENODEV - port creation failed
 */
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* Only 1 vport (pport) will support NVME target */
		if (phba->txrdy_payload_pool == NULL) {
			phba->txrdy_payload_pool = dma_pool_create(
				"txrdy_pool", &phba->pcidev->dev,
				TXRDY_PAYLOAD_LEN, 16, 0);
			if (phba->txrdy_payload_pool) {
				phba->targetport = NULL;
				phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
				lpfc_printf_log(phba, KERN_INFO,
						LOG_INIT | LOG_NVME_DISC,
						"6076 NVME Target Found\n");
			}
		}
	}

	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition to the
	 * normal FDMI behavior, SmartSAN enables FDMI-2 attribute masks.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}
7533
7534
7535
7536
7537
7538
7539
7540
7541static void
7542lpfc_destroy_shost(struct lpfc_hba *phba)
7543{
7544 struct lpfc_vport *vport = phba->pport;
7545
7546
7547 destroy_port(vport);
7548
7549 return;
7550}
7551
7552
7553
7554
7555
7556
7557
7558
7559
7560static void
7561lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7562{
7563 uint32_t old_mask;
7564 uint32_t old_guard;
7565
7566 int pagecnt = 10;
7567 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7568 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7569 "1478 Registering BlockGuard with the "
7570 "SCSI layer\n");
7571
7572 old_mask = phba->cfg_prot_mask;
7573 old_guard = phba->cfg_prot_guard;
7574
7575
7576 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7577 SHOST_DIX_TYPE0_PROTECTION |
7578 SHOST_DIX_TYPE1_PROTECTION);
7579 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7580 SHOST_DIX_GUARD_CRC);
7581
7582
7583 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7584 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7585
7586 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7587 if ((old_mask != phba->cfg_prot_mask) ||
7588 (old_guard != phba->cfg_prot_guard))
7589 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7590 "1475 Registering BlockGuard with the "
7591 "SCSI layer: mask %d guard %d\n",
7592 phba->cfg_prot_mask,
7593 phba->cfg_prot_guard);
7594
7595 scsi_host_set_prot(shost, phba->cfg_prot_mask);
7596 scsi_host_set_guard(shost, phba->cfg_prot_guard);
7597 } else
7598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7599 "1479 Not Registering BlockGuard with the SCSI "
7600 "layer, Bad protection parameters: %d %d\n",
7601 old_mask, old_guard);
7602 }
7603
7604 if (!_dump_buf_data) {
7605 while (pagecnt) {
7606 spin_lock_init(&_dump_buf_lock);
7607 _dump_buf_data =
7608 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7609 if (_dump_buf_data) {
7610 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7611 "9043 BLKGRD: allocated %d pages for "
7612 "_dump_buf_data at 0x%p\n",
7613 (1 << pagecnt), _dump_buf_data);
7614 _dump_buf_data_order = pagecnt;
7615 memset(_dump_buf_data, 0,
7616 ((1 << PAGE_SHIFT) << pagecnt));
7617 break;
7618 } else
7619 --pagecnt;
7620 }
7621 if (!_dump_buf_data_order)
7622 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7623 "9044 BLKGRD: ERROR unable to allocate "
7624 "memory for hexdump\n");
7625 } else
7626 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7627 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
7628 "\n", _dump_buf_data);
7629 if (!_dump_buf_dif) {
7630 while (pagecnt) {
7631 _dump_buf_dif =
7632 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7633 if (_dump_buf_dif) {
7634 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7635 "9046 BLKGRD: allocated %d pages for "
7636 "_dump_buf_dif at 0x%p\n",
7637 (1 << pagecnt), _dump_buf_dif);
7638 _dump_buf_dif_order = pagecnt;
7639 memset(_dump_buf_dif, 0,
7640 ((1 << PAGE_SHIFT) << pagecnt));
7641 break;
7642 } else
7643 --pagecnt;
7644 }
7645 if (!_dump_buf_dif_order)
7646 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7647 "9047 BLKGRD: ERROR unable to allocate "
7648 "memory for hexdump\n");
7649 } else
7650 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7651 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
7652 _dump_buf_dif);
7653}
7654
7655
7656
7657
7658
7659
7660
7661
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * Performs post-initialization work: fetches the HBA model name/description,
 * adjusts the SCSI host queue depth (reserving 10 slots for internal use),
 * initializes host attributes, starts the FCP ring poll timer when ring
 * interrupts are disabled, and posts the adapter-arrival vendor event.
 */
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
7697
7698
7699
7700
7701
7702
7703
7704
7705
7706
7707
7708
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the PCI device memory space for an SLI-3 HBA: configures the DMA
 * mask (64-bit, falling back to 32-bit), maps BAR0 (SLIM) and BAR2 (control
 * registers), allocates the host-side SLIM2 and HBQ coherent DMA regions,
 * and derives the mailbox/PCB/IOCB/register pointers from those mappings.
 * Failures unwind via the goto-cleanup chain in reverse order of setup.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 */
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size: prefer 64-bit, fall back to 32-bit */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = -ENODEV;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
					       &phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	/* Derive host-side SLIM2 sub-region pointers from fixed offsets */
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	/* Carve the HBQ slim into per-HBQ entry arrays */
	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	/* Register-access pointers within the mapped BARs */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
7805
7806
7807
7808
7809
7810
7811
7812
7813static void
7814lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7815{
7816 struct pci_dev *pdev;
7817
7818
7819 if (!phba->pcidev)
7820 return;
7821 else
7822 pdev = phba->pcidev;
7823
7824
7825 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7826 phba->hbqslimp.virt, phba->hbqslimp.phys);
7827 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7828 phba->slim2p.virt, phba->slim2p.phys);
7829
7830
7831 iounmap(phba->ctrl_regs_memmap_p);
7832 iounmap(phba->slim_memmap_p);
7833
7834 return;
7835}
7836
7837
7838
7839
7840
7841
7842
7843
7844
7845
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 device POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * Polls the port status-machine (semaphore) register for up to ~30 seconds
 * (3000 x 10ms) waiting for the SLI-4 device to complete POST. On apparent
 * success, performs an interface-type-specific check of the port error
 * registers: unrecoverable-error registers for if_type 0, sliport
 * status/error registers for if_type 2/6.
 *
 * Returns 0 when the port POSTed cleanly, -ENODEV on POST failure, a port
 * error, or a missing semaphore-register mapping.
 */
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			/* An unrecoverable error is fatal only if unmasked */
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
7974
7975
7976
7977
7978
7979
7980
7981
7982
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: SLI4 interface type (LPFC_SLI_INTF_IF_TYPE_*).
 *
 * Populate the SLI4 register address fields that live in the PCI BAR0
 * (configuration space) mapping, phba->sli4_hba.conf_regs_memmap_p.  The
 * set of registers present in BAR0 differs by interface type, so each
 * case maps a different group of offsets.  IF_TYPE_1 is unsupported and
 * only logs an error.
 */
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		/* Unrecoverable-error status and mask registers */
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		/* Port control/status/error registers */
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		/* Doorbells and bootstrap mailbox also live in BAR0 here */
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		/* EQ doorbell shares the CQ doorbell register on if_type 2 */
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		/* Same control registers as if_type 2, but WQ/RQ/CQ/EQ/MQ
		 * doorbells are mapped from a different BAR elsewhere.
		 */
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
8064
8065
8066
8067
8068
8069
8070
8071static void
8072lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8073{
8074 switch (if_type) {
8075 case LPFC_SLI_INTF_IF_TYPE_0:
8076 phba->sli4_hba.PSMPHRregaddr =
8077 phba->sli4_hba.ctrl_regs_memmap_p +
8078 LPFC_SLIPORT_IF0_SMPHR;
8079 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8080 LPFC_HST_ISR0;
8081 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8082 LPFC_HST_IMR0;
8083 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8084 LPFC_HST_ISCR0;
8085 break;
8086 case LPFC_SLI_INTF_IF_TYPE_6:
8087 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8088 LPFC_IF6_RQ_DOORBELL;
8089 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8090 LPFC_IF6_WQ_DOORBELL;
8091 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8092 LPFC_IF6_CQ_DOORBELL;
8093 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8094 LPFC_IF6_EQ_DOORBELL;
8095 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8096 LPFC_IF6_MQ_DOORBELL;
8097 break;
8098 case LPFC_SLI_INTF_IF_TYPE_2:
8099 case LPFC_SLI_INTF_IF_TYPE_1:
8100 default:
8101 dev_err(&phba->pcidev->dev,
8102 "FATAL - unsupported SLI4 interface type - %d\n",
8103 if_type);
8104 break;
8105 }
8106}
8107
8108
8109
8110
8111
8112
8113
8114
8115
8116
8117
8118static int
8119lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8120{
8121 if (vf > LPFC_VIR_FUNC_MAX)
8122 return -ENODEV;
8123
8124 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8125 vf * LPFC_VFR_PAGE_SIZE +
8126 LPFC_ULP0_RQ_DOORBELL);
8127 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8128 vf * LPFC_VFR_PAGE_SIZE +
8129 LPFC_ULP0_WQ_DOORBELL);
8130 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8131 vf * LPFC_VFR_PAGE_SIZE +
8132 LPFC_EQCQ_DOORBELL);
8133 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8134 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8135 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8136 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8137 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8138 return 0;
8139}
8140
8141
8142
8143
8144
8145
8146
8147
8148
8149
8150
8151
8152
8153
8154
8155
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox (BMBX) region.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate a coherent DMA region for the SLI4 bootstrap mailbox, align it
 * to the 16-byte boundary the hardware requires, and pre-compute the two
 * 32-bit values later written to the BMBX register to hand the mailbox
 * physical address to the port.
 *
 * Return: 0 on success, -ENOMEM if either allocation fails.
 */
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Over-allocate by (16 - 1) bytes so a 16-byte-aligned mailbox
	 * can always be carved out of the region below.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Record the raw buffer plus the aligned virtual (avirt) and
	 * physical (aphys) views actually used as the mailbox.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * The BMBX register takes the aligned physical address split into
	 * two 30-bit halves (the low 4 bits are zero by alignment), each
	 * shifted up by 2 and tagged via bit 1 (LPFC_BMBX_BIT1_ADDR_HI/LO)
	 * so the port can tell which half is being written.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	/* Bits 63:34 of the aligned physical address */
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	/* Bits 33:4 of the aligned physical address */
	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
8215
8216
8217
8218
8219
8220
8221
8222
8223
8224
8225
8226
8227static void
8228lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8229{
8230 dma_free_coherent(&phba->pcidev->dev,
8231 phba->sli4_hba.bmbx.bmbx_size,
8232 phba->sli4_hba.bmbx.dmabuf->virt,
8233 phba->sli4_hba.bmbx.dmabuf->phys);
8234
8235 kfree(phba->sli4_hba.bmbx.dmabuf);
8236 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8237}
8238
8239
8240
8241
8242
8243
8244
8245
8246
8247
8248
8249
8250
8251
8252
/**
 * lpfc_sli4_read_config - Get the port configuration parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * Issue a READ_CONFIG mailbox command to the port and cache the returned
 * resource limits (XRI/VPI/RPI/VFI/FCFI counts and bases, EQ/CQ/WQ/RQ
 * counts, link info, BB-credit support) in phba.  Driver IRQ and hardware
 * queue counts are reduced if they exceed what the port can supply.  For
 * if_type >= 2 the forced-link-speed setting is read and, via a second
 * GET_FUNCTION_CONFIG mailbox, the PF/VF numbers are discovered.
 *
 * Return: 0 on success, -ENOMEM on mailbox allocation failure, -EIO if
 * the READ_CONFIG mailbox command fails.  A GET_FUNCTION_CONFIG failure
 * is logged but does not change the return value.
 */
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type, qmin;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed , mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		/* Cache link descriptor info when the port marks it valid */
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
			    bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
			    bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		/* BB-credit recovery defaults, if the port defines them */
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		phba->sli4_hba.conf_trunk =
			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		/* Resource counts and base indices reported by the port */
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* Never use more VPIs than the driver supports */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		/* max_vpi counts the physical port's VPI; vports get one less */
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq);

		/*
		 * qmin = min(max_wq, max_cq, max_eq): the tightest port
		 * limit governs how many fast-path channels can exist.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;

		/* Reserve queues for the slow path (mbox/ELS/NVME LS etc.) */
		qmin -= 4;

		/* With both FCP and NVME fast paths, each side gets half */
		if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
		    !phba->nvmet_support)
			qmin /= 2;

		/* Clamp configured IRQ/hardware queue counts to qmin */
		if ((phba->cfg_irq_chann > qmin) ||
		    (phba->cfg_hdw_queue > qmin)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2005 Reducing Queues: "
					"WQ %d CQ %d EQ %d: min %d: "
					"IRQ %d HDWQ %d\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->sli4_hba.max_cfg_param.max_eq,
					qmin, phba->cfg_irq_chann,
					phba->cfg_hdw_queue);

			if (phba->cfg_irq_chann > qmin)
				phba->cfg_irq_chann = qmin;
			if (phba->cfg_hdw_queue > qmin)
				phba->cfg_hdw_queue = qmin;
		}
	}

	if (rc)
		goto read_cfg_out;

	/* Forced link speed is only reported by if_type >= 2 ports */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case 0xffff:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* HBA queue depth cannot exceed the XRIs left after ELS IOCBs */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	/* GET_FUNCTION_CONFIG is only supported on if_type >= 2 */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* Reuse the same mailbox for GET_FUNCTION_CONFIG */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
			&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		/* Failure here is non-fatal: rc is left unchanged */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* Walk the resource descriptors looking for the FC/FCoE entry */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
8542
8543
8544
8545
8546
8547
8548
8549
8550
8551
8552
8553
8554
8555
8556static int
8557lpfc_setup_endian_order(struct lpfc_hba *phba)
8558{
8559 LPFC_MBOXQ_t *mboxq;
8560 uint32_t if_type, rc = 0;
8561 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8562 HOST_ENDIAN_HIGH_WORD1};
8563
8564 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8565 switch (if_type) {
8566 case LPFC_SLI_INTF_IF_TYPE_0:
8567 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8568 GFP_KERNEL);
8569 if (!mboxq) {
8570 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8571 "0492 Unable to allocate memory for "
8572 "issuing SLI_CONFIG_SPECIAL mailbox "
8573 "command\n");
8574 return -ENOMEM;
8575 }
8576
8577
8578
8579
8580
8581 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8582 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8583 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8584 if (rc != MBX_SUCCESS) {
8585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8586 "0493 SLI_CONFIG_SPECIAL mailbox "
8587 "failed with status x%x\n",
8588 rc);
8589 rc = -EIO;
8590 }
8591 mempool_free(mboxq, phba->mbox_mem_pool);
8592 break;
8593 case LPFC_SLI_INTF_IF_TYPE_6:
8594 case LPFC_SLI_INTF_IF_TYPE_2:
8595 case LPFC_SLI_INTF_IF_TYPE_1:
8596 default:
8597 break;
8598 }
8599 return rc;
8600}
8601
8602
8603
8604
8605
8606
8607
8608
8609
8610
8611
8612
8613
8614
8615static int
8616lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8617{
8618
8619
8620
8621
8622
8623 if (phba->nvmet_support) {
8624 if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
8625 phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
8626 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8627 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8628 }
8629
8630 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8631 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8632 phba->cfg_hdw_queue, phba->cfg_irq_chann,
8633 phba->cfg_nvmet_mrq);
8634
8635
8636 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8637 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8638
8639
8640 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8641 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8642 return 0;
8643}
8644
8645static int
8646lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
8647{
8648 struct lpfc_queue *qdesc;
8649 int cpu;
8650
8651 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
8652 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8653 phba->sli4_hba.cq_esize,
8654 LPFC_CQE_EXP_COUNT, cpu);
8655 if (!qdesc) {
8656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8657 "0508 Failed allocate fast-path NVME CQ (%d)\n",
8658 wqidx);
8659 return 1;
8660 }
8661 qdesc->qe_valid = 1;
8662 qdesc->hdwq = wqidx;
8663 qdesc->chann = cpu;
8664 phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
8665
8666 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8667 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT,
8668 cpu);
8669 if (!qdesc) {
8670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8671 "0509 Failed allocate fast-path NVME WQ (%d)\n",
8672 wqidx);
8673 return 1;
8674 }
8675 qdesc->hdwq = wqidx;
8676 qdesc->chann = wqidx;
8677 phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
8678 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8679 return 0;
8680}
8681
8682static int
8683lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8684{
8685 struct lpfc_queue *qdesc;
8686 uint32_t wqesize;
8687 int cpu;
8688
8689 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
8690
8691 if (phba->enab_exp_wqcq_pages)
8692
8693 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8694 phba->sli4_hba.cq_esize,
8695 LPFC_CQE_EXP_COUNT, cpu);
8696
8697 else
8698 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8699 phba->sli4_hba.cq_esize,
8700 phba->sli4_hba.cq_ecount, cpu);
8701 if (!qdesc) {
8702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8703 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
8704 return 1;
8705 }
8706 qdesc->qe_valid = 1;
8707 qdesc->hdwq = wqidx;
8708 qdesc->chann = cpu;
8709 phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
8710
8711
8712 if (phba->enab_exp_wqcq_pages) {
8713
8714 wqesize = (phba->fcp_embed_io) ?
8715 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8716 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8717 wqesize,
8718 LPFC_WQE_EXP_COUNT, cpu);
8719 } else
8720 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8721 phba->sli4_hba.wq_esize,
8722 phba->sli4_hba.wq_ecount, cpu);
8723
8724 if (!qdesc) {
8725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8726 "0503 Failed allocate fast-path FCP WQ (%d)\n",
8727 wqidx);
8728 return 1;
8729 }
8730 qdesc->hdwq = wqidx;
8731 qdesc->chann = wqidx;
8732 phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
8733 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8734 return 0;
8735}
8736
8737
8738
8739
8740
8741
8742
8743
8744
8745
8746
8747
8748
8749
8750
/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues for the HBA.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate (but do not post to the port) every SLI4 queue the driver
 * uses: the hardware-queue array, per-CPU event queues, the fast-path
 * FCP/NVME WQ/CQ pairs, the NVMET CQ/MRQ sets, and the slow-path
 * mailbox, ELS, NVME-LS and receive queues.  On any failure everything
 * allocated so far is torn down via lpfc_sli4_queue_destroy().
 *
 * Return: 0 on success, -ENOMEM if any allocation fails.
 */
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx, cpu, eqcpu;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *eqcpup;
	struct lpfc_eq_intr_info *eqi;

	/* Default entry sizes/counts for every queue type. */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	/* Allocate and initialize the hardware-queue array once. */
	if (!phba->sli4_hba.hdwq) {
		phba->sli4_hba.hdwq = kcalloc(
			phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
			GFP_KERNEL);
		if (!phba->sli4_hba.hdwq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6427 Failed allocate memory for "
					"fast-path Hardware Queue array\n");
			goto out_error;
		}
		/* Per-queue buffer lists, locks and counters start empty. */
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			spin_lock_init(&qp->io_buf_list_get_lock);
			spin_lock_init(&qp->io_buf_list_put_lock);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->get_io_bufs = 0;
			qp->put_io_bufs = 0;
			qp->total_io_bufs = 0;
			spin_lock_init(&qp->abts_scsi_buf_list_lock);
			INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list);
			qp->abts_scsi_io_bufs = 0;
			spin_lock_init(&qp->abts_nvme_buf_list_lock);
			INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list);
			qp->abts_nvme_io_bufs = 0;
		}
	}

	/* NVMET target mode needs pointer arrays for its CQ/MRQ sets. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->nvmet_support) {
			phba->sli4_hba.nvmet_cqset = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_cqset) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3121 Fail allocate memory for "
					"fast-path CQ set array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_hdr) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3122 Fail allocate memory for "
					"fast-path RQ set hdr array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_data = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3124 Fail allocate memory for "
					"fast-path RQ set data array\n");
				goto out_error;
			}
		}
	}

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/*
	 * Pass 1: allocate one EQ for every CPU that is the first
	 * (IRQ-affine) CPU of its EQ, allocating on that CPU's memory.
	 */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
		/* Only CPUs marked as the first CPU of an IRQ get an EQ. */
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* Hardware queue this CPU is mapped to. */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];

		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate EQ (%d)\n",
					cpup->hdwq);
			goto out_error;
		}
		qdesc->qe_valid = 1;
		qdesc->hdwq = cpup->hdwq;
		qdesc->chann = cpu;
		qdesc->last_cpu = qdesc->chann;

		qp->hba_eq = qdesc;

		/* Track this EQ on its CPU's interrupt-info list. */
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
		list_add(&qdesc->cpu_list, &eqi->list);
	}

	/*
	 * Pass 2: hardware queues whose CPUs did not get their own EQ
	 * share the EQ of the first CPU of their assigned EQ.
	 */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* First-IRQ CPUs were handled in pass 1. */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* Skip hardware queues that already have an EQ. */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
		if (qp->hba_eq)
			continue;

		/* Borrow the EQ owned by this EQ's first CPU. */
		eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
		qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
	}

	/* One fast-path FCP WQ/CQ pair per hardware queue. */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		if (lpfc_alloc_fcp_wq_cq(phba, idx))
			goto out_error;
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* One fast-path NVME WQ/CQ pair per hardware queue. */
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			if (lpfc_alloc_nvme_wq_cq(phba, idx))
				goto out_error;
		}

		/* NVMET target mode: one CQ per configured MRQ. */
		if (phba->nvmet_support) {
			for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
				cpu = lpfc_find_cpu_handle(phba, idx,
							   LPFC_FIND_BY_HDWQ);
				qdesc = lpfc_sli4_queue_alloc(
						      phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount,
						      cpu);
				if (!qdesc) {
					lpfc_printf_log(
						phba, KERN_ERR, LOG_INIT,
						"3142 Failed allocate NVME "
						"CQ Set (%d)\n", idx);
					goto out_error;
				}
				qdesc->qe_valid = 1;
				qdesc->hdwq = idx;
				qdesc->chann = cpu;
				phba->sli4_hba.nvmet_cqset[idx] = qdesc;
			}
		}
	}

	/*
	 * Slow-path queues below are all tied to EQ 0's first CPU.
	 */
	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);

	/* Slow-path mailbox CQ */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.mbx_cq = qdesc;

	/* Slow-path ELS CQ */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	qdesc->chann = 0;
	phba->sli4_hba.els_cq = qdesc;

	/* Slow-path mailbox work queue (MQ) */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	qdesc->chann = 0;
	phba->sli4_hba.mbx_wq = qdesc;

	/* Slow-path ELS WQ */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	qdesc->chann = 0;
	phba->sli4_hba.els_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* NVME LS (link services) CQ */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6079 Failed allocate NVME LS CQ\n");
			goto out_error;
		}
		qdesc->chann = 0;
		qdesc->qe_valid = 1;
		phba->sli4_hba.nvmels_cq = qdesc;

		/* NVME LS WQ */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6080 Failed allocate NVME LS WQ\n");
			goto out_error;
		}
		qdesc->chann = 0;
		phba->sli4_hba.nvmels_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}

	/* Receive queue pair: header RQ ... */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* ... and data RQ. */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	/* NVMET target mode: one header/data MRQ pair per configured MRQ. */
	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
	    phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			/* Header receive queue */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3146 Failed allocate "
						"receive HRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;

			/* Buffer-list bookkeeping for the header RQ */
			qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
						   GFP_KERNEL,
						   cpu_to_node(cpu));
			if (qdesc->rqbp == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6131 Failed allocate "
						"Header RQBP\n");
				goto out_error;
			}

			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);

			/* Data receive queue */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3156 Failed allocate "
						"receive DRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
		}
	}

#if defined(BUILD_NVME)
	/* Clear per-hardware-queue NVME counter stats. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
		}
	}
#endif

	/* Clear per-hardware-queue SCSI counter stats. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
		}
	}

	return 0;

out_error:
	/* Tear down everything allocated so far. */
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}
9130
9131static inline void
9132__lpfc_sli4_release_queue(struct lpfc_queue **qp)
9133{
9134 if (*qp != NULL) {
9135 lpfc_sli4_queue_free(*qp);
9136 *qp = NULL;
9137 }
9138}
9139
9140static inline void
9141lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9142{
9143 int idx;
9144
9145 if (*qs == NULL)
9146 return;
9147
9148 for (idx = 0; idx < max; idx++)
9149 __lpfc_sli4_release_queue(&(*qs)[idx]);
9150
9151 kfree(*qs);
9152 *qs = NULL;
9153}
9154
9155static inline void
9156lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9157{
9158 struct lpfc_sli4_hdw_queue *hdwq;
9159 struct lpfc_queue *eq;
9160 uint32_t idx;
9161
9162 hdwq = phba->sli4_hba.hdwq;
9163
9164
9165 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9166
9167 lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
9168 lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
9169 lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
9170 lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
9171 hdwq[idx].hba_eq = NULL;
9172 hdwq[idx].fcp_cq = NULL;
9173 hdwq[idx].nvme_cq = NULL;
9174 hdwq[idx].fcp_wq = NULL;
9175 hdwq[idx].nvme_wq = NULL;
9176 }
9177
9178 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9179
9180 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9181 lpfc_sli4_queue_free(eq);
9182 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9183 }
9184}
9185
9186
9187
9188
9189
9190
9191
9192
9193
9194
9195
9196
9197
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases every queue created for SLI4 HBA operation: the per-hardware
 * queue sets, the NVMET queue arrays, and the slow-path MBOX/ELS/NVME-LS
 * WQ/CQ and unsolicited RQ pair.
 */
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	/*
	 * Set FREE_INIT before beginning to free the queues.  Then wait
	 * until any in-flight users of the queues acknowledge by clearing
	 * FREE_WAIT; only then is it safe to tear the queues down.
	 * The lock is dropped while sleeping so the users can make
	 * progress.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Release HBA eqs (and their child CQs/WQs) */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on this list has been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Done with freeing the queues */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
	spin_unlock_irq(&phba->hbalock);
}
9259
9260int
9261lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9262{
9263 struct lpfc_rqb *rqbp;
9264 struct lpfc_dmabuf *h_buf;
9265 struct rqb_dmabuf *rqb_buffer;
9266
9267 rqbp = rq->rqbp;
9268 while (!list_empty(&rqbp->rqb_buffer_list)) {
9269 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9270 struct lpfc_dmabuf, list);
9271
9272 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9273 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9274 rqbp->buffer_count--;
9275 }
9276 return 1;
9277}
9278
/**
 * lpfc_create_wq_cq - Create a WQ (or MQ) and attach its CQ to an EQ
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue for the new completion queue.
 * @cq: completion queue to create.
 * @wq: work queue to create (created as a mailbox MQ when @qtype is
 *      LPFC_MBOX).
 * @cq_map: optional slot to record the new CQ's queue id in (may be NULL).
 * @qidx: queue index, used only in log messages.
 * @qtype: queue subtype (LPFC_MBOX, LPFC_FCP, LPFC_NVME, ...).
 *
 * Return: 0 on success, -ENOMEM when a queue was never allocated, or
 * the nonzero status of the failing create call.
 */
static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
	struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
	int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"6085 Fast-path %s (%d) not allocated\n",
			((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* create the Cq first */
	rc = lpfc_cq_create(phba, cq, eq,
			(qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"6086 Failed setup of CQ (%d), rc = 0x%x\n",
			qidx, (uint32_t)rc);
		return rc;
	}

	if (qtype != LPFC_MBOX) {
		/* Setup cq_map for fast lookup */
		if (cq_map)
			*cq_map = cq->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
			qidx, cq->queue_id, qidx, eq->queue_id);

		/* create the wq */
		rc = lpfc_wq_create(phba, wq, cq, qtype);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			    "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
			    qidx, (uint32_t)rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		/* Bind this CQ/WQ to the NVME ring */
		pring = wq->pring;
		pring->sli.sli4.wqp = (void *)wq;
		cq->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
	} else {
		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);
	}

	return 0;
}
9349
9350
9351
9352
9353
9354
9355
9356
9357static void
9358lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9359{
9360 struct lpfc_queue *eq, *childq;
9361 int qidx;
9362
9363 memset(phba->sli4_hba.cq_lookup, 0,
9364 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9365
9366 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9367
9368 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9369 if (!eq)
9370 continue;
9371
9372 list_for_each_entry(childq, &eq->child_list, list) {
9373 if (childq->queue_id > phba->sli4_hba.cq_max)
9374 continue;
9375 if ((childq->subtype == LPFC_FCP) ||
9376 (childq->subtype == LPFC_NVME))
9377 phba->sli4_hba.cq_lookup[childq->queue_id] =
9378 childq;
9379 }
9380 }
9381}
9382
9383
9384
9385
9386
9387
9388
9389
9390
9391
9392
9393
9394
/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Queries firmware configuration, then creates (registers with the port)
 * every queue allocated for SLI4 operation: the per-vector EQs, the
 * fast-path NVME and FCP CQ/WQ pairs, the slow-path MBOX/ELS/NVME-LS
 * queues, the NVMET CQ set and MRQs, and the unsolicited receive queue
 * pair.  Finally programs EQ delay and builds the CQ lookup table.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -ENXIO  - QUERY_FW_CFG mailbox failed
 */
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_sli4_hdw_queue *qp;
	LPFC_MBOXQ_t *mboxq;
	int qidx, cpu;
	uint32_t length, usdelay;
	int rc = -ENOMEM;

	/* Check for dual-ULP support */
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3249 Unable to allocate memory for "
				"QUERY_FW_CFG mailbox command\n");
		return -ENOMEM;
	}
	length = (sizeof(struct lpfc_mbx_query_fw_config) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3250 QUERY_FW_CFG mailbox failed with status "
				"x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/* on MBX_TIMEOUT the mailbox is still owned by firmware */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -ENXIO;
		goto out_error;
	}

	phba->sli4_hba.fw_func_mode =
			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
	phba->sli4_hba.physical_port =
			mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	/*
	 * Set up HBA Event Queues (EQs)
	 */
	qp = phba->sli4_hba.hdwq;

	/* Set up HBA event queue */
	if (!qp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_error;
	}

	/* Loop thru all IRQ vectors */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		/* Create HBA Event Queues (EQs) in order */
		for_each_present_cpu(cpu) {
			cpup = &phba->sli4_hba.cpu_map[cpu];

			/* Look for the CPU thats using that vector with
			 * LPFC_CPU_FIRST_IRQ set.
			 */
			if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
				continue;
			if (qidx != cpup->eq)
				continue;

			/* Create an EQ for that vector */
			rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
					    phba->cfg_fcp_imax);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0523 Failed setup of fast-path"
						" EQ (%d), rc = 0x%x\n",
						cpup->eq, (uint32_t)rc);
				goto out_destroy;
			}

			/* Save the EQ for that vector in the hba_eq_hdl */
			phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
				qp[cpup->hdwq].hba_eq;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2584 HBA EQ setup: queue[%d]-id=%d\n",
					cpup->eq,
					qp[cpup->hdwq].hba_eq->queue_id);
		}
	}

	/* Loop thru all Hardware Queues */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			cpu = lpfc_find_cpu_handle(phba, qidx,
						   LPFC_FIND_BY_HDWQ);
			cpup = &phba->sli4_hba.cpu_map[cpu];

			/* Create the CQ/WQ corresponding to the
			 * Hardware Queue
			 */
			rc = lpfc_create_wq_cq(phba,
					phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
					qp[qidx].nvme_cq,
					qp[qidx].nvme_wq,
					&phba->sli4_hba.hdwq[qidx].nvme_cq_map,
					qidx, LPFC_NVME);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6123 Failed to setup fastpath "
					"NVME WQ/CQ (%d), rc = 0x%x\n",
					qidx, (uint32_t)rc);
				goto out_destroy;
			}
		}
	}

	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Create the CQ/WQ corresponding to the Hardware Queue */
		rc = lpfc_create_wq_cq(phba,
				       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
				       qp[qidx].fcp_cq,
				       qp[qidx].fcp_wq,
				       &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
				       qidx, LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed to setup fastpath "
					"FCP WQ/CQ (%d), rc = 0x%x\n",
					qidx, (uint32_t)rc);
			goto out_destroy;
		}
	}

	/*
	 * Set up Slow Path Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX CQ/MQ */

	if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 %s not allocated\n",
				phba->sli4_hba.mbx_cq ?
				"Mailbox WQ" : "Mailbox CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
			       phba->sli4_hba.mbx_cq,
			       phba->sli4_hba.mbx_wq,
			       NULL, 0, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
			(uint32_t)rc);
		goto out_destroy;
	}
	if (phba->nvmet_support) {
		if (!phba->sli4_hba.nvmet_cqset) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3165 Fast-path NVME CQ Set "
					"array not allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_cq_create_set(phba,
					phba->sli4_hba.nvmet_cqset,
					qp,
					LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3164 Failed setup of NVME CQ "
						"Set, rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}
		} else {
			/* Set up NVMET Receive Complete Queue */
			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
					    qp[0].hba_eq,
					    LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6089 Failed setup NVMET CQ: "
						"rc = 0x%x\n", (uint32_t)rc);
				goto out_destroy;
			}
			phba->sli4_hba.nvmet_cqset[0]->chann = 0;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"6090 NVMET CQ setup: cq-id=%d, "
					"parent eq-id=%d\n",
					phba->sli4_hba.nvmet_cqset[0]->queue_id,
					qp[0].hba_eq->queue_id);
		}
	}

	/* Set up slow-path ELS WQ/CQ */
	if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS %s not allocated\n",
				phba->sli4_hba.els_cq ? "WQ" : "CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}
	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
			       phba->sli4_hba.els_cq,
			       phba->sli4_hba.els_wq,
			       NULL, 0, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
				(uint32_t)rc);
		goto out_destroy;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Set up NVME LS Complete Queue */
		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6091 LS %s not allocated\n",
					phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}
		rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
				       phba->sli4_hba.nvmels_cq,
				       phba->sli4_hba.nvmels_wq,
				       NULL, 0, LPFC_NVME_LS);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0526 Failed setup of NVVME LS WQ/CQ: "
					"rc = 0x%x\n", (uint32_t)rc);
			goto out_destroy;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"6096 ELS WQ setup: wq-id=%d, "
				"parent cq-id=%d\n",
				phba->sli4_hba.nvmels_wq->queue_id,
				phba->sli4_hba.nvmels_cq->queue_id);
	}

	/*
	 * Create NVMET Receive Queue (RQ)
	 */
	if (phba->nvmet_support) {
		if ((!phba->sli4_hba.nvmet_cqset) ||
		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
		    (!phba->sli4_hba.nvmet_mrq_data)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6130 MRQ CQ Queues not "
					"allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_mrq_create(phba,
					     phba->sli4_hba.nvmet_mrq_hdr,
					     phba->sli4_hba.nvmet_mrq_data,
					     phba->sli4_hba.nvmet_cqset,
					     LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6098 Failed setup of NVMET "
						"MRQ: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

		} else {
			rc = lpfc_rq_create(phba,
					    phba->sli4_hba.nvmet_mrq_hdr[0],
					    phba->sli4_hba.nvmet_mrq_data[0],
					    phba->sli4_hba.nvmet_cqset[0],
					    LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6057 Failed setup of NVMET "
						"Receive Queue: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

			lpfc_printf_log(
				phba, KERN_INFO, LOG_INIT,
				"6099 NVMET RQ setup: hdr-rq-id=%d, "
				"dat-rq-id=%d parent cq-id=%d\n",
				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
				phba->sli4_hba.nvmet_cqset[0]->queue_id);

		}
	}

	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy;
	}

	/* Set up slow-path unsolicited receive queue pair on the ELS CQ */
	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	/* Program the EQ interrupt coalescing delay from cfg_fcp_imax
	 * (interrupts per second -> microseconds of delay).
	 */
	if (phba->cfg_fcp_imax)
		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
	else
		usdelay = 0;

	for (qidx = 0; qidx < phba->cfg_irq_chann;
	     qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
					 usdelay);

	if (phba->sli4_hba.cq_max) {
		kfree(phba->sli4_hba.cq_lookup);
		phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
			sizeof(struct lpfc_queue *), GFP_KERNEL);
		if (!phba->sli4_hba.cq_lookup) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0549 Failed setup of CQ Lookup table: "
					"size 0x%x\n", phba->sli4_hba.cq_max);
			rc = -ENOMEM;
			goto out_destroy;
		}
		lpfc_setup_cq_lookup(phba);
	}
	return 0;

out_destroy:
	lpfc_sli4_queue_unset(phba);
out_error:
	return rc;
}
9768
9769
9770
9771
9772
9773
9774
9775
9776
9777
9778
9779
9780
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Unregisters (destroys at the port) every queue that was set up by
 * lpfc_sli4_queue_setup().  Queue memory itself is released separately
 * by lpfc_sli4_queue_destroy().
 */
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;
	int qidx;

	/* Unset mailbox command work queue */
	if (phba->sli4_hba.mbx_wq)
		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Unset NVME LS work queue */
	if (phba->sli4_hba.nvmels_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);

	/* Unset ELS work queue */
	if (phba->sli4_hba.els_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);

	/* Unset unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq)
		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
				phba->sli4_hba.dat_rq);

	/* Unset mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);

	/* Unset ELS complete queue */
	if (phba->sli4_hba.els_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);

	/* Unset NVME LS complete queue */
	if (phba->sli4_hba.nvmels_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);

	if (phba->nvmet_support) {
		/* Unset NVMET MRQ queue */
		if (phba->sli4_hba.nvmet_mrq_hdr) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_rq_destroy(
					phba,
					phba->sli4_hba.nvmet_mrq_hdr[qidx],
					phba->sli4_hba.nvmet_mrq_data[qidx]);
		}

		/* Unset NVMET CQ Set complete queue */
		if (phba->sli4_hba.nvmet_cqset) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_cq_destroy(
					phba, phba->sli4_hba.nvmet_cqset[qidx]);
		}
	}

	/* Unset fast-path SLI4 queues */
	if (phba->sli4_hba.hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			/* Destroy the CQ/WQ corresponding to Hardware Queue */
			qp = &phba->sli4_hba.hdwq[qidx];
			lpfc_wq_destroy(phba, qp->fcp_wq);
			lpfc_wq_destroy(phba, qp->nvme_wq);
			lpfc_cq_destroy(phba, qp->fcp_cq);
			lpfc_cq_destroy(phba, qp->nvme_cq);
		}
		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			/* Destroy the EQ corresponding to the IRQ vector */
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			lpfc_eq_destroy(phba, eq);
		}
	}

	kfree(phba->sli4_hba.cq_lookup);
	phba->sli4_hba.cq_lookup = NULL;
	phba->sli4_hba.cq_max = 0;
}
9858
9859
9860
9861
9862
9863
9864
9865
9866
9867
9868
9869
9870
9871
9872
9873
9874
9875static int
9876lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9877{
9878 struct lpfc_cq_event *cq_event;
9879 int i;
9880
9881 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9882 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9883 if (!cq_event)
9884 goto out_pool_create_fail;
9885 list_add_tail(&cq_event->list,
9886 &phba->sli4_hba.sp_cqe_event_pool);
9887 }
9888 return 0;
9889
9890out_pool_create_fail:
9891 lpfc_sli4_cq_event_pool_destroy(phba);
9892 return -ENOMEM;
9893}
9894
9895
9896
9897
9898
9899
9900
9901
9902
9903
9904
9905static void
9906lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9907{
9908 struct lpfc_cq_event *cq_event, *next_cq_event;
9909
9910 list_for_each_entry_safe(cq_event, next_cq_event,
9911 &phba->sli4_hba.sp_cqe_event_pool, list) {
9912 list_del(&cq_event->list);
9913 kfree(cq_event);
9914 }
9915}
9916
9917
9918
9919
9920
9921
9922
9923
9924
9925
9926
/**
 * __lpfc_sli4_cq_event_alloc - Take a CQ event from the free pool (lock-free)
 * @phba: pointer to lpfc hba data structure.
 *
 * Removes and returns the first entry from the slow-path CQ event pool.
 * Returns NULL when the pool is empty.  Callers are expected to serialize
 * access to the pool themselves; the locked wrapper is
 * lpfc_sli4_cq_event_alloc().
 */
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	/* list_remove_head() leaves cq_event NULL when the pool is empty */
	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}
9936
9937
9938
9939
9940
9941
9942
9943
9944
9945
9946
9947struct lpfc_cq_event *
9948lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9949{
9950 struct lpfc_cq_event *cq_event;
9951 unsigned long iflags;
9952
9953 spin_lock_irqsave(&phba->hbalock, iflags);
9954 cq_event = __lpfc_sli4_cq_event_alloc(phba);
9955 spin_unlock_irqrestore(&phba->hbalock, iflags);
9956 return cq_event;
9957}
9958
9959
9960
9961
9962
9963
9964
9965
9966
/**
 * __lpfc_sli4_cq_event_release - Return a CQ event to the free pool (lock-free)
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * Links @cq_event back onto the tail of the slow-path CQ event pool.
 * Callers are expected to serialize access to the pool; the locked
 * wrapper is lpfc_sli4_cq_event_release().
 */
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}
9973
9974
9975
9976
9977
9978
9979
9980
9981
9982void
9983lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9984 struct lpfc_cq_event *cq_event)
9985{
9986 unsigned long iflags;
9987 spin_lock_irqsave(&phba->hbalock, iflags);
9988 __lpfc_sli4_cq_event_release(phba, cq_event);
9989 spin_unlock_irqrestore(&phba->hbalock, iflags);
9990}
9991
9992
9993
9994
9995
9996
9997
9998
9999static void
10000lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10001{
10002 LIST_HEAD(cqelist);
10003 struct lpfc_cq_event *cqe;
10004 unsigned long iflags;
10005
10006
10007 spin_lock_irqsave(&phba->hbalock, iflags);
10008
10009 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10010 &cqelist);
10011
10012 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10013 &cqelist);
10014
10015 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10016 &cqelist);
10017 spin_unlock_irqrestore(&phba->hbalock, iflags);
10018
10019 while (!list_empty(&cqelist)) {
10020 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
10021 lpfc_sli4_cq_event_release(phba, cqe);
10022 }
10023}
10024
10025
10026
10027
10028
10029
10030
10031
10032
10033
10034
10035
10036
/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset.  For IF_TYPE_0
 * the reset is requested through the FUNCTION_RESET mailbox command; for
 * IF_TYPE_2/6 the reset is performed by writing the SLI port control
 * register and polling the port status register for ready.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -ENXIO  - mailbox command failed
 *      -ENODEV - port failed to come ready after reset
 */
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk;
	uint32_t port_reset = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		/* on MBX_TIMEOUT the mailbox remains owned by firmware */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
wait:
		/*
		 * Poll the Port Status Register and wait for RDY for
		 * up to 30 seconds (1500 polls * 20ms).  If the port
		 * doesn't respond, treat it as an error.
		 */
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}

		if (!port_reset) {
			/*
			 * Reset the port now
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush the write by reading device config space */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			/* re-enter the wait loop for ready after reset */
			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			/* ready, but reset-needed is still asserted: fail */
			rc = -ENODEV;
			goto out;
		}
		break;

	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3317 HBA not functional: IP Reset Failed "
				"try: echo fw_reset > board_mode\n");
		rc = -ENODEV;
	}

	return rc;
}
10157
10158
10159
10160
10161
10162
10163
10164
10165
10166
10167
10168
10169static int
10170lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10171{
10172 struct pci_dev *pdev = phba->pcidev;
10173 unsigned long bar0map_len, bar1map_len, bar2map_len;
10174 int error;
10175 uint32_t if_type;
10176
10177 if (!pdev)
10178 return -ENODEV;
10179
10180
10181 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10182 if (error)
10183 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10184 if (error)
10185 return error;
10186
10187
10188
10189
10190
10191 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10192 &phba->sli4_hba.sli_intf.word0)) {
10193 return -ENODEV;
10194 }
10195
10196
10197 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10198 LPFC_SLI_INTF_VALID) {
10199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10200 "2894 SLI_INTF reg contents invalid "
10201 "sli_intf reg 0x%x\n",
10202 phba->sli4_hba.sli_intf.word0);
10203 return -ENODEV;
10204 }
10205
10206 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10207
10208
10209
10210
10211
10212
10213 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10214 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10215 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10216
10217
10218
10219
10220
10221 phba->sli4_hba.conf_regs_memmap_p =
10222 ioremap(phba->pci_bar0_map, bar0map_len);
10223 if (!phba->sli4_hba.conf_regs_memmap_p) {
10224 dev_printk(KERN_ERR, &pdev->dev,
10225 "ioremap failed for SLI4 PCI config "
10226 "registers.\n");
10227 return -ENODEV;
10228 }
10229 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10230
10231 lpfc_sli4_bar0_register_memmap(phba, if_type);
10232 } else {
10233 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10234 bar0map_len = pci_resource_len(pdev, 1);
10235 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10236 dev_printk(KERN_ERR, &pdev->dev,
10237 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10238 return -ENODEV;
10239 }
10240 phba->sli4_hba.conf_regs_memmap_p =
10241 ioremap(phba->pci_bar0_map, bar0map_len);
10242 if (!phba->sli4_hba.conf_regs_memmap_p) {
10243 dev_printk(KERN_ERR, &pdev->dev,
10244 "ioremap failed for SLI4 PCI config "
10245 "registers.\n");
10246 return -ENODEV;
10247 }
10248 lpfc_sli4_bar0_register_memmap(phba, if_type);
10249 }
10250
10251 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10252 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10253
10254
10255
10256
10257 phba->pci_bar1_map = pci_resource_start(pdev,
10258 PCI_64BIT_BAR2);
10259 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10260 phba->sli4_hba.ctrl_regs_memmap_p =
10261 ioremap(phba->pci_bar1_map,
10262 bar1map_len);
10263 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10264 dev_err(&pdev->dev,
10265 "ioremap failed for SLI4 HBA "
10266 "control registers.\n");
10267 error = -ENOMEM;
10268 goto out_iounmap_conf;
10269 }
10270 phba->pci_bar2_memmap_p =
10271 phba->sli4_hba.ctrl_regs_memmap_p;
10272 lpfc_sli4_bar1_register_memmap(phba, if_type);
10273 } else {
10274 error = -ENOMEM;
10275 goto out_iounmap_conf;
10276 }
10277 }
10278
10279 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10280 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10281
10282
10283
10284
10285 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10286 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10287 phba->sli4_hba.drbl_regs_memmap_p =
10288 ioremap(phba->pci_bar1_map, bar1map_len);
10289 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10290 dev_err(&pdev->dev,
10291 "ioremap failed for SLI4 HBA doorbell registers.\n");
10292 error = -ENOMEM;
10293 goto out_iounmap_conf;
10294 }
10295 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10296 lpfc_sli4_bar1_register_memmap(phba, if_type);
10297 }
10298
10299 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10300 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10301
10302
10303
10304
10305 phba->pci_bar2_map = pci_resource_start(pdev,
10306 PCI_64BIT_BAR4);
10307 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10308 phba->sli4_hba.drbl_regs_memmap_p =
10309 ioremap(phba->pci_bar2_map,
10310 bar2map_len);
10311 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10312 dev_err(&pdev->dev,
10313 "ioremap failed for SLI4 HBA"
10314 " doorbell registers.\n");
10315 error = -ENOMEM;
10316 goto out_iounmap_ctrl;
10317 }
10318 phba->pci_bar4_memmap_p =
10319 phba->sli4_hba.drbl_regs_memmap_p;
10320 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10321 if (error)
10322 goto out_iounmap_all;
10323 } else {
10324 error = -ENOMEM;
10325 goto out_iounmap_all;
10326 }
10327 }
10328
10329 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10330 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10331
10332
10333
10334
10335 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10336 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10337 phba->sli4_hba.dpp_regs_memmap_p =
10338 ioremap(phba->pci_bar2_map, bar2map_len);
10339 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10340 dev_err(&pdev->dev,
10341 "ioremap failed for SLI4 HBA dpp registers.\n");
10342 error = -ENOMEM;
10343 goto out_iounmap_ctrl;
10344 }
10345 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10346 }
10347
10348
10349 switch (if_type) {
10350 case LPFC_SLI_INTF_IF_TYPE_0:
10351 case LPFC_SLI_INTF_IF_TYPE_2:
10352 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10353 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10354 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10355 break;
10356 case LPFC_SLI_INTF_IF_TYPE_6:
10357 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10358 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10359 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10360 break;
10361 default:
10362 break;
10363 }
10364
10365 return 0;
10366
10367out_iounmap_all:
10368 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10369out_iounmap_ctrl:
10370 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10371out_iounmap_conf:
10372 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10373
10374 return error;
10375}
10376
10377
10378
10379
10380
10381
10382
10383
10384static void
10385lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10386{
10387 uint32_t if_type;
10388 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10389
10390 switch (if_type) {
10391 case LPFC_SLI_INTF_IF_TYPE_0:
10392 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10393 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10394 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10395 break;
10396 case LPFC_SLI_INTF_IF_TYPE_2:
10397 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10398 break;
10399 case LPFC_SLI_INTF_IF_TYPE_6:
10400 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10401 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10402 break;
10403 case LPFC_SLI_INTF_IF_TYPE_1:
10404 default:
10405 dev_printk(KERN_ERR, &phba->pcidev->dev,
10406 "FATAL - unsupported SLI4 interface type - %d\n",
10407 if_type);
10408 break;
10409 }
10410}
10411
10412
10413
10414
10415
10416
10417
10418
10419
10420
10421
10422
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates exactly LPFC_MSIX_VECTORS MSI-X vectors, installs the
 * slow-path handler on vector 0 and the fast-path handler on vector 1,
 * then issues the CONFIG_MSI mailbox command to associate events with
 * the vectors.  Errors are unwound in reverse order via the goto chain.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 */
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	rc = pci_alloc_irq_vectors(phba->pcidev,
			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}

	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);

	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
10512
10513
10514
10515
10516
10517
10518
10519
10520
10521
10522
10523
10524
10525
10526
10527static int
10528lpfc_sli_enable_msi(struct lpfc_hba *phba)
10529{
10530 int rc;
10531
10532 rc = pci_enable_msi(phba->pcidev);
10533 if (!rc)
10534 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10535 "0462 PCI enable MSI mode success.\n");
10536 else {
10537 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10538 "0471 PCI enable MSI mode failed (%d)\n", rc);
10539 return rc;
10540 }
10541
10542 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10543 0, LPFC_DRIVER_NAME, phba);
10544 if (rc) {
10545 pci_disable_msi(phba->pcidev);
10546 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10547 "0478 MSI request_irq failed (%d)\n", rc);
10548 }
10549 return rc;
10550}
10551
10552
10553
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563
10564
10565
10566
10567
10568static uint32_t
10569lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10570{
10571 uint32_t intr_mode = LPFC_INTR_ERROR;
10572 int retval;
10573
10574 if (cfg_mode == 2) {
10575
10576 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10577 if (!retval) {
10578
10579 retval = lpfc_sli_enable_msix(phba);
10580 if (!retval) {
10581
10582 phba->intr_type = MSIX;
10583 intr_mode = 2;
10584 }
10585 }
10586 }
10587
10588
10589 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10590 retval = lpfc_sli_enable_msi(phba);
10591 if (!retval) {
10592
10593 phba->intr_type = MSI;
10594 intr_mode = 1;
10595 }
10596 }
10597
10598
10599 if (phba->intr_type == NONE) {
10600 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10601 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10602 if (!retval) {
10603
10604 phba->intr_type = INTx;
10605 intr_mode = 0;
10606 }
10607 }
10608 return intr_mode;
10609}
10610
10611
10612
10613
10614
10615
10616
10617
10618
10619
10620static void
10621lpfc_sli_disable_intr(struct lpfc_hba *phba)
10622{
10623 int nr_irqs, i;
10624
10625 if (phba->intr_type == MSIX)
10626 nr_irqs = LPFC_MSIX_VECTORS;
10627 else
10628 nr_irqs = 1;
10629
10630 for (i = 0; i < nr_irqs; i++)
10631 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10632 pci_free_irq_vectors(phba->pcidev);
10633
10634
10635 phba->intr_type = NONE;
10636 phba->sli.slistat.sli_intr = 0;
10637}
10638
10639
10640
10641
10642
10643
10644
10645
10646
10647static uint16_t
10648lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10649{
10650 struct lpfc_vector_map_info *cpup;
10651 int cpu;
10652
10653
10654 for_each_present_cpu(cpu) {
10655 cpup = &phba->sli4_hba.cpu_map[cpu];
10656
10657
10658
10659
10660
10661 if ((match == LPFC_FIND_BY_EQ) &&
10662 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10663 (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10664 (cpup->eq == id))
10665 return cpu;
10666
10667
10668 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10669 return cpu;
10670 }
10671 return 0;
10672}
10673
10674#ifdef CONFIG_X86
10675
10676
10677
10678
10679
10680
10681
10682static int
10683lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10684 uint16_t phys_id, uint16_t core_id)
10685{
10686 struct lpfc_vector_map_info *cpup;
10687 int idx;
10688
10689 for_each_present_cpu(idx) {
10690 cpup = &phba->sli4_hba.cpu_map[idx];
10691
10692 if ((cpup->phys_id == phys_id) &&
10693 (cpup->core_id == core_id) &&
10694 (cpu != idx))
10695 return 1;
10696 }
10697 return 0;
10698}
10699#endif
10700
10701
10702
10703
10704
10705
10706
10707
10708
10709
10710
10711static void
10712lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10713{
10714 int i, cpu, idx, new_cpu, start_cpu, first_cpu;
10715 int max_phys_id, min_phys_id;
10716 int max_core_id, min_core_id;
10717 struct lpfc_vector_map_info *cpup;
10718 struct lpfc_vector_map_info *new_cpup;
10719 const struct cpumask *maskp;
10720#ifdef CONFIG_X86
10721 struct cpuinfo_x86 *cpuinfo;
10722#endif
10723
10724
10725 for_each_possible_cpu(cpu) {
10726 cpup = &phba->sli4_hba.cpu_map[cpu];
10727 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10728 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10729 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10730 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10731 cpup->irq = LPFC_VECTOR_MAP_EMPTY;
10732 cpup->flag = 0;
10733 }
10734
10735 max_phys_id = 0;
10736 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10737 max_core_id = 0;
10738 min_core_id = LPFC_VECTOR_MAP_EMPTY;
10739
10740
10741 for_each_present_cpu(cpu) {
10742 cpup = &phba->sli4_hba.cpu_map[cpu];
10743#ifdef CONFIG_X86
10744 cpuinfo = &cpu_data(cpu);
10745 cpup->phys_id = cpuinfo->phys_proc_id;
10746 cpup->core_id = cpuinfo->cpu_core_id;
10747 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10748 cpup->flag |= LPFC_CPU_MAP_HYPER;
10749#else
10750
10751 cpup->phys_id = 0;
10752 cpup->core_id = cpu;
10753#endif
10754
10755 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10756 "3328 CPU physid %d coreid %d\n",
10757 cpup->phys_id, cpup->core_id);
10758
10759 if (cpup->phys_id > max_phys_id)
10760 max_phys_id = cpup->phys_id;
10761 if (cpup->phys_id < min_phys_id)
10762 min_phys_id = cpup->phys_id;
10763
10764 if (cpup->core_id > max_core_id)
10765 max_core_id = cpup->core_id;
10766 if (cpup->core_id < min_core_id)
10767 min_core_id = cpup->core_id;
10768 }
10769
10770 for_each_possible_cpu(i) {
10771 struct lpfc_eq_intr_info *eqi =
10772 per_cpu_ptr(phba->sli4_hba.eq_info, i);
10773
10774 INIT_LIST_HEAD(&eqi->list);
10775 eqi->icnt = 0;
10776 }
10777
10778
10779
10780
10781
10782
10783
10784
10785
10786
10787
10788 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10789
10790 maskp = pci_irq_get_affinity(phba->pcidev, idx);
10791 if (!maskp) {
10792 if (phba->cfg_irq_chann > 1)
10793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10794 "3329 No affinity mask found "
10795 "for vector %d (%d)\n",
10796 idx, phba->cfg_irq_chann);
10797 if (!idx) {
10798 cpu = cpumask_first(cpu_present_mask);
10799 cpup = &phba->sli4_hba.cpu_map[cpu];
10800 cpup->eq = idx;
10801 cpup->irq = pci_irq_vector(phba->pcidev, idx);
10802 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10803 }
10804 break;
10805 }
10806
10807 i = 0;
10808
10809 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
10810
10811 cpup = &phba->sli4_hba.cpu_map[cpu];
10812 cpup->eq = idx;
10813 cpup->irq = pci_irq_vector(phba->pcidev, idx);
10814
10815 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10816 "3336 Set Affinity: CPU %d "
10817 "irq %d eq %d\n",
10818 cpu, cpup->irq, cpup->eq);
10819
10820
10821
10822
10823 if (!i)
10824 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10825 i++;
10826 }
10827 }
10828
10829
10830
10831
10832
10833
10834 first_cpu = cpumask_first(cpu_present_mask);
10835 start_cpu = first_cpu;
10836
10837 for_each_present_cpu(cpu) {
10838 cpup = &phba->sli4_hba.cpu_map[cpu];
10839
10840
10841 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10842
10843 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10844
10845
10846
10847
10848
10849
10850 new_cpu = start_cpu;
10851 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10852 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10853 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10854 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10855 (new_cpup->phys_id == cpup->phys_id))
10856 goto found_same;
10857 new_cpu = cpumask_next(
10858 new_cpu, cpu_present_mask);
10859 if (new_cpu == nr_cpumask_bits)
10860 new_cpu = first_cpu;
10861 }
10862
10863 continue;
10864found_same:
10865
10866 cpup->eq = new_cpup->eq;
10867 cpup->irq = new_cpup->irq;
10868
10869
10870
10871
10872
10873 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10874 if (start_cpu == nr_cpumask_bits)
10875 start_cpu = first_cpu;
10876
10877 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10878 "3337 Set Affinity: CPU %d "
10879 "irq %d from id %d same "
10880 "phys_id (%d)\n",
10881 cpu, cpup->irq, new_cpu, cpup->phys_id);
10882 }
10883 }
10884
10885
10886 start_cpu = first_cpu;
10887
10888 for_each_present_cpu(cpu) {
10889 cpup = &phba->sli4_hba.cpu_map[cpu];
10890
10891
10892 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10893
10894 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10895
10896
10897
10898
10899
10900
10901 new_cpu = start_cpu;
10902 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10903 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10904 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10905 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
10906 goto found_any;
10907 new_cpu = cpumask_next(
10908 new_cpu, cpu_present_mask);
10909 if (new_cpu == nr_cpumask_bits)
10910 new_cpu = first_cpu;
10911 }
10912
10913 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10914 "3339 Set Affinity: CPU %d "
10915 "irq %d UNASSIGNED\n",
10916 cpup->hdwq, cpup->irq);
10917 continue;
10918found_any:
10919
10920 cpup->eq = new_cpup->eq;
10921 cpup->irq = new_cpup->irq;
10922
10923
10924
10925
10926
10927 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10928 if (start_cpu == nr_cpumask_bits)
10929 start_cpu = first_cpu;
10930
10931 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10932 "3338 Set Affinity: CPU %d "
10933 "irq %d from id %d (%d/%d)\n",
10934 cpu, cpup->irq, new_cpu,
10935 new_cpup->phys_id, new_cpup->core_id);
10936 }
10937 }
10938
10939
10940
10941
10942
10943
10944 idx = 0;
10945 start_cpu = 0;
10946 for_each_present_cpu(cpu) {
10947 cpup = &phba->sli4_hba.cpu_map[cpu];
10948 if (idx >= phba->cfg_hdw_queue) {
10949
10950
10951
10952
10953
10954 new_cpu = start_cpu;
10955 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10956 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10957 if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
10958 (new_cpup->phys_id == cpup->phys_id) &&
10959 (new_cpup->core_id == cpup->core_id))
10960 goto found_hdwq;
10961 new_cpu = cpumask_next(
10962 new_cpu, cpu_present_mask);
10963 if (new_cpu == nr_cpumask_bits)
10964 new_cpu = first_cpu;
10965 }
10966
10967
10968
10969
10970 new_cpu = start_cpu;
10971 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10972 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10973 if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
10974 (new_cpup->phys_id == cpup->phys_id))
10975 goto found_hdwq;
10976 new_cpu = cpumask_next(
10977 new_cpu, cpu_present_mask);
10978 if (new_cpu == nr_cpumask_bits)
10979 new_cpu = first_cpu;
10980 }
10981
10982
10983 cpup->hdwq = idx % phba->cfg_hdw_queue;
10984 goto logit;
10985found_hdwq:
10986
10987 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10988 if (start_cpu == nr_cpumask_bits)
10989 start_cpu = first_cpu;
10990 cpup->hdwq = new_cpup->hdwq;
10991 } else {
10992
10993 cpup->hdwq = idx;
10994 }
10995logit:
10996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10997 "3335 Set Affinity: CPU %d (phys %d core %d): "
10998 "hdwq %d eq %d irq %d flg x%x\n",
10999 cpu, cpup->phys_id, cpup->core_id,
11000 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
11001 idx++;
11002 }
11003
11004
11005
11006
11007 return;
11008}
11009
11010
11011
11012
11013
11014
11015
11016
11017
11018
11019
11020
11021static int
11022lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11023{
11024 int vectors, rc, index;
11025 char *name;
11026
11027
11028 vectors = phba->cfg_irq_chann;
11029
11030 rc = pci_alloc_irq_vectors(phba->pcidev,
11031 1,
11032 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
11033 if (rc < 0) {
11034 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11035 "0484 PCI enable MSI-X failed (%d)\n", rc);
11036 goto vec_fail_out;
11037 }
11038 vectors = rc;
11039
11040
11041 for (index = 0; index < vectors; index++) {
11042 name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
11043 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11044 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
11045 LPFC_DRIVER_HANDLER_NAME"%d", index);
11046
11047 phba->sli4_hba.hba_eq_hdl[index].idx = index;
11048 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11049 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11050 &lpfc_sli4_hba_intr_handler, 0,
11051 name,
11052 &phba->sli4_hba.hba_eq_hdl[index]);
11053 if (rc) {
11054 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11055 "0486 MSI-X fast-path (%d) "
11056 "request_irq failed (%d)\n", index, rc);
11057 goto cfg_fail_out;
11058 }
11059 }
11060
11061 if (vectors != phba->cfg_irq_chann) {
11062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11063 "3238 Reducing IO channels to match number of "
11064 "MSI-X vectors, requested %d got %d\n",
11065 phba->cfg_irq_chann, vectors);
11066 if (phba->cfg_irq_chann > vectors)
11067 phba->cfg_irq_chann = vectors;
11068 if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
11069 phba->cfg_nvmet_mrq = vectors;
11070 }
11071
11072 return rc;
11073
11074cfg_fail_out:
11075
11076 for (--index; index >= 0; index--)
11077 free_irq(pci_irq_vector(phba->pcidev, index),
11078 &phba->sli4_hba.hba_eq_hdl[index]);
11079
11080
11081 pci_free_irq_vectors(phba->pcidev);
11082
11083vec_fail_out:
11084 return rc;
11085}
11086
11087
11088
11089
11090
11091
11092
11093
11094
11095
11096
11097
11098
11099
11100
11101static int
11102lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11103{
11104 int rc, index;
11105
11106 rc = pci_enable_msi(phba->pcidev);
11107 if (!rc)
11108 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11109 "0487 PCI enable MSI mode success.\n");
11110 else {
11111 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11112 "0488 PCI enable MSI mode failed (%d)\n", rc);
11113 return rc;
11114 }
11115
11116 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11117 0, LPFC_DRIVER_NAME, phba);
11118 if (rc) {
11119 pci_disable_msi(phba->pcidev);
11120 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11121 "0490 MSI request_irq failed (%d)\n", rc);
11122 return rc;
11123 }
11124
11125 for (index = 0; index < phba->cfg_irq_chann; index++) {
11126 phba->sli4_hba.hba_eq_hdl[index].idx = index;
11127 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11128 }
11129
11130 return 0;
11131}
11132
11133
11134
11135
11136
11137
11138
11139
11140
11141
11142
11143
11144
11145
11146
11147
11148
11149static uint32_t
11150lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11151{
11152 uint32_t intr_mode = LPFC_INTR_ERROR;
11153 int retval, idx;
11154
11155 if (cfg_mode == 2) {
11156
11157 retval = 0;
11158 if (!retval) {
11159
11160 retval = lpfc_sli4_enable_msix(phba);
11161 if (!retval) {
11162
11163 phba->intr_type = MSIX;
11164 intr_mode = 2;
11165 }
11166 }
11167 }
11168
11169
11170 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11171 retval = lpfc_sli4_enable_msi(phba);
11172 if (!retval) {
11173
11174 phba->intr_type = MSI;
11175 intr_mode = 1;
11176 }
11177 }
11178
11179
11180 if (phba->intr_type == NONE) {
11181 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11182 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11183 if (!retval) {
11184 struct lpfc_hba_eq_hdl *eqhdl;
11185
11186
11187 phba->intr_type = INTx;
11188 intr_mode = 0;
11189
11190 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11191 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
11192 eqhdl->idx = idx;
11193 eqhdl->phba = phba;
11194 }
11195 }
11196 }
11197 return intr_mode;
11198}
11199
11200
11201
11202
11203
11204
11205
11206
11207
11208
11209static void
11210lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11211{
11212
11213 if (phba->intr_type == MSIX) {
11214 int index;
11215
11216
11217 for (index = 0; index < phba->cfg_irq_chann; index++) {
11218 irq_set_affinity_hint(
11219 pci_irq_vector(phba->pcidev, index),
11220 NULL);
11221 free_irq(pci_irq_vector(phba->pcidev, index),
11222 &phba->sli4_hba.hba_eq_hdl[index]);
11223 }
11224 } else {
11225 free_irq(phba->pcidev->irq, phba);
11226 }
11227
11228 pci_free_irq_vectors(phba->pcidev);
11229
11230
11231 phba->intr_type = NONE;
11232 phba->sli.slistat.sli_intr = 0;
11233}
11234
11235
11236
11237
11238
11239
11240
11241
/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * Tear down an SLI-3 HBA that completed initialization: mark the
 * physical port unloading, free the VPI resources, stop the timers,
 * bring the SLI layer down, restart the board and finally release the
 * device interrupt.  The ordering here matters — interrupts are only
 * disabled after the SLI teardown/restart has completed.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Mark the port unloading under the host lock */
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}
11267
11268
11269
11270
11271
11272
11273
11274
11275
11276
11277
11278
11279
11280
11281static void
11282lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11283{
11284 struct lpfc_sli4_hdw_queue *qp;
11285 int idx, ccnt, fcnt;
11286 int wait_time = 0;
11287 int io_xri_cmpl = 1;
11288 int nvmet_xri_cmpl = 1;
11289 int fcp_xri_cmpl = 1;
11290 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11291
11292
11293
11294
11295
11296 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11297
11298
11299 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11300 lpfc_nvme_wait_for_io_drain(phba);
11301
11302 ccnt = 0;
11303 fcnt = 0;
11304 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11305 qp = &phba->sli4_hba.hdwq[idx];
11306 fcp_xri_cmpl = list_empty(
11307 &qp->lpfc_abts_scsi_buf_list);
11308 if (!fcp_xri_cmpl)
11309 fcnt++;
11310 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11311 io_xri_cmpl = list_empty(
11312 &qp->lpfc_abts_nvme_buf_list);
11313 if (!io_xri_cmpl)
11314 ccnt++;
11315 }
11316 }
11317 if (ccnt)
11318 io_xri_cmpl = 0;
11319 if (fcnt)
11320 fcp_xri_cmpl = 0;
11321
11322 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11323 nvmet_xri_cmpl =
11324 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11325 }
11326
11327 while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl ||
11328 !nvmet_xri_cmpl) {
11329 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
11330 if (!nvmet_xri_cmpl)
11331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11332 "6424 NVMET XRI exchange busy "
11333 "wait time: %d seconds.\n",
11334 wait_time/1000);
11335 if (!io_xri_cmpl)
11336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11337 "6100 NVME XRI exchange busy "
11338 "wait time: %d seconds.\n",
11339 wait_time/1000);
11340 if (!fcp_xri_cmpl)
11341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11342 "2877 FCP XRI exchange busy "
11343 "wait time: %d seconds.\n",
11344 wait_time/1000);
11345 if (!els_xri_cmpl)
11346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11347 "2878 ELS XRI exchange busy "
11348 "wait time: %d seconds.\n",
11349 wait_time/1000);
11350 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
11351 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
11352 } else {
11353 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
11354 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
11355 }
11356
11357 ccnt = 0;
11358 fcnt = 0;
11359 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11360 qp = &phba->sli4_hba.hdwq[idx];
11361 fcp_xri_cmpl = list_empty(
11362 &qp->lpfc_abts_scsi_buf_list);
11363 if (!fcp_xri_cmpl)
11364 fcnt++;
11365 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11366 io_xri_cmpl = list_empty(
11367 &qp->lpfc_abts_nvme_buf_list);
11368 if (!io_xri_cmpl)
11369 ccnt++;
11370 }
11371 }
11372 if (ccnt)
11373 io_xri_cmpl = 0;
11374 if (fcnt)
11375 fcp_xri_cmpl = 0;
11376
11377 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11378 nvmet_xri_cmpl = list_empty(
11379 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11380 }
11381 els_xri_cmpl =
11382 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11383
11384 }
11385}
11386
11387
11388
11389
11390
11391
11392
11393
11394
11395
11396
/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * Full SLI-4 HBA teardown: stop timers, block and drain/flush any
 * active mailbox command, abort outstanding IOCBs, wait for busy XRI
 * exchanges to complete, disable interrupts, release SR-IOV VFs, stop
 * the worker thread and firmware logging, unset and destroy the queues,
 * and finally reset the PCI function.  The ordering of these steps is
 * deliberate and must be preserved.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}

	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Disable FW logging to host memory */
	lpfc_ras_stop_fwlog(phba);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
	lpfc_sli4_queue_unset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Free RAS DMA memory */
	if (phba->ras_fwlog.ras_enabled)
		lpfc_sli4_ras_dma_free(phba);

	/* Stop the SLI4 device port */
	if (phba->pport)
		phba->pport->work_port_events = 0;
}
11470
11471
11472
11473
11474
11475
11476
11477
11478
11479
11480
11481
11482
/**
 * lpfc_pc_sli4_params_get - Get the SLI4 port capabilities.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Issue the (legacy) sli4_params mailbox command — by polling when
 * interrupts are not yet enabled, otherwise with a timed wait — and copy
 * the reported capabilities into phba->sli4_hba.pc_sli4_params.
 *
 * Return codes
 *   0 - successful
 *   1 - the mailbox command failed
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	/* Extract every capability field from the mailbox response */
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}
11542
11543
11544
11545
11546
11547
11548
11549
11550
11551
11552
11553
11554
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Issue the COMMON_GET_SLI4_PARAMETERS mailbox command and populate
 * phba->sli4_hba.pc_sli4_params plus assorted feature flags
 * (NVME support, PBDE, suppress-response, EQ delay register, embedded
 * FCP IO, expanded WQ/CQ pages, MDS diagnostics).
 *
 * Return codes
 *   0 - successful
 *   non-zero - the mailbox command failed, or -ENODEV when the firmware
 *              supports neither of the FC4 types the user configured
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	bool exp_wqcq_pages = true;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings.  The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Check for firmware nvme support: needs both cfg_nvme and cfg_xib */
	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
	      bf_get(cfg_xib, mbx_sli4_parameters));

	if (rc) {
		/* Save this to indicate the Firmware supports NVME */
		sli4_params->nvme = 1;

		/* Firmware supports NVME, but driver is configured FCP only */
		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
					"6133 Disabling NVME support: "
					"FC4 type not supported: x%x\n",
					phba->cfg_enable_fc4_type);
			goto fcponly;
		}
	} else {
		/* No firmware NVME support, turn off all NVME bookkeeping */
		sli4_params->nvme = 0;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
					"6101 Disabling NVME support: Not "
					"supported by firmware (%d %d) x%x\n",
					bf_get(cfg_nvme, mbx_sli4_parameters),
					bf_get(cfg_xib, mbx_sli4_parameters),
					phba->cfg_enable_fc4_type);
fcponly:
			phba->nvme_support = 0;
			phba->nvmet_support = 0;
			phba->cfg_nvmet_mrq = 0;

			/* If no FCP support, driver can't operate at all */
			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
				return -ENODEV;
			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		}
	}

	/* Only embed PBDE for if_type 6 with XIB support */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
		phba->cfg_enable_pbde = 0;

	/*
	 * To support Suppress Response feature we must satisfy 3 conditions:
	 * lpfc_suppress_rsp module parameter must be set (default).
	 * In SLI4-Parameters Descriptor:
	 * Extended Inline Buffers (XIB) must be supported.
	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
	 * (double negative).
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, phba->nvme_support,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	/* Lancer A0 does not support expanded WQ/CQ pages */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;
	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	return 0;
}
11723
11724
11725
11726
11727
11728
11729
11730
11731
11732
11733
11734
11735
11736
11737
11738
11739
11740
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is invoked from the kernel's PCI subsystem to attach an HBA
 * with the SLI-3 interface spec. It sets up the device in this order: PCI
 * enable, API jump table, PCI memory space, driver resources, iocb list,
 * SCSI host creation and sysfs attributes, then brings the port up while
 * stepping down through interrupt modes (MSI-X -> MSI -> INTx) until one
 * passes the active-interrupt test. Every setup step has a matching error
 * label that unwinds it in reverse order on failure.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */

	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values of Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
11891
11892
11893
11894
11895
11896
11897
11898
11899
11900
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is invoked from the kernel's PCI subsystem to detach an HBA
 * with the SLI-3 interface spec. It marks the port as unloading, tears down
 * the vports, removes the FC/SCSI hosts, brings down the SLI layer, and
 * releases all PCI/DMA resources in reverse order of their acquisition.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the physical port as unloading */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to
	 * their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	/* Free the HBQ DMA area */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
11989
11990
11991
11992
11993
11994
11995
11996
11997
11998
11999
12000
12001
12002
12003
12004
12005
12006
12007
12008
12009
12010
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * Invoked from the kernel's PCI subsystem for Power Management. Takes the
 * port offline, stops the worker thread, disables the device interrupt, and
 * puts the device into the PCI D3hot power state.
 *
 * Return code
 * 	0 - driver suspended the device
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
12034
12035
12036
12037
12038
12039
12040
12041
12042
12043
12044
12045
12046
12047
12048
12049
12050
12051
12052
12053
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * Invoked from the kernel's PCI subsystem for Power Management. Restores
 * the PCI device power state, restarts the worker thread, re-enables the
 * interrupt in the last known mode, and brings the device back online.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise - driver failed to resume the device
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
12107
12108
12109
12110
12111
12112
12113
12114
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI-3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-3 device for PCI slot recovery. Aborts the
 * outstanding FCP I/Os so the SCSI mid-layer can retry them after recovery.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
12127
12128
12129
12130
12131
12132
12133
12134
12135
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI-3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-3 device for PCI slot reset: blocks management
 * and SCSI I/O, flushes outstanding FCP I/Os, stops timers, then disables
 * the interrupt and the PCI device itself.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
12158
12159
12160
12161
12162
12163
12164
12165
12166
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI-3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-3 device for PCI slot permanently disabling:
 * blocks all SCSI I/O, stops the HBA timers, and flushes whatever is still
 * outstanding on the FCP rings. The PCI device is not touched further
 * because the channel is permanently gone.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
12181
12182
12183
12184
12185
12186
12187
12188
12189
12190
12191
12192
12193
12194
12195
12196
12197
12198
12199
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * Called from the PCI error-recovery subsystem when an SLI-3 device sees a
 * PCI bus error. Prepares the device according to the reported channel
 * state and tells the subsystem how to proceed.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
12227
12228
12229
12230
12231
12232
12233
12234
12235
12236
12237
12238
12239
12240
12241
12242
12243
12244
12245
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * Called from the PCI error-recovery subsystem after the PCI bus has been
 * reset. Re-enables and restores the device, re-enables the interrupt in
 * the last known mode, and restarts the HBA so traffic can resume at
 * lpfc_io_resume_s3().
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Mark SLI inactive so the HBA is brought up from scratch */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup, then restart */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
12296
12297
12298
12299
12300
12301
12302
12303
12304
12305
12306
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * Called when the error-recovery driver tells us that its OK to resume
 * normal PCI operation after PCI bus error recovery. Brings the device
 * back online.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}
12316
12317
12318
12319
12320
12321
12322
12323int
12324lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12325{
12326 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12327
12328 if (phba->sli_rev == LPFC_SLI_REV4) {
12329 if (max_xri <= 100)
12330 return 10;
12331 else if (max_xri <= 256)
12332 return 25;
12333 else if (max_xri <= 512)
12334 return 50;
12335 else if (max_xri <= 1024)
12336 return 100;
12337 else if (max_xri <= 1536)
12338 return 150;
12339 else if (max_xri <= 2048)
12340 return 200;
12341 else
12342 return 250;
12343 } else
12344 return 0;
12345}
12346
12347
12348
12349
12350
12351
12352
12353int
12354lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12355{
12356 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12357
12358 if (phba->nvmet_support)
12359 max_xri += LPFC_NVMET_BUF_POST;
12360 return max_xri;
12361}
12362
12363
12364static void
12365lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12366 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12367 const struct firmware *fw)
12368{
12369 if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
12370 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12371 magic_number != MAGIC_NUMER_G6) ||
12372 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12373 magic_number != MAGIC_NUMER_G7))
12374 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12375 "3030 This firmware version is not supported on "
12376 "this HBA model. Device:%x Magic:%x Type:%x "
12377 "ID:%x Size %d %zd\n",
12378 phba->pcidev->device, magic_number, ftype, fid,
12379 fsize, fw->size);
12380 else
12381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12382 "3022 FW Download failed. Device:%x Magic:%x Type:%x "
12383 "ID:%x Size %d %zd\n",
12384 phba->pcidev->device, magic_number, ftype, fid,
12385 fsize, fw->size);
12386}
12387
12388
12389
12390
12391
12392
12393
12394
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: firmware image from the loader; may be NULL if the lookup failed
 *      when invoked as the request_firmware_nowait() callback
 * @context: pointer to lpfc hba data structure (cast from void *)
 *
 * If the image revision differs from the currently running firmware, the
 * image is copied through a list of LPFC_MBX_WR_CONFIG_MAX_BDE DMA buffers
 * of SLI4_PAGE_SIZE each and downloaded chunk-by-chunk via lpfc_wr_object()
 * until the whole image has been written. The firmware reference is always
 * released before returning.
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	/* Only download when the image revision differs from the running one */
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		/* Pre-allocate the DMA buffer list used for each chunk */
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		/* Copy and write the image one buffer-list worth at a time */
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					/* Last partial page of the image */
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			/* lpfc_wr_object() advances offset by what was written */
			rc = lpfc_wr_object(phba, &dma_buffer_list,
				    (fw->size - offset), &offset);
			if (rc) {
				lpfc_log_write_firmware_error(phba, offset,
					magic_number, ftype, fid, fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	/* Free the per-chunk DMA buffers */
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
	return;
}
12486
12487
12488
12489
12490
12491
12492
12493
12494int
12495lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
12496{
12497 uint8_t file_name[ELX_MODEL_NAME_SIZE];
12498 int ret;
12499 const struct firmware *fw;
12500
12501
12502 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
12503 LPFC_SLI_INTF_IF_TYPE_2)
12504 return -EPERM;
12505
12506 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
12507
12508 if (fw_upgrade == INT_FW_UPGRADE) {
12509 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
12510 file_name, &phba->pcidev->dev,
12511 GFP_KERNEL, (void *)phba,
12512 lpfc_write_firmware);
12513 } else if (fw_upgrade == RUN_FW_UPGRADE) {
12514 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
12515 if (!ret)
12516 lpfc_write_firmware(fw, (void *)phba);
12517 } else {
12518 ret = -EINVAL;
12519 }
12520
12521 return ret;
12522}
12523
12524
12525
12526
12527
12528
12529
12530
12531
12532
12533
12534
12535
12536
12537
12538
12539
12540
12541
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is invoked from the kernel's PCI subsystem to attach an HBA
 * with the SLI-4 interface spec. It sets up the device in this order: PCI
 * enable, API jump table, PCI memory space, SLI-4 and common driver
 * resources, interrupt enable, SCSI host creation, SLI-4 HBA setup, then
 * optional NVME localport registration, firmware update request, static
 * vport creation, and RAS setup. Every setup step has a matching error
 * label that unwinds it in reverse order on failure.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values of Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Without MSI-X, fall back to a single IRQ channel (and a single
	 * NVMET MRQ when NVME target mode is enabled).
	 */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Register with the nvme transport when running as an NVME
	 * initiator (not in target mode) and NVME is enabled.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized. If the localport
			 * create fails, the driver load is not failed so
			 * SCSI operation can continue.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
12706
12707
12708
12709
12710
12711
12712
12713
12714
12715
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is invoked from the kernel's PCI subsystem to detach an HBA
 * with the SLI-4 interface spec. It marks the port as unloading, tears down
 * the vports, removes the FC/SCSI hosts, destroys the NVME target/local
 * ports, brings down the SLI-4 layer, and releases all PCI resources in
 * reverse order of their acquisition.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port. The nvme and nvmet
	 * localports are destroyed after to cleanup all transport memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}
12792
12793
12794
12795
12796
12797
12798
12799
12800
12801
12802
12803
12804
12805
12806
12807
12808
12809
12810
12811
12812
12813
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * Invoked from the kernel's PCI subsystem for Power Management. Takes the
 * port offline, stops the worker thread, disables the interrupt and
 * destroys the SLI-4 queues, then puts the device into D3hot.
 *
 * Return code
 * 	0 - driver suspended the device
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
12838
12839
12840
12841
12842
12843
12844
12845
12846
12847
12848
12849
12850
12851
12852
12853
12854
12855
12856
12857
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * Invoked from the kernel's PCI subsystem for Power Management. Restores
 * the PCI device power state, restarts the worker thread, re-enables the
 * interrupt in the last known mode, and brings the device back online.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise - driver failed to resume the device
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	 /* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
12911
12912
12913
12914
12915
12916
12917
12918
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI-4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-4 device for PCI slot recovery. Aborts the
 * outstanding FCP I/Os so the SCSI mid-layer can retry them after recovery.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
12930
12931
12932
12933
12934
12935
12936
12937
12938
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI-4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-4 device for PCI slot reset: blocks management
 * and SCSI I/O, flushes outstanding FCP (and NVME when enabled) I/Os, stops
 * timers, then disables the interrupt, destroys the queues, and disables
 * the PCI device itself.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* Flush the outstanding NVME IOs if fc4 type enabled. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_sli_flush_nvme_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
12966
12967
12968
12969
12970
12971
12972
12973
12974
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI-4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-4 device for PCI slot permanently disabling:
 * blocks all SCSI I/O, stops the HBA timers, and flushes whatever is still
 * outstanding on the FCP (and NVME when enabled) rings. The PCI device is
 * not touched further because the channel is permanently gone.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);

	/* Flush the outstanding NVME IOs if fc4 type enabled. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_sli_flush_nvme_rings(phba);
}
12994
12995
12996
12997
12998
12999
13000
13001
13002
13003
13004
13005
13006
13007
13008
13009
13010
/**
 * lpfc_io_error_detected_s4 - Method for handling SLI-4 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * Called from the PCI error-recovery subsystem when an SLI-4 device sees a
 * PCI bus error. Prepares the device according to the reported channel
 * state and tells the subsystem how to proceed.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
13038
13039
13040
13041
13042
13043
13044
13045
13046
13047
13048
13049
13050
13051
13052
13053
13054
13055
13056
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * Called from the PCI error-recovery subsystem after the PCI bus has been
 * reset. Re-enables and restores the device and re-enables the interrupt
 * in the last known mode; the actual function reset is deferred to
 * lpfc_io_resume_s4() because it needs DMA enabled.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Mark SLI inactive so the HBA is brought up from scratch */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
13102
13103
13104
13105
13106
13107
13108
13109
13110
13111
13112
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * Called when the error-recovery driver tells us that its OK to resume
 * normal PCI operation after PCI bus error recovery. If the SLI layer is
 * not active (i.e. a slot reset occurred), performs the deferred function
 * reset by taking the device offline and back online.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
}
13134
13135
13136
13137
13138
13139
13140
13141
13142
13143
13144
13145
13146
13147
13148
13149
13150
13151
13152
13153static int
13154lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13155{
13156 int rc;
13157 struct lpfc_sli_intf intf;
13158
13159 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13160 return -ENODEV;
13161
13162 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13163 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13164 rc = lpfc_pci_probe_one_s4(pdev, pid);
13165 else
13166 rc = lpfc_pci_probe_one_s3(pdev, pid);
13167
13168 return rc;
13169}
13170
13171
13172
13173
13174
13175
13176
13177
13178
13179
13180
13181static void
13182lpfc_pci_remove_one(struct pci_dev *pdev)
13183{
13184 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13185 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13186
13187 switch (phba->pci_dev_grp) {
13188 case LPFC_PCI_DEV_LP:
13189 lpfc_pci_remove_one_s3(pdev);
13190 break;
13191 case LPFC_PCI_DEV_OC:
13192 lpfc_pci_remove_one_s4(pdev);
13193 break;
13194 default:
13195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13196 "1424 Invalid PCI device group: 0x%x\n",
13197 phba->pci_dev_grp);
13198 break;
13199 }
13200 return;
13201}
13202
13203
13204
13205
13206
13207
13208
13209
13210
13211
13212
13213
13214
13215
13216
13217static int
13218lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
13219{
13220 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13221 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13222 int rc = -ENODEV;
13223
13224 switch (phba->pci_dev_grp) {
13225 case LPFC_PCI_DEV_LP:
13226 rc = lpfc_pci_suspend_one_s3(pdev, msg);
13227 break;
13228 case LPFC_PCI_DEV_OC:
13229 rc = lpfc_pci_suspend_one_s4(pdev, msg);
13230 break;
13231 default:
13232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13233 "1425 Invalid PCI device group: 0x%x\n",
13234 phba->pci_dev_grp);
13235 break;
13236 }
13237 return rc;
13238}
13239
13240
13241
13242
13243
13244
13245
13246
13247
13248
13249
13250
13251
13252
13253static int
13254lpfc_pci_resume_one(struct pci_dev *pdev)
13255{
13256 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13257 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13258 int rc = -ENODEV;
13259
13260 switch (phba->pci_dev_grp) {
13261 case LPFC_PCI_DEV_LP:
13262 rc = lpfc_pci_resume_one_s3(pdev);
13263 break;
13264 case LPFC_PCI_DEV_OC:
13265 rc = lpfc_pci_resume_one_s4(pdev);
13266 break;
13267 default:
13268 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13269 "1426 Invalid PCI device group: 0x%x\n",
13270 phba->pci_dev_grp);
13271 break;
13272 }
13273 return rc;
13274}
13275
13276
13277
13278
13279
13280
13281
13282
13283
13284
13285
13286
13287
13288
13289
13290
13291static pci_ers_result_t
13292lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13293{
13294 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13295 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13296 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13297
13298 switch (phba->pci_dev_grp) {
13299 case LPFC_PCI_DEV_LP:
13300 rc = lpfc_io_error_detected_s3(pdev, state);
13301 break;
13302 case LPFC_PCI_DEV_OC:
13303 rc = lpfc_io_error_detected_s4(pdev, state);
13304 break;
13305 default:
13306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13307 "1427 Invalid PCI device group: 0x%x\n",
13308 phba->pci_dev_grp);
13309 break;
13310 }
13311 return rc;
13312}
13313
13314
13315
13316
13317
13318
13319
13320
13321
13322
13323
13324
13325
13326
13327
13328static pci_ers_result_t
13329lpfc_io_slot_reset(struct pci_dev *pdev)
13330{
13331 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13332 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13333 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13334
13335 switch (phba->pci_dev_grp) {
13336 case LPFC_PCI_DEV_LP:
13337 rc = lpfc_io_slot_reset_s3(pdev);
13338 break;
13339 case LPFC_PCI_DEV_OC:
13340 rc = lpfc_io_slot_reset_s4(pdev);
13341 break;
13342 default:
13343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13344 "1428 Invalid PCI device group: 0x%x\n",
13345 phba->pci_dev_grp);
13346 break;
13347 }
13348 return rc;
13349}
13350
13351
13352
13353
13354
13355
13356
13357
13358
13359
13360
13361static void
13362lpfc_io_resume(struct pci_dev *pdev)
13363{
13364 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13365 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13366
13367 switch (phba->pci_dev_grp) {
13368 case LPFC_PCI_DEV_LP:
13369 lpfc_io_resume_s3(pdev);
13370 break;
13371 case LPFC_PCI_DEV_OC:
13372 lpfc_io_resume_s4(pdev);
13373 break;
13374 default:
13375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13376 "1429 Invalid PCI device group: 0x%x\n",
13377 phba->pci_dev_grp);
13378 break;
13379 }
13380 return;
13381}
13382
13383
13384
13385
13386
13387
13388
13389
13390
13391
13392
13393static void
13394lpfc_sli4_oas_verify(struct lpfc_hba *phba)
13395{
13396
13397 if (!phba->cfg_EnableXLane)
13398 return;
13399
13400 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
13401 phba->cfg_fof = 1;
13402 } else {
13403 phba->cfg_fof = 0;
13404 if (phba->device_data_mem_pool)
13405 mempool_destroy(phba->device_data_mem_pool);
13406 phba->device_data_mem_pool = NULL;
13407 }
13408
13409 return;
13410}
13411
13412
13413
13414
13415
13416
13417
13418
13419void
13420lpfc_sli4_ras_init(struct lpfc_hba *phba)
13421{
13422 switch (phba->pcidev->device) {
13423 case PCI_DEVICE_ID_LANCER_G6_FC:
13424 case PCI_DEVICE_ID_LANCER_G7_FC:
13425 phba->ras_fwlog.ras_hwsupport = true;
13426 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
13427 phba->cfg_ras_fwlog_buffsize)
13428 phba->ras_fwlog.ras_enabled = true;
13429 else
13430 phba->ras_fwlog.ras_enabled = false;
13431 break;
13432 default:
13433 phba->ras_fwlog.ras_hwsupport = false;
13434 }
13435}
13436
13437
/* Export the PCI device ID table so hotplug/module loading can match HBAs. */
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

/* PCI error recovery (AER/EEH) callbacks; each dispatches to the SLI-3 or
 * SLI-4 specific handler based on phba->pci_dev_grp.
 */
static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};
13445
/* PCI driver registration; .shutdown reuses the remove path so the HBA is
 * quiesced the same way on system shutdown as on hot removal.
 */
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
};
13456
/* File operations for the lpfcmgmt character device. No custom methods are
 * provided; presumably the node exists so management tools can locate the
 * driver — TODO confirm against userspace tooling.
 */
static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

/* Dynamic-minor misc device registered as "/dev/lpfcmgmt" in lpfc_init(). */
static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
13466
13467
13468
13469
13470
13471
13472
13473
13474
13475
13476
13477
13478
13479static int __init
13480lpfc_init(void)
13481{
13482 int error = 0;
13483
13484 printk(LPFC_MODULE_DESC "\n");
13485 printk(LPFC_COPYRIGHT "\n");
13486
13487 error = misc_register(&lpfc_mgmt_dev);
13488 if (error)
13489 printk(KERN_ERR "Could not register lpfcmgmt device, "
13490 "misc_register returned with status %d", error);
13491
13492 lpfc_transport_functions.vport_create = lpfc_vport_create;
13493 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
13494 lpfc_transport_template =
13495 fc_attach_transport(&lpfc_transport_functions);
13496 if (lpfc_transport_template == NULL)
13497 return -ENOMEM;
13498 lpfc_vport_transport_template =
13499 fc_attach_transport(&lpfc_vport_transport_functions);
13500 if (lpfc_vport_transport_template == NULL) {
13501 fc_release_transport(lpfc_transport_template);
13502 return -ENOMEM;
13503 }
13504 lpfc_nvme_cmd_template();
13505 lpfc_nvmet_cmd_template();
13506
13507
13508 lpfc_present_cpu = num_present_cpus();
13509
13510 error = pci_register_driver(&lpfc_driver);
13511 if (error) {
13512 fc_release_transport(lpfc_transport_template);
13513 fc_release_transport(lpfc_vport_transport_template);
13514 }
13515
13516 return error;
13517}
13518
13519
13520
13521
13522
13523
13524
13525
/**
 * lpfc_exit - lpfc module removal routine
 *
 * Invoked when the lpfc module is removed from the kernel. Deregisters the
 * lpfcmgmt misc device, unregisters the PCI driver, releases both FC
 * transport templates, frees the BlockGuard debug dump buffers if they
 * were allocated, and destroys the HBA index IDR.
 **/
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	/* Free the BlockGuard data dump buffer, if one was ever allocated. */
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	/* Likewise for the DIF (protection data) dump buffer. */
	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	idr_destroy(&lpfc_hba_index);
}
13548
/* Module entry/exit points and standard module metadata. */
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
13555