/*
 * Emulex Linux Device Driver for Fibre Channel Host Bus Adapters (lpfc).
 * Released under the terms of the GNU General Public License, version 2.
 */
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;

static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

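	/* LightPulse (LC) HBAs take the "licensed" string above, byte
	 * swapped to big-endian words once on first use, as an unlock key
	 * carried in the READ_NVPARM mailbox issued below.
	 */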
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set.
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}

		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command that retrieves
 * the adapter's wakeup parameters. On success it decodes the Option ROM
 * version word into phba->OptionROMVersion; on failure it just frees the
 * mailbox.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Updates the vport's node and port world-wide names from the service
 * parameters, honoring any user-configured soft WWNs and a fabric-assigned
 * WWPN (FA-WWPN) when the vendor version key indicates one is present.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

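		/* Each of the 6 WWNN bytes below expands into two lowercase
		 * hex characters: 0x30 + nibble maps to '0'-'9' and
		 * 0x61 + (nibble - 10) maps to 'a'-'f'.
		 */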
		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Cap the configured HBA queue depth at the adapter's max XRI count */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
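	/* Each HC_RxINT_ENA bit below enables ring attention interrupts for
	 * one SLI3 ring; ring-0 interrupts are masked again further down
	 * when FCP ring polling is configured.
	 */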
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr);
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
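	/* (fc_ratov is in seconds, so the ELS timeout used above is twice
	 * the resource-allocation timeout value, converted to jiffies.)
	 */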
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

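	/* Reject a user-requested fixed link speed that the adapter's
	 * link-speed mask (phba->lmt) does not advertise and fall back
	 * to auto-negotiation.
	 */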
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will free the driver-posted receive buffers (on the ELS
 * ring, or all HBQ buffers when HBQs are enabled) after the HBA is reset
 * when bringing down the SLI Layer.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq on each of the rings after the HBA
 * is reset when bringing down the SLI Layer.
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

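		/* Clear command and status on each aborted buffer, then move
		 * the whole batch back to the put list; the abort counters
		 * are folded into put_io_bufs below.
		 */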
		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, the driver sets the HBA_RRQ_ACTIVE flag (unless the
 * driver is unloading, in which case the flag is cleared) and wakes the
 * worker thread to process outstanding RRQs.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL seconds. When the mailbox command
 * completes, this callback clears the heart-beat outstanding state and,
 * provided the port is online and the driver is not unloading, rearms the
 * heart-beat timer for the next LPFC_HB_MBOX_INTERVAL period.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
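		/* Derive the coalescing delay from the per-CPU EQ interrupt
		 * count: (icnt >> 10) * LPFC_EQ_DELAY_STEP microseconds,
		 * capped at LPFC_MAX_AUTO_EQ_DELAY, and only for CPUs whose
		 * EQs were flagged busy above.
		 */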
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * is heart-beat mailbox command outstanding, the HBA shall be put to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

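	/* If a completion arrived within the last heartbeat interval the
	 * link is considered healthy: just rearm the timer instead of
	 * issuing a new heartbeat mailbox command.
	 */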
	if (time_after(phba->last_completion_time +
		       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
		       jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
						  LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
							     MBX_NOWAIT);

				if (retval != MBX_BUSY &&
				    retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
						     phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
						  LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"2857 Last completion time not "
						" updated in %d ms\n",
						jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
					- phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA hardware
 * deferred error attention. It takes the port offline, waits for the
 * HS_FFER1 condition to clear (or the driver to unload), and then restores
 * the saved host status so the error can be processed.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs,
			phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

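/* lpfc_board_errevt_to_mgmt - Post a board error event to the management
 * application through the fc transport's vendor-unique event mechanism.
 */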
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(event_data),
					  (char *) &event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. On interface types
 * that require it, it first waits for the port to report ready, then takes
 * the port offline, restarts the board, re-enables interrupts, and brings
 * the port back online.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt the port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3166 pci channel is offline\n");
		lpfc_sli4_offline_eratt(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci channel offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7623 Checking UE recoverable");

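		/* Poll the port semaphore register roughly once per second,
		 * up to ue_to_sr (assumed to be in milliseconds here), for
		 * the port to report the recoverable-UE state.
		 */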
		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
						&portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
					    &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba,
						KERN_ERR, LOG_INIT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci channel offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			   reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
						 en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error\n");
		phba->link_state = LPFC_HBA_ERROR;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   void.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of bytes until the routine has completed.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
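	/* Walk the VPD as a stream of resource descriptors: tags 0x82/0x91
	 * (identifier strings) are skipped over by their 16-bit
	 * little-endian length, tag 0x90 (read-only data) is parsed for the
	 * SN and V1-V4 keywords below, and tag 0x78 ends the list.
	 */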
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] = vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] = vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						if ((phba->sli_rev == LPFC_SLI_REV4) &&
						    (phba->sli4_hba.pport_name_sta ==
						     LPFC_SLI4_PPNAME_GET)) {
							j++;
							index++;
						} else
							phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					if ((phba->sli_rev != LPFC_SLI_REV4) ||
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_NON))
						phba->Port[j] = 0;
					continue;
				} else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return(1);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp buffer is filled with the model name, maximum link
 * speed, and host bus type; the @mdp buffer receives the model name. If a
 * description cannot be generated, "<Unknown>" is used.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0;
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
	    && descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe",
				"Obsolete, Unsupported FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2470 break;
2471 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2472 oneConnect = 1;
2473 m = (typeof(m)){"OCe15100", "PCIe",
2474 "Obsolete, Unsupported FCoE"};
2475 break;
2476 case PCI_DEVICE_ID_LANCER_G6_FC:
2477 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2478 break;
2479 case PCI_DEVICE_ID_LANCER_G7_FC:
2480 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2481 break;
2482 case PCI_DEVICE_ID_SKYHAWK:
2483 case PCI_DEVICE_ID_SKYHAWK_VF:
2484 oneConnect = 1;
2485 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2486 break;
2487 default:
2488 m = (typeof(m)){"Unknown", "", ""};
2489 break;
2490 }
2491
2492 if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
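
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */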
2498 if (descp && descp[0] == '\0') {
2499 if (oneConnect)
2500 snprintf(descp, 255,
2501 "Emulex OneConnect %s, %s Initiator %s",
2502 m.name, m.function,
2503 phba->Port);
2504 else if (max_speed == 0)
2505 snprintf(descp, 255,
2506 "Emulex %s %s %s",
2507 m.name, m.bus, m.function);
2508 else
2509 snprintf(descp, 255,
2510 "Emulex %s %d%s %s %s",
2511 m.name, max_speed, (GE) ? "GE" : "Gb",
2512 m.bus, m.function);
2513 }
2514}
2528int
2529lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2530{
2531 IOCB_t *icmd;
2532 struct lpfc_iocbq *iocb;
2533 struct lpfc_dmabuf *mp1, *mp2;
2534
2535 cnt += pring->missbufcnt;
2536
	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
2541 if (iocb == NULL) {
2542 pring->missbufcnt = cnt;
2543 return cnt;
2544 }
2545 icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2550 if (mp1)
2551 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2552 if (!mp1 || !mp1->virt) {
2553 kfree(mp1);
2554 lpfc_sli_release_iocbq(phba, iocb);
2555 pring->missbufcnt = cnt;
2556 return cnt;
2557 }
2558
2559 INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2563 if (mp2)
2564 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2565 &mp2->phys);
2566 if (!mp2 || !mp2->virt) {
2567 kfree(mp2);
2568 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2569 kfree(mp1);
2570 lpfc_sli_release_iocbq(phba, iocb);
2571 pring->missbufcnt = cnt;
2572 return cnt;
2573 }
2574
2575 INIT_LIST_HEAD(&mp2->list);
2576 } else {
2577 mp2 = NULL;
2578 }
2579
2580 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2581 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2582 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2583 icmd->ulpBdeCount = 1;
2584 cnt--;
2585 if (mp2) {
2586 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2587 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2588 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2589 cnt--;
2590 icmd->ulpBdeCount = 2;
2591 }
2592
2593 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2594 icmd->ulpLe = 1;
2595
2596 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2597 IOCB_ERROR) {
2598 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2599 kfree(mp1);
2600 cnt++;
2601 if (mp2) {
2602 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2603 kfree(mp2);
2604 cnt++;
2605 }
2606 lpfc_sli_release_iocbq(phba, iocb);
2607 pring->missbufcnt = cnt;
2608 return cnt;
2609 }
2610 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2611 if (mp2)
2612 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2613 }
2614 pring->missbufcnt = 0;
2615 return 0;
2616}
2629static int
2630lpfc_post_rcv_buf(struct lpfc_hba *phba)
2631{
2632 struct lpfc_sli *psli = &phba->sli;
2633
2634
2635 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2636
2637
2638 return 0;
2639}
2640
/* Rotate the 32-bit value V left by N bits */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
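
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as the hash table.
 *
 * This routine sets up the initial values to the array of hash table
 * entries (the standard SHA-1 initialization constants).
 **/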
2650static void
2651lpfc_sha_init(uint32_t * HashResultPointer)
2652{
2653 HashResultPointer[0] = 0x67452301;
2654 HashResultPointer[1] = 0xEFCDAB89;
2655 HashResultPointer[2] = 0x98BADCFE;
2656 HashResultPointer[3] = 0x10325476;
2657 HashResultPointer[4] = 0xC3D2E1F0;
2658}
2670static void
2671lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2672{
2673 int t;
2674 uint32_t TEMP;
2675 uint32_t A, B, C, D, E;
2676 t = 16;
2677 do {
		HashWorkingPointer[t] =
			S(1,
			  HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
			  HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2683 } while (++t <= 79);
2684 t = 0;
2685 A = HashResultPointer[0];
2686 B = HashResultPointer[1];
2687 C = HashResultPointer[2];
2688 D = HashResultPointer[3];
2689 E = HashResultPointer[4];
2690
2691 do {
2692 if (t < 20) {
2693 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2694 } else if (t < 40) {
2695 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2696 } else if (t < 60) {
2697 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2698 } else {
2699 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2700 }
2701 TEMP += S(5, A) + E + HashWorkingPointer[t];
2702 E = D;
2703 D = C;
2704 C = S(30, B);
2705 B = A;
2706 A = TEMP;
2707 } while (++t <= 79);
2708
2709 HashResultPointer[0] += A;
2710 HashResultPointer[1] += B;
2711 HashResultPointer[2] += C;
2712 HashResultPointer[3] += D;
2713 HashResultPointer[4] += E;
2714
2715}
2727static void
2728lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2729{
2730 *HashWorking = (*RandomChallenge ^ *HashWorking);
2731}
2740void
2741lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2742{
2743 int t;
2744 uint32_t *HashWorking;
2745 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2746
2747 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2748 if (!HashWorking)
2749 return;
2750
2751 HashWorking[0] = HashWorking[78] = *pwwnn++;
2752 HashWorking[1] = HashWorking[79] = *pwwnn;
2753
2754 for (t = 0; t < 7; t++)
2755 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2756
2757 lpfc_sha_init(hbainit);
2758 lpfc_sha_iterate(hbainit, HashWorking);
2759 kfree(HashWorking);
2760}
2771void
2772lpfc_cleanup(struct lpfc_vport *vport)
2773{
2774 struct lpfc_hba *phba = vport->phba;
2775 struct lpfc_nodelist *ndlp, *next_ndlp;
2776 int i = 0;
2777
2778 if (phba->link_state > LPFC_LINK_DOWN)
2779 lpfc_port_link_failure(vport);
2780
2781 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2782 if (!NLP_CHK_NODE_ACT(ndlp)) {
2783 ndlp = lpfc_enable_node(vport, ndlp,
2784 NLP_STE_UNUSED_NODE);
2785 if (!ndlp)
2786 continue;
2787 spin_lock_irq(&phba->ndlp_lock);
2788 NLP_SET_FREE_REQ(ndlp);
2789 spin_unlock_irq(&phba->ndlp_lock);
2790
2791 lpfc_nlp_put(ndlp);
2792 continue;
2793 }
2794 spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		}
		/* Indicate request for freeing ndlp */
		NLP_SET_FREE_REQ(ndlp);
2802 spin_unlock_irq(&phba->ndlp_lock);
2803
		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Take care of nodes in unused state before the state
		 * machine is invoked.
		 */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2815 lpfc_nlp_put(ndlp);
2816 continue;
2817 }
2818
2819 if (ndlp->nlp_type & NLP_FABRIC)
2820 lpfc_disc_state_machine(vport, ndlp, NULL,
2821 NLP_EVT_DEVICE_RECOVERY);
2822
2823 lpfc_disc_state_machine(vport, ndlp, NULL,
2824 NLP_EVT_DEVICE_RM);
2825 }

	/* At this point, ALL ndlp's should be gone because of the previous
	 * NLP_EVT_DEVICE_RM. Wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
2832 if (i++ > 3000) {
2833 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2834 "0233 Nodelist not empty\n");
2835 list_for_each_entry_safe(ndlp, next_ndlp,
2836 &vport->fc_nodes, nlp_listp) {
2837 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2838 LOG_NODE,
2839 "0282 did:x%x ndlp:x%px "
2840 "usgmap:x%x refcnt:%d\n",
2841 ndlp->nlp_DID, (void *)ndlp,
2842 ndlp->nlp_usg_map,
2843 kref_read(&ndlp->kref));
2844 }
2845 break;
2846 }

		/* Wait for any activity on ndlps to settle */
		msleep(10);
2850 }
2851 lpfc_cleanup_vports_rrqs(vport, NULL);
2852}
2862void
2863lpfc_stop_vport_timers(struct lpfc_vport *vport)
2864{
2865 del_timer_sync(&vport->els_tmofunc);
2866 del_timer_sync(&vport->delayed_disc_tmo);
2867 lpfc_can_disctmo(vport);
2868 return;
2869}
2878void
2879__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2880{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
2886}
2897void
2898lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2899{
2900 spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2909 spin_unlock_irq(&phba->hbalock);
2910}
2919void
2920lpfc_stop_hba_timers(struct lpfc_hba *phba)
2921{
2922 if (phba->pport)
2923 lpfc_stop_vport_timers(phba->pport);
2924 cancel_delayed_work_sync(&phba->eq_delay_work);
2925 del_timer_sync(&phba->sli.mbox_tmo);
2926 del_timer_sync(&phba->fabric_block_timer);
2927 del_timer_sync(&phba->eratt_poll);
2928 del_timer_sync(&phba->hb_tmofunc);
2929 if (phba->sli_rev == LPFC_SLI_REV4) {
2930 del_timer_sync(&phba->rrq_tmr);
2931 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2932 }
2933 phba->hb_outstanding = 0;
2934
2935 switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2943 break;
2944 default:
2945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2946 "0297 Invalid device group (x%x)\n",
2947 phba->pci_dev_grp);
2948 break;
2949 }
2950 return;
2951}
2963static void
2964lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2965{
2966 unsigned long iflag;
2967 uint8_t actcmd = MBX_HEARTBEAT;
2968 unsigned long timeout;
2969
2970 spin_lock_irqsave(&phba->hbalock, iflag);
2971 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2972 spin_unlock_irqrestore(&phba->hbalock, iflag);
2973 if (mbx_action == LPFC_MBX_NO_WAIT)
2974 return;
2975 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2976 spin_lock_irqsave(&phba->hbalock, iflag);
2977 if (phba->sli.mbox_active) {
2978 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
2984 }
2985 spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
2991 if (time_after(jiffies, timeout)) {
2992 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2993 "2813 Mgmt IO is Blocked %x "
2994 "- mbox cmd %x still active\n",
2995 phba->sli.sli_flag, actcmd);
2996 break;
2997 }
2998 }
2999}
3009void
3010lpfc_sli4_node_prep(struct lpfc_hba *phba)
3011{
3012 struct lpfc_nodelist *ndlp, *next_ndlp;
3013 struct lpfc_vport **vports;
3014 int i, rpi;
3015 unsigned long flags;
3016
3017 if (phba->sli_rev != LPFC_SLI_REV4)
3018 return;
3019
3020 vports = lpfc_create_vport_work_array(phba);
3021 if (vports == NULL)
3022 return;
3023
3024 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3025 if (vports[i]->load_flag & FC_UNLOADING)
3026 continue;
3027
3028 list_for_each_entry_safe(ndlp, next_ndlp,
3029 &vports[i]->fc_nodes,
3030 nlp_listp) {
3031 if (!NLP_CHK_NODE_ACT(ndlp))
3032 continue;
3033 rpi = lpfc_sli4_alloc_rpi(phba);
3034 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3035 spin_lock_irqsave(&phba->ndlp_lock, flags);
3036 NLP_CLR_NODE_ACT(ndlp);
3037 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3038 continue;
3039 }
3040 ndlp->nlp_rpi = rpi;
3041 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3042 LOG_NODE | LOG_DISCOVERY,
3043 "0009 Assign RPI x%x to ndlp x%px "
3044 "DID:x%06x flg:x%x map:x%x\n",
3045 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3046 ndlp->nlp_flag, ndlp->nlp_usg_map);
3047 }
3048 }
3049 lpfc_destroy_vport_work_array(phba, vports);
3050}
3059static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3060{
3061 struct lpfc_sli4_hdw_queue *qp;
3062 struct lpfc_io_buf *lpfc_ncmd;
3063 struct lpfc_io_buf *lpfc_ncmd_next;
3064 struct lpfc_epd_pool *epd_pool;
3065 unsigned long iflag;
3066
3067 epd_pool = &phba->epd_pool;
3068 qp = &phba->sli4_hba.hdwq[0];
3069
3070 spin_lock_init(&epd_pool->lock);
3071 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3072 spin_lock(&epd_pool->lock);
3073 INIT_LIST_HEAD(&epd_pool->list);
3074 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3075 &qp->lpfc_io_buf_list_put, list) {
3076 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3077 lpfc_ncmd->expedite = true;
3078 qp->put_io_bufs--;
3079 epd_pool->count++;
3080 if (epd_pool->count >= XRI_BATCH)
3081 break;
3082 }
3083 spin_unlock(&epd_pool->lock);
3084 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3085}
3094static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3095{
3096 struct lpfc_sli4_hdw_queue *qp;
3097 struct lpfc_io_buf *lpfc_ncmd;
3098 struct lpfc_io_buf *lpfc_ncmd_next;
3099 struct lpfc_epd_pool *epd_pool;
3100 unsigned long iflag;
3101
3102 epd_pool = &phba->epd_pool;
3103 qp = &phba->sli4_hba.hdwq[0];
3104
3105 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3106 spin_lock(&epd_pool->lock);
3107 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3108 &epd_pool->list, list) {
3109 list_move_tail(&lpfc_ncmd->list,
3110 &qp->lpfc_io_buf_list_put);
		/* Clear the expedite marking set by lpfc_create_expedite_pool;
		 * clearing the whole flags word here would also wipe unrelated
		 * buffer flags.
		 */
		lpfc_ncmd->expedite = false;
3112 qp->put_io_bufs++;
3113 epd_pool->count--;
3114 }
3115 spin_unlock(&epd_pool->lock);
3116 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3117}
3127void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3128{
3129 u32 i, j;
3130 u32 hwq_count;
3131 u32 count_per_hwq;
3132 struct lpfc_io_buf *lpfc_ncmd;
3133 struct lpfc_io_buf *lpfc_ncmd_next;
3134 unsigned long iflag;
3135 struct lpfc_sli4_hdw_queue *qp;
3136 struct lpfc_multixri_pool *multixri_pool;
3137 struct lpfc_pbl_pool *pbl_pool;
3138 struct lpfc_pvt_pool *pvt_pool;
3139
3140 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3141 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3142 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3143 phba->sli4_hba.io_xri_cnt);
3144
3145 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3146 lpfc_create_expedite_pool(phba);
3147
3148 hwq_count = phba->cfg_hdw_queue;
3149 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3150
3151 for (i = 0; i < hwq_count; i++) {
3152 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3153
3154 if (!multixri_pool) {
3155 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3156 "1238 Failed to allocate memory for "
3157 "multixri_pool\n");
3158
3159 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3160 lpfc_destroy_expedite_pool(phba);
3161
3162 j = 0;
3163 while (j < i) {
3164 qp = &phba->sli4_hba.hdwq[j];
3165 kfree(qp->p_multixri_pool);
3166 j++;
3167 }
3168 phba->cfg_xri_rebalancing = 0;
3169 return;
3170 }
3171
3172 qp = &phba->sli4_hba.hdwq[i];
3173 qp->p_multixri_pool = multixri_pool;
3174
3175 multixri_pool->xri_limit = count_per_hwq;
3176 multixri_pool->rrb_next_hwqid = i;

		/* Initialize public xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
3180 spin_lock_init(&pbl_pool->lock);
3181 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3182 spin_lock(&pbl_pool->lock);
3183 INIT_LIST_HEAD(&pbl_pool->list);
3184 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3185 &qp->lpfc_io_buf_list_put, list) {
3186 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3187 qp->put_io_bufs--;
3188 pbl_pool->count++;
3189 }
3190 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3191 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3192 pbl_pool->count, i);
3193 spin_unlock(&pbl_pool->lock);
3194 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		/* Initialize private xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
3198 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3199 pvt_pool->low_watermark = XRI_BATCH;
3200 spin_lock_init(&pvt_pool->lock);
3201 spin_lock_irqsave(&pvt_pool->lock, iflag);
3202 INIT_LIST_HEAD(&pvt_pool->list);
3203 pvt_pool->count = 0;
3204 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3205 }
3206}
3214static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3215{
3216 u32 i;
3217 u32 hwq_count;
3218 struct lpfc_io_buf *lpfc_ncmd;
3219 struct lpfc_io_buf *lpfc_ncmd_next;
3220 unsigned long iflag;
3221 struct lpfc_sli4_hdw_queue *qp;
3222 struct lpfc_multixri_pool *multixri_pool;
3223 struct lpfc_pbl_pool *pbl_pool;
3224 struct lpfc_pvt_pool *pvt_pool;
3225
3226 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3227 lpfc_destroy_expedite_pool(phba);
3228
3229 if (!(phba->pport->load_flag & FC_UNLOADING))
3230 lpfc_sli_flush_io_rings(phba);
3231
3232 hwq_count = phba->cfg_hdw_queue;
3233
3234 for (i = 0; i < hwq_count; i++) {
3235 qp = &phba->sli4_hba.hdwq[i];
3236 multixri_pool = qp->p_multixri_pool;
3237 if (!multixri_pool)
3238 continue;
3239
3240 qp->p_multixri_pool = NULL;
3241
3242 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
3246 spin_lock(&pbl_pool->lock);
3247
3248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3249 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3250 pbl_pool->count, i);
3251
3252 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3253 &pbl_pool->list, list) {
3254 list_move_tail(&lpfc_ncmd->list,
3255 &qp->lpfc_io_buf_list_put);
3256 qp->put_io_bufs++;
3257 pbl_pool->count--;
3258 }
3259
3260 INIT_LIST_HEAD(&pbl_pool->list);
3261 pbl_pool->count = 0;
3262
3263 spin_unlock(&pbl_pool->lock);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
3267 spin_lock(&pvt_pool->lock);
3268
3269 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3270 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3271 pvt_pool->count, i);
3272
3273 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3274 &pvt_pool->list, list) {
3275 list_move_tail(&lpfc_ncmd->list,
3276 &qp->lpfc_io_buf_list_put);
3277 qp->put_io_bufs++;
3278 pvt_pool->count--;
3279 }
3280
3281 INIT_LIST_HEAD(&pvt_pool->list);
3282 pvt_pool->count = 0;
3283
3284 spin_unlock(&pvt_pool->lock);
3285 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3286
3287 kfree(multixri_pool);
3288 }
3289}
3303int
3304lpfc_online(struct lpfc_hba *phba)
3305{
3306 struct lpfc_vport *vport;
3307 struct lpfc_vport **vports;
3308 int i, error = 0;
3309 bool vpis_cleared = false;
3310
3311 if (!phba)
3312 return 0;
3313 vport = phba->pport;
3314
3315 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3316 return 0;
3317
3318 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3319 "0458 Bring Adapter online\n");
3320
3321 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3322
3323 if (phba->sli_rev == LPFC_SLI_REV4) {
3324 if (lpfc_sli4_hba_setup(phba)) {
3325 lpfc_unblock_mgmt_io(phba);
3326 return 1;
3327 }
3328 spin_lock_irq(&phba->hbalock);
3329 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3330 vpis_cleared = true;
3331 spin_unlock_irq(&phba->hbalock);

		/* Reestablish the local initiator port.
		 * The offline process destroyed the previous lport.
		 */
3336 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3337 !phba->nvmet_support) {
3338 error = lpfc_nvme_create_localport(phba->pport);
3339 if (error)
3340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3341 "6132 NVME restore reg failed "
3342 "on nvmei error x%x\n", error);
3343 }
3344 } else {
3345 lpfc_sli_queue_init(phba);
3346 if (lpfc_sli_hba_setup(phba)) {
3347 lpfc_unblock_mgmt_io(phba);
3348 return 1;
3349 }
3350 }
3351
3352 vports = lpfc_create_vport_work_array(phba);
3353 if (vports != NULL) {
3354 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3355 struct Scsi_Host *shost;
3356 shost = lpfc_shost_from_vport(vports[i]);
3357 spin_lock_irq(shost->host_lock);
3358 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3359 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3360 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3361 if (phba->sli_rev == LPFC_SLI_REV4) {
3362 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3363 if ((vpis_cleared) &&
3364 (vports[i]->port_type !=
3365 LPFC_PHYSICAL_PORT))
3366 vports[i]->vpi = 0;
3367 }
3368 spin_unlock_irq(shost->host_lock);
3369 }
3370 }
3371 lpfc_destroy_vport_work_array(phba, vports);
3372
3373 if (phba->cfg_xri_rebalancing)
3374 lpfc_create_multixri_pools(phba);
3375
3376 lpfc_cpuhp_add(phba);
3377
3378 lpfc_unblock_mgmt_io(phba);
3379 return 0;
3380}
3393void
3394lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3395{
3396 unsigned long iflag;
3397
3398 spin_lock_irqsave(&phba->hbalock, iflag);
3399 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3400 spin_unlock_irqrestore(&phba->hbalock, iflag);
3401}
3411void
3412lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3413{
3414 struct lpfc_vport *vport = phba->pport;
3415 struct lpfc_nodelist *ndlp, *next_ndlp;
3416 struct lpfc_vport **vports;
3417 struct Scsi_Host *shost;
3418 int i;
3419
3420 if (vport->fc_flag & FC_OFFLINE_MODE)
3421 return;
3422
3423 lpfc_block_mgmt_io(phba, mbx_action);
3424
3425 lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
3429 if (vports != NULL) {
3430 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3431 if (vports[i]->load_flag & FC_UNLOADING)
3432 continue;
3433 shost = lpfc_shost_from_vport(vports[i]);
3434 spin_lock_irq(shost->host_lock);
3435 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3436 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3437 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3438 spin_unlock_irq(shost->host_lock);
3439
3440 shost = lpfc_shost_from_vport(vports[i]);
3441 list_for_each_entry_safe(ndlp, next_ndlp,
3442 &vports[i]->fc_nodes,
3443 nlp_listp) {
3444 if ((!NLP_CHK_NODE_ACT(ndlp)) ||
3445 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
					/* Driver must assume RPI is
					 * invalid for any unused or
					 * inactive node.
					 */
					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3450 continue;
3451 }
3452
3453 if (ndlp->nlp_type & NLP_FABRIC) {
3454 lpfc_disc_state_machine(vports[i], ndlp,
3455 NULL, NLP_EVT_DEVICE_RECOVERY);
3456 lpfc_disc_state_machine(vports[i], ndlp,
3457 NULL, NLP_EVT_DEVICE_RM);
3458 }
3459 spin_lock_irq(shost->host_lock);
3460 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3461 spin_unlock_irq(shost->host_lock);

				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4) {
3468 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3469 LOG_NODE | LOG_DISCOVERY,
3470 "0011 Free RPI x%x on "
3471 "ndlp:x%px did x%x "
3472 "usgmap:x%x\n",
3473 ndlp->nlp_rpi, ndlp,
3474 ndlp->nlp_DID,
3475 ndlp->nlp_usg_map);
3476 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3477 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3478 }
3479 lpfc_unreg_rpi(vports[i], ndlp);
3480 }
3481 }
3482 }
3483 lpfc_destroy_vport_work_array(phba, vports);
3484
3485 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3486
3487 if (phba->wq)
3488 flush_workqueue(phba->wq);
3489}
3499void
3500lpfc_offline(struct lpfc_hba *phba)
3501{
3502 struct Scsi_Host *shost;
3503 struct lpfc_vport **vports;
3504 int i;
3505
3506 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3507 return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);
3511
	/* Tear down the local and target port registrations. The
	 * nvme transports need to cleanup.
	 */
3515 lpfc_nvmet_destroy_targetport(phba);
3516 lpfc_nvme_destroy_localport(phba->pport);
3517
3518 vports = lpfc_create_vport_work_array(phba);
3519 if (vports != NULL)
3520 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3521 lpfc_stop_vport_timers(vports[i]);
3522 lpfc_destroy_vport_work_array(phba, vports);
3523 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3524 "0460 Bring Adapter offline\n");

	/* Bring down the SLI Layer and cleanup. The HBA is offline now. */
	lpfc_sli_hba_down(phba);
3528 spin_lock_irq(&phba->hbalock);
3529 phba->work_ha = 0;
3530 spin_unlock_irq(&phba->hbalock);
3531 vports = lpfc_create_vport_work_array(phba);
3532 if (vports != NULL)
3533 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3534 shost = lpfc_shost_from_vport(vports[i]);
3535 spin_lock_irq(shost->host_lock);
3536 vports[i]->work_port_events = 0;
3537 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3538 spin_unlock_irq(shost->host_lock);
3539 }
3540 lpfc_destroy_vport_work_array(phba, vports);
3541 __lpfc_cpuhp_remove(phba);
3542
3543 if (phba->cfg_xri_rebalancing)
3544 lpfc_destroy_multixri_pools(phba);
3545}
3555static void
3556lpfc_scsi_free(struct lpfc_hba *phba)
3557{
3558 struct lpfc_io_buf *sb, *sb_next;
3559
3560 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3561 return;
3562
3563 spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host */
	spin_lock(&phba->scsi_buf_list_put_lock);
3568 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3569 list) {
3570 list_del(&sb->list);
3571 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3572 sb->dma_handle);
3573 kfree(sb);
3574 phba->total_scsi_bufs--;
3575 }
3576 spin_unlock(&phba->scsi_buf_list_put_lock);
3577
3578 spin_lock(&phba->scsi_buf_list_get_lock);
3579 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3580 list) {
3581 list_del(&sb->list);
3582 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3583 sb->dma_handle);
3584 kfree(sb);
3585 phba->total_scsi_bufs--;
3586 }
3587 spin_unlock(&phba->scsi_buf_list_get_lock);
3588 spin_unlock_irq(&phba->hbalock);
3589}
3599void
3600lpfc_io_free(struct lpfc_hba *phba)
3601{
3602 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3603 struct lpfc_sli4_hdw_queue *qp;
3604 int idx;
3605
3606 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3607 qp = &phba->sli4_hba.hdwq[idx];
3608
3609 spin_lock(&qp->io_buf_list_put_lock);
3610 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3611 &qp->lpfc_io_buf_list_put,
3612 list) {
3613 list_del(&lpfc_ncmd->list);
3614 qp->put_io_bufs--;
3615 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3616 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3617 if (phba->cfg_xpsgl && !phba->nvmet_support)
3618 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3619 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3620 kfree(lpfc_ncmd);
3621 qp->total_io_bufs--;
3622 }
3623 spin_unlock(&qp->io_buf_list_put_lock);
3624
3625 spin_lock(&qp->io_buf_list_get_lock);
3626 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3627 &qp->lpfc_io_buf_list_get,
3628 list) {
3629 list_del(&lpfc_ncmd->list);
3630 qp->get_io_bufs--;
3631 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3632 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3633 if (phba->cfg_xpsgl && !phba->nvmet_support)
3634 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3635 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3636 kfree(lpfc_ncmd);
3637 qp->total_io_bufs--;
3638 }
3639 spin_unlock(&qp->io_buf_list_get_lock);
3640 }
3641}
3655int
3656lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3657{
3658 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3659 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3660 LIST_HEAD(els_sgl_list);
3661 int rc;
3662
	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3667
3668 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3669
3670 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3671 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3672 "3157 ELS xri-sgl count increased from "
3673 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3674 els_xri_cnt);
3675
3676 for (i = 0; i < xri_cnt; i++) {
3677 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3678 GFP_KERNEL);
3679 if (sglq_entry == NULL) {
3680 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3681 "2562 Failure to allocate an "
3682 "ELS sgl entry:%d\n", i);
3683 rc = -ENOMEM;
3684 goto out_free_mem;
3685 }
3686 sglq_entry->buff_type = GEN_BUFF_TYPE;
3687 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3688 &sglq_entry->phys);
3689 if (sglq_entry->virt == NULL) {
3690 kfree(sglq_entry);
3691 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3692 "2563 Failure to allocate an "
3693 "ELS mbuf:%d\n", i);
3694 rc = -ENOMEM;
3695 goto out_free_mem;
3696 }
3697 sglq_entry->sgl = sglq_entry->virt;
3698 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3699 sglq_entry->state = SGL_FREED;
3700 list_add_tail(&sglq_entry->list, &els_sgl_list);
3701 }
3702 spin_lock_irq(&phba->hbalock);
3703 spin_lock(&phba->sli4_hba.sgl_list_lock);
3704 list_splice_init(&els_sgl_list,
3705 &phba->sli4_hba.lpfc_els_sgl_list);
3706 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3707 spin_unlock_irq(&phba->hbalock);
3708 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3709
3710 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3711 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3712 "3158 ELS xri-sgl count decreased from "
3713 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3714 els_xri_cnt);
3715 spin_lock_irq(&phba->hbalock);
3716 spin_lock(&phba->sli4_hba.sgl_list_lock);
3717 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3718 &els_sgl_list);
3719
3720 for (i = 0; i < xri_cnt; i++) {
3721 list_remove_head(&els_sgl_list,
3722 sglq_entry, struct lpfc_sglq, list);
3723 if (sglq_entry) {
3724 __lpfc_mbuf_free(phba, sglq_entry->virt,
3725 sglq_entry->phys);
3726 kfree(sglq_entry);
3727 }
3728 }
3729 list_splice_init(&els_sgl_list,
3730 &phba->sli4_hba.lpfc_els_sgl_list);
3731 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3732 spin_unlock_irq(&phba->hbalock);
3733 } else
3734 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3735 "3163 ELS xri-sgl count unchanged: %d\n",
3736 els_xri_cnt);
3737 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3738
	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
3742 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3743 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3744 lxri = lpfc_sli4_next_xritag(phba);
3745 if (lxri == NO_XRI) {
3746 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3747 "2400 Failed to allocate xri for "
3748 "ELS sgl\n");
3749 rc = -ENOMEM;
3750 goto out_free_mem;
3751 }
3752 sglq_entry->sli4_lxritag = lxri;
3753 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3754 }
3755 return 0;
3756
3757out_free_mem:
3758 lpfc_free_els_sgl_list(phba);
3759 return rc;
3760}
3774int
3775lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3776{
3777 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3778 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3779 uint16_t nvmet_xri_cnt;
3780 LIST_HEAD(nvmet_sgl_list);
3781 int rc;
3782
	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3787
	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3790 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3791
3792 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3793 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3794 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3795 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3796
3797 for (i = 0; i < xri_cnt; i++) {
3798 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3799 GFP_KERNEL);
3800 if (sglq_entry == NULL) {
3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3802 "6303 Failure to allocate an "
3803 "NVMET sgl entry:%d\n", i);
3804 rc = -ENOMEM;
3805 goto out_free_mem;
3806 }
3807 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3808 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3809 &sglq_entry->phys);
3810 if (sglq_entry->virt == NULL) {
3811 kfree(sglq_entry);
3812 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3813 "6304 Failure to allocate an "
3814 "NVMET buf:%d\n", i);
3815 rc = -ENOMEM;
3816 goto out_free_mem;
3817 }
3818 sglq_entry->sgl = sglq_entry->virt;
3819 memset(sglq_entry->sgl, 0,
3820 phba->cfg_sg_dma_buf_size);
3821 sglq_entry->state = SGL_FREED;
3822 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3823 }
3824 spin_lock_irq(&phba->hbalock);
3825 spin_lock(&phba->sli4_hba.sgl_list_lock);
3826 list_splice_init(&nvmet_sgl_list,
3827 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3828 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3829 spin_unlock_irq(&phba->hbalock);
3830 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3831
3832 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3833 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3834 "6305 NVMET xri-sgl count decreased from "
3835 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3836 nvmet_xri_cnt);
3837 spin_lock_irq(&phba->hbalock);
3838 spin_lock(&phba->sli4_hba.sgl_list_lock);
3839 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3840 &nvmet_sgl_list);
3841
3842 for (i = 0; i < xri_cnt; i++) {
3843 list_remove_head(&nvmet_sgl_list,
3844 sglq_entry, struct lpfc_sglq, list);
3845 if (sglq_entry) {
3846 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3847 sglq_entry->phys);
3848 kfree(sglq_entry);
3849 }
3850 }
3851 list_splice_init(&nvmet_sgl_list,
3852 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3853 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3854 spin_unlock_irq(&phba->hbalock);
3855 } else
3856 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3857 "6306 NVMET xri-sgl count unchanged: %d\n",
3858 nvmet_xri_cnt);
3859 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3860
	/* update xris to nvmet sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
3864 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3865 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3866 lxri = lpfc_sli4_next_xritag(phba);
3867 if (lxri == NO_XRI) {
3868 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3869 "6307 Failed to allocate xri for "
3870 "NVMET sgl\n");
3871 rc = -ENOMEM;
3872 goto out_free_mem;
3873 }
3874 sglq_entry->sli4_lxritag = lxri;
3875 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3876 }
3877 return 0;
3878
3879out_free_mem:
3880 lpfc_free_nvmet_sgl_list(phba);
3881 return rc;
3882}
3884int
3885lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3886{
3887 LIST_HEAD(blist);
3888 struct lpfc_sli4_hdw_queue *qp;
3889 struct lpfc_io_buf *lpfc_cmd;
3890 struct lpfc_io_buf *iobufp, *prev_iobufp;
3891 int idx, cnt, xri, inserted;
3892
3893 cnt = 0;
3894 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3895 qp = &phba->sli4_hba.hdwq[idx];
3896 spin_lock_irq(&qp->io_buf_list_get_lock);
3897 spin_lock(&qp->io_buf_list_put_lock);

		/* Take everything off the get and put lists */
		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
3901 list_splice(&qp->lpfc_io_buf_list_put, &blist);
3902 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
3903 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
3904 cnt += qp->get_io_bufs + qp->put_io_bufs;
3905 qp->get_io_bufs = 0;
3906 qp->put_io_bufs = 0;
3907 qp->total_io_bufs = 0;
3908 spin_unlock(&qp->io_buf_list_put_lock);
3909 spin_unlock_irq(&qp->io_buf_list_get_lock);
3910 }

	/*
	 * Take IO buffers off blist and put on cbuf sorted by XRI.
	 * This is because POST_SGL takes a sequential range of XRIs
	 * to post to the firmware.
	 */
	for (idx = 0; idx < cnt; idx++) {
3918 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
3919 if (!lpfc_cmd)
3920 return cnt;
3921 if (idx == 0) {
3922 list_add_tail(&lpfc_cmd->list, cbuf);
3923 continue;
3924 }
3925 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
3926 inserted = 0;
3927 prev_iobufp = NULL;
3928 list_for_each_entry(iobufp, cbuf, list) {
3929 if (xri < iobufp->cur_iocbq.sli4_xritag) {
3930 if (prev_iobufp)
3931 list_add(&lpfc_cmd->list,
3932 &prev_iobufp->list);
3933 else
3934 list_add(&lpfc_cmd->list, cbuf);
3935 inserted = 1;
3936 break;
3937 }
3938 prev_iobufp = iobufp;
3939 }
3940 if (!inserted)
3941 list_add_tail(&lpfc_cmd->list, cbuf);
3942 }
3943 return cnt;
3944}
3946int
3947lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
3948{
3949 struct lpfc_sli4_hdw_queue *qp;
3950 struct lpfc_io_buf *lpfc_cmd;
3951 int idx, cnt;
3952
3953 qp = phba->sli4_hba.hdwq;
3954 cnt = 0;
3955 while (!list_empty(cbuf)) {
3956 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3957 list_remove_head(cbuf, lpfc_cmd,
3958 struct lpfc_io_buf, list);
3959 if (!lpfc_cmd)
3960 return cnt;
3961 cnt++;
3962 qp = &phba->sli4_hba.hdwq[idx];
3963 lpfc_cmd->hdwq_no = idx;
3964 lpfc_cmd->hdwq = qp;
3965 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
3966 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
3967 spin_lock(&qp->io_buf_list_put_lock);
3968 list_add_tail(&lpfc_cmd->list,
3969 &qp->lpfc_io_buf_list_put);
3970 qp->put_io_bufs++;
3971 qp->total_io_bufs++;
3972 spin_unlock(&qp->io_buf_list_put_lock);
3973 }
3974 }
3975 return cnt;
3976}
3990int
3991lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
3992{
3993 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
3994 uint16_t i, lxri, els_xri_cnt;
3995 uint16_t io_xri_cnt, io_xri_max;
3996 LIST_HEAD(io_sgl_list);
3997 int rc, cnt;
3998
	/*
	 * update on pci function's allocated xri-sgl list
	 */

	/* maximum number of xris available for io buffers */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4005 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4006 phba->sli4_hba.io_xri_max = io_xri_max;
4007
4008 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4009 "6074 Current allocated XRI sgl count:%d, "
4010 "maximum XRI count:%d\n",
4011 phba->sli4_hba.io_xri_cnt,
4012 phba->sli4_hba.io_xri_max);
4013
4014 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4015
4016 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4017
4018 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4019 phba->sli4_hba.io_xri_max;
4020
4021 for (i = 0; i < io_xri_cnt; i++) {
4022 list_remove_head(&io_sgl_list, lpfc_ncmd,
4023 struct lpfc_io_buf, list);
4024 if (lpfc_ncmd) {
4025 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4026 lpfc_ncmd->data,
4027 lpfc_ncmd->dma_handle);
4028 kfree(lpfc_ncmd);
4029 }
4030 }
4031 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4032 }
4033
	/* update xris associated to remaining allocated io buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	phba->sli4_hba.io_xri_cnt = cnt;
4038 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4039 &io_sgl_list, list) {
4040 lxri = lpfc_sli4_next_xritag(phba);
4041 if (lxri == NO_XRI) {
4042 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4043 "6075 Failed to allocate xri for "
4044 "nvme buffer\n");
4045 rc = -ENOMEM;
4046 goto out_free_mem;
4047 }
4048 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4049 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4050 }
4051 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4052 return 0;
4053
4054out_free_mem:
4055 lpfc_io_free(phba);
4056 return rc;
4057}
4073int
4074lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4075{
4076 struct lpfc_io_buf *lpfc_ncmd;
4077 struct lpfc_iocbq *pwqeq;
4078 uint16_t iotag, lxri = 0;
4079 int bcnt, num_posted;
4080 LIST_HEAD(prep_nblist);
4081 LIST_HEAD(post_nblist);
4082 LIST_HEAD(nvme_nblist);
4083
4084 phba->sli4_hba.io_xri_cnt = 0;
4085 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4086 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4087 if (!lpfc_ncmd)
4088 break;

		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4095 GFP_KERNEL,
4096 &lpfc_ncmd->dma_handle);
4097 if (!lpfc_ncmd->data) {
4098 kfree(lpfc_ncmd);
4099 break;
4100 }
4101
4102 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4103 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4104 } else {
			/*
			 * 4K Page alignment is CRITICAL to BlockGuard, double
			 * check to be sure.
			 */
			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4110 (((unsigned long)(lpfc_ncmd->data) &
4111 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4112 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4113 "3369 Memory alignment err: "
4114 "addr=%lx\n",
4115 (unsigned long)lpfc_ncmd->data);
4116 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4117 lpfc_ncmd->data,
4118 lpfc_ncmd->dma_handle);
4119 kfree(lpfc_ncmd);
4120 break;
4121 }
4122 }
4123
4124 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4125
4126 lxri = lpfc_sli4_next_xritag(phba);
4127 if (lxri == NO_XRI) {
4128 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4129 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4130 kfree(lpfc_ncmd);
4131 break;
4132 }
4133 pwqeq = &lpfc_ncmd->cur_iocbq;

		/* Allocate iotag for lpfc_ncmd->cur_iocbq */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
4137 if (iotag == 0) {
4138 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4139 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4140 kfree(lpfc_ncmd);
4141 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
4142 "6121 Failed to allocate IOTAG for"
4143 " XRI:0x%x\n", lxri);
4144 lpfc_sli4_free_xri(phba, lxri);
4145 break;
4146 }
4147 pwqeq->sli4_lxritag = lxri;
4148 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4149 pwqeq->context1 = lpfc_ncmd;

		/* Initialize local short-hand pointers. */
		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4153 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4154 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4155 spin_lock_init(&lpfc_ncmd->buf_lock);

		/* add the io buffer to a post list */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
4159 phba->sli4_hba.io_xri_cnt++;
4160 }
4161 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4162 "6114 Allocate %d out of %d requested new NVME "
4163 "buffers\n", bcnt, num_to_alloc);

	/* post the list of io buffer sgls to port if available */
	if (!list_empty(&post_nblist))
4167 num_posted = lpfc_sli4_post_io_sgl_list(
4168 phba, &post_nblist, bcnt);
4169 else
4170 num_posted = 0;
4171
4172 return num_posted;
4173}
4175static uint64_t
4176lpfc_get_wwpn(struct lpfc_hba *phba)
4177{
4178 uint64_t wwn;
4179 int rc;
4180 LPFC_MBOXQ_t *mboxq;
4181 MAILBOX_t *mb;
4182
4183 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4184 GFP_KERNEL);
4185 if (!mboxq)
4186 return (uint64_t)-1;
4187
	/* First get WWN of HBA instance */
	lpfc_read_nv(phba, mboxq);
4190 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4191 if (rc != MBX_SUCCESS) {
4192 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4193 "6019 Mailbox failed , mbxCmd x%x "
4194 "READ_NV, mbxStatus x%x\n",
4195 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4196 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4197 mempool_free(mboxq, phba->mbox_mem_pool);
4198 return (uint64_t) -1;
4199 }
4200 mb = &mboxq->u.mb;
4201 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4202
4203 mempool_free(mboxq, phba->mbox_mem_pool);
4204 if (phba->sli_rev == LPFC_SLI_REV4)
4205 return be64_to_cpu(wwn);
4206 else
4207 return rol64(wwn, 32);
4208}
4226struct lpfc_vport *
4227lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4228{
4229 struct lpfc_vport *vport;
4230 struct Scsi_Host *shost = NULL;
4231 struct scsi_host_template *template;
4232 int error = 0;
4233 int i;
	uint64_t wwn = 0;
4235 bool use_no_reset_hba = false;
4236 int rc;
4237
4238 if (lpfc_no_hba_reset_cnt) {
4239 if (phba->sli_rev < LPFC_SLI_REV4 &&
4240 dev == &phba->pcidev->dev) {
			/* Reset the port first */
			lpfc_sli_brdrestart(phba);
4243 rc = lpfc_sli_chipset_init(phba);
4244 if (rc)
4245 return NULL;
4246 }
4247 wwn = lpfc_get_wwpn(phba);
4248 }
4249
4250 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4251 if (wwn == lpfc_no_hba_reset[i]) {
4252 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4253 "6020 Setting use_no_reset port=%llx\n",
4254 wwn);
4255 use_no_reset_hba = true;
4256 break;
4257 }
4258 }
4259
	/* Seed template for SCSI host registration */
	if (dev == &phba->pcidev->dev) {
4262 template = &phba->port_template;
4263
4264 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
			/* Seed physical port template */
			memcpy(template, &lpfc_template, sizeof(*template));
4267
4268 if (use_no_reset_hba) {
				/* template is for a no reset SCSI Host */
				template->max_sectors = 0xffff;
4271 template->eh_host_reset_handler = NULL;
4272 }

			/* Template for all vports this physical port creates */
			memcpy(&phba->vport_template, &lpfc_template,
4276 sizeof(*template));
4277 phba->vport_template.max_sectors = 0xffff;
4278 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4279 phba->vport_template.eh_bus_reset_handler = NULL;
4280 phba->vport_template.eh_host_reset_handler = NULL;
4281 phba->vport_template.vendor_id = 0;

			/* Initialize the host templates with updated values */
			if (phba->sli_rev == LPFC_SLI_REV4) {
4285 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4286 phba->vport_template.sg_tablesize =
4287 phba->cfg_scsi_seg_cnt;
4288 } else {
4289 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4290 phba->vport_template.sg_tablesize =
4291 phba->cfg_sg_seg_cnt;
4292 }
4293
4294 } else {
			/* NVMET is for physical port only */
			memcpy(template, &lpfc_template_nvme,
4297 sizeof(*template));
4298 }
4299 } else {
4300 template = &phba->vport_template;
4301 }
4302
4303 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4304 if (!shost)
4305 goto out;
4306
4307 vport = (struct lpfc_vport *) shost->hostdata;
4308 vport->phba = phba;
4309 vport->load_flag |= FC_LOADING;
4310 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4311 vport->fc_rscn_flush = 0;
4312 lpfc_get_vport_cfgparam(vport);
4313
	/* Adjust value in vport */
	vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4316
4317 shost->unique_id = instance;
4318 shost->max_id = LPFC_MAX_TARGET;
4319 shost->max_lun = vport->cfg_max_luns;
4320 shost->this_id = -1;
4321 shost->max_cmd_len = 16;
4322
4323 if (phba->sli_rev == LPFC_SLI_REV4) {
4324 if (!phba->cfg_fcp_mq_threshold ||
4325 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4326 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4327
4328 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4329 phba->cfg_fcp_mq_threshold);
4330
4331 shost->dma_boundary =
4332 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4333
4334 if (phba->cfg_xpsgl && !phba->nvmet_support)
4335 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4336 else
4337 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
	} else
		/* SLI-3 has a limited number of hardware queues (3),
		 * thus there is only one for FCP processing
		 */
		shost->nr_hw_queues = 1;
4343
	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4350 if (dev != &phba->pcidev->dev) {
4351 shost->transportt = lpfc_vport_transport_template;
4352 vport->port_type = LPFC_NPIV_PORT;
4353 } else {
4354 shost->transportt = lpfc_transport_template;
4355 vport->port_type = LPFC_PHYSICAL_PORT;
4356 }
4357
4358 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4359 "9081 CreatePort TMPLATE type %x TBLsize %d "
4360 "SEGcnt %d/%d\n",
4361 vport->port_type, shost->sg_tablesize,
4362 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4363
	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
4366 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4367 spin_lock_init(&vport->work_port_lock);
4368
4369 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4370
4371 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4372
4373 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4374
4375 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4376 lpfc_setup_bg(phba, shost);
4377
4378 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4379 if (error)
4380 goto out_put_shost;
4381
4382 spin_lock_irq(&phba->port_list_lock);
4383 list_add_tail(&vport->listentry, &phba->port_list);
4384 spin_unlock_irq(&phba->port_list_lock);
4385 return vport;
4386
4387out_put_shost:
4388 scsi_host_put(shost);
4389out:
4390 return NULL;
4391}
4400void
4401destroy_port(struct lpfc_vport *vport)
4402{
4403 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4404 struct lpfc_hba *phba = vport->phba;
4405
4406 lpfc_debugfs_terminate(vport);
4407 fc_remove_host(shost);
4408 scsi_remove_host(shost);
4409
4410 spin_lock_irq(&phba->port_list_lock);
4411 list_del_init(&vport->listentry);
4412 spin_unlock_irq(&phba->port_list_lock);
4413
4414 lpfc_cleanup(vport);
4415 return;
4416}
4428int
4429lpfc_get_instance(void)
4430{
4431 int ret;
4432
4433 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4434 return ret < 0 ? -1 : ret;
4435}
4452int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4453{
4454 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4455 struct lpfc_hba *phba = vport->phba;
4456 int stat = 0;
4457
4458 spin_lock_irq(shost->host_lock);
4459
4460 if (vport->load_flag & FC_UNLOADING) {
4461 stat = 1;
4462 goto finished;
4463 }
4464 if (time >= msecs_to_jiffies(30 * 1000)) {
4465 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4466 "0461 Scanning longer than 30 "
4467 "seconds. Continuing initialization\n");
4468 stat = 1;
4469 goto finished;
4470 }
4471 if (time >= msecs_to_jiffies(15 * 1000) &&
4472 phba->link_state <= LPFC_LINK_DOWN) {
4473 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4474 "0465 Link down longer than 15 "
4475 "seconds. Continuing initialization\n");
4476 stat = 1;
4477 goto finished;
4478 }
4479
4480 if (vport->port_state != LPFC_VPORT_READY)
4481 goto finished;
4482 if (vport->num_disc_nodes || vport->fc_prli_sent)
4483 goto finished;
4484 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4485 goto finished;
4486 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4487 goto finished;
4488
4489 stat = 1;
4490
4491finished:
4492 spin_unlock_irq(shost->host_lock);
4493 return stat;
4494}
4496static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4497{
4498 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4499 struct lpfc_hba *phba = vport->phba;
4500
4501 fc_host_supported_speeds(shost) = 0;
4502 if (phba->lmt & LMT_128Gb)
4503 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4504 if (phba->lmt & LMT_64Gb)
4505 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4506 if (phba->lmt & LMT_32Gb)
4507 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4508 if (phba->lmt & LMT_16Gb)
4509 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4510 if (phba->lmt & LMT_10Gb)
4511 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4512 if (phba->lmt & LMT_8Gb)
4513 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4514 if (phba->lmt & LMT_4Gb)
4515 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4516 if (phba->lmt & LMT_2Gb)
4517 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4518 if (phba->lmt & LMT_1Gb)
4519 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4520}
4529void lpfc_host_attrib_init(struct Scsi_Host *shost)
4530{
4531 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4532 struct lpfc_hba *phba = vport->phba;
4533
4534
4535
4536
4537 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4538 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4539 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4540
4541 memset(fc_host_supported_fc4s(shost), 0,
4542 sizeof(fc_host_supported_fc4s(shost)));
4543 fc_host_supported_fc4s(shost)[2] = 1;
4544 fc_host_supported_fc4s(shost)[7] = 1;
4545
4546 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4547 sizeof fc_host_symbolic_name(shost));
4548
4549 lpfc_host_supported_speeds_set(shost);
4550
4551 fc_host_maxframe_size(shost) =
4552 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4553 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4554
4555 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4556
4557
4558 memset(fc_host_active_fc4s(shost), 0,
4559 sizeof(fc_host_active_fc4s(shost)));
4560 fc_host_active_fc4s(shost)[2] = 1;
4561 fc_host_active_fc4s(shost)[7] = 1;
4562
4563 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4564 spin_lock_irq(shost->host_lock);
4565 vport->load_flag &= ~FC_LOADING;
4566 spin_unlock_irq(shost->host_lock);
4567}
4577static void
4578lpfc_stop_port_s3(struct lpfc_hba *phba)
4579{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
4589 phba->pport->work_port_events = 0;
4590}
4600static void
4601lpfc_stop_port_s4(struct lpfc_hba *phba)
4602{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
4605 if (phba->pport)
4606 phba->pport->work_port_events = 0;
4607 phba->sli4_hba.intr_enable = 0;
4608}
4617void
4618lpfc_stop_port(struct lpfc_hba *phba)
4619{
4620 phba->lpfc_stop_port(phba);
4621
4622 if (phba->wq)
4623 flush_workqueue(phba->wq);
4624}
4632void
4633lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4634{
4635 unsigned long fcf_redisc_wait_tmo =
4636 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4637
4638 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4639 spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4644 spin_unlock_irq(&phba->hbalock);
4645}
4657static void
4658lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4659{
4660 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
4664 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4665 spin_unlock_irq(&phba->hbalock);
4666 return;
4667 }
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4672 spin_unlock_irq(&phba->hbalock);
4673 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4674 "2776 FCF rediscover quiescent timer expired\n");

	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
4677}
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
	case LPFC_ASYNC_LINK_FAULT_LR_LRR:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Unknown link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		break;
	}
}

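/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/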
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

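/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/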
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
				phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

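/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the giving SLI4 async event link speed code into
 * value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
 **/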
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		case LPFC_FC_LA_SPEED_32G:
			port_speed = 32000;
			break;
		case LPFC_FC_LA_SPEED_64G:
			port_speed = 64000;
			break;
		case LPFC_FC_LA_SPEED_128G:
			port_speed = 128000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}

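/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/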
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.duplex,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

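/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the giving SLI4 async event link speed code into
 * LPFC_LINK_SPEED_xxx READ_TOPOLOGY link speed values.
 *
 * Return: link speed in terms of the READ_TOPOLOGY macro values.
 **/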
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}

	return port_speed;
}

#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx report an error */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

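/**
 * lpfc_update_trunk_link_status - Update the trunk link status from an
 * asynchronous FC trunking event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine records the per-port trunk link state and any per-port fault
 * reported by an asynchronous FC trunking event, then logs the result.
 **/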
static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert link speed to READ_TOPOLOGY coding */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
				phba,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI-4: We have only 0xA error codes
				 * defined as of now. print an appropriate
				 * message in case driver needs to be updated.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}

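/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/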
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
		lpfc_update_trunk_link_status(phba, acqe_fc);
		return;
	}

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_LINK_DOWN)
		phba->sli4_hba.link_state.logical_speed = 0;
	else if (!phba->sli4_hba.conf_trunk)
		phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
			phba->link_flag |= LS_MDS_LOOPBACK;
			break;
		default:
			break;
		}

		/* Initialize completion status */
		mb = &pmb->u.mb;
		mb->mbxStatus = MBX_SUCCESS;

		/* Parse port fault information field */
		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;

		if (phba->sli4_hba.link_state.status ==
		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
		} else {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_LINK_DOWN);
		}
		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

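/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/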
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int rc, i;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Type:%d, Event Data: x%08x "
			"x%08x x%08x x%08x\n", evt_type,
			acqe_sli->event_data1, acqe_sli->event_data2,
			acqe_sli->reserved, acqe_sli->trailer);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* no port name recorded yet */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
			     i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
		/*
		 * Misconfigured WWN. Reports that the SLI Port is configured
		 * to use FA-WWN, but the attached device doesn't support it.
		 * No driver action is required.
		 */
		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
			     "2699 Misconfigured FA-WWN - Attached device does "
			     "not support FA-WWN\n");
		break;
	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
		/* EEPROM failure. No driver action is required */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2518 EEPROM failure - "
				"Event Data1: x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Unrecognized SLI event, type: 0x%x",
				evt_type);
		break;
	}
}

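/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link (CVL) on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/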
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

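/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/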
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

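/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
 **/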
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index,
				acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used FCF do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index,
				acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
			     i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
				    (vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

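/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/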
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

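/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/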
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}

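/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/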
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

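/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/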
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

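/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/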
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

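/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/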
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

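/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/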
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}

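/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/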
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}

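/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online.
 **/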
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

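/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine reads the PCI SR-IOV extended capability to determine the
 * number of virtual functions the device supports.
 *
 * Return: number of virtual functions, or 0 if SR-IOV is not supported.
 **/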
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

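/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This routine requests the PCI layer to enable @nr_vfn SR-IOV virtual
 * functions on the physical function, after validating the request against
 * the number of virtual functions the device actually supports.
 *
 * Return: 0 on success, -EINVAL if the request exceeds the supported count,
 * or the error returned by pci_enable_sriov().
 **/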
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

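/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources common to
 * all the SLI revisions: locks, lists, wait queues, and timers.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/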
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	return 0;
}

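/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/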
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each separate xfer.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	phba->lpfc_sg_dma_buf_pool =
		dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;

	phba->lpfc_cmd_rsp_buf_pool =
		dma_pool_create("lpfc_cmd_rsp_buf_pool",
				&phba->pcidev->dev,
				sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp),
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
}

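/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/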
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

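/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/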
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;
	int rc, i, max_buf_size;
	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
	struct lpfc_mqe *mqe;
	int longs;
	int extra;
	uint64_t wwn;
	u32 if_type;
	u32 if_fam;

	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	/* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/* Allocate all driver workqueues here */

	/* The lpfc_wq workqueue for deferred irq use */
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);

	/*
	 * Initialize timers used by driver
	 */
	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);

	/* FCF rediscover timer */
	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each EQ/CQ/WQ tuple.
	 * The WQ create will allocate the ring.
	 */
	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/* Initialize the abort I/O buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Initialize the NVME-T abort and I/O wait lists */
		spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
		spin_lock_init(&phba->sli4_hba.t_active_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
	}

	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize mboxq lists. If the early init routines fail
	 * these lists need to be correctly initialized.
	 */
	INIT_LIST_HEAD(&phba->sli.mboxq);
	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);

	/* initialize optic_state to 0xFF */
	phba->sli4_hba.lnk_info.optic_state = 0xff;

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc)) {
			rc = -ENODEV;
			goto out_free_mem;
		}
		phba->temp_sensor_support = 1;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;
	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Check for NVMET being configured */
	phba->nvmet_support = 0;
	if (lpfc_enable_nvmet_cnt) {

		/* First get WWN of HBA instance */
		lpfc_read_nv(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6016 Mailbox failed , mbxCmd x%x "
					"READ_NV, mbxStatus x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
		mb = &mboxq->u.mb;
		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(uint64_t));
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwnn.u.name = wwn;
		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
		       sizeof(uint64_t));
		/* wwn is WWPN of HBA instance */
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwpn.u.name = wwn;

		/* Check to see if it matches any module parameter */
		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
			if (wwn == lpfc_enable_nvmet[i]) {
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
				if (lpfc_nvmet_mem_alloc(phba))
					break;

				phba->nvmet_support = 1; /* a match */

				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6017 NVME Target %016llx\n",
						wwn);
#else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6021 Can't enable NVME Target."
						" NVME_TARGET_FC infrastructure"
						" is not in kernel\n");
#endif
				/* Not supported for NVMET */
				phba->cfg_xri_rebalancing = 0;
				if (phba->irq_chann_mode == NHT_MODE) {
					phba->cfg_irq_chann =
						phba->sli4_hba.num_present_cpu;
					phba->cfg_hdw_queue =
						phba->sli4_hba.num_present_cpu;
					phba->irq_chann_mode = NORMAL_MODE;
				}
				break;
			}
		}
	}

	lpfc_nvme_mod_param_dep(phba);

	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port */
	lpfc_supported_pages(mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (!rc) {
		mqe = &mboxq->u.mqe;
		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
		       LPFC_MAX_SUPPORTED_PAGES);
		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
			switch (pn_page[i]) {
			case LPFC_SLI4_PARAMETERS:
				phba->sli4_hba.pc_sli4_params.supported = 1;
				break;
			default:
				break;
			}
		}
		/* Read the port's SLI4 Parameters capabilities if supported */
		if (phba->sli4_hba.pc_sli4_params.supported)
			rc = lpfc_pc_sli4_params_get(phba, mboxq);
		if (rc) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters
	 * come back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if_fam = bf_get(lpfc_sli_intf_sli_family,
				&phba->sli4_hba.sli_intf);
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2999 Unsupported SLI4 Parameters "
					"Extents and RPI headers enabled.\n");
			if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
			    if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
				mempool_free(mboxq, phba->mbox_mem_pool);
				rc = -EIO;
				goto out_free_bsmbx;
			}
		}
		if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
		      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * 1 for cmd, 1 for rsp, NVME adds an extra one
	 * for boundary conditions in its max_sgl_segment template.
	 */
	extra = 2;
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		extra++;

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);

	/*
	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be calculated.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		/*
		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE. Since we have no control
		 * over how many protection segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt
		 * to minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		/*
		 * If supporting DIF, reduce the seg count for scsi to
		 * allow room for the DIF sges.
		 */
		if (phba->cfg_enable_bg &&
		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
		else
			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;

	} else {
		/*
		 * The scsi_buf for a regular I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE for each data segment.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) +
				((phba->cfg_sg_seg_cnt + extra) *
				sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
		phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;

		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
		 * need to post 1 page for the SGL.
		 */
	}

	if (phba->cfg_xpsgl && !phba->nvmet_support)
		phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
	else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
				SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	phba->border_sge_num = phba->cfg_sg_dma_buf_size /
			       sizeof(struct sli4_sge);

	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6300 Reducing NVME sg segment "
					"cnt to %d\n",
					LPFC_MAX_NVME_SEG_CNT);
			phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
		} else
			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_seg_cnt:%d dmabuf_size:%d "
			"total:%d scsi:%d nvme:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
			phba->cfg_nvme_seg_cnt);

	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
		i = phba->cfg_sg_dma_buf_size;
	else
		i = SLI4_PAGE_SIZE;

	phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					i, 0);
	if (!phba->lpfc_sg_dma_buf_pool)
		goto out_free_bsmbx;

	phba->lpfc_cmd_rsp_buf_pool =
			dma_pool_create("lpfc_cmd_rsp_buf_pool",
					&phba->pcidev->dev,
					sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp),
					i, 0);
	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto out_free_sg_dma_buf;

	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);

	/* Verify RAS support on adapter */
	lpfc_sli4_ras_init(phba);

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
					    sizeof(struct lpfc_hba_eq_hdl),
					    GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
					sizeof(struct lpfc_vector_map_info),
					GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_hdl;
	}

	phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
	if (!phba->sli4_hba.eq_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3321 Failed allocation for per_cpu stats\n");
		rc = -ENOMEM;
		goto out_free_hba_cpu_map;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
	if (!phba->sli4_hba.c_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3332 Failed allocating per cpu hdwq stats\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_info;
	}
#endif

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
out_free_hba_eq_info:
	free_percpu(phba->sli4_hba.eq_info);
#endif
out_free_hba_cpu_map:
	kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
	kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_cmd_rsp_buf:
	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;
out_free_sg_dma_buf:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}

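/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/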
7000static void
7001lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
7002{
7003 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7004
7005 free_percpu(phba->sli4_hba.eq_info);
7006#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7007 free_percpu(phba->sli4_hba.c_stat);
7008#endif
7009
7010
7011 kfree(phba->sli4_hba.cpu_map);
7012 phba->sli4_hba.num_possible_cpu = 0;
7013 phba->sli4_hba.num_present_cpu = 0;
7014 phba->sli4_hba.curr_disp_cpu = 0;
7015 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
7016
7017
7018 kfree(phba->sli4_hba.hba_eq_hdl);
7019
7020
7021 lpfc_sli4_remove_rpi_hdrs(phba);
7022 lpfc_sli4_remove_rpis(phba);
7023
7024
7025 kfree(phba->fcf.fcf_rr_bmask);
7026
7027
7028 lpfc_free_active_sgl(phba);
7029 lpfc_free_els_sgl_list(phba);
7030 lpfc_free_nvmet_sgl_list(phba);
7031
7032
7033 lpfc_sli4_cq_event_release_all(phba);
7034 lpfc_sli4_cq_event_pool_destroy(phba);
7035
7036
7037 lpfc_sli4_dealloc_resource_identifiers(phba);
7038
7039
7040 lpfc_destroy_bootstrap_mbox(phba);
7041
7042
7043 lpfc_mem_free_all(phba);
7044
7045
7046 list_for_each_entry_safe(conn_entry, next_conn_entry,
7047 &phba->fcf_conn_rec_list, list) {
7048 list_del_init(&conn_entry->list);
7049 kfree(conn_entry);
7050 }
7051
7052 return;
7053}
7054
7055
/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns 0 on success, -ENODEV on device group failure.
 **/
7065int
7066lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7067{
7068 phba->lpfc_hba_init_link = lpfc_hba_init_link;
7069 phba->lpfc_hba_down_link = lpfc_hba_down_link;
7070 phba->lpfc_selective_reset = lpfc_selective_reset;
7071 switch (dev_grp) {
7072 case LPFC_PCI_DEV_LP:
7073 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7074 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
7075 phba->lpfc_stop_port = lpfc_stop_port_s3;
7076 break;
7077 case LPFC_PCI_DEV_OC:
7078 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7079 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
7080 phba->lpfc_stop_port = lpfc_stop_port_s4;
7081 break;
7082 default:
7083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7084 "1431 Invalid HBA PCI-device group: 0x%x\n",
7085 dev_grp);
		return -ENODEV;
7088 }
7089 return 0;
7090}
7091
7092
/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
7103static int
7104lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7105{
7106 int error;
7107
7108
7109 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7110 "lpfc_worker_%d", phba->brd_no);
7111 if (IS_ERR(phba->worker_thread)) {
7112 error = PTR_ERR(phba->worker_thread);
7113 return error;
7114 }
7115
7116 return 0;
7117}
7118
7119
/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
7127static void
7128lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7129{
7130 if (phba->wq) {
7131 flush_workqueue(phba->wq);
7132 destroy_workqueue(phba->wq);
7133 phba->wq = NULL;
7134 }
7135
7136
7137 if (phba->worker_thread)
7138 kthread_stop(phba->worker_thread);
7139}
7140
7141
/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
7147void
7148lpfc_free_iocb_list(struct lpfc_hba *phba)
7149{
7150 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7151
7152 spin_lock_irq(&phba->hbalock);
7153 list_for_each_entry_safe(iocbq_entry, iocbq_next,
7154 &phba->lpfc_iocb_list, list) {
7155 list_del(&iocbq_entry->list);
7156 kfree(iocbq_entry);
7157 phba->total_iocbq_bufs--;
7158 }
7159 spin_unlock_irq(&phba->hbalock);
7160
7161 return;
7162}
7163
7164
/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested IOCBs
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
7175int
7176lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7177{
7178 struct lpfc_iocbq *iocbq_entry = NULL;
7179 uint16_t iotag;
7180 int i;
7181
7182
7183 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7184 for (i = 0; i < iocb_count; i++) {
7185 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7186 if (iocbq_entry == NULL) {
7187 printk(KERN_ERR "%s: only allocated %d iocbs of "
7188 "expected %d count. Unloading driver.\n",
7189 __func__, i, iocb_count);
7190 goto out_free_iocbq;
7191 }
7192
7193 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7194 if (iotag == 0) {
7195 kfree(iocbq_entry);
7196 printk(KERN_ERR "%s: failed to allocate IOTAG. "
7197 "Unloading driver.\n", __func__);
7198 goto out_free_iocbq;
7199 }
7200 iocbq_entry->sli4_lxritag = NO_XRI;
7201 iocbq_entry->sli4_xritag = NO_XRI;
7202
7203 spin_lock_irq(&phba->hbalock);
7204 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7205 phba->total_iocbq_bufs++;
7206 spin_unlock_irq(&phba->hbalock);
7207 }
7208
7209 return 0;
7210
7211out_free_iocbq:
7212 lpfc_free_iocb_list(phba);
7213
7214 return -ENOMEM;
7215}
7216
7217
/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
7224void
7225lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7226{
7227 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7228
7229 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7230 list_del(&sglq_entry->list);
7231 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7232 kfree(sglq_entry);
7233 }
7234}
7235
7236
/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
7242static void
7243lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7244{
7245 LIST_HEAD(sglq_list);
7246
7247
7248 spin_lock_irq(&phba->hbalock);
7249 spin_lock(&phba->sli4_hba.sgl_list_lock);
7250 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7251 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7252 spin_unlock_irq(&phba->hbalock);
7253
7254
7255 lpfc_free_sgl_list(phba, &sglq_list);
7256}
7257
7258
/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
7264static void
7265lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7266{
7267 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7268 LIST_HEAD(sglq_list);
7269
7270
7271 spin_lock_irq(&phba->hbalock);
7272 spin_lock(&phba->sli4_hba.sgl_list_lock);
7273 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7274 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7275 spin_unlock_irq(&phba->hbalock);
7276
7277
7278 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7279 list_del(&sglq_entry->list);
7280 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7281 kfree(sglq_entry);
7282 }
7283
7284
	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
7288 phba->sli4_hba.nvmet_xri_cnt = 0;
7289}
7290
7291
/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
7298static int
7299lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7300{
7301 int size;
7302 size = sizeof(struct lpfc_sglq *);
7303 size *= phba->sli4_hba.max_cfg_param.max_xri;
7304
7305 phba->sli4_hba.lpfc_sglq_active_list =
7306 kzalloc(size, GFP_KERNEL);
7307 if (!phba->sli4_hba.lpfc_sglq_active_list)
7308 return -ENOMEM;
7309 return 0;
7310}
7311
7312
/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's active sgl memory.
 **/
7320static void
7321lpfc_free_active_sgl(struct lpfc_hba *phba)
7322{
7323 kfree(phba->sli4_hba.lpfc_sglq_active_list);
7324}
7325
7326
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 **/
7334static void
7335lpfc_init_sgl_list(struct lpfc_hba *phba)
7336{
	/* Initialize and populate the sglq list per host/VF. */
7338 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7339 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7340 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7341 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7342
	/* els xri-sgl book keeping */
7344 phba->sli4_hba.els_xri_cnt = 0;
7345
	/* io xri-sgl book keeping */
7347 phba->sli4_hba.io_xri_cnt = 0;
7348}
7349
7350
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents.  This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE modulo 64 rpi context headers.
 *
 * Return codes
 *	0 - successful
 *	-ERROR - otherwise.
 **/
7364int
7365lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7366{
7367 int rc = 0;
7368 struct lpfc_rpi_hdr *rpi_hdr;
7369
7370 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
7371 if (!phba->sli4_hba.rpi_hdrs_in_use)
7372 return rc;
7373 if (phba->sli4_hba.extents_in_use)
7374 return -EIO;
7375
7376 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7377 if (!rpi_hdr) {
7378 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7379 "0391 Error during rpi post operation\n");
7380 lpfc_sli4_remove_rpis(phba);
7381 rc = -ENODEV;
7382 }
7383
7384 return rc;
7385}
7386
7387
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba.  This single region
 * provides support for up to 64 rpis.  The region is used globally
 * by the device.
 *
 * Returns:
 *   A valid rpi hdr on success.
 *   A NULL pointer on any failure.
 **/
7400struct lpfc_rpi_hdr *
7401lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7402{
7403 uint16_t rpi_limit, curr_rpi_range;
7404 struct lpfc_dmabuf *dmabuf;
7405 struct lpfc_rpi_hdr *rpi_hdr;
7406
7407
	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual rpi
	 * header post take care of the count.
	 */
7412 if (!phba->sli4_hba.rpi_hdrs_in_use)
7413 return NULL;
7414 if (phba->sli4_hba.extents_in_use)
7415 return NULL;
7416
	/* The limit on the logical index is just the max_rpi count. */
7418 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
7419
7420 spin_lock_irq(&phba->hbalock);
7421
7422
7423
7424
7425
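	/*
	 * Snapshot the next free RPI under the lock; it is the starting
	 * RPI of the range this header block will describe.
	 */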
7426 curr_rpi_range = phba->sli4_hba.next_rpi;
7427 spin_unlock_irq(&phba->hbalock);
7428
7429
7430 if (curr_rpi_range == rpi_limit)
7431 return NULL;
7432
7433
	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
7437 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7438 if (!dmabuf)
7439 return NULL;
7440
7441 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7442 LPFC_HDR_TEMPLATE_SIZE,
7443 &dmabuf->phys, GFP_KERNEL);
7444 if (!dmabuf->virt) {
7445 rpi_hdr = NULL;
7446 goto err_free_dmabuf;
7447 }
7448
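	/*
	 * The RPI header template must be aligned on a
	 * LPFC_HDR_TEMPLATE_SIZE boundary; release the buffer if
	 * dma_alloc_coherent() returned an unaligned region.
	 */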
7449 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
7450 rpi_hdr = NULL;
7451 goto err_free_coherent;
7452 }
7453
7454
7455 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
7456 if (!rpi_hdr)
7457 goto err_free_coherent;
7458
7459 rpi_hdr->dmabuf = dmabuf;
7460 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7461 rpi_hdr->page_count = 1;
7462 spin_lock_irq(&phba->hbalock);
7463
	/* The rpi_hdr stores the logical index only. */
7465 rpi_hdr->start_rpi = curr_rpi_range;
7466 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
7467 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
7468
7469 spin_unlock_irq(&phba->hbalock);
7470 return rpi_hdr;
7471
7472 err_free_coherent:
7473 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
7474 dmabuf->virt, dmabuf->phys);
7475 err_free_dmabuf:
7476 kfree(dmabuf);
7477 return NULL;
7478}
7479
7480
/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
7489void
7490lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7491{
7492 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7493
7494 if (!phba->sli4_hba.rpi_hdrs_in_use)
7495 goto exit;
7496
7497 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7498 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7499 list_del(&rpi_hdr->list);
7500 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7501 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7502 kfree(rpi_hdr->dmabuf);
7503 kfree(rpi_hdr);
7504 }
7505 exit:
7506
7507 phba->sli4_hba.next_rpi = 0;
7508}
7509
7510
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
7522static struct lpfc_hba *
7523lpfc_hba_alloc(struct pci_dev *pdev)
7524{
7525 struct lpfc_hba *phba;
7526
7527
7528 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7529 if (!phba) {
7530 dev_err(&pdev->dev, "failed to allocate hba struct\n");
7531 return NULL;
7532 }
7533
7534
7535 phba->pcidev = pdev;
7536
7537
7538 phba->brd_no = lpfc_get_instance();
7539 if (phba->brd_no < 0) {
7540 kfree(phba);
7541 return NULL;
7542 }
7543 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7544
7545 spin_lock_init(&phba->ct_ev_lock);
7546 INIT_LIST_HEAD(&phba->ct_ev_waiters);
7547
7548 return phba;
7549}
7550
7551
/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
7558static void
7559lpfc_hba_free(struct lpfc_hba *phba)
7560{
7561 if (phba->sli_rev == LPFC_SLI_REV4)
7562 kfree(phba->sli4_hba.hdwq);
7563
7564
7565 idr_remove(&lpfc_hba_index, phba->brd_no);
7566
7567
7568 kfree(phba->sli.sli3_ring);
7569 phba->sli.sli3_ring = NULL;
7570
7571 kfree(phba);
7572 return;
7573}
7574
7575
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
7586static int
7587lpfc_create_shost(struct lpfc_hba *phba)
7588{
7589 struct lpfc_vport *vport;
7590 struct Scsi_Host *shost;
7591
7592
7593 phba->fc_edtov = FF_DEF_EDTOV;
7594 phba->fc_ratov = FF_DEF_RATOV;
7595 phba->fc_altov = FF_DEF_ALTOV;
7596 phba->fc_arbtov = FF_DEF_ARBTOV;
7597
7598 atomic_set(&phba->sdev_cnt, 0);
7599 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
7600 if (!vport)
7601 return -ENODEV;
7602
7603 shost = lpfc_shost_from_vport(vport);
7604 phba->pport = vport;
7605
7606 if (phba->nvmet_support) {
7607
7608 phba->targetport = NULL;
7609 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7610 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
7611 "6076 NVME Target Found\n");
7612 }
7613
7614 lpfc_debugfs_initialize(vport);
7615
7616 pci_set_drvdata(phba->pcidev, shost);
7617
7618
7619
7620
7621
7622 vport->load_flag |= FC_ALLOW_FDMI;
7623 if (phba->cfg_enable_SmartSAN ||
7624 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
7625
		/* Setup appropriate attribute masks */
7627 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
7628 if (phba->cfg_enable_SmartSAN)
7629 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
7630 else
7631 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7632 }
7633 return 0;
7634}
7635
7636
/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
7643static void
7644lpfc_destroy_shost(struct lpfc_hba *phba)
7645{
7646 struct lpfc_vport *vport = phba->pport;
7647
7648
7649 destroy_port(vport);
7650
7651 return;
7652}
7653
7654
/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: pointer to SCSI host data structure.
 *
 * This routine sets up the SCSI host's DIF/DIX protection mask and guard
 * type from the driver's BlockGuard configuration parameters.
 **/
7662static void
7663lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7664{
7665 uint32_t old_mask;
7666 uint32_t old_guard;
7667
7668 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7669 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7670 "1478 Registering BlockGuard with the "
7671 "SCSI layer\n");
7672
7673 old_mask = phba->cfg_prot_mask;
7674 old_guard = phba->cfg_prot_guard;
7675
7676
7677 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7678 SHOST_DIX_TYPE0_PROTECTION |
7679 SHOST_DIX_TYPE1_PROTECTION);
7680 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7681 SHOST_DIX_GUARD_CRC);
7682
7683
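		/* Requesting DIX Type 1 alone implies DIF Type 1 support */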
7684 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7685 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7686
7687 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7688 if ((old_mask != phba->cfg_prot_mask) ||
7689 (old_guard != phba->cfg_prot_guard))
7690 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7691 "1475 Registering BlockGuard with the "
7692 "SCSI layer: mask %d guard %d\n",
7693 phba->cfg_prot_mask,
7694 phba->cfg_prot_guard);
7695
7696 scsi_host_set_prot(shost, phba->cfg_prot_mask);
7697 scsi_host_set_guard(shost, phba->cfg_prot_guard);
7698 } else
7699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7700 "1479 Not Registering BlockGuard with the SCSI "
7701 "layer, Bad protection parameters: %d %d\n",
7702 old_mask, old_guard);
7703 }
7704}
7705
7706
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
7713static void
7714lpfc_post_init_setup(struct lpfc_hba *phba)
7715{
7716 struct Scsi_Host *shost;
7717 struct lpfc_adapter_event_header adapter_event;
7718
7719
7720 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7721
7722
7723
7724
7725
7726 shost = pci_get_drvdata(phba->pcidev);
7727 shost->can_queue = phba->cfg_hba_queue_depth - 10;
7728
7729 lpfc_host_attrib_init(shost);
7730
7731 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7732 spin_lock_irq(shost->host_lock);
7733 lpfc_poll_start_timer(phba);
7734 spin_unlock_irq(shost->host_lock);
7735 }
7736
7737 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7738 "0428 Perform SCSI scan\n");
7739
7740 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7741 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7742 fc_host_post_vendor_event(shost, fc_get_event_number(),
7743 sizeof(adapter_event),
7744 (char *) &adapter_event,
7745 LPFC_NL_VENDOR_ID);
7746 return;
7747}
7748
7749
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
7760static int
7761lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7762{
7763 struct pci_dev *pdev = phba->pcidev;
7764 unsigned long bar0map_len, bar2map_len;
7765 int i, hbq_count;
7766 void *ptr;
7767 int error;
7768
7769 if (!pdev)
7770 return -ENODEV;
7771
7772
7773 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7774 if (error)
7775 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7776 if (error)
7777 return error;
7778 error = -ENODEV;
7779
7780
7781
7782
7783 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7784 bar0map_len = pci_resource_len(pdev, 0);
7785
7786 phba->pci_bar2_map = pci_resource_start(pdev, 2);
7787 bar2map_len = pci_resource_len(pdev, 2);
7788
7789
7790 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7791 if (!phba->slim_memmap_p) {
7792 dev_printk(KERN_ERR, &pdev->dev,
7793 "ioremap failed for SLIM memory.\n");
7794 goto out;
7795 }
7796
7797
7798 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7799 if (!phba->ctrl_regs_memmap_p) {
7800 dev_printk(KERN_ERR, &pdev->dev,
7801 "ioremap failed for HBA control registers.\n");
7802 goto out_iounmap_slim;
7803 }
7804
7805
7806 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7807 &phba->slim2p.phys, GFP_KERNEL);
7808 if (!phba->slim2p.virt)
7809 goto out_iounmap;
7810
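	/* Carve the mailbox, mailbox extension, PCB, and IOCBs out of SLIM2 */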
7811 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7812 phba->mbox_ext = (phba->slim2p.virt +
7813 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
7814 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7815 phba->IOCBs = (phba->slim2p.virt +
7816 offsetof(struct lpfc_sli2_slim, IOCBs));
7817
7818 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7819 lpfc_sli_hbq_size(),
7820 &phba->hbqslimp.phys,
7821 GFP_KERNEL);
7822 if (!phba->hbqslimp.virt)
7823 goto out_free_slim;
7824
7825 hbq_count = lpfc_sli_hbq_count();
7826 ptr = phba->hbqslimp.virt;
7827 for (i = 0; i < hbq_count; ++i) {
7828 phba->hbqs[i].hbq_virt = ptr;
7829 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7830 ptr += (lpfc_hbq_defs[i]->entry_count *
7831 sizeof(struct lpfc_hbq_entry));
7832 }
7833 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7834 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7835
7836 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7837
7838 phba->MBslimaddr = phba->slim_memmap_p;
7839 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7840 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7841 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7842 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7843
7844 return 0;
7845
7846out_free_slim:
7847 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7848 phba->slim2p.virt, phba->slim2p.phys);
7849out_iounmap:
7850 iounmap(phba->ctrl_regs_memmap_p);
7851out_iounmap_slim:
7852 iounmap(phba->slim_memmap_p);
7853out:
7854 return error;
7855}
7856
7857
/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
7864static void
7865lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7866{
7867 struct pci_dev *pdev;
7868
7869
7870 if (!phba->pcidev)
7871 return;
7872 else
7873 pdev = phba->pcidev;
7874
7875
7876 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7877 phba->hbqslimp.virt, phba->hbqslimp.phys);
7878 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7879 phba->slim2p.virt, phba->slim2p.phys);
7880
7881
7882 iounmap(phba->ctrl_regs_memmap_p);
7883 iounmap(phba->slim_memmap_p);
7884
7885 return;
7886}
7887
7888
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
7897int
7898lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7899{
7900 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7901 struct lpfc_register reg_data;
7902 int i, port_error = 0;
7903 uint32_t if_type;
7904
7905 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
7907 if (!phba->sli4_hba.PSMPHRregaddr)
7908 return -ENODEV;
7909
7910
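	/* Wait up to 30 seconds (3000 x 10ms) for the port to complete POST */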
7911 for (i = 0; i < 3000; i++) {
7912 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7913 &portsmphr_reg.word0) ||
7914 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7915
7916 port_error = -ENODEV;
7917 break;
7918 }
7919 if (LPFC_POST_STAGE_PORT_READY ==
7920 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
7921 break;
7922 msleep(10);
7923 }
7924
7925
	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
7929 if (port_error) {
7930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7931 "1408 Port Failed POST - portsmphr=0x%x, "
7932 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7933 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7934 portsmphr_reg.word0,
7935 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7936 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7937 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7938 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7939 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7940 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7941 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7942 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7943 } else {
7944 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7945 "2534 Device Info: SLIFamily=0x%x, "
7946 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7947 "SLIHint_2=0x%x, FT=0x%x\n",
7948 bf_get(lpfc_sli_intf_sli_family,
7949 &phba->sli4_hba.sli_intf),
7950 bf_get(lpfc_sli_intf_slirev,
7951 &phba->sli4_hba.sli_intf),
7952 bf_get(lpfc_sli_intf_if_type,
7953 &phba->sli4_hba.sli_intf),
7954 bf_get(lpfc_sli_intf_sli_hint1,
7955 &phba->sli4_hba.sli_intf),
7956 bf_get(lpfc_sli_intf_sli_hint2,
7957 &phba->sli4_hba.sli_intf),
7958 bf_get(lpfc_sli_intf_func_type,
7959 &phba->sli4_hba.sli_intf));
7960
7961
7962
7963
7964
7965 if_type = bf_get(lpfc_sli_intf_if_type,
7966 &phba->sli4_hba.sli_intf);
7967 switch (if_type) {
7968 case LPFC_SLI_INTF_IF_TYPE_0:
7969 phba->sli4_hba.ue_mask_lo =
7970 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7971 phba->sli4_hba.ue_mask_hi =
7972 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7973 uerrlo_reg.word0 =
7974 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7975 uerrhi_reg.word0 =
7976 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7977 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7978 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7979 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7980 "1422 Unrecoverable Error "
7981 "Detected during POST "
7982 "uerr_lo_reg=0x%x, "
7983 "uerr_hi_reg=0x%x, "
7984 "ue_mask_lo_reg=0x%x, "
7985 "ue_mask_hi_reg=0x%x\n",
7986 uerrlo_reg.word0,
7987 uerrhi_reg.word0,
7988 phba->sli4_hba.ue_mask_lo,
7989 phba->sli4_hba.ue_mask_hi);
7990 port_error = -ENODEV;
7991 }
7992 break;
7993 case LPFC_SLI_INTF_IF_TYPE_2:
7994 case LPFC_SLI_INTF_IF_TYPE_6:
7995
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				       &reg_data.word0) ||
			    (bf_get(lpfc_sliport_status_err, &reg_data) &&
			     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
8000 phba->work_status[0] =
8001 readl(phba->sli4_hba.u.if_type2.
8002 ERR1regaddr);
8003 phba->work_status[1] =
8004 readl(phba->sli4_hba.u.if_type2.
8005 ERR2regaddr);
8006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8007 "2888 Unrecoverable port error "
8008 "following POST: port status reg "
8009 "0x%x, port_smphr reg 0x%x, "
8010 "error 1=0x%x, error 2=0x%x\n",
8011 reg_data.word0,
8012 portsmphr_reg.word0,
8013 phba->work_status[0],
8014 phba->work_status[1]);
8015 port_error = -ENODEV;
8016 }
8017 break;
8018 case LPFC_SLI_INTF_IF_TYPE_1:
8019 default:
8020 break;
8021 }
8022 }
8023 return port_error;
8024}
8025
8026
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
8034static void
8035lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8036{
8037 switch (if_type) {
8038 case LPFC_SLI_INTF_IF_TYPE_0:
8039 phba->sli4_hba.u.if_type0.UERRLOregaddr =
8040 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
8041 phba->sli4_hba.u.if_type0.UERRHIregaddr =
8042 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
8043 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8044 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8045 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8046 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8047 phba->sli4_hba.SLIINTFregaddr =
8048 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8049 break;
8050 case LPFC_SLI_INTF_IF_TYPE_2:
8051 phba->sli4_hba.u.if_type2.EQDregaddr =
8052 phba->sli4_hba.conf_regs_memmap_p +
8053 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8054 phba->sli4_hba.u.if_type2.ERR1regaddr =
8055 phba->sli4_hba.conf_regs_memmap_p +
8056 LPFC_CTL_PORT_ER1_OFFSET;
8057 phba->sli4_hba.u.if_type2.ERR2regaddr =
8058 phba->sli4_hba.conf_regs_memmap_p +
8059 LPFC_CTL_PORT_ER2_OFFSET;
8060 phba->sli4_hba.u.if_type2.CTRLregaddr =
8061 phba->sli4_hba.conf_regs_memmap_p +
8062 LPFC_CTL_PORT_CTL_OFFSET;
8063 phba->sli4_hba.u.if_type2.STATUSregaddr =
8064 phba->sli4_hba.conf_regs_memmap_p +
8065 LPFC_CTL_PORT_STA_OFFSET;
8066 phba->sli4_hba.SLIINTFregaddr =
8067 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8068 phba->sli4_hba.PSMPHRregaddr =
8069 phba->sli4_hba.conf_regs_memmap_p +
8070 LPFC_CTL_PORT_SEM_OFFSET;
8071 phba->sli4_hba.RQDBregaddr =
8072 phba->sli4_hba.conf_regs_memmap_p +
8073 LPFC_ULP0_RQ_DOORBELL;
8074 phba->sli4_hba.WQDBregaddr =
8075 phba->sli4_hba.conf_regs_memmap_p +
8076 LPFC_ULP0_WQ_DOORBELL;
8077 phba->sli4_hba.CQDBregaddr =
8078 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8079 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8080 phba->sli4_hba.MQDBregaddr =
8081 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8082 phba->sli4_hba.BMBXregaddr =
8083 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8084 break;
8085 case LPFC_SLI_INTF_IF_TYPE_6:
8086 phba->sli4_hba.u.if_type2.EQDregaddr =
8087 phba->sli4_hba.conf_regs_memmap_p +
8088 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8089 phba->sli4_hba.u.if_type2.ERR1regaddr =
8090 phba->sli4_hba.conf_regs_memmap_p +
8091 LPFC_CTL_PORT_ER1_OFFSET;
8092 phba->sli4_hba.u.if_type2.ERR2regaddr =
8093 phba->sli4_hba.conf_regs_memmap_p +
8094 LPFC_CTL_PORT_ER2_OFFSET;
8095 phba->sli4_hba.u.if_type2.CTRLregaddr =
8096 phba->sli4_hba.conf_regs_memmap_p +
8097 LPFC_CTL_PORT_CTL_OFFSET;
8098 phba->sli4_hba.u.if_type2.STATUSregaddr =
8099 phba->sli4_hba.conf_regs_memmap_p +
8100 LPFC_CTL_PORT_STA_OFFSET;
8101 phba->sli4_hba.PSMPHRregaddr =
8102 phba->sli4_hba.conf_regs_memmap_p +
8103 LPFC_CTL_PORT_SEM_OFFSET;
8104 phba->sli4_hba.BMBXregaddr =
8105 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8106 break;
8107 case LPFC_SLI_INTF_IF_TYPE_1:
8108 default:
8109 dev_printk(KERN_ERR, &phba->pcidev->dev,
8110 "FATAL - unsupported SLI4 interface type - %d\n",
8111 if_type);
8112 break;
8113 }
8114}
8115
8116
/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR1 register memory map.
 **/
8122static void
8123lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8124{
8125 switch (if_type) {
8126 case LPFC_SLI_INTF_IF_TYPE_0:
8127 phba->sli4_hba.PSMPHRregaddr =
8128 phba->sli4_hba.ctrl_regs_memmap_p +
8129 LPFC_SLIPORT_IF0_SMPHR;
8130 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8131 LPFC_HST_ISR0;
8132 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8133 LPFC_HST_IMR0;
8134 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8135 LPFC_HST_ISCR0;
8136 break;
8137 case LPFC_SLI_INTF_IF_TYPE_6:
8138 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8139 LPFC_IF6_RQ_DOORBELL;
8140 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8141 LPFC_IF6_WQ_DOORBELL;
8142 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8143 LPFC_IF6_CQ_DOORBELL;
8144 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8145 LPFC_IF6_EQ_DOORBELL;
8146 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8147 LPFC_IF6_MQ_DOORBELL;
8148 break;
8149 case LPFC_SLI_INTF_IF_TYPE_2:
8150 case LPFC_SLI_INTF_IF_TYPE_1:
8151 default:
8152 dev_err(&phba->pcidev->dev,
8153 "FATAL - unsupported SLI4 interface type - %d\n",
8154 if_type);
8155 break;
8156 }
8157}
8158
8159
/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
8169static int
8170lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8171{
8172 if (vf > LPFC_VIR_FUNC_MAX)
8173 return -ENODEV;
8174
8175 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8176 vf * LPFC_VFR_PAGE_SIZE +
8177 LPFC_ULP0_RQ_DOORBELL);
8178 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8179 vf * LPFC_VFR_PAGE_SIZE +
8180 LPFC_ULP0_WQ_DOORBELL);
8181 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8182 vf * LPFC_VFR_PAGE_SIZE +
8183 LPFC_EQCQ_DOORBELL);
8184 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8185 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8186 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8187 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8188 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8189 return 0;
8190}
8191
8192
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
8207static int
8208lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8209{
8210 uint32_t bmbx_size;
8211 struct lpfc_dmabuf *dmabuf;
8212 struct dma_address *dma_address;
8213 uint32_t pa_addr;
8214 uint64_t phys_addr;
8215
8216 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8217 if (!dmabuf)
8218 return -ENOMEM;
8219
8220
	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
8224 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8225 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8226 &dmabuf->phys, GFP_KERNEL);
8227 if (!dmabuf->virt) {
8228 kfree(dmabuf);
8229 return -ENOMEM;
8230 }
8231
8232
	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
8239 phba->sli4_hba.bmbx.dmabuf = dmabuf;
8240 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8241
8242 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8243 LPFC_ALIGN_16_BYTE);
8244 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8245 LPFC_ALIGN_16_BYTE);
8246
8247
8248
8249
8250
8251
8252
8253
8254
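	/*
	 * Program the bootstrap mailbox physical address into the BMBX
	 * register image as two 30-bit halves: the high half comes from
	 * address bits 63:34 and the low half from bits 33:4.  Each half
	 * is shifted left by 2 and tagged with its hi/lo indicator bit.
	 */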
8255 dma_address = &phba->sli4_hba.bmbx.dma_address;
8256 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8257 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8258 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8259 LPFC_BMBX_BIT1_ADDR_HI);
8260
8261 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8262 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8263 LPFC_BMBX_BIT1_ADDR_LO);
8264 return 0;
8265}
8266
8267
/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 **/
8278static void
8279lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8280{
8281 dma_free_coherent(&phba->pcidev->dev,
8282 phba->sli4_hba.bmbx.bmbx_size,
8283 phba->sli4_hba.bmbx.dmabuf->virt,
8284 phba->sli4_hba.bmbx.dmabuf->phys);
8285
8286 kfree(phba->sli4_hba.bmbx.dmabuf);
8287 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8288}
8289
8290static const char * const lpfc_topo_to_str[] = {
8291 "Loop then P2P",
8292 "Loopback",
8293 "P2P Only",
8294 "Unsupported",
8295 "Loop Only",
8296 "Unsupported",
8297 "P2P then Loop",
8298};
8299
8300
#define LINK_FLAGS_DEF	0x0
#define LINK_FLAGS_P2P	0x1
#define LINK_FLAGS_LOOP	0x2
/**
 * lpfc_map_topology - Map the topology read from READ_CONFIG
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to read config data
 *
 * This routine is invoked to map the topology values as read
 * from the read config mailbox command. If the persistent
 * topology feature is supported, the firmware will provide the
 * saved topology information to be used in INIT_LINK.
 **/
8314static void
8315lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
8316{
8317 u8 ptv, tf, pt;
8318
8319 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
8320 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
8321 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
8322
8323 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8324 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
8325 ptv, tf, pt);
8326 if (!ptv) {
8327 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8328 "2019 FW does not support persistent topology "
8329 "Using driver parameter defined value [%s]",
8330 lpfc_topo_to_str[phba->cfg_topology]);
8331 return;
8332 }
8333
8334 phba->hba_flag |= HBA_PERSISTENT_TOPO;
8335 switch (phba->pcidev->device) {
8336 case PCI_DEVICE_ID_LANCER_G7_FC:
8337 case PCI_DEVICE_ID_LANCER_G6_FC:
8338 if (!tf) {
8339 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
8340 ? FLAGS_TOPOLOGY_MODE_LOOP
8341 : FLAGS_TOPOLOGY_MODE_PT_PT);
8342 } else {
8343 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
8344 }
8345 break;
8346 default:
8347 if (tf) {
8348
8349 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
8350 FLAGS_TOPOLOGY_MODE_LOOP_PT);
8351 } else {
8352 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
8353 ? FLAGS_TOPOLOGY_MODE_PT_PT
8354 : FLAGS_TOPOLOGY_MODE_LOOP);
8355 }
8356 break;
8357 }
8358 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
8359 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8360 "2020 Using persistent topology value [%s]",
8361 lpfc_topo_to_str[phba->cfg_topology]);
8362 } else {
8363 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8364 "2021 Invalid topology values from FW "
8365 "Using driver parameter defined value [%s]",
8366 lpfc_topo_to_str[phba->cfg_topology]);
8367 }
8368}
8369
8370
/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the
 * resource allocation for the FC host. The port also establishes its default
 * mode of operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
8384int
8385lpfc_sli4_read_config(struct lpfc_hba *phba)
8386{
8387 LPFC_MBOXQ_t *pmb;
8388 struct lpfc_mbx_read_config *rd_config;
8389 union lpfc_sli4_cfg_shdr *shdr;
8390 uint32_t shdr_status, shdr_add_status;
8391 struct lpfc_mbx_get_func_cfg *get_func_cfg;
8392 struct lpfc_rsrc_desc_fcfcoe *desc;
8393 char *pdesc_0;
8394 uint16_t forced_link_speed;
8395 uint32_t if_type, qmin;
8396 int length, i, rc = 0, rc2;
8397
8398 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8399 if (!pmb) {
8400 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8401 "2011 Unable to allocate memory for issuing "
8402 "SLI_CONFIG_SPECIAL mailbox command\n");
8403 return -ENOMEM;
8404 }
8405
8406 lpfc_read_config(phba, pmb);
8407
8408 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8409 if (rc != MBX_SUCCESS) {
8410 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed, mbxCmd x%x "
8412 "READ_CONFIG, mbxStatus x%x\n",
8413 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8414 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8415 rc = -EIO;
8416 } else {
8417 rd_config = &pmb->u.mqe.un.rd_config;
8418 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8419 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8420 phba->sli4_hba.lnk_info.lnk_tp =
8421 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8422 phba->sli4_hba.lnk_info.lnk_no =
8423 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8424 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8425 "3081 lnk_type:%d, lnk_numb:%d\n",
8426 phba->sli4_hba.lnk_info.lnk_tp,
8427 phba->sli4_hba.lnk_info.lnk_no);
8428 } else
8429 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8430 "3082 Mailbox (x%x) returned ldv:x0\n",
8431 bf_get(lpfc_mqe_command, &pmb->u.mqe));
8432 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8433 phba->bbcredit_support = 1;
8434 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8435 }
8436
8437 phba->sli4_hba.conf_trunk =
8438 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
8439 phba->sli4_hba.extents_in_use =
8440 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8441 phba->sli4_hba.max_cfg_param.max_xri =
8442 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8443
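		/* Cap the XRI count in a kdump kernel to limit memory use */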
8444 if (is_kdump_kernel() &&
8445 phba->sli4_hba.max_cfg_param.max_xri > 512)
8446 phba->sli4_hba.max_cfg_param.max_xri = 512;
8447 phba->sli4_hba.max_cfg_param.xri_base =
8448 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8449 phba->sli4_hba.max_cfg_param.max_vpi =
8450 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
8451
8452 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8453 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
8454 phba->sli4_hba.max_cfg_param.vpi_base =
8455 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8456 phba->sli4_hba.max_cfg_param.max_rpi =
8457 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8458 phba->sli4_hba.max_cfg_param.rpi_base =
8459 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8460 phba->sli4_hba.max_cfg_param.max_vfi =
8461 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8462 phba->sli4_hba.max_cfg_param.vfi_base =
8463 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8464 phba->sli4_hba.max_cfg_param.max_fcfi =
8465 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
8466 phba->sli4_hba.max_cfg_param.max_eq =
8467 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8468 phba->sli4_hba.max_cfg_param.max_rq =
8469 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8470 phba->sli4_hba.max_cfg_param.max_wq =
8471 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8472 phba->sli4_hba.max_cfg_param.max_cq =
8473 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8474 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8475 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8476 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8477 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
8478 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8479 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
8480 phba->max_vports = phba->max_vpi;
8481 lpfc_map_topology(phba, rd_config);
8482 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8483 "2003 cfg params Extents? %d "
8484 "XRI(B:%d M:%d), "
8485 "VPI(B:%d M:%d) "
8486 "VFI(B:%d M:%d) "
8487 "RPI(B:%d M:%d) "
8488 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
8489 phba->sli4_hba.extents_in_use,
8490 phba->sli4_hba.max_cfg_param.xri_base,
8491 phba->sli4_hba.max_cfg_param.max_xri,
8492 phba->sli4_hba.max_cfg_param.vpi_base,
8493 phba->sli4_hba.max_cfg_param.max_vpi,
8494 phba->sli4_hba.max_cfg_param.vfi_base,
8495 phba->sli4_hba.max_cfg_param.max_vfi,
8496 phba->sli4_hba.max_cfg_param.rpi_base,
8497 phba->sli4_hba.max_cfg_param.max_rpi,
8498 phba->sli4_hba.max_cfg_param.max_fcfi,
8499 phba->sli4_hba.max_cfg_param.max_eq,
8500 phba->sli4_hba.max_cfg_param.max_cq,
8501 phba->sli4_hba.max_cfg_param.max_wq,
8502 phba->sli4_hba.max_cfg_param.max_rq);
8503
8504
		/*
		 * Calculate queue resources based on how many WQs, CQs, and
		 * EQs the port makes available.
		 */
8508 qmin = phba->sli4_hba.max_cfg_param.max_wq;
8509 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8510 qmin = phba->sli4_hba.max_cfg_param.max_cq;
8511 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8512 qmin = phba->sli4_hba.max_cfg_param.max_eq;
8513
8514
8515
8516
8517
8518
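		/*
		 * Reserve 4 of the minimum (one each for the ELS, NVME LS,
		 * and mailbox slow-path queues, plus one extra); what is
		 * left bounds the fast-path IRQ and hardware queue counts.
		 */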
8519 qmin -= 4;
8520
8521
8522 if ((phba->cfg_irq_chann > qmin) ||
8523 (phba->cfg_hdw_queue > qmin)) {
8524 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8525 "2005 Reducing Queues: "
8526 "WQ %d CQ %d EQ %d: min %d: "
8527 "IRQ %d HDWQ %d\n",
8528 phba->sli4_hba.max_cfg_param.max_wq,
8529 phba->sli4_hba.max_cfg_param.max_cq,
8530 phba->sli4_hba.max_cfg_param.max_eq,
8531 qmin, phba->cfg_irq_chann,
8532 phba->cfg_hdw_queue);
8533
8534 if (phba->cfg_irq_chann > qmin)
8535 phba->cfg_irq_chann = qmin;
8536 if (phba->cfg_hdw_queue > qmin)
8537 phba->cfg_hdw_queue = qmin;
8538 }
8539 }
8540
8541 if (rc)
8542 goto read_cfg_out;
8543
8544
8545 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8546 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8547 forced_link_speed =
8548 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8549 if (forced_link_speed) {
8550 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8551
8552 switch (forced_link_speed) {
8553 case LINK_SPEED_1G:
8554 phba->cfg_link_speed =
8555 LPFC_USER_LINK_SPEED_1G;
8556 break;
8557 case LINK_SPEED_2G:
8558 phba->cfg_link_speed =
8559 LPFC_USER_LINK_SPEED_2G;
8560 break;
8561 case LINK_SPEED_4G:
8562 phba->cfg_link_speed =
8563 LPFC_USER_LINK_SPEED_4G;
8564 break;
8565 case LINK_SPEED_8G:
8566 phba->cfg_link_speed =
8567 LPFC_USER_LINK_SPEED_8G;
8568 break;
8569 case LINK_SPEED_10G:
8570 phba->cfg_link_speed =
8571 LPFC_USER_LINK_SPEED_10G;
8572 break;
8573 case LINK_SPEED_16G:
8574 phba->cfg_link_speed =
8575 LPFC_USER_LINK_SPEED_16G;
8576 break;
8577 case LINK_SPEED_32G:
8578 phba->cfg_link_speed =
8579 LPFC_USER_LINK_SPEED_32G;
8580 break;
8581 case LINK_SPEED_64G:
8582 phba->cfg_link_speed =
8583 LPFC_USER_LINK_SPEED_64G;
8584 break;
8585 case 0xffff:
8586 phba->cfg_link_speed =
8587 LPFC_USER_LINK_SPEED_AUTO;
8588 break;
8589 default:
8590 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8591 "0047 Unrecognized link "
8592 "speed : %d\n",
8593 forced_link_speed);
8594 phba->cfg_link_speed =
8595 LPFC_USER_LINK_SPEED_AUTO;
8596 }
8597 }
8598 }
8599
8600
8601 length = phba->sli4_hba.max_cfg_param.max_xri -
8602 lpfc_sli4_get_els_iocb_cnt(phba);
8603 if (phba->cfg_hba_queue_depth > length) {
8604 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8605 "3361 HBA queue depth changed from %d to %d\n",
8606 phba->cfg_hba_queue_depth, length);
8607 phba->cfg_hba_queue_depth = length;
8608 }
8609
8610 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
8611 LPFC_SLI_INTF_IF_TYPE_2)
8612 goto read_cfg_out;
8613
8614
8615 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8616 sizeof(struct lpfc_sli4_cfg_mhdr));
8617 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8618 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8619 length, LPFC_SLI4_MBX_EMBED);
8620
8621 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8622 shdr = (union lpfc_sli4_cfg_shdr *)
8623 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8624 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8625 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8626 if (rc2 || shdr_status || shdr_add_status) {
8627 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed, mbxCmd x%x "
8629 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8630 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8631 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8632 goto read_cfg_out;
8633 }
8634
8635
8636 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
8637
8638 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8639 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8640 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8641 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8642 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8643 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8644 goto read_cfg_out;
8645
8646 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8647 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
8648 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8649 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
8650 phba->sli4_hba.iov.pf_number =
8651 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8652 phba->sli4_hba.iov.vf_number =
8653 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8654 break;
8655 }
8656 }
8657
8658 if (i < LPFC_RSRC_DESC_MAX_NUM)
8659 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8660 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8661 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8662 phba->sli4_hba.iov.vf_number);
8663 else
8664 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8665 "3028 GET_FUNCTION_CONFIG: failed to find "
8666 "Resource Descriptor:x%x\n",
8667 LPFC_RSRC_DESC_TYPE_FCFCOE);
8668
8669read_cfg_out:
8670 mempool_free(pmb, phba->mbox_mem_pool);
8671 return rc;
8672}
8673
8674
/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0.  This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
8687static int
8688lpfc_setup_endian_order(struct lpfc_hba *phba)
8689{
8690 LPFC_MBOXQ_t *mboxq;
8691 uint32_t if_type, rc = 0;
8692 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8693 HOST_ENDIAN_HIGH_WORD1};
8694
8695 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8696 switch (if_type) {
8697 case LPFC_SLI_INTF_IF_TYPE_0:
8698 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8699 GFP_KERNEL);
8700 if (!mboxq) {
8701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8702 "0492 Unable to allocate memory for "
8703 "issuing SLI_CONFIG_SPECIAL mailbox "
8704 "command\n");
8705 return -ENOMEM;
8706 }
8707
8708
		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
8712 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8713 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8714 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8715 if (rc != MBX_SUCCESS) {
8716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8717 "0493 SLI_CONFIG_SPECIAL mailbox "
8718 "failed with status x%x\n",
8719 rc);
8720 rc = -EIO;
8721 }
8722 mempool_free(mboxq, phba->mbox_mem_pool);
8723 break;
8724 case LPFC_SLI_INTF_IF_TYPE_6:
8725 case LPFC_SLI_INTF_IF_TYPE_2:
8726 case LPFC_SLI_INTF_IF_TYPE_1:
8727 default:
8728 break;
8729 }
8730 return rc;
8731}
8732
8733
/**
 * lpfc_sli4_queue_verify - Verify and update EQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs.
 * After this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
8746static int
8747lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8748{
8749
	/*
	 * Sanity check the configured queue parameters against the run-time
	 * device parameters.
	 */
8754 if (phba->nvmet_support) {
8755 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
8756 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
8757 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8758 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8759 }
8760
8761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8762 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8763 phba->cfg_hdw_queue, phba->cfg_irq_chann,
8764 phba->cfg_nvmet_mrq);
8765
8766
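	/* Set the default EQ entry size and count */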
8767 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8768 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8769
8770
8771 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8772 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8773 return 0;
8774}
8775
8776static int
8777lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8778{
8779 struct lpfc_queue *qdesc;
8780 u32 wqesize;
8781 int cpu;
8782
8783 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8784
8785 if (phba->enab_exp_wqcq_pages)
		/* Increase the CQ size when WQEs contain an embedded cdb */
8787 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8788 phba->sli4_hba.cq_esize,
8789 LPFC_CQE_EXP_COUNT, cpu);
8790
8791 else
8792 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8793 phba->sli4_hba.cq_esize,
8794 phba->sli4_hba.cq_ecount, cpu);
8795 if (!qdesc) {
8796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8797 "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
8798 return 1;
8799 }
8800 qdesc->qe_valid = 1;
8801 qdesc->hdwq = idx;
8802 qdesc->chann = cpu;
8803 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
8804
8805
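	/* Create the fast-path IO Work Queue */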
8806 if (phba->enab_exp_wqcq_pages) {
		/* Increase the WQ size when WQEs contain an embedded cdb */
8808 wqesize = (phba->fcp_embed_io) ?
8809 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8810 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8811 wqesize,
8812 LPFC_WQE_EXP_COUNT, cpu);
8813 } else
8814 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8815 phba->sli4_hba.wq_esize,
8816 phba->sli4_hba.wq_ecount, cpu);
8817
8818 if (!qdesc) {
8819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8820 "0503 Failed allocate fast-path IO WQ (%d)\n",
8821 idx);
8822 return 1;
8823 }
8824 qdesc->hdwq = idx;
8825 qdesc->chann = cpu;
8826 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
8827 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8828 return 0;
8829}
8830
8831
/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
8845int
8846lpfc_sli4_queue_create(struct lpfc_hba *phba)
8847{
8848 struct lpfc_queue *qdesc;
8849 int idx, cpu, eqcpu;
8850 struct lpfc_sli4_hdw_queue *qp;
8851 struct lpfc_vector_map_info *cpup;
8852 struct lpfc_vector_map_info *eqcpup;
8853 struct lpfc_eq_intr_info *eqi;
8854
8855
	/*
	 * Create HBA Record arrays.
	 * Both NVME and FCP will share the same vectors / EQs.
	 */
8859 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8860 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8861 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8862 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8863 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8864 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
8865 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8866 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8867 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8868 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8869
8870 if (!phba->sli4_hba.hdwq) {
8871 phba->sli4_hba.hdwq = kcalloc(
8872 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
8873 GFP_KERNEL);
8874 if (!phba->sli4_hba.hdwq) {
8875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8876 "6427 Failed allocate memory for "
8877 "fast-path Hardware Queue array\n");
8878 goto out_error;
8879 }
8880
8881 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8882 qp = &phba->sli4_hba.hdwq[idx];
8883 spin_lock_init(&qp->io_buf_list_get_lock);
8884 spin_lock_init(&qp->io_buf_list_put_lock);
8885 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
8886 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
8887 qp->get_io_bufs = 0;
8888 qp->put_io_bufs = 0;
8889 qp->total_io_bufs = 0;
8890 spin_lock_init(&qp->abts_io_buf_list_lock);
8891 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
8892 qp->abts_scsi_io_bufs = 0;
8893 qp->abts_nvme_io_bufs = 0;
8894 INIT_LIST_HEAD(&qp->sgl_list);
8895 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
8896 spin_lock_init(&qp->hdwq_lock);
8897 }
8898 }
8899
8900 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8901 if (phba->nvmet_support) {
8902 phba->sli4_hba.nvmet_cqset = kcalloc(
8903 phba->cfg_nvmet_mrq,
8904 sizeof(struct lpfc_queue *),
8905 GFP_KERNEL);
8906 if (!phba->sli4_hba.nvmet_cqset) {
8907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8908 "3121 Fail allocate memory for "
8909 "fast-path CQ set array\n");
8910 goto out_error;
8911 }
8912 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
8913 phba->cfg_nvmet_mrq,
8914 sizeof(struct lpfc_queue *),
8915 GFP_KERNEL);
8916 if (!phba->sli4_hba.nvmet_mrq_hdr) {
8917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8918 "3122 Fail allocate memory for "
8919 "fast-path RQ set hdr array\n");
8920 goto out_error;
8921 }
8922 phba->sli4_hba.nvmet_mrq_data = kcalloc(
8923 phba->cfg_nvmet_mrq,
8924 sizeof(struct lpfc_queue *),
8925 GFP_KERNEL);
8926 if (!phba->sli4_hba.nvmet_mrq_data) {
8927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8928 "3124 Fail allocate memory for "
8929 "fast-path RQ set data array\n");
8930 goto out_error;
8931 }
8932 }
8933 }
8934
8935 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8936
	/* Allocate SLI4 fast-path event queues, one per IRQ vector */
8938 for_each_present_cpu(cpu) {
8939
8940
8941
8942
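		/*
		 * Create only one EQ per IRQ vector: several CPUs may share
		 * a vector, so only the CPU flagged LPFC_CPU_FIRST_IRQ
		 * allocates the EQ for its vector.
		 */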
8943 cpup = &phba->sli4_hba.cpu_map[cpu];
8944 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
8945 continue;
8946
8947
8948 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8949
8950
8951 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8952 phba->sli4_hba.eq_esize,
8953 phba->sli4_hba.eq_ecount, cpu);
8954 if (!qdesc) {
8955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8956 "0497 Failed allocate EQ (%d)\n",
8957 cpup->hdwq);
8958 goto out_error;
8959 }
8960 qdesc->qe_valid = 1;
8961 qdesc->hdwq = cpup->hdwq;
8962 qdesc->chann = cpu;
8963 qdesc->last_cpu = qdesc->chann;
8964
8965
8966 qp->hba_eq = qdesc;
8967
8968 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
8969 list_add(&qdesc->cpu_list, &eqi->list);
8970 }
8971
8972
8973
8974
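	/*
	 * Now point each hardware queue that did not allocate its own EQ
	 * at the EQ created by the first CPU on its shared IRQ vector.
	 */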
8975 for_each_present_cpu(cpu) {
8976 cpup = &phba->sli4_hba.cpu_map[cpu];
8977
8978
8979 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
8980 continue;
8981
8982
8983 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8984 if (qp->hba_eq)
8985 continue;
8986
8987
8988 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
8989 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
8990 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
8991 }
8992
8993
8994 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8995 if (lpfc_alloc_io_wq_cq(phba, idx))
8996 goto out_error;
8997 }
8998
8999 if (phba->nvmet_support) {
9000 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9001 cpu = lpfc_find_cpu_handle(phba, idx,
9002 LPFC_FIND_BY_HDWQ);
9003 qdesc = lpfc_sli4_queue_alloc(phba,
9004 LPFC_DEFAULT_PAGE_SIZE,
9005 phba->sli4_hba.cq_esize,
9006 phba->sli4_hba.cq_ecount,
9007 cpu);
9008 if (!qdesc) {
9009 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9010 "3142 Failed allocate NVME "
9011 "CQ Set (%d)\n", idx);
9012 goto out_error;
9013 }
9014 qdesc->qe_valid = 1;
9015 qdesc->hdwq = idx;
9016 qdesc->chann = cpu;
9017 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
9018 }
9019 }
9020
9021
	/*
	 * Create Slow Path Completion Queues (CQs)
	 */
9025 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
9026
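	/* Create the slow-path Mailbox Command Complete Queue */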
9027 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9028 phba->sli4_hba.cq_esize,
9029 phba->sli4_hba.cq_ecount, cpu);
9030 if (!qdesc) {
9031 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9032 "0500 Failed allocate slow-path mailbox CQ\n");
9033 goto out_error;
9034 }
9035 qdesc->qe_valid = 1;
9036 phba->sli4_hba.mbx_cq = qdesc;
9037
	/* Create slow-path ELS Complete Queue */
9039 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9040 phba->sli4_hba.cq_esize,
9041 phba->sli4_hba.cq_ecount, cpu);
9042 if (!qdesc) {
9043 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9044 "0501 Failed allocate slow-path ELS CQ\n");
9045 goto out_error;
9046 }
9047 qdesc->qe_valid = 1;
9048 qdesc->chann = cpu;
9049 phba->sli4_hba.els_cq = qdesc;
9050
9051
	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */
9058 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9059 phba->sli4_hba.mq_esize,
9060 phba->sli4_hba.mq_ecount, cpu);
9061 if (!qdesc) {
9062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9063 "0505 Failed allocate slow-path MQ\n");
9064 goto out_error;
9065 }
9066 qdesc->chann = cpu;
9067 phba->sli4_hba.mbx_wq = qdesc;
9068
9069
	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
9074 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9075 phba->sli4_hba.wq_esize,
9076 phba->sli4_hba.wq_ecount, cpu);
9077 if (!qdesc) {
9078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9079 "0504 Failed allocate slow-path ELS WQ\n");
9080 goto out_error;
9081 }
9082 qdesc->chann = cpu;
9083 phba->sli4_hba.els_wq = qdesc;
9084 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9085
9086 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Create NVME LS Complete Queue */
9088 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9089 phba->sli4_hba.cq_esize,
9090 phba->sli4_hba.cq_ecount, cpu);
9091 if (!qdesc) {
9092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9093 "6079 Failed allocate NVME LS CQ\n");
9094 goto out_error;
9095 }
9096 qdesc->chann = cpu;
9097 qdesc->qe_valid = 1;
9098 phba->sli4_hba.nvmels_cq = qdesc;
9099
		/* Create NVME LS Work Queue */
9101 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9102 phba->sli4_hba.wq_esize,
9103 phba->sli4_hba.wq_ecount, cpu);
9104 if (!qdesc) {
9105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9106 "6080 Failed allocate NVME LS WQ\n");
9107 goto out_error;
9108 }
9109 qdesc->chann = cpu;
9110 phba->sli4_hba.nvmels_wq = qdesc;
9111 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9112 }
9113
9114
	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
9119 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9120 phba->sli4_hba.rq_esize,
9121 phba->sli4_hba.rq_ecount, cpu);
9122 if (!qdesc) {
9123 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9124 "0506 Failed allocate receive HRQ\n");
9125 goto out_error;
9126 }
9127 phba->sli4_hba.hdr_rq = qdesc;
9128
	/* Create Receive Queue for data */
9130 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9131 phba->sli4_hba.rq_esize,
9132 phba->sli4_hba.rq_ecount, cpu);
9133 if (!qdesc) {
9134 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9135 "0507 Failed allocate receive DRQ\n");
9136 goto out_error;
9137 }
9138 phba->sli4_hba.dat_rq = qdesc;
9139
9140 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9141 phba->nvmet_support) {
9142 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9143 cpu = lpfc_find_cpu_handle(phba, idx,
9144 LPFC_FIND_BY_HDWQ);
9145
9146 qdesc = lpfc_sli4_queue_alloc(phba,
9147 LPFC_DEFAULT_PAGE_SIZE,
9148 phba->sli4_hba.rq_esize,
9149 LPFC_NVMET_RQE_DEF_COUNT,
9150 cpu);
9151 if (!qdesc) {
9152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9153 "3146 Failed allocate "
9154 "receive HRQ\n");
9155 goto out_error;
9156 }
9157 qdesc->hdwq = idx;
9158 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9159
9160
9161 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9162 GFP_KERNEL,
9163 cpu_to_node(cpu));
9164 if (qdesc->rqbp == NULL) {
9165 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9166 "6131 Failed allocate "
9167 "Header RQBP\n");
9168 goto out_error;
9169 }
9170
9171
9172 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9173
9174
9175 qdesc = lpfc_sli4_queue_alloc(phba,
9176 LPFC_DEFAULT_PAGE_SIZE,
9177 phba->sli4_hba.rq_esize,
9178 LPFC_NVMET_RQE_DEF_COUNT,
9179 cpu);
9180 if (!qdesc) {
9181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9182 "3156 Failed allocate "
9183 "receive DRQ\n");
9184 goto out_error;
9185 }
9186 qdesc->hdwq = idx;
9187 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9188 }
9189 }
9190
9191
9192 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9193 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9194 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9195 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9196 }
9197 }
9198
9199
9200 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9201 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9202 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9203 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9204 }
9205 }
9206
9207 return 0;
9208
9209out_error:
9210 lpfc_sli4_queue_destroy(phba);
9211 return -ENOMEM;
9212}
9213
9214static inline void
9215__lpfc_sli4_release_queue(struct lpfc_queue **qp)
9216{
9217 if (*qp != NULL) {
9218 lpfc_sli4_queue_free(*qp);
9219 *qp = NULL;
9220 }
9221}
9222
9223static inline void
9224lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9225{
9226 int idx;
9227
9228 if (*qs == NULL)
9229 return;
9230
9231 for (idx = 0; idx < max; idx++)
9232 __lpfc_sli4_release_queue(&(*qs)[idx]);
9233
9234 kfree(*qs);
9235 *qs = NULL;
9236}
9237
9238static inline void
9239lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9240{
9241 struct lpfc_sli4_hdw_queue *hdwq;
9242 struct lpfc_queue *eq;
9243 uint32_t idx;
9244
9245 hdwq = phba->sli4_hba.hdwq;
9246
9247
9248 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9249
9250 lpfc_sli4_queue_free(hdwq[idx].io_cq);
9251 lpfc_sli4_queue_free(hdwq[idx].io_wq);
9252 hdwq[idx].hba_eq = NULL;
9253 hdwq[idx].io_cq = NULL;
9254 hdwq[idx].io_wq = NULL;
9255 if (phba->cfg_xpsgl && !phba->nvmet_support)
9256 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9257 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
9258 }
9259
9260 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9261
9262 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9263 lpfc_sli4_queue_free(eq);
9264 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9265 }
9266}
9267
9268
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 **/
9280void
9281lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9282{
9283
9284
9285
9286
9287
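	/*
	 * Set LPFC_QUEUE_FREE_INIT to mark the teardown in progress and
	 * wait for any LPFC_QUEUE_FREE_WAIT users to drain before the
	 * queues are released.
	 */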
9288 spin_lock_irq(&phba->hbalock);
9289 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9290 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9291 spin_unlock_irq(&phba->hbalock);
9292 msleep(20);
9293 spin_lock_irq(&phba->hbalock);
9294 }
9295 spin_unlock_irq(&phba->hbalock);
9296
9297 lpfc_sli4_cleanup_poll_list(phba);
9298
9299
9300 if (phba->sli4_hba.hdwq)
9301 lpfc_sli4_release_hdwq(phba);
9302
9303 if (phba->nvmet_support) {
9304 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9305 phba->cfg_nvmet_mrq);
9306
9307 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9308 phba->cfg_nvmet_mrq);
9309 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9310 phba->cfg_nvmet_mrq);
9311 }
9312
9313
9314 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
9315
9316
9317 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9318
9319
9320 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9321
9322
9323 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9324 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9325
9326
9327 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9328
9329
9330 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9331
9332
9333 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9334
9335
9336 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9337
9338
9339 spin_lock_irq(&phba->hbalock);
9340 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9341 spin_unlock_irq(&phba->hbalock);
9342}
9343
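/**
 * lpfc_free_rq_buffer - Free all buffers posted to a receive queue
 * @phba: pointer to lpfc hba data structure.
 * @rq: receive queue whose posted buffers are to be freed.
 *
 * Always returns 1.
 **/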
9344int
9345lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9346{
9347 struct lpfc_rqb *rqbp;
9348 struct lpfc_dmabuf *h_buf;
9349 struct rqb_dmabuf *rqb_buffer;
9350
9351 rqbp = rq->rqbp;
9352 while (!list_empty(&rqbp->rqb_buffer_list)) {
9353 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9354 struct lpfc_dmabuf, list);
9355
9356 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9357 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9358 rqbp->buffer_count--;
9359 }
9360 return 1;
9361}
9362
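/**
 * lpfc_create_wq_cq - Create a CQ and its child WQ (or MQ) under an EQ
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue.
 * @cq: completion queue to create.
 * @wq: work queue (or mailbox queue for LPFC_MBOX) to create.
 * @cq_map: if non-NULL, receives the new CQ's queue id.
 * @qidx: queue index, used for logging.
 * @qtype: queue subtype (LPFC_MBOX, LPFC_ELS, LPFC_NVME_LS, LPFC_IO, ...).
 *
 * Return 0 on success, -ENOMEM or the create failure code otherwise.
 **/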
9363static int
9364lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9365 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9366 int qidx, uint32_t qtype)
9367{
9368 struct lpfc_sli_ring *pring;
9369 int rc;
9370
9371 if (!eq || !cq || !wq) {
9372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9373 "6085 Fast-path %s (%d) not allocated\n",
9374 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9375 return -ENOMEM;
9376 }
9377
9378
9379 rc = lpfc_cq_create(phba, cq, eq,
9380 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9381 if (rc) {
9382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9383 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9384 qidx, (uint32_t)rc);
9385 return rc;
9386 }
9387
9388 if (qtype != LPFC_MBOX) {
9389
9390 if (cq_map)
9391 *cq_map = cq->queue_id;
9392
9393 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9394 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9395 qidx, cq->queue_id, qidx, eq->queue_id);
9396
9397
9398 rc = lpfc_wq_create(phba, wq, cq, qtype);
9399 if (rc) {
9400 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9401 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9402 qidx, (uint32_t)rc);
9403
9404 return rc;
9405 }
9406
9407
9408 pring = wq->pring;
9409 pring->sli.sli4.wqp = (void *)wq;
9410 cq->pring = pring;
9411
9412 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9413 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9414 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9415 } else {
9416 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9417 if (rc) {
9418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9419 "0539 Failed setup of slow-path MQ: "
9420 "rc = 0x%x\n", rc);
9421
9422 return rc;
9423 }
9424
9425 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9426 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9427 phba->sli4_hba.mbx_wq->queue_id,
9428 phba->sli4_hba.mbx_cq->queue_id);
9429 }
9430
9431 return 0;
9432}
9440
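/**
 * lpfc_setup_cq_lookup - Build the fast-path CQ lookup table
 * @phba: pointer to lpfc hba data structure.
 *
 * Walk the child CQs of every EQ and record each LPFC_IO CQ in
 * cq_lookup[], indexed by queue id, for direct completion routing.
 **/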
9441static void
9442lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9443{
9444 struct lpfc_queue *eq, *childq;
9445 int qidx;
9446
9447 memset(phba->sli4_hba.cq_lookup, 0,
9448 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9449
9450 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9451
9452 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9453 if (!eq)
9454 continue;
9455
9456 list_for_each_entry(childq, &eq->child_list, list) {
9457 if (childq->queue_id > phba->sli4_hba.cq_max)
9458 continue;
9459 if (childq->subtype == LPFC_IO)
9460 phba->sli4_hba.cq_lookup[childq->queue_id] =
9461 childq;
9462 }
9463 }
9464}
9477
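/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Query the firmware configuration, then post all the previously
 * allocated queues (EQs, CQs, WQs, MQ and RQs) to the port and build
 * the CQ lookup table.
 *
 * Return 0 on success, -ENOMEM or -ENXIO on failure.
 **/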
9478int
9479lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9480{
9481 uint32_t shdr_status, shdr_add_status;
9482 union lpfc_sli4_cfg_shdr *shdr;
9483 struct lpfc_vector_map_info *cpup;
9484 struct lpfc_sli4_hdw_queue *qp;
9485 LPFC_MBOXQ_t *mboxq;
9486 int qidx, cpu;
9487 uint32_t length, usdelay;
9488 int rc = -ENOMEM;
9489
9490
9491 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9492 if (!mboxq) {
9493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9494 "3249 Unable to allocate memory for "
9495 "QUERY_FW_CFG mailbox command\n");
9496 return -ENOMEM;
9497 }
9498 length = (sizeof(struct lpfc_mbx_query_fw_config) -
9499 sizeof(struct lpfc_sli4_cfg_mhdr));
9500 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9501 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9502 length, LPFC_SLI4_MBX_EMBED);
9503
9504 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9505
9506 shdr = (union lpfc_sli4_cfg_shdr *)
9507 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9508 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9509 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9510 if (shdr_status || shdr_add_status || rc) {
9511 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9512 "3250 QUERY_FW_CFG mailbox failed with status "
9513 "x%x add_status x%x, mbx status x%x\n",
9514 shdr_status, shdr_add_status, rc);
9515 if (rc != MBX_TIMEOUT)
9516 mempool_free(mboxq, phba->mbox_mem_pool);
9517 rc = -ENXIO;
9518 goto out_error;
9519 }
9520
9521 phba->sli4_hba.fw_func_mode =
9522 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9523 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9524 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9525 phba->sli4_hba.physical_port =
9526 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9527 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9528 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9529 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9530 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9531
9532 if (rc != MBX_TIMEOUT)
9533 mempool_free(mboxq, phba->mbox_mem_pool);
9534
9535
9536
9537
9538 qp = phba->sli4_hba.hdwq;
9539
9540
9541 if (!qp) {
9542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9543 "3147 Fast-path EQs not allocated\n");
9544 rc = -ENOMEM;
9545 goto out_error;
9546 }
9548
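	/* Create the HBA EQs in IRQ vector order, one per first-IRQ CPU */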
9549 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9550
9551 for_each_present_cpu(cpu) {
9552 cpup = &phba->sli4_hba.cpu_map[cpu];
9553
9554
9555
9556
9557 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9558 continue;
9559 if (qidx != cpup->eq)
9560 continue;
9561
9562
9563 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9564 phba->cfg_fcp_imax);
9565 if (rc) {
9566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9567 "0523 Failed setup of fast-path"
9568 " EQ (%d), rc = 0x%x\n",
9569 cpup->eq, (uint32_t)rc);
9570 goto out_destroy;
9571 }
9572
9573
9574 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9575 qp[cpup->hdwq].hba_eq;
9576
9577 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9578 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9579 cpup->eq,
9580 qp[cpup->hdwq].hba_eq->queue_id);
9581 }
9582 }
9584
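	/* Create the IO CQ/WQ pair for each hardware queue */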
9585 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9586 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9587 cpup = &phba->sli4_hba.cpu_map[cpu];
9588
9589
9590 rc = lpfc_create_wq_cq(phba,
9591 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9592 qp[qidx].io_cq,
9593 qp[qidx].io_wq,
9594 &phba->sli4_hba.hdwq[qidx].io_cq_map,
9595 qidx,
9596 LPFC_IO);
9597 if (rc) {
9598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9599 "0535 Failed to setup fastpath "
9600 "IO WQ/CQ (%d), rc = 0x%x\n",
9601 qidx, (uint32_t)rc);
9602 goto out_destroy;
9603 }
9604 }
9611
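	/* Set up slow-path MBOX CQ/MQ */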
9612 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
9613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9614 "0528 %s not allocated\n",
9615 phba->sli4_hba.mbx_cq ?
9616 "Mailbox WQ" : "Mailbox CQ");
9617 rc = -ENOMEM;
9618 goto out_destroy;
9619 }
9620
9621 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9622 phba->sli4_hba.mbx_cq,
9623 phba->sli4_hba.mbx_wq,
9624 NULL, 0, LPFC_MBOX);
9625 if (rc) {
9626 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9627 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9628 (uint32_t)rc);
9629 goto out_destroy;
9630 }
9631 if (phba->nvmet_support) {
9632 if (!phba->sli4_hba.nvmet_cqset) {
9633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9634 "3165 Fast-path NVME CQ Set "
9635 "array not allocated\n");
9636 rc = -ENOMEM;
9637 goto out_destroy;
9638 }
9639 if (phba->cfg_nvmet_mrq > 1) {
9640 rc = lpfc_cq_create_set(phba,
9641 phba->sli4_hba.nvmet_cqset,
9642 qp,
9643 LPFC_WCQ, LPFC_NVMET);
9644 if (rc) {
9645 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9646 "3164 Failed setup of NVME CQ "
9647 "Set, rc = 0x%x\n",
9648 (uint32_t)rc);
9649 goto out_destroy;
9650 }
9651 } else {
9652
9653 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
9654 qp[0].hba_eq,
9655 LPFC_WCQ, LPFC_NVMET);
9656 if (rc) {
9657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9658 "6089 Failed setup NVMET CQ: "
9659 "rc = 0x%x\n", (uint32_t)rc);
9660 goto out_destroy;
9661 }
9662 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9663
9664 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9665 "6090 NVMET CQ setup: cq-id=%d, "
9666 "parent eq-id=%d\n",
9667 phba->sli4_hba.nvmet_cqset[0]->queue_id,
9668 qp[0].hba_eq->queue_id);
9669 }
9670 }
9671
9672
9673 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9674 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9675 "0530 ELS %s not allocated\n",
9676 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9677 rc = -ENOMEM;
9678 goto out_destroy;
9679 }
9680 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9681 phba->sli4_hba.els_cq,
9682 phba->sli4_hba.els_wq,
9683 NULL, 0, LPFC_ELS);
9684 if (rc) {
9685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9686 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9687 (uint32_t)rc);
9688 goto out_destroy;
9689 }
9690 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9691 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9692 phba->sli4_hba.els_wq->queue_id,
9693 phba->sli4_hba.els_cq->queue_id);
9694
9695 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9696
9697 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9699 "6091 LS %s not allocated\n",
9700 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9701 rc = -ENOMEM;
9702 goto out_destroy;
9703 }
9704 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9705 phba->sli4_hba.nvmels_cq,
9706 phba->sli4_hba.nvmels_wq,
9707 NULL, 0, LPFC_NVME_LS);
9708 if (rc) {
9709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9710 "0526 Failed setup of NVVME LS WQ/CQ: "
9711 "rc = 0x%x\n", (uint32_t)rc);
9712 goto out_destroy;
9713 }
9714
9715 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9716 "6096 ELS WQ setup: wq-id=%d, "
9717 "parent cq-id=%d\n",
9718 phba->sli4_hba.nvmels_wq->queue_id,
9719 phba->sli4_hba.nvmels_cq->queue_id);
9720 }
9721
9722
9723
9724
9725 if (phba->nvmet_support) {
9726 if ((!phba->sli4_hba.nvmet_cqset) ||
9727 (!phba->sli4_hba.nvmet_mrq_hdr) ||
9728 (!phba->sli4_hba.nvmet_mrq_data)) {
9729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9730 "6130 MRQ CQ Queues not "
9731 "allocated\n");
9732 rc = -ENOMEM;
9733 goto out_destroy;
9734 }
9735 if (phba->cfg_nvmet_mrq > 1) {
9736 rc = lpfc_mrq_create(phba,
9737 phba->sli4_hba.nvmet_mrq_hdr,
9738 phba->sli4_hba.nvmet_mrq_data,
9739 phba->sli4_hba.nvmet_cqset,
9740 LPFC_NVMET);
9741 if (rc) {
9742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9743 "6098 Failed setup of NVMET "
9744 "MRQ: rc = 0x%x\n",
9745 (uint32_t)rc);
9746 goto out_destroy;
9747 }
9748
9749 } else {
9750 rc = lpfc_rq_create(phba,
9751 phba->sli4_hba.nvmet_mrq_hdr[0],
9752 phba->sli4_hba.nvmet_mrq_data[0],
9753 phba->sli4_hba.nvmet_cqset[0],
9754 LPFC_NVMET);
9755 if (rc) {
9756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9757 "6057 Failed setup of NVMET "
9758 "Receive Queue: rc = 0x%x\n",
9759 (uint32_t)rc);
9760 goto out_destroy;
9761 }
9762
9763 lpfc_printf_log(
9764 phba, KERN_INFO, LOG_INIT,
9765 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9766 "dat-rq-id=%d parent cq-id=%d\n",
9767 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9768 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9769 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9770
9771 }
9772 }
9773
9774 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9775 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9776 "0540 Receive Queue not allocated\n");
9777 rc = -ENOMEM;
9778 goto out_destroy;
9779 }
9780
9781 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9782 phba->sli4_hba.els_cq, LPFC_USOL);
9783 if (rc) {
9784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9785 "0541 Failed setup of Receive Queue: "
9786 "rc = 0x%x\n", (uint32_t)rc);
9787 goto out_destroy;
9788 }
9789
9790 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9791 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9792 "parent cq-id=%d\n",
9793 phba->sli4_hba.hdr_rq->queue_id,
9794 phba->sli4_hba.dat_rq->queue_id,
9795 phba->sli4_hba.els_cq->queue_id);
9796
9797 if (phba->cfg_fcp_imax)
9798 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9799 else
9800 usdelay = 0;
9801
9802 for (qidx = 0; qidx < phba->cfg_irq_chann;
9803 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9804 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9805 usdelay);
9806
9807 if (phba->sli4_hba.cq_max) {
9808 kfree(phba->sli4_hba.cq_lookup);
9809 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9810 sizeof(struct lpfc_queue *), GFP_KERNEL);
9811 if (!phba->sli4_hba.cq_lookup) {
9812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9813 "0549 Failed setup of CQ Lookup table: "
9814 "size 0x%x\n", phba->sli4_hba.cq_max);
9815 rc = -ENOMEM;
9816 goto out_destroy;
9817 }
9818 lpfc_setup_cq_lookup(phba);
9819 }
9820 return 0;
9821
9822out_destroy:
9823 lpfc_sli4_queue_unset(phba);
9824out_error:
9825 return rc;
9826}
9839
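/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Tear down from the port every queue that lpfc_sli4_queue_setup() set
 * up, and release the CQ lookup table.
 **/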
9840void
9841lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9842{
9843 struct lpfc_sli4_hdw_queue *qp;
9844 struct lpfc_queue *eq;
9845 int qidx;
9846
9847
9848 if (phba->sli4_hba.mbx_wq)
9849 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9850
9851
9852 if (phba->sli4_hba.nvmels_wq)
9853 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9854
9855
9856 if (phba->sli4_hba.els_wq)
9857 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9858
9859
9860 if (phba->sli4_hba.hdr_rq)
9861 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9862 phba->sli4_hba.dat_rq);
9863
9864
9865 if (phba->sli4_hba.mbx_cq)
9866 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9867
9868
9869 if (phba->sli4_hba.els_cq)
9870 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9871
9872
9873 if (phba->sli4_hba.nvmels_cq)
9874 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9875
9876 if (phba->nvmet_support) {
9877
9878 if (phba->sli4_hba.nvmet_mrq_hdr) {
9879 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9880 lpfc_rq_destroy(
9881 phba,
9882 phba->sli4_hba.nvmet_mrq_hdr[qidx],
9883 phba->sli4_hba.nvmet_mrq_data[qidx]);
9884 }
9885
9886
9887 if (phba->sli4_hba.nvmet_cqset) {
9888 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9889 lpfc_cq_destroy(
9890 phba, phba->sli4_hba.nvmet_cqset[qidx]);
9891 }
9892 }
9893
9894
9895 if (phba->sli4_hba.hdwq) {
9896
9897 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9898
9899 qp = &phba->sli4_hba.hdwq[qidx];
9900 lpfc_wq_destroy(phba, qp->io_wq);
9901 lpfc_cq_destroy(phba, qp->io_cq);
9902 }
9903
9904 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9905
9906 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9907 lpfc_eq_destroy(phba, eq);
9908 }
9909 }
9910
9911 kfree(phba->sli4_hba.cq_lookup);
9912 phba->sli4_hba.cq_lookup = NULL;
9913 phba->sli4_hba.cq_max = 0;
9914}
9931
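/**
 * lpfc_sli4_cq_event_pool_create - Create the completion-queue event pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Pre-allocate 4 * cq_ecount event entries onto the slow-path free pool.
 *
 * Return 0 on success, -ENOMEM if any allocation fails (the partial
 * pool is destroyed).
 **/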
9932static int
9933lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9934{
9935 struct lpfc_cq_event *cq_event;
9936 int i;
9937
9938 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9939 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9940 if (!cq_event)
9941 goto out_pool_create_fail;
9942 list_add_tail(&cq_event->list,
9943 &phba->sli4_hba.sp_cqe_event_pool);
9944 }
9945 return 0;
9946
9947out_pool_create_fail:
9948 lpfc_sli4_cq_event_pool_destroy(phba);
9949 return -ENOMEM;
9950}
9961
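/**
 * lpfc_sli4_cq_event_pool_destroy - Destroy the completion-queue event pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Free every entry remaining on the slow-path event free pool.
 **/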
9962static void
9963lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9964{
9965 struct lpfc_cq_event *cq_event, *next_cq_event;
9966
9967 list_for_each_entry_safe(cq_event, next_cq_event,
9968 &phba->sli4_hba.sp_cqe_event_pool, list) {
9969 list_del(&cq_event->list);
9970 kfree(cq_event);
9971 }
9972}
9983
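/**
 * __lpfc_sli4_cq_event_alloc - Allocate a CQ event, hbalock held
 * @phba: pointer to lpfc hba data structure.
 *
 * Return an entry from the free pool, or NULL if the pool is empty.
 * Caller must hold hbalock.
 **/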
9984struct lpfc_cq_event *
9985__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9986{
9987 struct lpfc_cq_event *cq_event = NULL;
9988
9989 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
9990 struct lpfc_cq_event, list);
9991 return cq_event;
9992}
10003
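/**
 * lpfc_sli4_cq_event_alloc - Allocate a CQ event
 * @phba: pointer to lpfc hba data structure.
 *
 * Lock-taking wrapper around __lpfc_sli4_cq_event_alloc().
 **/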
10004struct lpfc_cq_event *
10005lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10006{
10007 struct lpfc_cq_event *cq_event;
10008 unsigned long iflags;
10009
10010 spin_lock_irqsave(&phba->hbalock, iflags);
10011 cq_event = __lpfc_sli4_cq_event_alloc(phba);
10012 spin_unlock_irqrestore(&phba->hbalock, iflags);
10013 return cq_event;
10014}
10023
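/**
 * __lpfc_sli4_cq_event_release - Return a CQ event to the pool, hbalock held
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: CQ event to be returned to the free pool.
 *
 * Caller must hold hbalock.
 **/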
10024void
10025__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10026 struct lpfc_cq_event *cq_event)
10027{
10028 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10029}
10038
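/**
 * lpfc_sli4_cq_event_release - Return a CQ event to the free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: CQ event to be returned to the free pool.
 *
 * Lock-taking wrapper around __lpfc_sli4_cq_event_release().
 **/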
10039void
10040lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10041 struct lpfc_cq_event *cq_event)
10042{
10043 unsigned long iflags;
10044 spin_lock_irqsave(&phba->hbalock, iflags);
10045 __lpfc_sli4_cq_event_release(phba, cq_event);
10046 spin_unlock_irqrestore(&phba->hbalock, iflags);
10047}
10055
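/**
 * lpfc_sli4_cq_event_release_all - Release all pending CQ events
 * @phba: pointer to lpfc hba data structure.
 *
 * Move every event still queued on the slow-path work queues back to
 * the free pool.
 **/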
10056static void
10057lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10058{
10059 LIST_HEAD(cqelist);
10060 struct lpfc_cq_event *cqe;
10061 unsigned long iflags;
10062
10063
10064 spin_lock_irqsave(&phba->hbalock, iflags);
10065
10066 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10067 &cqelist);
10068
10069 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10070 &cqelist);
10071
10072 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10073 &cqelist);
10074 spin_unlock_irqrestore(&phba->hbalock, iflags);
10075
10076 while (!list_empty(&cqelist)) {
10077 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
10078 lpfc_sli4_cq_event_release(phba, cqe);
10079 }
10080}
10093
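/**
 * lpfc_pci_function_reset - Reset the PCI function
 * @phba: pointer to lpfc hba data structure.
 *
 * For if_type 0, issue the FUNCTION_RESET mailbox command; for if_type
 * 2 and 6, reset the port through the SLIPORT control register once
 * the port reports ready.
 *
 * Return 0 on success, negative errno on failure.
 **/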
10094int
10095lpfc_pci_function_reset(struct lpfc_hba *phba)
10096{
10097 LPFC_MBOXQ_t *mboxq;
10098 uint32_t rc = 0, if_type;
10099 uint32_t shdr_status, shdr_add_status;
10100 uint32_t rdy_chk;
10101 uint32_t port_reset = 0;
10102 union lpfc_sli4_cfg_shdr *shdr;
10103 struct lpfc_register reg_data;
10104 uint16_t devid;
10105
10106 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10107 switch (if_type) {
10108 case LPFC_SLI_INTF_IF_TYPE_0:
10109 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10110 GFP_KERNEL);
10111 if (!mboxq) {
10112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10113 "0494 Unable to allocate memory for "
10114 "issuing SLI_FUNCTION_RESET mailbox "
10115 "command\n");
10116 return -ENOMEM;
10117 }
10118
10119
10120 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10121 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10122 LPFC_SLI4_MBX_EMBED);
10123 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10124 shdr = (union lpfc_sli4_cfg_shdr *)
10125 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10126 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10127 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10128 &shdr->response);
10129 if (rc != MBX_TIMEOUT)
10130 mempool_free(mboxq, phba->mbox_mem_pool);
10131 if (shdr_status || shdr_add_status || rc) {
10132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10133 "0495 SLI_FUNCTION_RESET mailbox "
10134 "failed with status x%x add_status x%x,"
10135 " mbx status x%x\n",
10136 shdr_status, shdr_add_status, rc);
10137 rc = -ENXIO;
10138 }
10139 break;
10140 case LPFC_SLI_INTF_IF_TYPE_2:
10141 case LPFC_SLI_INTF_IF_TYPE_6:
10142wait:
10147
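		/*
		 * Wait for the port to report ready: poll the SLIPORT
		 * status register up to 1500 times at 20ms intervals
		 * (30 seconds total).
		 */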
10148 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10149 if (lpfc_readl(phba->sli4_hba.u.if_type2.
			    STATUSregaddr, &reg_data.word0)) {
10151 rc = -ENODEV;
10152 goto out;
10153 }
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10155 break;
10156 msleep(20);
10157 }
10158
		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10160 phba->work_status[0] = readl(
10161 phba->sli4_hba.u.if_type2.ERR1regaddr);
10162 phba->work_status[1] = readl(
10163 phba->sli4_hba.u.if_type2.ERR2regaddr);
10164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10165 "2890 Port not ready, port status reg "
10166 "0x%x error 1=0x%x, error 2=0x%x\n",
10167 reg_data.word0,
10168 phba->work_status[0],
10169 phba->work_status[1]);
10170 rc = -ENODEV;
10171 goto out;
10172 }
10173
10174 if (!port_reset) {
10175
10176
10177
10178 reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
10183 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10184 CTRLregaddr);
10185
10186 pci_read_config_word(phba->pcidev,
10187 PCI_DEVICE_ID, &devid);
10188
10189 port_reset = 1;
10190 msleep(20);
10191 goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10193 rc = -ENODEV;
10194 goto out;
10195 }
10196 break;
10197
10198 case LPFC_SLI_INTF_IF_TYPE_1:
10199 default:
10200 break;
10201 }
10202
10203out:
10204
10205 if (rc) {
10206 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10207 "3317 HBA not functional: IP Reset Failed "
10208 "try: echo fw_reset > board_mode\n");
10209 rc = -ENODEV;
10210 }
10211
10212 return rc;
10213}
10225
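/**
 * lpfc_sli4_pci_mem_setup - Set up SLI4 PCI memory space
 * @phba: pointer to lpfc hba data structure.
 *
 * Set the DMA mask, validate the SLI_INTF register, ioremap the PCI
 * BARs appropriate to the interface type, and select the EQ/CQ
 * doorbell handlers for that if_type.
 *
 * Return 0 on success, negative errno on failure.
 **/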
10226static int
10227lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10228{
10229 struct pci_dev *pdev = phba->pcidev;
10230 unsigned long bar0map_len, bar1map_len, bar2map_len;
10231 int error;
10232 uint32_t if_type;
10233
10234 if (!pdev)
10235 return -ENODEV;
10236
10237
10238 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10239 if (error)
10240 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10241 if (error)
10242 return error;
10243
10244
10245
10246
10247
10248 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10249 &phba->sli4_hba.sli_intf.word0)) {
10250 return -ENODEV;
10251 }
10252
10253
10254 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10255 LPFC_SLI_INTF_VALID) {
10256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10257 "2894 SLI_INTF reg contents invalid "
10258 "sli_intf reg 0x%x\n",
10259 phba->sli4_hba.sli_intf.word0);
10260 return -ENODEV;
10261 }
10262
10263 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10264
10265
10266
10267
10268
10269
10270 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10271 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10272 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10273
10274
10275
10276
10277
10278 phba->sli4_hba.conf_regs_memmap_p =
10279 ioremap(phba->pci_bar0_map, bar0map_len);
10280 if (!phba->sli4_hba.conf_regs_memmap_p) {
10281 dev_printk(KERN_ERR, &pdev->dev,
10282 "ioremap failed for SLI4 PCI config "
10283 "registers.\n");
10284 return -ENODEV;
10285 }
10286 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10287
10288 lpfc_sli4_bar0_register_memmap(phba, if_type);
10289 } else {
10290 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10291 bar0map_len = pci_resource_len(pdev, 1);
10292 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10293 dev_printk(KERN_ERR, &pdev->dev,
10294 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10295 return -ENODEV;
10296 }
10297 phba->sli4_hba.conf_regs_memmap_p =
10298 ioremap(phba->pci_bar0_map, bar0map_len);
10299 if (!phba->sli4_hba.conf_regs_memmap_p) {
10300 dev_printk(KERN_ERR, &pdev->dev,
10301 "ioremap failed for SLI4 PCI config "
10302 "registers.\n");
10303 return -ENODEV;
10304 }
10305 lpfc_sli4_bar0_register_memmap(phba, if_type);
10306 }
10307
10308 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10309 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10310
10311
10312
10313
10314 phba->pci_bar1_map = pci_resource_start(pdev,
10315 PCI_64BIT_BAR2);
10316 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10317 phba->sli4_hba.ctrl_regs_memmap_p =
10318 ioremap(phba->pci_bar1_map,
10319 bar1map_len);
10320 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10321 dev_err(&pdev->dev,
10322 "ioremap failed for SLI4 HBA "
10323 "control registers.\n");
10324 error = -ENOMEM;
10325 goto out_iounmap_conf;
10326 }
10327 phba->pci_bar2_memmap_p =
10328 phba->sli4_hba.ctrl_regs_memmap_p;
10329 lpfc_sli4_bar1_register_memmap(phba, if_type);
10330 } else {
10331 error = -ENOMEM;
10332 goto out_iounmap_conf;
10333 }
10334 }
10335
10336 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10337 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10338
10339
10340
10341
10342 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10343 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10344 phba->sli4_hba.drbl_regs_memmap_p =
10345 ioremap(phba->pci_bar1_map, bar1map_len);
10346 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10347 dev_err(&pdev->dev,
10348 "ioremap failed for SLI4 HBA doorbell registers.\n");
10349 error = -ENOMEM;
10350 goto out_iounmap_conf;
10351 }
10352 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10353 lpfc_sli4_bar1_register_memmap(phba, if_type);
10354 }
10355
10356 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10357 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10358
10359
10360
10361
10362 phba->pci_bar2_map = pci_resource_start(pdev,
10363 PCI_64BIT_BAR4);
10364 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10365 phba->sli4_hba.drbl_regs_memmap_p =
10366 ioremap(phba->pci_bar2_map,
10367 bar2map_len);
10368 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10369 dev_err(&pdev->dev,
10370 "ioremap failed for SLI4 HBA"
10371 " doorbell registers.\n");
10372 error = -ENOMEM;
10373 goto out_iounmap_ctrl;
10374 }
10375 phba->pci_bar4_memmap_p =
10376 phba->sli4_hba.drbl_regs_memmap_p;
10377 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10378 if (error)
10379 goto out_iounmap_all;
10380 } else {
10381 error = -ENOMEM;
10382 goto out_iounmap_all;
10383 }
10384 }
10385
10386 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10387 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10388
10389
10390
10391
10392 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10393 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10394 phba->sli4_hba.dpp_regs_memmap_p =
10395 ioremap(phba->pci_bar2_map, bar2map_len);
10396 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10397 dev_err(&pdev->dev,
10398 "ioremap failed for SLI4 HBA dpp registers.\n");
10399 error = -ENOMEM;
10400 goto out_iounmap_ctrl;
10401 }
10402 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10403 }
10404
10405
10406 switch (if_type) {
10407 case LPFC_SLI_INTF_IF_TYPE_0:
10408 case LPFC_SLI_INTF_IF_TYPE_2:
10409 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10410 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10411 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10412 break;
10413 case LPFC_SLI_INTF_IF_TYPE_6:
10414 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10415 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10416 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10417 break;
10418 default:
10419 break;
10420 }
10421
10422 return 0;
10423
10424out_iounmap_all:
10425 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10426out_iounmap_ctrl:
10427 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10428out_iounmap_conf:
10429 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10430
10431 return error;
10432}
10440
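/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 PCI memory space
 * @phba: pointer to lpfc hba data structure.
 *
 * Unmap the BAR regions mapped by lpfc_sli4_pci_mem_setup() for this
 * interface type.
 **/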
10441static void
10442lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10443{
10444 uint32_t if_type;
10445 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10446
10447 switch (if_type) {
10448 case LPFC_SLI_INTF_IF_TYPE_0:
10449 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10450 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10451 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10452 break;
10453 case LPFC_SLI_INTF_IF_TYPE_2:
10454 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10455 break;
10456 case LPFC_SLI_INTF_IF_TYPE_6:
10457 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10458 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10459 if (phba->sli4_hba.dpp_regs_memmap_p)
10460 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
10461 break;
10462 case LPFC_SLI_INTF_IF_TYPE_1:
10463 default:
10464 dev_printk(KERN_ERR, &phba->pcidev->dev,
10465 "FATAL - unsupported SLI4 interface type - %d\n",
10466 if_type);
10467 break;
10468 }
10469}
10481
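/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate LPFC_MSIX_VECTORS vectors, request the slow-path and
 * fast-path handlers, and issue the CONFIG_MSI mailbox command.
 *
 * Return 0 on success, otherwise an error code.
 **/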
10482static int
10483lpfc_sli_enable_msix(struct lpfc_hba *phba)
10484{
10485 int rc;
10486 LPFC_MBOXQ_t *pmb;
10487
10488
10489 rc = pci_alloc_irq_vectors(phba->pcidev,
10490 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10491 if (rc < 0) {
10492 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10493 "0420 PCI enable MSI-X failed (%d)\n", rc);
10494 goto vec_fail_out;
10495 }
10496
10497
10498
10499
10500
10501
10502 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
10503 &lpfc_sli_sp_intr_handler, 0,
10504 LPFC_SP_DRIVER_HANDLER_NAME, phba);
10505 if (rc) {
10506 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10507 "0421 MSI-X slow-path request_irq failed "
10508 "(%d)\n", rc);
10509 goto msi_fail_out;
10510 }
10511
10512
10513 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
10514 &lpfc_sli_fp_intr_handler, 0,
10515 LPFC_FP_DRIVER_HANDLER_NAME, phba);
10516
10517 if (rc) {
10518 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10519 "0429 MSI-X fast-path request_irq failed "
10520 "(%d)\n", rc);
10521 goto irq_fail_out;
10522 }
10523
10524
10525
10526
10527 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10528
10529 if (!pmb) {
10530 rc = -ENOMEM;
10531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10532 "0474 Unable to allocate memory for issuing "
10533 "MBOX_CONFIG_MSI command\n");
10534 goto mem_fail_out;
10535 }
10536 rc = lpfc_config_msi(phba, pmb);
10537 if (rc)
10538 goto mbx_fail_out;
10539 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10540 if (rc != MBX_SUCCESS) {
10541 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10542 "0351 Config MSI mailbox command failed, "
10543 "mbxCmd x%x, mbxStatus x%x\n",
10544 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10545 goto mbx_fail_out;
10546 }
10547
10548
10549 mempool_free(pmb, phba->mbox_mem_pool);
10550 return rc;
10551
10552mbx_fail_out:
10553
10554 mempool_free(pmb, phba->mbox_mem_pool);
10555
10556mem_fail_out:
10557
10558 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10559
10560irq_fail_out:
10561
10562 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10563
10564msi_fail_out:
10565
10566 pci_free_irq_vectors(phba->pcidev);
10567
10568vec_fail_out:
10569 return rc;
10570}
10585
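/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Enable a single MSI vector and request the interrupt handler; MSI is
 * disabled again if the request fails.
 *
 * Return 0 on success, otherwise an error code.
 **/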
10586static int
10587lpfc_sli_enable_msi(struct lpfc_hba *phba)
10588{
10589 int rc;
10590
10591 rc = pci_enable_msi(phba->pcidev);
10592 if (!rc)
10593 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10594 "0462 PCI enable MSI mode success.\n");
10595 else {
10596 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10597 "0471 PCI enable MSI mode failed (%d)\n", rc);
10598 return rc;
10599 }
10600
10601 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10602 0, LPFC_DRIVER_NAME, phba);
10603 if (rc) {
10604 pci_disable_msi(phba->pcidev);
10605 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10606 "0478 MSI request_irq failed (%d)\n", rc);
10607 }
10608 return rc;
10609}
10626
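/**
 * lpfc_sli_enable_intr - Enable device interrupt on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode (2 MSI-X, 1 MSI, 0 INTx).
 *
 * Try MSI-X, then MSI, then INTx, as permitted by @cfg_mode.
 *
 * Return the interrupt mode enabled (0, 1 or 2), or LPFC_INTR_ERROR if
 * none could be enabled.
 **/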
10627static uint32_t
10628lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10629{
10630 uint32_t intr_mode = LPFC_INTR_ERROR;
10631 int retval;
10632
10633 if (cfg_mode == 2) {
10634
10635 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10636 if (!retval) {
10637
10638 retval = lpfc_sli_enable_msix(phba);
10639 if (!retval) {
10640
10641 phba->intr_type = MSIX;
10642 intr_mode = 2;
10643 }
10644 }
10645 }
10646
10647
10648 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10649 retval = lpfc_sli_enable_msi(phba);
10650 if (!retval) {
10651
10652 phba->intr_type = MSI;
10653 intr_mode = 1;
10654 }
10655 }
10656
10657
10658 if (phba->intr_type == NONE) {
10659 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10660 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10661 if (!retval) {
10662
10663 phba->intr_type = INTx;
10664 intr_mode = 0;
10665 }
10666 }
10667 return intr_mode;
10668}
10678
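/**
 * lpfc_sli_disable_intr - Disable device interrupt on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Free the registered IRQ(s), release the vectors, and reset the
 * interrupt bookkeeping.
 **/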
10679static void
10680lpfc_sli_disable_intr(struct lpfc_hba *phba)
10681{
10682 int nr_irqs, i;
10683
10684 if (phba->intr_type == MSIX)
10685 nr_irqs = LPFC_MSIX_VECTORS;
10686 else
10687 nr_irqs = 1;
10688
10689 for (i = 0; i < nr_irqs; i++)
10690 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10691 pci_free_irq_vectors(phba->pcidev);
10692
10693
10694 phba->intr_type = NONE;
10695 phba->sli.slistat.sli_intr = 0;
10696}
10705
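/**
 * lpfc_find_cpu_handle - Find the CPU matching a given EQ or hdwq index
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ index or hardware-queue index to look up.
 * @match: LPFC_FIND_BY_EQ or LPFC_FIND_BY_HDWQ.
 *
 * Return the first present CPU mapped to @id; 0 if no CPU matches.
 **/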
10706static uint16_t
10707lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10708{
10709 struct lpfc_vector_map_info *cpup;
10710 int cpu;
10711
10712
10713 for_each_present_cpu(cpu) {
10714 cpup = &phba->sli4_hba.cpu_map[cpu];
10715
10716
10717
10718
10719
10720 if ((match == LPFC_FIND_BY_EQ) &&
10721 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10722 (cpup->eq == id))
10723 return cpu;
10724
10725
10726 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10727 return cpu;
10728 }
10729 return 0;
10730}
10731
10732#ifdef CONFIG_X86
10739
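/**
 * lpfc_find_hyper - Determine whether a CPU map entry is a hyper-thread
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU map index being checked.
 * @phys_id: physical package id of @cpu.
 * @core_id: core id of @cpu.
 *
 * Return 1 if another present CPU shares the same phys/core ids,
 * otherwise 0.
 **/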
10740static int
10741lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10742 uint16_t phys_id, uint16_t core_id)
10743{
10744 struct lpfc_vector_map_info *cpup;
10745 int idx;
10746
10747 for_each_present_cpu(idx) {
10748 cpup = &phba->sli4_hba.cpu_map[idx];
10749
10750 if ((cpup->phys_id == phys_id) &&
10751 (cpup->core_id == core_id) &&
10752 (cpu != idx))
10753 return 1;
10754 }
10755 return 0;
10756}
10757#endif
10767
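/**
 * lpfc_assign_eq_map_info - Record EQ and IRQ-flag info for a CPU
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: index of the EQ assigned to @cpu.
 * @flag: flag bits to set in the CPU map entry (e.g. LPFC_CPU_FIRST_IRQ).
 * @cpu: CPU whose map entry is updated.
 **/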
10768static inline void
10769lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10770 unsigned int cpu)
10771{
10772 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10773 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10774
10775 cpup->eq = eqidx;
10776 cpup->flag |= flag;
10777
10778 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10779 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
10780 cpu, eqhdl->irq, cpup->eq, cpup->flag);
10781}
10788
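/**
 * lpfc_cpu_map_array_init - Initialize the CPU map and per-CPU EQ info
 * @phba: pointer to lpfc hba data structure.
 **/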
10789static void
10790lpfc_cpu_map_array_init(struct lpfc_hba *phba)
10791{
10792 struct lpfc_vector_map_info *cpup;
10793 struct lpfc_eq_intr_info *eqi;
10794 int cpu;
10795
10796 for_each_possible_cpu(cpu) {
10797 cpup = &phba->sli4_hba.cpu_map[cpu];
10798 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10799 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10800 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10801 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10802 cpup->flag = 0;
10803 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
10804 INIT_LIST_HEAD(&eqi->list);
10805 eqi->icnt = 0;
10806 }
10807}
10814
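/**
 * lpfc_hba_eq_hdl_array_init - Initialize the HBA EQ handle array
 * @phba: pointer to lpfc hba data structure.
 **/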
10815static void
10816lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
10817{
10818 struct lpfc_hba_eq_hdl *eqhdl;
10819 int i;
10820
10821 for (i = 0; i < phba->cfg_irq_chann; i++) {
10822 eqhdl = lpfc_get_eq_hdl(i);
10823 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
10824 eqhdl->phba = phba;
10825 }
10826}
10837
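/**
 * lpfc_cpu_affinity_check - Build the CPU-to-EQ/hdwq affinity maps
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of MSI-X vectors allocated.
 *
 * Record the physical and core ids of every present CPU, give each CPU
 * an EQ (preferring a peer on the same physical socket), then spread
 * the hardware queues across the CPUs.
 **/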
10838static void
10839lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10840{
10841 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10842 int max_phys_id, min_phys_id;
10843 int max_core_id, min_core_id;
10844 struct lpfc_vector_map_info *cpup;
10845 struct lpfc_vector_map_info *new_cpup;
10846#ifdef CONFIG_X86
10847 struct cpuinfo_x86 *cpuinfo;
10848#endif
10849#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
10850 struct lpfc_hdwq_stat *c_stat;
10851#endif
10852
10853 max_phys_id = 0;
10854 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10855 max_core_id = 0;
10856 min_core_id = LPFC_VECTOR_MAP_EMPTY;
10857
10858
10859 for_each_present_cpu(cpu) {
10860 cpup = &phba->sli4_hba.cpu_map[cpu];
10861#ifdef CONFIG_X86
10862 cpuinfo = &cpu_data(cpu);
10863 cpup->phys_id = cpuinfo->phys_proc_id;
10864 cpup->core_id = cpuinfo->cpu_core_id;
10865 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10866 cpup->flag |= LPFC_CPU_MAP_HYPER;
10867#else
10868
10869 cpup->phys_id = 0;
10870 cpup->core_id = cpu;
10871#endif
10872
10873 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10874 "3328 CPU %d physid %d coreid %d flag x%x\n",
10875 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10876
10877 if (cpup->phys_id > max_phys_id)
10878 max_phys_id = cpup->phys_id;
10879 if (cpup->phys_id < min_phys_id)
10880 min_phys_id = cpup->phys_id;
10881
10882 if (cpup->core_id > max_core_id)
10883 max_core_id = cpup->core_id;
10884 if (cpup->core_id < min_core_id)
10885 min_core_id = cpup->core_id;
10886 }
10887
10888
10889
10890
10891
10892
10893 first_cpu = cpumask_first(cpu_present_mask);
10894 start_cpu = first_cpu;
10895
10896 for_each_present_cpu(cpu) {
10897 cpup = &phba->sli4_hba.cpu_map[cpu];
10898
10899
10900 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10901
10902 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10903
10904
10905
10906
10907
10908
10909 new_cpu = start_cpu;
10910 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10911 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10912 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10913 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
10914 (new_cpup->phys_id == cpup->phys_id))
10915 goto found_same;
10916 new_cpu = cpumask_next(
10917 new_cpu, cpu_present_mask);
10918 if (new_cpu == nr_cpumask_bits)
10919 new_cpu = first_cpu;
10920 }
10921
10922 continue;
10923found_same:
10924
10925 cpup->eq = new_cpup->eq;
10926
10927
10928
10929
10930
10931 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10932 if (start_cpu == nr_cpumask_bits)
10933 start_cpu = first_cpu;
10934
10935 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10936 "3337 Set Affinity: CPU %d "
10937 "eq %d from peer cpu %d same "
10938 "phys_id (%d)\n",
10939 cpu, cpup->eq, new_cpu,
10940 cpup->phys_id);
10941 }
10942 }
10943
10944
10945 start_cpu = first_cpu;
10946
10947 for_each_present_cpu(cpu) {
10948 cpup = &phba->sli4_hba.cpu_map[cpu];
10949
10950
10951 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10952
10953 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10954
10955
10956
10957
10958
10959
10960 new_cpu = start_cpu;
10961 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10962 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10963 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10964 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
10965 goto found_any;
10966 new_cpu = cpumask_next(
10967 new_cpu, cpu_present_mask);
10968 if (new_cpu == nr_cpumask_bits)
10969 new_cpu = first_cpu;
10970 }
10971
10972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10973 "3339 Set Affinity: CPU %d "
10974 "eq %d UNASSIGNED\n",
					cpu, cpup->eq);
10976 continue;
10977found_any:
10978
10979 cpup->eq = new_cpup->eq;
10980
10981
10982
10983
10984
10985 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10986 if (start_cpu == nr_cpumask_bits)
10987 start_cpu = first_cpu;
10988
10989 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10990 "3338 Set Affinity: CPU %d "
10991 "eq %d from peer cpu %d (%d/%d)\n",
10992 cpu, cpup->eq, new_cpu,
10993 new_cpup->phys_id, new_cpup->core_id);
10994 }
10995 }
10996
10997
10998
10999
11000 idx = 0;
11001 for_each_present_cpu(cpu) {
11002 cpup = &phba->sli4_hba.cpu_map[cpu];
11003
11004
11005 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11006 continue;
11007
11008
11009 cpup->hdwq = idx;
11010 idx++;
11011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11012 "3333 Set Affinity: CPU %d (phys %d core %d): "
11013 "hdwq %d eq %d flg x%x\n",
11014 cpu, cpup->phys_id, cpup->core_id,
11015 cpup->hdwq, cpup->eq, cpup->flag);
11016 }
11017
11018
11019
11020
11021
11022
11023
11024
11025 next_idx = idx;
11026 start_cpu = 0;
11027 idx = 0;
11028 for_each_present_cpu(cpu) {
11029 cpup = &phba->sli4_hba.cpu_map[cpu];
11030
11031
11032 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
11033 continue;
11034
11035
11036
11037
11038
11039 if (next_idx < phba->cfg_hdw_queue) {
11040 cpup->hdwq = next_idx;
11041 next_idx++;
11042 continue;
11043 }
11044
11045
11046
11047
11048
11049
11050 new_cpu = start_cpu;
11051 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11052 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11053 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11054 new_cpup->phys_id == cpup->phys_id &&
11055 new_cpup->core_id == cpup->core_id) {
11056 goto found_hdwq;
11057 }
11058 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11059 if (new_cpu == nr_cpumask_bits)
11060 new_cpu = first_cpu;
11061 }
11062
11063
11064
11065
11066 new_cpu = start_cpu;
11067 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11068 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11069 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11070 new_cpup->phys_id == cpup->phys_id)
11071 goto found_hdwq;
11072
11073 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11074 if (new_cpu == nr_cpumask_bits)
11075 new_cpu = first_cpu;
11076 }
11077
11078
11079 cpup->hdwq = idx % phba->cfg_hdw_queue;
11080 idx++;
11081 goto logit;
11082 found_hdwq:
11083
11084 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11085 if (start_cpu == nr_cpumask_bits)
11086 start_cpu = first_cpu;
11087 cpup->hdwq = new_cpup->hdwq;
11088 logit:
11089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11090 "3335 Set Affinity: CPU %d (phys %d core %d): "
11091 "hdwq %d eq %d flg x%x\n",
11092 cpu, cpup->phys_id, cpup->core_id,
11093 cpup->hdwq, cpup->eq, cpup->flag);
11094 }
11095
11096
11097
11098
11099
11100 idx = 0;
11101 for_each_possible_cpu(cpu) {
11102 cpup = &phba->sli4_hba.cpu_map[cpu];
11103#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11104 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11105 c_stat->hdwq_no = cpup->hdwq;
11106#endif
11107 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11108 continue;
11109
11110 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
11111#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11112 c_stat->hdwq_no = cpup->hdwq;
11113#endif
11114 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11115 "3340 Set Affinity: not present "
11116 "CPU %d hdwq %d\n",
11117 cpu, cpup->hdwq);
11118 }
11119
11120
11121
11122
11123 return;
11124}
11132
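/**
 * lpfc_cpuhp_get_eq - Collect the EQs affected by a CPU going offline
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU going offline.
 * @eqlist: list head to receive the affected EQs.
 *
 * An EQ is added to @eqlist when @cpu is the last online CPU serving
 * that vector, so the EQ must be switched to polling.
 *
 * Return 0 on success, -ENOMEM if the scratch cpumask allocation fails.
 **/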
11133static int
11134lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11135 struct list_head *eqlist)
11136{
11137 const struct cpumask *maskp;
11138 struct lpfc_queue *eq;
11139 struct cpumask *tmp;
11140 u16 idx;
11141
11142 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11143 if (!tmp)
11144 return -ENOMEM;
11145
11146 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11147 maskp = pci_irq_get_affinity(phba->pcidev, idx);
11148 if (!maskp)
11149 continue;
11150
11151
11152
11153
11154
11155 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
11156 continue;
11157
11158
11159
11160
11161
11162
11163 cpumask_and(tmp, maskp, cpu_online_mask);
11164 if (cpumask_weight(tmp) > 1)
11165 continue;
11166
11167
11168
11169
11170
11171
11172 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11173 list_add(&eq->_poll_list, eqlist);
11174 }
11175 kfree(tmp);
11176 return 0;
11177}
11178
11179static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11180{
11181 if (phba->sli_rev != LPFC_SLI_REV4)
11182 return;
11183
11184 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11185 &phba->cpuhp);
11189
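	/*
	 * Wait out any RCU read-side critical sections touching the
	 * poll list before deleting the poll timer.
	 */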
11190 synchronize_rcu();
11191 del_timer_sync(&phba->cpuhp_poll_timer);
11192}
11193
11194static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11195{
11196 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11197 return;
11198
11199 __lpfc_cpuhp_remove(phba);
11200}
11201
11202static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11203{
11204 if (phba->sli_rev != LPFC_SLI_REV4)
11205 return;
11206
11207 rcu_read_lock();
11208
11209 if (!list_empty(&phba->poll_list))
11210 mod_timer(&phba->cpuhp_poll_timer,
11211 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
11212
11213 rcu_read_unlock();
11214
11215 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11216 &phba->cpuhp);
11217}
11218
11219static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11220{
11221 if (phba->pport->load_flag & FC_UNLOADING) {
11222 *retval = -EAGAIN;
11223 return true;
11224 }
11225
11226 if (phba->sli_rev != LPFC_SLI_REV4) {
11227 *retval = 0;
11228 return true;
11229 }
11230
11231
11232 return false;
11233}
11240
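/**
 * lpfc_irq_set_aff - Pin an EQ's IRQ to a single CPU
 * @eqhdl: EQ handle whose IRQ is being pinned.
 * @cpu: CPU to pin the IRQ to.
 **/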
11241static inline void
11242lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
11243{
11244 cpumask_clear(&eqhdl->aff_mask);
11245 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
11246 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11247 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11248}
11254
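/**
 * lpfc_irq_clear_aff - Clear an EQ's IRQ affinity hint
 * @eqhdl: EQ handle whose IRQ affinity is being cleared.
 **/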
11255static inline void
11256lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
11257{
11258 cpumask_clear(&eqhdl->aff_mask);
11259 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11260 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11261}
11278
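/**
 * lpfc_irq_rebalance - Rebalance IRQ affinity on CPU hotplug
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU going offline or coming online.
 * @offline: true if @cpu is going offline, false if coming online.
 *
 * Only acts when a restricted irq_chann_mode is active and @cpu is in
 * the original affinity mask. On offline, IRQs pinned to @cpu move to
 * the next online CPU in the mask, or are unpinned if none remains; on
 * online, the CPU's first-IRQ EQ is pinned back to it.
 **/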
11279static void
11280lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
11281{
11282 struct lpfc_vector_map_info *cpup;
11283 struct cpumask *aff_mask;
11284 unsigned int cpu_select, cpu_next, idx;
11285 const struct cpumask *orig_mask;
11286
11287 if (phba->irq_chann_mode == NORMAL_MODE)
11288 return;
11289
11290 orig_mask = &phba->sli4_hba.irq_aff_mask;
11291
11292 if (!cpumask_test_cpu(cpu, orig_mask))
11293 return;
11294
11295 cpup = &phba->sli4_hba.cpu_map[cpu];
11296
11297 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11298 return;
11299
11300 if (offline) {
11301
11302 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
11303 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
11304
11305
11306 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
11307
11308
11309
11310 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11311 aff_mask = lpfc_get_aff_mask(idx);
11312
11313
11314 if (cpumask_test_cpu(cpu, aff_mask))
11315 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
11316 cpu_select);
11317 }
11318 } else {
11319
11320 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
11321 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
11322 }
11323 } else {
11324
11325 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
11326 }
11327}
11328
11329static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11330{
11331 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11332 struct lpfc_queue *eq, *next;
11333 LIST_HEAD(eqlist);
11334 int retval;
11335
11336 if (!phba) {
11337 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11338 return 0;
11339 }
11340
11341 if (__lpfc_cpuhp_checks(phba, &retval))
11342 return retval;
11343
11344 lpfc_irq_rebalance(phba, cpu, true);
11345
11346 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11347 if (retval)
11348 return retval;
11349
11350
11351 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11352 list_del_init(&eq->_poll_list);
11353 lpfc_sli4_start_polling(eq);
11354 }
11355
11356 return 0;
11357}
11358
11359static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11360{
11361 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11362 struct lpfc_queue *eq, *next;
11363 unsigned int n;
11364 int retval;
11365
11366 if (!phba) {
11367 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11368 return 0;
11369 }
11370
11371 if (__lpfc_cpuhp_checks(phba, &retval))
11372 return retval;
11373
11374 lpfc_irq_rebalance(phba, cpu, false);
11375
11376 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11377 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11378 if (n == cpu)
11379 lpfc_sli4_stop_polling(eq);
11380 }
11381
11382 return 0;
11383}
11412
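/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode on SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate up to cfg_irq_chann MSI-X vectors (possibly limited by an
 * affinity mask), request a fast-path handler per vector, record the
 * per-EQ IRQ/CPU mapping, and trim cfg_irq_chann if fewer vectors were
 * granted.
 *
 * Return 0 on success, otherwise an error code.
 **/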
11413static int
11414lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11415{
11416 int vectors, rc, index;
11417 char *name;
11418 const struct cpumask *aff_mask = NULL;
11419 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
11420 struct lpfc_hba_eq_hdl *eqhdl;
11421 const struct cpumask *maskp;
11422 bool first;
11423 unsigned int flags = PCI_IRQ_MSIX;
11424
11425
11426 vectors = phba->cfg_irq_chann;
11427
11428 if (phba->irq_chann_mode != NORMAL_MODE)
11429 aff_mask = &phba->sli4_hba.irq_aff_mask;
11430
11431 if (aff_mask) {
11432 cpu_cnt = cpumask_weight(aff_mask);
11433 vectors = min(phba->cfg_irq_chann, cpu_cnt);
11434
11435
11436
11437
11438 cpu = cpumask_first(aff_mask);
11439 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11440 } else {
11441 flags |= PCI_IRQ_AFFINITY;
11442 }
11443
11444 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
11445 if (rc < 0) {
11446 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11447 "0484 PCI enable MSI-X failed (%d)\n", rc);
11448 goto vec_fail_out;
11449 }
11450 vectors = rc;
11451
11452
11453 for (index = 0; index < vectors; index++) {
11454 eqhdl = lpfc_get_eq_hdl(index);
11455 name = eqhdl->handler_name;
11456 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11457 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
11458 LPFC_DRIVER_HANDLER_NAME"%d", index);
11459
11460 eqhdl->idx = index;
11461 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11462 &lpfc_sli4_hba_intr_handler, 0,
11463 name, eqhdl);
11464 if (rc) {
11465 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11466 "0486 MSI-X fast-path (%d) "
11467 "request_irq failed (%d)\n", index, rc);
11468 goto cfg_fail_out;
11469 }
11470
11471 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
11472
11473 if (aff_mask) {
11474
11475 if (cpu_select < nr_cpu_ids)
11476 lpfc_irq_set_aff(eqhdl, cpu_select);
11477
11478
11479 lpfc_assign_eq_map_info(phba, index,
11480 LPFC_CPU_FIRST_IRQ,
11481 cpu);
11482
11483
11484 cpu = cpumask_next(cpu, aff_mask);
11485
11486
11487 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11488 } else if (vectors == 1) {
11489 cpu = cpumask_first(cpu_present_mask);
11490 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11491 cpu);
11492 } else {
11493 maskp = pci_irq_get_affinity(phba->pcidev, index);
11494
11495 first = true;
11496
11497 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
11498
11499
11500
11501 lpfc_assign_eq_map_info(phba, index,
11502 first ?
11503 LPFC_CPU_FIRST_IRQ : 0,
11504 cpu);
11505 if (first)
11506 first = false;
11507 }
11508 }
11509 }
11510
11511 if (vectors != phba->cfg_irq_chann) {
11512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11513 "3238 Reducing IO channels to match number of "
11514 "MSI-X vectors, requested %d got %d\n",
11515 phba->cfg_irq_chann, vectors);
11516 if (phba->cfg_irq_chann > vectors)
11517 phba->cfg_irq_chann = vectors;
11518 }
11519
11520 return rc;
11521
11522cfg_fail_out:
11523
11524 for (--index; index >= 0; index--) {
11525 eqhdl = lpfc_get_eq_hdl(index);
11526 lpfc_irq_clear_aff(eqhdl);
11527 irq_set_affinity_hint(eqhdl->irq, NULL);
11528 free_irq(eqhdl->irq, eqhdl);
11529 }
11530
11531
11532 pci_free_irq_vectors(phba->pcidev);
11533
11534vec_fail_out:
11535 return rc;
11536}
11551
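/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode on SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Enable a single MSI vector, request the interrupt handler, and map
 * every EQ handle onto it.
 *
 * Return 0 on success, otherwise an error code.
 **/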
11552static int
11553lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11554{
11555 int rc, index;
11556 unsigned int cpu;
11557 struct lpfc_hba_eq_hdl *eqhdl;
11558
11559 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11560 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11561 if (rc > 0)
11562 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11563 "0487 PCI enable MSI mode success.\n");
11564 else {
11565 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11566 "0488 PCI enable MSI mode failed (%d)\n", rc);
11567 return rc ? rc : -1;
11568 }
11569
11570 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11571 0, LPFC_DRIVER_NAME, phba);
11572 if (rc) {
11573 pci_free_irq_vectors(phba->pcidev);
11574 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11575 "0490 MSI request_irq failed (%d)\n", rc);
11576 return rc;
11577 }
11578
11579 eqhdl = lpfc_get_eq_hdl(0);
11580 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11581
11582 cpu = cpumask_first(cpu_present_mask);
11583 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11584
11585 for (index = 0; index < phba->cfg_irq_chann; index++) {
11586 eqhdl = lpfc_get_eq_hdl(index);
11587 eqhdl->idx = index;
11588 }
11589
11590 return 0;
11591}
11608
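/**
 * lpfc_sli4_enable_intr - Enable device interrupt on SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode (2 MSI-X, 1 MSI, 0 INTx).
 *
 * Try MSI-X, then MSI, then INTx, as permitted by @cfg_mode.
 *
 * Return the interrupt mode enabled (0, 1 or 2), or LPFC_INTR_ERROR if
 * none could be enabled.
 **/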
11609static uint32_t
11610lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11611{
11612 uint32_t intr_mode = LPFC_INTR_ERROR;
11613 int retval, idx;
11614
11615 if (cfg_mode == 2) {
11616
11617 retval = 0;
11618 if (!retval) {
11619
11620 retval = lpfc_sli4_enable_msix(phba);
11621 if (!retval) {
11622
11623 phba->intr_type = MSIX;
11624 intr_mode = 2;
11625 }
11626 }
11627 }
11628
11629
11630 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11631 retval = lpfc_sli4_enable_msi(phba);
11632 if (!retval) {
11633
11634 phba->intr_type = MSI;
11635 intr_mode = 1;
11636 }
11637 }
11638
11639
11640 if (phba->intr_type == NONE) {
11641 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11642 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11643 if (!retval) {
11644 struct lpfc_hba_eq_hdl *eqhdl;
11645 unsigned int cpu;
11646
11647
11648 phba->intr_type = INTx;
11649 intr_mode = 0;
11650
11651 eqhdl = lpfc_get_eq_hdl(0);
11652 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11653
11654 cpu = cpumask_first(cpu_present_mask);
11655 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11656 cpu);
11657 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11658 eqhdl = lpfc_get_eq_hdl(idx);
11659 eqhdl->idx = idx;
11660 }
11661 }
11662 }
11663 return intr_mode;
11664}
11674
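/**
 * lpfc_sli4_disable_intr - Disable device interrupt on SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Free the registered IRQs (clearing any affinity hints under MSI-X),
 * release the vectors, and reset the interrupt bookkeeping.
 **/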
11675static void
11676lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11677{
11678
11679 if (phba->intr_type == MSIX) {
11680 int index;
11681 struct lpfc_hba_eq_hdl *eqhdl;
11682
11683
11684 for (index = 0; index < phba->cfg_irq_chann; index++) {
11685 eqhdl = lpfc_get_eq_hdl(index);
11686 lpfc_irq_clear_aff(eqhdl);
11687 irq_set_affinity_hint(eqhdl->irq, NULL);
11688 free_irq(eqhdl->irq, eqhdl);
11689 }
11690 } else {
11691 free_irq(phba->pcidev->irq, phba);
11692 }
11693
11694 pci_free_irq_vectors(phba->pcidev);
11695
11696
11697 phba->intr_type = NONE;
11698 phba->sli.slistat.sli_intr = 0;
11699}
11707
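/**
 * lpfc_unset_hba - Unset SLI-3 HBA device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * Mark the port unloading, stop the timers, bring the SLI layer down,
 * restart the board, and disable interrupts.
 **/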
11708static void
11709lpfc_unset_hba(struct lpfc_hba *phba)
11710{
11711 struct lpfc_vport *vport = phba->pport;
11712 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11713
11714 spin_lock_irq(shost->host_lock);
11715 vport->load_flag |= FC_UNLOADING;
11716 spin_unlock_irq(shost->host_lock);
11717
11718 kfree(phba->vpi_bmask);
11719 kfree(phba->vpi_ids);
11720
11721 lpfc_stop_hba_timers(phba);
11722
11723 phba->pport->work_port_events = 0;
11724
11725 lpfc_sli_hba_down(phba);
11726
11727 lpfc_sli_brdrestart(phba);
11728
11729 lpfc_sli_disable_intr(phba);
11730
11731 return;
11732}
11746
11747static void
11748lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11749{
11750 struct lpfc_sli4_hdw_queue *qp;
11751 int idx, ccnt;
11752 int wait_time = 0;
11753 int io_xri_cmpl = 1;
11754 int nvmet_xri_cmpl = 1;
11755 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Driver just aborted IOs during the hba_unset process.  Pause
	 * here to give the HBA time to complete the IO and get entries
	 * into the abts lists.
	 */
11761 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);

	/* Wait for NVME pending IO to flush back to transport. */
11764 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11765 lpfc_nvme_wait_for_io_drain(phba);
11766
11767 ccnt = 0;
11768 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11769 qp = &phba->sli4_hba.hdwq[idx];
11770 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11771 if (!io_xri_cmpl)
11772 ccnt++;
11773 }
11774 if (ccnt)
11775 io_xri_cmpl = 0;
11776
11777 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11778 nvmet_xri_cmpl =
11779 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11780 }
11781
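	/*
	 * Poll until every abort list drains.  After the initial timeout
	 * the loop keeps waiting, but escalates to error-level log
	 * messages on each longer poll interval.
	 */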
11782 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
11783 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
11784 if (!nvmet_xri_cmpl)
11785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11786 "6424 NVMET XRI exchange busy "
11787 "wait time: %d seconds.\n",
11788 wait_time/1000);
11789 if (!io_xri_cmpl)
11790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11791 "6100 IO XRI exchange busy "
11792 "wait time: %d seconds.\n",
11793 wait_time/1000);
11794 if (!els_xri_cmpl)
11795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11796 "2878 ELS XRI exchange busy "
11797 "wait time: %d seconds.\n",
11798 wait_time/1000);
11799 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
11800 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
11801 } else {
11802 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
11803 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
11804 }
11805
11806 ccnt = 0;
11807 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11808 qp = &phba->sli4_hba.hdwq[idx];
11809 io_xri_cmpl = list_empty(
11810 &qp->lpfc_abts_io_buf_list);
11811 if (!io_xri_cmpl)
11812 ccnt++;
11813 }
11814 if (ccnt)
11815 io_xri_cmpl = 0;
11816
11817 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11818 nvmet_xri_cmpl = list_empty(
11819 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11820 }
11821 els_xri_cmpl =
11822 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11823
11824 }
11825}

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
11837static void
11838lpfc_sli4_hba_unset(struct lpfc_hba *phba)
11839{
11840 int wait_cnt = 0;
11841 LPFC_MBOXQ_t *mboxq;
11842 struct pci_dev *pdev = phba->pcidev;
11843
11844 lpfc_stop_hba_timers(phba);
11845 if (phba->pport)
11846 phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
11854 spin_lock_irq(&phba->hbalock);
11855 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11856 spin_unlock_irq(&phba->hbalock);
11857
11858 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11859 msleep(10);
11860 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
11861 break;
11862 }
11863
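	/* Timed out: fail the still-active mailbox command and release it */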
11864 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11865 spin_lock_irq(&phba->hbalock);
11866 mboxq = phba->sli.mbox_active;
11867 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
11868 __lpfc_mbox_cmpl_put(phba, mboxq);
11869 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11870 phba->sli.mbox_active = NULL;
11871 spin_unlock_irq(&phba->hbalock);
11872 }
11873
	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
11878 lpfc_sli4_xri_exchange_busy_wait(phba);
11879
	/* per-phba callback de-registration for hotplug event */
	if (phba->pport)
		lpfc_cpuhp_remove(phba);

	/* Disable PCI subsystem interrupt */
11885 lpfc_sli4_disable_intr(phba);
11886
	/* Disable SR-IOV if enabled */
11888 if (phba->cfg_sriov_nr_virtfn)
11889 pci_disable_sriov(pdev);
11890
	/* Stop kthread signal shall trigger work_done one more time */
11892 kthread_stop(phba->worker_thread);
11893
	/* Disable FW logging to host memory */
11895 lpfc_ras_stop_fwlog(phba);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
11900 lpfc_sli4_queue_unset(phba);
11901 lpfc_sli4_queue_destroy(phba);
11902
	/* Reset SLI4 HBA FCoE function */
11904 lpfc_pci_function_reset(phba);
11905
	/* Free RAS DMA memory */
11907 if (phba->ras_fwlog.ras_enabled)
11908 lpfc_sli4_ras_dma_free(phba);
11909
	/* Stop the SLI4 device port */
11911 if (phba->pport)
11912 phba->pport->work_port_events = 0;
11913}

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion.  The expected caller is the FC port enable
 * asynchronous event handler.
 **/
11927int
11928lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11929{
11930 int rc;
11931 struct lpfc_mqe *mqe;
11932 struct lpfc_pc_sli4_params *sli4_params;
11933 uint32_t mbox_tmo;
11934
11935 rc = 0;
11936 mqe = &mboxq->u.mqe;
11937
	/* Read the port's SLI4 Parameters port capabilities */
11939 lpfc_pc_sli4_params(mboxq);
11940 if (!phba->sli4_hba.intr_enable)
11941 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11942 else {
11943 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
11944 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11945 }
11946
11947 if (unlikely(rc))
11948 return 1;
11949
11950 sli4_params = &phba->sli4_hba.pc_sli4_params;
11951 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
11952 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
11953 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
11954 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
11955 &mqe->un.sli4_params);
11956 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
11957 &mqe->un.sli4_params);
11958 sli4_params->proto_types = mqe->un.sli4_params.word3;
11959 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
11960 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
11961 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
11962 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
11963 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
11964 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
11965 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
11966 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
11967 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
11968 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
11969 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
11970 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
11971 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
11972 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
11973 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
11974 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
11975 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
11976 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
11977 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
11978 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
11979
	/* Make sure that sge_supp_len can be handled by the driver */
11981 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
11982 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
11983
11984 return rc;
11985}

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion.  The expected caller is the FC port enable
 * asynchronous event handler.
 **/
11999int
12000lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12001{
12002 int rc;
12003 struct lpfc_mqe *mqe = &mboxq->u.mqe;
12004 struct lpfc_pc_sli4_params *sli4_params;
12005 uint32_t mbox_tmo;
12006 int length;
12007 bool exp_wqcq_pages = true;
12008 struct lpfc_sli4_parameters *mbx_sli4_parameters;
12009
	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings.  The SLI4_PARAM response will correct this
	 * assumption.
	 */
12015 phba->sli4_hba.rpi_hdrs_in_use = 1;
12016
	/* Read the port's SLI4 Config Parameters */
12018 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
12019 sizeof(struct lpfc_sli4_cfg_mhdr));
12020 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
12021 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
12022 length, LPFC_SLI4_MBX_EMBED);
12023 if (!phba->sli4_hba.intr_enable)
12024 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12025 else {
12026 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12027 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12028 }
12029 if (unlikely(rc))
12030 return rc;
12031 sli4_params = &phba->sli4_hba.pc_sli4_params;
12032 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
12033 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
12034 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
12035 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
12036 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
12037 mbx_sli4_parameters);
12038 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
12039 mbx_sli4_parameters);
12040 if (bf_get(cfg_phwq, mbx_sli4_parameters))
12041 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
12042 else
12043 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
12044 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
12045 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
12046 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
12047 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
12048 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
12049 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
12050 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
12051 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
12052 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
12053 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
12054 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
12055 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
12056 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
12057 mbx_sli4_parameters);
12058 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
12059 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
12060 mbx_sli4_parameters);
12061 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
12062 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
12063
	/* Check for Extended Pre-Registered SGL support */
12065 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
12066
	/* Check for firmware nvme support */
12068 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
12069 bf_get(cfg_xib, mbx_sli4_parameters));
12070
12071 if (rc) {
		/* Save this to indicate the Firmware supports NVME */
12073 sli4_params->nvme = 1;
12074
		/* Firmware NVME support, check driver FC4 NVME support */
12076 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
12077 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12078 "6133 Disabling NVME support: "
12079 "FC4 type not supported: x%x\n",
12080 phba->cfg_enable_fc4_type);
12081 goto fcponly;
12082 }
12083 } else {
		/* No firmware NVME support, check driver FC4 NVME support */
12085 sli4_params->nvme = 0;
12086 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
12088 "6101 Disabling NVME support: Not "
12089 "supported by firmware (%d %d) x%x\n",
12090 bf_get(cfg_nvme, mbx_sli4_parameters),
12091 bf_get(cfg_xib, mbx_sli4_parameters),
12092 phba->cfg_enable_fc4_type);
12093fcponly:
12094 phba->nvme_support = 0;
12095 phba->nvmet_support = 0;
12096 phba->cfg_nvmet_mrq = 0;
12097 phba->cfg_nvme_seg_cnt = 0;
12098
			/* If no FC4 type support, move to just SCSI support */
12100 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
12101 return -ENODEV;
12102 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12103 }
12104 }
12105
	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
	 * accommodate 512K and 1M IOs in a single nvme buf.
	 */
12109 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12110 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
12111
	/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
12113 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
12114 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
12115 phba->cfg_enable_pbde = 0;
12116

	/*
	 * To support Suppress Response feature we must satisfy 3 conditions:
	 * lpfc_suppress_rsp module parameter must be set (default).
	 * In SLI4-Parameters Descriptor:
	 * Extents and RPI headers must be supported.
	 * BDE version 1 must not be supported.
	 */
12125 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
12126 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
12127 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
12128 else
12129 phba->cfg_suppress_rsp = 0;
12130
12131 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
12132 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
12133
	/* Make sure that sge_supp_len can be handled by the driver */
12135 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12136 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12137
	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
12143 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
12144 phba->fcp_embed_io = 1;
12145 else
12146 phba->fcp_embed_io = 0;
12147
12148 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12149 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
12150 bf_get(cfg_xib, mbx_sli4_parameters),
12151 phba->cfg_enable_pbde,
12152 phba->fcp_embed_io, phba->nvme_support,
12153 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
12154
12155 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
12156 LPFC_SLI_INTF_IF_TYPE_2) &&
12157 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
12158 LPFC_SLI_INTF_FAMILY_LNCR_A0))
12159 exp_wqcq_pages = false;
12160
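	/*
	 * Expanded (16K) WQ/CQ pages are usable only when both queue types
	 * report 16K page support and the WQ supports 128-byte entries;
	 * Lancer A0 parts are excluded above.
	 */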
12161 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
12162 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
12163 exp_wqcq_pages &&
12164 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
12165 phba->enab_exp_wqcq_pages = 1;
12166 else
12167 phba->enab_exp_wqcq_pages = 0;
12168
12169
	/* Check if the SLI port supports MDS Diagnostics */
12171 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
12172 phba->mds_diags_support = 1;
12173 else
12174 phba->mds_diags_support = 0;
12175
12176
12177
	/* Check if the SLI port supports NSLER (sequence-level error recovery) */
12179 if (bf_get(cfg_nsler, mbx_sli4_parameters))
12180 phba->nsler = 1;
12181 else
12182 phba->nsler = 0;
12183
12184 return 0;
12185}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg dev to SCSI trans in SLI-3
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the
 * HBA, it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
12204static int
12205lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
12206{
12207 struct lpfc_hba *phba;
12208 struct lpfc_vport *vport = NULL;
12209 struct Scsi_Host *shost = NULL;
12210 int error;
12211 uint32_t cfg_mode, intr_mode;
12212
12213
12214 phba = lpfc_hba_alloc(pdev);
12215 if (!phba)
12216 return -ENOMEM;
12217
12218
12219 error = lpfc_enable_pci_dev(phba);
12220 if (error)
12221 goto out_free_phba;
12222
12223
12224 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
12225 if (error)
12226 goto out_disable_pci_dev;
12227
12228
12229 error = lpfc_sli_pci_mem_setup(phba);
12230 if (error) {
12231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12232 "1402 Failed to set up pci memory space.\n");
12233 goto out_disable_pci_dev;
12234 }
12235
12236
12237 error = lpfc_sli_driver_resource_setup(phba);
12238 if (error) {
12239 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12240 "1404 Failed to set up driver resource.\n");
12241 goto out_unset_pci_mem_s3;
12242 }
12243
12244
12245
12246 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
12247 if (error) {
12248 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12249 "1405 Failed to initialize iocb list.\n");
12250 goto out_unset_driver_resource_s3;
12251 }
12252
12253
12254 error = lpfc_setup_driver_resource_phase2(phba);
12255 if (error) {
12256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12257 "1406 Failed to set up driver resource.\n");
12258 goto out_free_iocb_list;
12259 }
12260
12261
12262 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
12263
12264
12265 error = lpfc_create_shost(phba);
12266 if (error) {
12267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12268 "1407 Failed to create scsi host.\n");
12269 goto out_unset_driver_resource;
12270 }
12271
12272
12273 vport = phba->pport;
12274 error = lpfc_alloc_sysfs_attr(vport);
12275 if (error) {
12276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12277 "1476 Failed to allocate sysfs attr\n");
12278 goto out_destroy_shost;
12279 }
12280
12281 shost = lpfc_shost_from_vport(vport);
12282
12283 cfg_mode = phba->cfg_use_msi;
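	/*
	 * Bring the port up under each interrupt mode in turn.  If the
	 * active-interrupt test fails, tear the mode down and retry with
	 * the next lower mode: MSI-X (2) -> MSI (1) -> INTx (0).
	 */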
12284 while (true) {
12285
12286 lpfc_stop_port(phba);
12287
12288 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
12289 if (intr_mode == LPFC_INTR_ERROR) {
12290 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12291 "0431 Failed to enable interrupt.\n");
12292 error = -ENODEV;
12293 goto out_free_sysfs_attr;
12294 }
12295
12296 if (lpfc_sli_hba_setup(phba)) {
12297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12298 "1477 Failed to set up hba\n");
12299 error = -ENODEV;
12300 goto out_remove_device;
12301 }
12302
12303
12304 msleep(50);
12305
12306 if (intr_mode == 0 ||
12307 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
12308
12309 phba->intr_mode = intr_mode;
12310 lpfc_log_intr_mode(phba, intr_mode);
12311 break;
12312 } else {
12313 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12314 "0447 Configure interrupt mode (%d) "
12315 "failed active interrupt test.\n",
12316 intr_mode);
12317
12318 lpfc_sli_disable_intr(phba);
12319
12320 cfg_mode = --intr_mode;
12321 }
12322 }
12323
12324
12325 lpfc_post_init_setup(phba);
12326
12327
12328 lpfc_create_static_vport(phba);
12329
12330 return 0;
12331
12332out_remove_device:
12333 lpfc_unset_hba(phba);
12334out_free_sysfs_attr:
12335 lpfc_free_sysfs_attr(vport);
12336out_destroy_shost:
12337 lpfc_destroy_shost(phba);
12338out_unset_driver_resource:
12339 lpfc_unset_driver_resource_phase2(phba);
12340out_free_iocb_list:
12341 lpfc_free_iocb_list(phba);
12342out_unset_driver_resource_s3:
12343 lpfc_sli_driver_resource_unset(phba);
12344out_unset_pci_mem_s3:
12345 lpfc_sli_pci_mem_unset(phba);
12346out_disable_pci_dev:
12347 lpfc_disable_pci_dev(phba);
12348 if (shost)
12349 scsi_host_put(shost);
12350out_free_phba:
12351 lpfc_hba_free(phba);
12352 return error;
12353}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
12364static void
12365lpfc_pci_remove_one_s3(struct pci_dev *pdev)
12366{
12367 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12368 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12369 struct lpfc_vport **vports;
12370 struct lpfc_hba *phba = vport->phba;
12371 int i;
12372
12373 spin_lock_irq(&phba->hbalock);
12374 vport->load_flag |= FC_UNLOADING;
12375 spin_unlock_irq(&phba->hbalock);
12376
12377 lpfc_free_sysfs_attr(vport);
12378
12379
12380 vports = lpfc_create_vport_work_array(phba);
12381 if (vports != NULL)
12382 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12383 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12384 continue;
12385 fc_vport_terminate(vports[i]->fc_vport);
12386 }
12387 lpfc_destroy_vport_work_array(phba, vports);
12388
12389
12390 fc_remove_host(shost);
12391 scsi_remove_host(shost);
12392
12393 lpfc_cleanup(vport);
12394
	/*
	 * Bring down the SLI Layer. This step disable all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
12402 lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
12404 kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
12406 lpfc_sli_brdrestart(phba);
12407
12408 kfree(phba->vpi_bmask);
12409 kfree(phba->vpi_ids);
12410
12411 lpfc_stop_hba_timers(phba);
12412 spin_lock_irq(&phba->port_list_lock);
12413 list_del_init(&vport->listentry);
12414 spin_unlock_irq(&phba->port_list_lock);
12415
12416 lpfc_debugfs_terminate(vport);
12417
12418
12419 if (phba->cfg_sriov_nr_virtfn)
12420 pci_disable_sriov(pdev);
12421
12422
12423 lpfc_sli_disable_intr(phba);
12424
12425 scsi_host_put(shost);
12426
	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
12431 lpfc_scsi_free(phba);
12432 lpfc_free_iocb_list(phba);
12433
12434 lpfc_mem_free_all(phba);
12435
12436 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
12437 phba->hbqslimp.virt, phba->hbqslimp.phys);
12438
	/* Free resources associated with SLI2 interface */
12440 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
12441 phba->slim2p.virt, phba->slim2p.phys);
12442
	/* unmap adapter SLIM and Control Registers */
12444 iounmap(phba->ctrl_regs_memmap_p);
12445 iounmap(phba->slim_memmap_p);
12446
12447 lpfc_hba_free(phba);
12448
12449 pci_release_mem_regions(pdev);
12450 pci_disable_device(pdev);
12451}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bring the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the driver will set device to PCI_D3hot state in PCI config space instead
 * of setting it according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
12474static int
12475lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
12476{
12477 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12478 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12479
12480 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12481 "0473 PCI device Power Management suspend.\n");
12482
12483
12484 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12485 lpfc_offline(phba);
12486 kthread_stop(phba->worker_thread);
12487
12488
12489 lpfc_sli_disable_intr(phba);
12490
12491
12492 pci_save_state(pdev);
12493 pci_set_power_state(pdev, PCI_D3hot);
12494
12495 return 0;
12496}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
12517static int
12518lpfc_pci_resume_one_s3(struct pci_dev *pdev)
12519{
12520 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12521 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12522 uint32_t intr_mode;
12523 int error;
12524
12525 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12526 "0452 PCI device Power Management resume.\n");
12527
12528
12529 pci_set_power_state(pdev, PCI_D0);
12530 pci_restore_state(pdev);
12531
	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
12536 pci_save_state(pdev);
12537
12538 if (pdev->is_busmaster)
12539 pci_set_master(pdev);
12540
	/* Startup the kernel thread for this host adapter. */
12542 phba->worker_thread = kthread_run(lpfc_do_work, phba,
12543 "lpfc_worker_%d", phba->brd_no);
12544 if (IS_ERR(phba->worker_thread)) {
12545 error = PTR_ERR(phba->worker_thread);
12546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12547 "0434 PM resume failed to start worker "
12548 "thread: error=x%x.\n", error);
12549 return error;
12550 }
12551
	/* Configure and enable interrupt */
12553 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12554 if (intr_mode == LPFC_INTR_ERROR) {
12555 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12556 "0430 PM resume Failed to enable interrupt\n");
12557 return -EIO;
12558 } else
12559 phba->intr_mode = intr_mode;
12560
	/* Restart HBA and bring it online */
12562 lpfc_sli_brdrestart(phba);
12563 lpfc_online(phba);
12564
	/* Log the current active interrupt mode */
12566 lpfc_log_intr_mode(phba, phba->intr_mode);
12567
12568 return 0;
12569}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI-3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
12578static void
12579lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12580{
12581 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12582 "2723 PCI channel I/O abort preparing for recovery\n");
12583
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
12588 lpfc_sli_abort_fcp_rings(phba);
12589}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI-3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
12599static void
12600lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12601{
12602 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12603 "2710 PCI channel disable preparing for reset\n");
12604
	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
12619 pci_disable_device(phba->pcidev);
12620}

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
12630static void
12631lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12632{
12633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12634 "2711 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
12642 lpfc_sli_flush_io_rings(phba);
12643}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
12663static pci_ers_result_t
12664lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12665{
12666 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12667 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12668
12669 switch (state) {
12670 case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
12672 lpfc_sli_prep_dev_for_recover(phba);
12673 return PCI_ERS_RESULT_CAN_RECOVER;
12674 case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
12676 lpfc_sli_prep_dev_for_reset(phba);
12677 return PCI_ERS_RESULT_NEED_RESET;
12678 case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
12680 lpfc_sli_prep_dev_for_perm_failure(phba);
12681 return PCI_ERS_RESULT_DISCONNECT;
12682 default:
		/* Unknown state, prepare and request slot reset */
12684 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12685 "0472 Unknown PCI error state: x%x\n", state);
12686 lpfc_sli_prep_dev_for_reset(phba);
12687 return PCI_ERS_RESULT_NEED_RESET;
12688 }
12689}

/**
 * lpfc_io_slot_reset_s3 - Method for restart PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
12709static pci_ers_result_t
12710lpfc_io_slot_reset_s3(struct pci_dev *pdev)
12711{
12712 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12713 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12714 struct lpfc_sli *psli = &phba->sli;
12715 uint32_t intr_mode;
12716
12717 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
12718 if (pci_enable_device_mem(pdev)) {
12719 printk(KERN_ERR "lpfc: Cannot re-enable "
12720 "PCI device after reset.\n");
12721 return PCI_ERS_RESULT_DISCONNECT;
12722 }
12723
12724 pci_restore_state(pdev);
12725
	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
12730 pci_save_state(pdev);
12731
12732 if (pdev->is_busmaster)
12733 pci_set_master(pdev);
12734
12735 spin_lock_irq(&phba->hbalock);
12736 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
12737 spin_unlock_irq(&phba->hbalock);
12738
	/* Configure and enable interrupt */
12740 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12741 if (intr_mode == LPFC_INTR_ERROR) {
12742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12743 "0427 Cannot re-enable interrupt after "
12744 "slot reset.\n");
12745 return PCI_ERS_RESULT_DISCONNECT;
12746 } else
12747 phba->intr_mode = intr_mode;
12748
	/* Take device offline, it will perform cleanup */
12750 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12751 lpfc_offline(phba);
12752 lpfc_sli_brdrestart(phba);
12753
	/* Log the current active interrupt mode */
12755 lpfc_log_intr_mode(phba, phba->intr_mode);
12756
12757 return PCI_ERS_RESULT_RECOVERED;
12758}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is OK to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this
 * device again.
 **/
12770static void
12771lpfc_io_resume_s3(struct pci_dev *pdev)
12772{
12773 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12774 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12775
	/* Bring device online, it will be no-op for non-fatal error resume */
12777 lpfc_online(phba);
12778}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve, scaled by the maximum
 * number of exchanges (XRIs) the port supports.
 **/
12786int
12787lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12788{
12789 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12790
12791 if (phba->sli_rev == LPFC_SLI_REV4) {
12792 if (max_xri <= 100)
12793 return 10;
12794 else if (max_xri <= 256)
12795 return 25;
12796 else if (max_xri <= 512)
12797 return 50;
12798 else if (max_xri <= 1024)
12799 return 100;
12800 else if (max_xri <= 1536)
12801 return 150;
12802 else if (max_xri <= 2048)
12803 return 200;
12804 else
12805 return 250;
12806 } else
12807 return 0;
12808}

/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT + NVMET IOCBs to reserve.
 **/
12816int
12817lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12818{
12819 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12820
12821 if (phba->nvmet_support)
12822 max_xri += LPFC_NVMET_BUF_POST;
12823 return max_xri;
12824}
12825
/* Categorize and log a firmware image write (WRITE_OBJECT) failure */
12827static int
12828lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12829 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12830 const struct firmware *fw)
12831{
12832 int rc;
12833
	/* Three outcomes are distinguished here:
	 * 1) the image is not supported on this HBA model (bad magic/offset);
	 * 2) firmware download is administratively disabled on the device;
	 * 3) any other WRITE_OBJECT failure.
	 */
12840 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
12841 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12842 magic_number != MAGIC_NUMBER_G6) ||
12843 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12844 magic_number != MAGIC_NUMBER_G7)) {
12845 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12846 "3030 This firmware version is not supported on"
12847 " this HBA model. Device:%x Magic:%x Type:%x "
12848 "ID:%x Size %d %zd\n",
12849 phba->pcidev->device, magic_number, ftype, fid,
12850 fsize, fw->size);
12851 rc = -EINVAL;
12852 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
12853 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12854 "3021 Firmware downloads have been prohibited "
12855 "by a system configuration setting on "
12856 "Device:%x Magic:%x Type:%x ID:%x Size %d "
12857 "%zd\n",
12858 phba->pcidev->device, magic_number, ftype, fid,
12859 fsize, fw->size);
12860 rc = -EACCES;
12861 } else {
12862 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12863 "3022 FW Download failed. Add Status x%x "
12864 "Device:%x Magic:%x Type:%x ID:%x Size %d "
12865 "%zd\n",
12866 offset, phba->pcidev->device, magic_number,
12867 ftype, fid, fsize, fw->size);
12868 rc = -EIO;
12869 }
12870 return rc;
12871}

/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure.
 **/
12880static void
12881lpfc_write_firmware(const struct firmware *fw, void *context)
12882{
12883 struct lpfc_hba *phba = (struct lpfc_hba *)context;
12884 char fwrev[FW_REV_STR_SIZE];
12885 struct lpfc_grp_hdr *image;
12886 struct list_head dma_buffer_list;
12887 int i, rc = 0;
12888 struct lpfc_dmabuf *dmabuf, *next;
12889 uint32_t offset = 0, temp_offset = 0;
12890 uint32_t magic_number, ftype, fid, fsize;
12891
	/* It can be null in no-wait mode, sanity check */
12893 if (!fw) {
12894 rc = -ENXIO;
12895 goto out;
12896 }
12897 image = (struct lpfc_grp_hdr *)fw->data;
12898
12899 magic_number = be32_to_cpu(image->magic_number);
12900 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
12901 fid = bf_get_be32(lpfc_grp_hdr_id, image);
12902 fsize = be32_to_cpu(image->size);
12903
12904 INIT_LIST_HEAD(&dma_buffer_list);
12905 lpfc_decode_firmware_rev(phba, fwrev, 1);
12906 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
12907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12908 "3023 Updating Firmware, Current Version:%s "
12909 "New Version:%s\n",
12910 fwrev, image->revision);
12911 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
12912 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
12913 GFP_KERNEL);
12914 if (!dmabuf) {
12915 rc = -ENOMEM;
12916 goto release_out;
12917 }
12918 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
12919 SLI4_PAGE_SIZE,
12920 &dmabuf->phys,
12921 GFP_KERNEL);
12922 if (!dmabuf->virt) {
12923 kfree(dmabuf);
12924 rc = -ENOMEM;
12925 goto release_out;
12926 }
12927 list_add_tail(&dmabuf->list, &dma_buffer_list);
12928 }
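		/*
		 * Stream the image to the port one SLI4_PAGE_SIZE chunk per
		 * DMA buffer; lpfc_wr_object() advances @offset as the port
		 * consumes each write.
		 */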
12929 while (offset < fw->size) {
12930 temp_offset = offset;
12931 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
12932 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
12933 memcpy(dmabuf->virt,
12934 fw->data + temp_offset,
12935 fw->size - temp_offset);
12936 temp_offset = fw->size;
12937 break;
12938 }
12939 memcpy(dmabuf->virt, fw->data + temp_offset,
12940 SLI4_PAGE_SIZE);
12941 temp_offset += SLI4_PAGE_SIZE;
12942 }
12943 rc = lpfc_wr_object(phba, &dma_buffer_list,
12944 (fw->size - offset), &offset);
12945 if (rc) {
12946 rc = lpfc_log_write_firmware_error(phba, offset,
12947 magic_number,
12948 ftype,
12949 fid,
12950 fsize,
12951 fw);
12952 goto release_out;
12953 }
12954 }
12955 rc = offset;
12956 } else
12957 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12958 "3029 Skipped Firmware update, Current "
12959 "Version:%s New Version:%s\n",
12960 fwrev, image->revision);
12961
12962release_out:
12963 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
12964 list_del(&dmabuf->list);
12965 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
12966 dmabuf->virt, dmabuf->phys);
12967 kfree(dmabuf);
12968 }
12969 release_firmware(fw);
12970out:
12971 if (rc < 0)
12972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12973 "3062 Firmware update error, status %d.\n", rc);
12974 else
12975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12976 "3024 Firmware update success: size %d.\n", rc);
12977}

/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware to update (INT_FW_UPGRADE or RUN_FW_UPGRADE).
 *
 * This routine requests the "<ModelName>.grp" firmware image from the
 * kernel firmware loader and hands it to lpfc_write_firmware().
 **/
12986int
12987lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
12988{
	char file_name[ELX_MODEL_NAME_SIZE];
12990 int ret;
12991 const struct firmware *fw;
12992
	/* Only supported on SLI4 interface type 2 for now */
12994 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
12995 LPFC_SLI_INTF_IF_TYPE_2)
12996 return -EPERM;
12997
12998 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
12999
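	/*
	 * INT_FW_UPGRADE runs the download asynchronously via
	 * request_firmware_nowait(); RUN_FW_UPGRADE blocks until
	 * lpfc_write_firmware() has processed the image.
	 */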
13000 if (fw_upgrade == INT_FW_UPGRADE) {
13001 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
13002 file_name, &phba->pcidev->dev,
13003 GFP_KERNEL, (void *)phba,
13004 lpfc_write_firmware);
13005 } else if (fw_upgrade == RUN_FW_UPGRADE) {
13006 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
13007 if (!ret)
13008 lpfc_write_firmware(fw, (void *)phba);
13009 } else {
13010 ret = -EINVAL;
13011 }
13012
13013 return ret;
13014}

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg dev to SCSI trans in SLI-4
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the
 * HBA, it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
13034static int
13035lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
13036{
13037 struct lpfc_hba *phba;
13038 struct lpfc_vport *vport = NULL;
13039 struct Scsi_Host *shost = NULL;
13040 int error;
13041 uint32_t cfg_mode, intr_mode;
13042
13043
13044 phba = lpfc_hba_alloc(pdev);
13045 if (!phba)
13046 return -ENOMEM;
13047
13048
13049 error = lpfc_enable_pci_dev(phba);
13050 if (error)
13051 goto out_free_phba;
13052
13053
13054 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
13055 if (error)
13056 goto out_disable_pci_dev;
13057
13058
13059 error = lpfc_sli4_pci_mem_setup(phba);
13060 if (error) {
13061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13062 "1410 Failed to set up pci memory space.\n");
13063 goto out_disable_pci_dev;
13064 }
13065
13066
13067 error = lpfc_sli4_driver_resource_setup(phba);
13068 if (error) {
13069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13070 "1412 Failed to set up driver resource.\n");
13071 goto out_unset_pci_mem_s4;
13072 }
13073
13074 INIT_LIST_HEAD(&phba->active_rrq_list);
13075 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
13076
13077
13078 error = lpfc_setup_driver_resource_phase2(phba);
13079 if (error) {
13080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13081 "1414 Failed to set up driver resource.\n");
13082 goto out_unset_driver_resource_s4;
13083 }
13084
13085
13086 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13087
	/* Now, trying to enable interrupt and bring up the device */
13089 cfg_mode = phba->cfg_use_msi;
13090
	/* Put device to a known state before enabling interrupt */
13092 phba->pport = NULL;
13093 lpfc_stop_port(phba);
13094
	/* Init cpu_map array */
13096 lpfc_cpu_map_array_init(phba);
13097
	/* Init hba_eq_hdl array */
13099 lpfc_hba_eq_hdl_array_init(phba);
13100
	/* Configure and enable interrupt */
13102 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
13103 if (intr_mode == LPFC_INTR_ERROR) {
13104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13105 "0426 Failed to enable interrupt.\n");
13106 error = -ENODEV;
13107 goto out_unset_driver_resource;
13108 }
13109
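	/*
	 * Without MSI-X there is only one interrupt vector, so fall back
	 * to a single IRQ channel (and a single NVMET MRQ in target mode).
	 */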
13110 if (phba->intr_type != MSIX) {
13111 phba->cfg_irq_chann = 1;
13112 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13113 if (phba->nvmet_support)
13114 phba->cfg_nvmet_mrq = 1;
13115 }
13116 }
13117 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
13118
13119
13120 error = lpfc_create_shost(phba);
13121 if (error) {
13122 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13123 "1415 Failed to create scsi host.\n");
13124 goto out_disable_intr;
13125 }
13126 vport = phba->pport;
13127 shost = lpfc_shost_from_vport(vport);
13128
13129
13130 error = lpfc_alloc_sysfs_attr(vport);
13131 if (error) {
13132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13133 "1416 Failed to allocate sysfs attr\n");
13134 goto out_destroy_shost;
13135 }
13136
13137
13138 if (lpfc_sli4_hba_setup(phba)) {
13139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13140 "1421 Failed to set up hba\n");
13141 error = -ENODEV;
13142 goto out_free_sysfs_attr;
13143 }
13144
13145
13146 phba->intr_mode = intr_mode;
13147 lpfc_log_intr_mode(phba, intr_mode);
13148
13149
13150 lpfc_post_init_setup(phba);
13151
13152
13153
13154
13155 if (phba->nvmet_support == 0) {
13156 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized.  If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
13162 error = lpfc_nvme_create_localport(vport);
13163 if (error) {
13164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13165 "6004 NVME registration "
13166 "failed, error x%x\n",
13167 error);
13168 }
13169 }
13170 }
13171
	/* check for firmware upgrade or downgrade */
13173 if (phba->cfg_request_firmware_upgrade)
13174 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
13175
	/* Check if there are static vports to be created. */
13177 lpfc_create_static_vport(phba);
13178
	/* Enable RAS FW log support */
13180 lpfc_sli4_ras_setup(phba);
13181
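	/* Register the EQ-poll timer and a CPU-hotplug instance so EQ
	 * polling follows CPU online/offline events.
	 */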
13182 INIT_LIST_HEAD(&phba->poll_list);
13183 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
13184 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
13185
13186 return 0;
13187
13188out_free_sysfs_attr:
13189 lpfc_free_sysfs_attr(vport);
13190out_destroy_shost:
13191 lpfc_destroy_shost(phba);
13192out_disable_intr:
13193 lpfc_sli4_disable_intr(phba);
13194out_unset_driver_resource:
13195 lpfc_unset_driver_resource_phase2(phba);
13196out_unset_driver_resource_s4:
13197 lpfc_sli4_driver_resource_unset(phba);
13198out_unset_pci_mem_s4:
13199 lpfc_sli4_pci_mem_unset(phba);
13200out_disable_pci_dev:
13201 lpfc_disable_pci_dev(phba);
13202 if (shost)
13203 scsi_host_put(shost);
13204out_free_phba:
13205 lpfc_hba_free(phba);
13206 return error;
13207}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
13218static void
13219lpfc_pci_remove_one_s4(struct pci_dev *pdev)
13220{
13221 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13222 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13223 struct lpfc_vport **vports;
13224 struct lpfc_hba *phba = vport->phba;
13225 int i;
13226
	/* Mark the device unloading flag */
13228 spin_lock_irq(&phba->hbalock);
13229 vport->load_flag |= FC_UNLOADING;
13230 spin_unlock_irq(&phba->hbalock);
13231
	/* Free the HBA sysfs attributes */
13233 lpfc_free_sysfs_attr(vport);
13234
	/* Release all the vports against this physical port */
13236 vports = lpfc_create_vport_work_array(phba);
13237 if (vports != NULL)
13238 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13239 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13240 continue;
13241 fc_vport_terminate(vports[i]->fc_vport);
13242 }
13243 lpfc_destroy_vport_work_array(phba, vports);
13244
	/* Remove FC host and then SCSI host with the physical port */
13246 fc_remove_host(shost);
13247 scsi_remove_host(shost);
13248
	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
	 * localports are destroyed after to cleanup all transport memory.
	 */
13252 lpfc_cleanup(vport);
13253 lpfc_nvmet_destroy_targetport(phba);
13254 lpfc_nvme_destroy_localport(vport);
13255
13256
13257 if (phba->cfg_xri_rebalancing)
13258 lpfc_destroy_multixri_pools(phba);
13259
	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
13265 lpfc_debugfs_terminate(vport);
13266
13267 lpfc_stop_hba_timers(phba);
13268 spin_lock_irq(&phba->port_list_lock);
13269 list_del_init(&vport->listentry);
13270 spin_unlock_irq(&phba->port_list_lock);
13271
	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
13275 lpfc_io_free(phba);
13276 lpfc_free_iocb_list(phba);
13277 lpfc_sli4_hba_unset(phba);
13278
13279 lpfc_unset_driver_resource_phase2(phba);
13280 lpfc_sli4_driver_resource_unset(phba);
13281
13282
13283 lpfc_sli4_pci_mem_unset(phba);
13284
13285
13286 scsi_host_put(shost);
13287 lpfc_disable_pci_dev(phba);
13288
13289
13290 lpfc_hba_free(phba);
13291
13292 return;
13293}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and bring
 * the device offline. Note that as the driver implements the minimum PM
 * requirements to a power-aware driver's PM support for suspend/resume --
 * all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
 * suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during resume() method call, the driver
 * will set device to PCI_D3hot state in PCI config space instead of setting
 * it according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
13316static int
13317lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
13318{
13319 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13320 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13321
13322 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13323 "2843 PCI device Power Management suspend.\n");
13324
	/* Bring down the device */
13326 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13327 lpfc_offline(phba);
13328 kthread_stop(phba->worker_thread);
13329
	/* Disable interrupt from device */
13331 lpfc_sli4_disable_intr(phba);
13332 lpfc_sli4_queue_destroy(phba);
13333
	/* Save device state to PCI config space */
13335 pci_save_state(pdev);
13336 pci_set_power_state(pdev, PCI_D3hot);
13337
13338 return 0;
13339}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
13360static int
13361lpfc_pci_resume_one_s4(struct pci_dev *pdev)
13362{
13363 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13364 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13365 uint32_t intr_mode;
13366 int error;
13367
13368 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13369 "0292 PCI device Power Management resume.\n");
13370
13371
13372 pci_set_power_state(pdev, PCI_D0);
13373 pci_restore_state(pdev);
13374
	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
13379 pci_save_state(pdev);
13380
13381 if (pdev->is_busmaster)
13382 pci_set_master(pdev);
13383
	/* Startup the kernel thread for this host adapter. */
13385 phba->worker_thread = kthread_run(lpfc_do_work, phba,
13386 "lpfc_worker_%d", phba->brd_no);
13387 if (IS_ERR(phba->worker_thread)) {
13388 error = PTR_ERR(phba->worker_thread);
13389 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13390 "0293 PM resume failed to start worker "
13391 "thread: error=x%x.\n", error);
13392 return error;
13393 }
13394
	/* Configure and enable interrupt */
13396 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13397 if (intr_mode == LPFC_INTR_ERROR) {
13398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13399 "0294 PM resume Failed to enable interrupt\n");
13400 return -EIO;
13401 } else
13402 phba->intr_mode = intr_mode;
13403
	/* Restart HBA and bring it online */
13405 lpfc_sli_brdrestart(phba);
13406 lpfc_online(phba);
13407
	/* Log the current active interrupt mode */
13409 lpfc_log_intr_mode(phba, phba->intr_mode);
13410
13411 return 0;
13412}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI-4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
13421static void
13422lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
13423{
13424 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13425 "2828 PCI channel I/O abort preparing for recovery\n");
13426
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
13430 lpfc_sli_abort_fcp_rings(phba);
13431}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI-4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
13441static void
13442lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
13443{
13444 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13445 "2826 PCI channel disable preparing for reset\n");
13446
	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
13461 lpfc_sli4_queue_destroy(phba);
13462 pci_disable_device(phba->pcidev);
13463}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
13473static void
13474lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
13475{
13476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13477 "2827 PCI channel permanent disable for failure\n");
13478
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
13486 lpfc_sli_flush_io_rings(phba);
13487}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
13505static pci_ers_result_t
13506lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13507{
13508 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13509 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13510
13511 switch (state) {
13512 case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
13514 lpfc_sli4_prep_dev_for_recover(phba);
13515 return PCI_ERS_RESULT_CAN_RECOVER;
13516 case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
13518 lpfc_sli4_prep_dev_for_reset(phba);
13519 return PCI_ERS_RESULT_NEED_RESET;
13520 case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
13522 lpfc_sli4_prep_dev_for_perm_failure(phba);
13523 return PCI_ERS_RESULT_DISCONNECT;
13524 default:
		/* Unknown state, prepare and request slot reset */
13526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13527 "2825 Unknown PCI error state: x%x\n", state);
13528 lpfc_sli4_prep_dev_for_reset(phba);
13529 return PCI_ERS_RESULT_NEED_RESET;
13530 }
13531}

/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
13551static pci_ers_result_t
13552lpfc_io_slot_reset_s4(struct pci_dev *pdev)
13553{
13554 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13555 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13556 struct lpfc_sli *psli = &phba->sli;
13557 uint32_t intr_mode;
13558
13559 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13560 if (pci_enable_device_mem(pdev)) {
13561 printk(KERN_ERR "lpfc: Cannot re-enable "
13562 "PCI device after reset.\n");
13563 return PCI_ERS_RESULT_DISCONNECT;
13564 }
13565
13566 pci_restore_state(pdev);
13567
	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
13572 pci_save_state(pdev);
13573
13574 if (pdev->is_busmaster)
13575 pci_set_master(pdev);
13576
13577 spin_lock_irq(&phba->hbalock);
13578 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13579 spin_unlock_irq(&phba->hbalock);
13580
	/* Configure and enable interrupt */
13582 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13583 if (intr_mode == LPFC_INTR_ERROR) {
13584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13585 "2824 Cannot re-enable interrupt after "
13586 "slot reset.\n");
13587 return PCI_ERS_RESULT_DISCONNECT;
13588 } else
13589 phba->intr_mode = intr_mode;
13590
	/* Log the current active interrupt mode */
13592 lpfc_log_intr_mode(phba, phba->intr_mode);
13593
13594 return PCI_ERS_RESULT_RECOVERED;
13595}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is OK to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this
 * device again.
 **/
13607static void
13608lpfc_io_resume_s4(struct pci_dev *pdev)
13609{
13610 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13611 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13612
	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
13619 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
13621 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13622 lpfc_offline(phba);
13623 lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
13625 lpfc_online(phba);
13626 }
13627}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver state that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * perform the proper initialization and then return.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
13647static int
13648lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13649{
13650 int rc;
13651 struct lpfc_sli_intf intf;
13652
13653 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13654 return -ENODEV;
13655
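	/*
	 * The SLI_INTF register identifies SLI-4 capable ports; dispatch
	 * to the SLI-4 (s4) or SLI-3 (s3) probe path accordingly.
	 */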
13656 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13657 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13658 rc = lpfc_pci_probe_one_s4(pdev, pid);
13659 else
13660 rc = lpfc_pci_probe_one_s3(pdev, pid);
13661
13662 return rc;
13663}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * removal routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
13675static void
13676lpfc_pci_remove_one(struct pci_dev *pdev)
13677{
13678 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13679 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13680
13681 switch (phba->pci_dev_grp) {
13682 case LPFC_PCI_DEV_LP:
13683 lpfc_pci_remove_one_s3(pdev);
13684 break;
13685 case LPFC_PCI_DEV_OC:
13686 lpfc_pci_remove_one_s4(pdev);
13687 break;
13688 default:
13689 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13690 "1424 Invalid PCI device group: 0x%x\n",
13691 phba->pci_dev_grp);
13692 break;
13693 }
13694 return;
13695}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine according
 * to the PCI device group the HBA belongs to.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
13711static int
13712lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
13713{
13714 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13715 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13716 int rc = -ENODEV;
13717
13718 switch (phba->pci_dev_grp) {
13719 case LPFC_PCI_DEV_LP:
13720 rc = lpfc_pci_suspend_one_s3(pdev, msg);
13721 break;
13722 case LPFC_PCI_DEV_OC:
13723 rc = lpfc_pci_suspend_one_s4(pdev, msg);
13724 break;
13725 default:
13726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13727 "1425 Invalid PCI device group: 0x%x\n",
13728 phba->pci_dev_grp);
13729 break;
13730 }
13731 return rc;
13732}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine according
 * to the PCI device group the HBA belongs to.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
13747static int
13748lpfc_pci_resume_one(struct pci_dev *pdev)
13749{
13750 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13751 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13752 int rc = -ENODEV;
13753
13754 switch (phba->pci_dev_grp) {
13755 case LPFC_PCI_DEV_LP:
13756 rc = lpfc_pci_resume_one_s3(pdev);
13757 break;
13758 case LPFC_PCI_DEV_OC:
13759 rc = lpfc_pci_resume_one_s4(pdev);
13760 break;
13761 default:
13762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13763 "1426 Invalid PCI device group: 0x%x\n",
13764 phba->pci_dev_grp);
13765 break;
13766 }
13767 return rc;
13768}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this function is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device error detected
 * handling routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
13785static pci_ers_result_t
13786lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13787{
13788 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13789 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13790 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13791
13792 switch (phba->pci_dev_grp) {
13793 case LPFC_PCI_DEV_LP:
13794 rc = lpfc_io_error_detected_s3(pdev, state);
13795 break;
13796 case LPFC_PCI_DEV_OC:
13797 rc = lpfc_io_error_detected_s4(pdev, state);
13798 break;
13799 default:
13800 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13801 "1427 Invalid PCI device group: 0x%x\n",
13802 phba->pci_dev_grp);
13803 break;
13804 }
13805 return rc;
13806}

/**
 * lpfc_io_slot_reset - lpfc method for restart the device from slot reset
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this function is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
13822static pci_ers_result_t
13823lpfc_io_slot_reset(struct pci_dev *pdev)
13824{
13825 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13826 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13827 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13828
13829 switch (phba->pci_dev_grp) {
13830 case LPFC_PCI_DEV_LP:
13831 rc = lpfc_io_slot_reset_s3(pdev);
13832 break;
13833 case LPFC_PCI_DEV_OC:
13834 rc = lpfc_io_slot_reset_s4(pdev);
13835 break;
13836 default:
13837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13838 "1428 Invalid PCI device group: 0x%x\n",
13839 phba->pci_dev_grp);
13840 break;
13841 }
13842 return rc;
13843}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
13855static void
13856lpfc_io_resume(struct pci_dev *pdev)
13857{
13858 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13859 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13860
13861 switch (phba->pci_dev_grp) {
13862 case LPFC_PCI_DEV_LP:
13863 lpfc_io_resume_s3(pdev);
13864 break;
13865 case LPFC_PCI_DEV_OC:
13866 lpfc_io_resume_s4(pdev);
13867 break;
13868 default:
13869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13870 "1429 Invalid PCI device group: 0x%x\n",
13871 phba->pci_dev_grp);
13872 break;
13873 }
13874 return;
13875}

/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set.  Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
13887static void
13888lpfc_sli4_oas_verify(struct lpfc_hba *phba)
13889{
13890
13891 if (!phba->cfg_EnableXLane)
13892 return;
13893
13894 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
13895 phba->cfg_fof = 1;
13896 } else {
13897 phba->cfg_fof = 0;
13898 mempool_destroy(phba->device_data_mem_pool);
13899 phba->device_data_mem_pool = NULL;
13900 }
13901
13902 return;
13903}

/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS is supported by the adapter. Check the
 * function through which RAS support enablement is to be done.
 **/
13912void
13913lpfc_sli4_ras_init(struct lpfc_hba *phba)
13914{
13915 switch (phba->pcidev->device) {
13916 case PCI_DEVICE_ID_LANCER_G6_FC:
13917 case PCI_DEVICE_ID_LANCER_G7_FC:
13918 phba->ras_fwlog.ras_hwsupport = true;
13919 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
13920 phba->cfg_ras_fwlog_buffsize)
13921 phba->ras_fwlog.ras_enabled = true;
13922 else
13923 phba->ras_fwlog.ras_enabled = false;
13924 break;
13925 default:
13926 phba->ras_fwlog.ras_hwsupport = false;
13927 }
13928}
13929
13930
13931MODULE_DEVICE_TABLE(pci, lpfc_id_table);
13932
13933static const struct pci_error_handlers lpfc_err_handler = {
13934 .error_detected = lpfc_io_error_detected,
13935 .slot_reset = lpfc_io_slot_reset,
13936 .resume = lpfc_io_resume,
13937};
13938
13939static struct pci_driver lpfc_driver = {
13940 .name = LPFC_DRIVER_NAME,
13941 .id_table = lpfc_id_table,
13942 .probe = lpfc_pci_probe_one,
13943 .remove = lpfc_pci_remove_one,
13944 .shutdown = lpfc_pci_remove_one,
13945 .suspend = lpfc_pci_suspend_one,
13946 .resume = lpfc_pci_resume_one,
13947 .err_handler = &lpfc_err_handler,
13948};
13949
13950static const struct file_operations lpfc_mgmt_fop = {
13951 .owner = THIS_MODULE,
13952};
13953
13954static struct miscdevice lpfc_mgmt_dev = {
13955 .minor = MISC_DYNAMIC_MINOR,
13956 .name = "lpfcmgmt",
13957 .fops = &lpfc_mgmt_fop,
13958};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
13972static int __init
13973lpfc_init(void)
13974{
13975 int error = 0;
13976
13977 printk(LPFC_MODULE_DESC "\n");
13978 printk(LPFC_COPYRIGHT "\n");
13979
13980 error = misc_register(&lpfc_mgmt_dev);
13981 if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);
13984
13985 lpfc_transport_functions.vport_create = lpfc_vport_create;
13986 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
13987 lpfc_transport_template =
13988 fc_attach_transport(&lpfc_transport_functions);
13989 if (lpfc_transport_template == NULL)
13990 return -ENOMEM;
13991 lpfc_vport_transport_template =
13992 fc_attach_transport(&lpfc_vport_transport_functions);
13993 if (lpfc_vport_transport_template == NULL) {
13994 fc_release_transport(lpfc_transport_template);
13995 return -ENOMEM;
13996 }
13997 lpfc_nvme_cmd_template();
13998 lpfc_nvmet_cmd_template();
13999
	/* Initialize in case vector mapping is needed */
14001 lpfc_present_cpu = num_present_cpus();
14002
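	/* Register a dynamic multi-instance CPU hotplug state; each HBA
	 * adds itself as an instance during probe.
	 */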
14003 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
14004 "lpfc/sli4:online",
14005 lpfc_cpu_online, lpfc_cpu_offline);
14006 if (error < 0)
14007 goto cpuhp_failure;
14008 lpfc_cpuhp_state = error;
14009
14010 error = pci_register_driver(&lpfc_driver);
14011 if (error)
14012 goto unwind;
14013
14014 return error;
14015
14016unwind:
14017 cpuhp_remove_multi_state(lpfc_cpuhp_state);
14018cpuhp_failure:
14019 fc_release_transport(lpfc_transport_template);
14020 fc_release_transport(lpfc_vport_transport_template);
14021
14022 return error;
14023}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
14032static void __exit
14033lpfc_exit(void)
14034{
14035 misc_deregister(&lpfc_mgmt_dev);
14036 pci_unregister_driver(&lpfc_driver);
14037 cpuhp_remove_multi_state(lpfc_cpuhp_state);
14038 fc_release_transport(lpfc_transport_template);
14039 fc_release_transport(lpfc_vport_transport_template);
14040 idr_destroy(&lpfc_hba_index);
14041}
14042
14043module_init(lpfc_init);
14044module_exit(lpfc_exit);
14045MODULE_LICENSE("GPL");
14046MODULE_DESCRIPTION(LPFC_MODULE_DESC);
14047MODULE_AUTHOR("Broadcom");
14048MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
14049