/*
 * lpfc_init.c - HBA initialization, error attention, heartbeat, and VPD
 * handling for the Emulex LightPulse Fibre Channel (lpfc) driver.
 */
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
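
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues the READ_NVPARM and READ_REV mailbox commands, then dumps and
 * parses the adapter Vital Product Data (VPD).
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   -ENOMEM - could not allocate memory for the mailbox command.
 **/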
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/* Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set based on lpfc_sli_mode
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
		if (offset + i > DMP_VPD_SIZE)
			i = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset, i);
		offset += i;
	} while (offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
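
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Records whether the firmware supports the temperature sensor, based on
 * the mailbox completion status, and frees the mailbox element.
 **/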
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
}
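
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Decodes the Option ROM version word returned by the DUMP mailbox command
 * into phba->OptionROMVersion, then frees the mailbox element.
 **/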
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
}
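
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Reconciles the vport's WWNN/WWPN with the service parameters read from
 * the adapter, honoring any user-configured soft WWNs and fabric-assigned
 * WWPN (FA-WWPN) settings.
 **/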
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else {
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
	}
}
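
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Runs after the CONFIG_PORT mailbox command completes successfully: reads
 * the service parameters and adapter configuration, enables host
 * interrupts, starts the ELS, heartbeat, and error-attention timers, and
 * brings up the link unless it is administratively disabled.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/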
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the configured HBA queue depth to the READ_CONFIG max_xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* Enable async event notification via the ELS ring */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
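
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Initializes the FC link with the configured topology and link speed.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/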
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
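
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Validates the configured link speed against the adapter's supported
 * speeds (falling back to auto-negotiation if invalid) and issues the
 * INIT_LINK mailbox command.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/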
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
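
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Issues the DOWN_LINK mailbox command to bring the FC link down.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/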
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
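
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Disables host interrupts (SLI-3 only) and cleans up discovery
 * resources on every active vport before the HBA is reset.
 *
 * Return code: always 0 (success).
 **/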
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
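
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free deferred cq events
 * @phba: pointer to lpfc HBA data structure.
 *
 * Drains the slow-path queue event list, releasing any deferred response
 * iocbs and receive buffers that are still queued.
 **/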
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
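
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees all posted receive buffers: HBQ buffers when HBQs are enabled,
 * otherwise the buffers queued on the ELS ring's postbufq.
 **/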
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
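
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Cancels all outstanding iocbs on the txcmplq of every ring (SLI-3) or
 * every work queue (SLI-4), completing them with IOERR_SLI_ABORTED.
 **/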
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
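
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-3 variant of the HBA-down post-processing: frees posted buffers
 * and cleans the txcmplq.
 *
 * Return code: always 0 (success).
 **/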
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
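
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-4 variant of the HBA-down post-processing: frees all HBQ buffers,
 * cleans the txcmplq, recycles aborted ELS SGLs and I/O buffers, posts
 * back any aborted NVMET context buffers, and drains deferred slow-path
 * queue events.
 *
 * Return: the number of recycled I/O buffers.
 **/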
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* abts_io_buf_list_lock required because worker thread uses this
	 * list.
	 */
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
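
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * Dispatches to the SLI-3 or SLI-4 specific routine through the
 * per-adapter function pointer set up at attach time.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/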
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
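
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * Posts a WORKER_HB_TMO event to the worker thread (unless one is already
 * pending) and wakes the worker so the heartbeat handler runs in process
 * context.
 **/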
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}
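
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * Sets HBA_RRQ_ACTIVE and wakes the worker thread so active RRQs are
 * processed; during driver unload the flag is cleared instead.
 **/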
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
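
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Clears the heartbeat in-progress/timeout flags and, when the port is
 * online and not unloading, rearms the heartbeat timer for the next
 * interval.
 **/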
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}
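
/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the delayed work structure.
 *
 * Samples per-CPU idle time to estimate how busy each hardware queue's
 * CPU is and switches the associated completion queue between work-queue
 * and IRQ-poll processing, then requeues itself.
 **/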
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated.
		 * busy_time = diff_wall - diff_idle
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
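
/**
 * lpfc_hb_eq_delay_work - Adjust EQ interrupt coalescing delay
 * @work: pointer to the delayed work structure.
 *
 * Periodically recomputes the per-EQ interrupt delay from the observed
 * interrupt counts, applies the new delay, and requeues itself.
 **/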
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
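
/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each hardware queue, moves XRIs between the public and private
 * pools and adjusts the high watermark; snapshots statistics when
 * LPFC_MXP_STAT is enabled.
 **/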
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}
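
/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a heartbeat mailbox command if one is not already outstanding
 * and marks HBA_HBEAT_INP while it is in flight.
 *
 * Return codes
 *   0 - successfully issued, or one already in progress
 *   -ENOMEM / -ENXIO - allocation or issue failure
 **/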
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}
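
/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to fire a heartbeat
 * @phba: pointer to lpfc hba data structure.
 *
 * If the regular heartbeat mailbox mechanism is enabled, the timer
 * handler already covers this and nothing is done; otherwise
 * HBA_HBEAT_TMO is set so the next timeout issues one explicitly.
 **/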
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}
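
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Worker-thread handler for the heartbeat timer: balances multi-XRI
 * pools, runs per-vport housekeeping, frees stale ELS buffers, and
 * issues a heartbeat mailbox command when no I/O completion has been
 * seen within the heartbeat interval, then rearms the timer.
 **/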
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
				jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0459 Adapter heartbeat still outstanding: "
				"last compl time was %d ms.\n",
				jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check if a MBX_HEARTBEAT is required */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
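
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-3 error-attention path: marks SLI inactive, takes the port
 * offline, resets the board, and leaves the HBA in error state.
 **/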
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}
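
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-4 error-attention path: marks the HBA in error state, flushes
 * outstanding I/O, and takes the port offline.
 **/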
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
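
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked when an SLI-3 HBA hardware error attention is deferred: aborts
 * outstanding FCP I/O, takes the port offline, and waits for the host
 * status register to clear HS_FFER1 before error handling continues.
 **/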
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
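
/**
 * lpfc_board_errevt_to_mgmt - Post board error event to management application
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts an FC_REG_BOARD_EVENT vendor-unique event so management
 * applications are notified of the port interrupt error.
 **/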
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *)&board_event,
				  LPFC_NL_VENDOR_ID);
}
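
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handles SLI-3 hardware error attention: depending on the host-status
 * bits it re-establishes the link, takes the port offline for
 * over-temperature, or posts a dump event and takes the port offline
 * for unrecoverable errors.
 **/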
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(event_data),
					  (char *)&event_data,
					  SCSI_NL_VID_TYPE_PCI |
					  PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
}
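
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * Recovers an SLI-4 port by taking it offline, restarting the board,
 * re-enabling interrupts, and bringing the port back online.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/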
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, thus we should clear LPFC_SLI_ACTIVE flag.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
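
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Decodes the SLI-4 port status and error registers and performs the
 * appropriate recovery: waiting for a recoverable unrecoverable-error
 * (UE) condition to clear, handling over-temperature, or attempting a
 * port function reset; otherwise posts a dump event to user space.
 **/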
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli4_offline_eratt(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1 second */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
						 en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3152 Unrecoverable error\n");
		phba->link_state = LPFC_HBA_ERROR;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* Post dump event to the upper layer */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *)&event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
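
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * Dispatches to the SLI-3 or SLI-4 specific error attention handler
 * through the per-adapter function pointer.
 **/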
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
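
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread to handle a HBA host attention link
 * event: flushes outstanding ELS commands and issues a READ_TOPOLOGY
 * mailbox command to retrieve the new link state.
 **/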
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
}
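
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * Walks the VPD resource descriptors and extracts the serial number,
 * model description, model name, program type, and port identifier
 * into the corresponding phba fields.
 *
 * Return codes
 *   0 - the VPD pointer passed in is NULL
 *   1 - success
 **/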
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] = vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] = vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						if ((phba->sli_rev == LPFC_SLI_REV4) &&
						    (phba->sli4_hba.pport_name_sta ==
						     LPFC_SLI4_PPNAME_GET)) {
							j++;
							index++;
						} else
							phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					if ((phba->sli_rev != LPFC_SLI_REV4) ||
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_NON))
						phba->Port[j] = 0;
					continue;
				} else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}
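
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * Derives the model name and description strings from the PCI device ID
 * when they were not already populated from the VPD data.
 **/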
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0;
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
2512 m = (typeof(m)){"LP11000-S", "PCI-X2",
2513 "Obsolete, Unsupported Fibre Channel Adapter"};
2514 break;
2515 case PCI_DEVICE_ID_LPE11000S:
2516 m = (typeof(m)){"LPe11000-S", "PCIe",
2517 "Obsolete, Unsupported Fibre Channel Adapter"};
2518 break;
2519 case PCI_DEVICE_ID_SAT:
2520 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2521 break;
2522 case PCI_DEVICE_ID_SAT_MID:
2523 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2524 break;
2525 case PCI_DEVICE_ID_SAT_SMB:
2526 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2527 break;
2528 case PCI_DEVICE_ID_SAT_DCSP:
2529 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2530 break;
2531 case PCI_DEVICE_ID_SAT_SCSP:
2532 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2533 break;
2534 case PCI_DEVICE_ID_SAT_S:
2535 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2536 break;
2537 case PCI_DEVICE_ID_HORNET:
2538 m = (typeof(m)){"LP21000", "PCIe",
2539 "Obsolete, Unsupported FCoE Adapter"};
2540 GE = 1;
2541 break;
2542 case PCI_DEVICE_ID_PROTEUS_VF:
2543 m = (typeof(m)){"LPev12000", "PCIe IOV",
2544 "Obsolete, Unsupported Fibre Channel Adapter"};
2545 break;
2546 case PCI_DEVICE_ID_PROTEUS_PF:
2547 m = (typeof(m)){"LPev12000", "PCIe IOV",
2548 "Obsolete, Unsupported Fibre Channel Adapter"};
2549 break;
2550 case PCI_DEVICE_ID_PROTEUS_S:
2551 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2552 "Obsolete, Unsupported Fibre Channel Adapter"};
2553 break;
2554 case PCI_DEVICE_ID_TIGERSHARK:
2555 oneConnect = 1;
2556 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2557 break;
2558 case PCI_DEVICE_ID_TOMCAT:
2559 oneConnect = 1;
2560 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2561 break;
2562 case PCI_DEVICE_ID_FALCON:
2563 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2564 "EmulexSecure Fibre"};
2565 break;
2566 case PCI_DEVICE_ID_BALIUS:
2567 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2568 "Obsolete, Unsupported Fibre Channel Adapter"};
2569 break;
2570 case PCI_DEVICE_ID_LANCER_FC:
2571 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2572 break;
2573 case PCI_DEVICE_ID_LANCER_FC_VF:
2574 m = (typeof(m)){"LPe16000", "PCIe",
2575 "Obsolete, Unsupported Fibre Channel Adapter"};
2576 break;
2577 case PCI_DEVICE_ID_LANCER_FCOE:
2578 oneConnect = 1;
2579 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2580 break;
2581 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2582 oneConnect = 1;
2583 m = (typeof(m)){"OCe15100", "PCIe",
2584 "Obsolete, Unsupported FCoE"};
2585 break;
2586 case PCI_DEVICE_ID_LANCER_G6_FC:
2587 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2588 break;
2589 case PCI_DEVICE_ID_LANCER_G7_FC:
2590 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2591 break;
2592 case PCI_DEVICE_ID_SKYHAWK:
2593 case PCI_DEVICE_ID_SKYHAWK_VF:
2594 oneConnect = 1;
2595 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2596 break;
2597 default:
2598 m = (typeof(m)){"Unknown", "", ""};
2599 break;
2600 }
2601
2602 if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
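	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */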
2608 if (descp && descp[0] == '\0') {
2609 if (oneConnect)
2610 snprintf(descp, 255,
2611 "Emulex OneConnect %s, %s Initiator %s",
2612 m.name, m.function,
2613 phba->Port);
2614 else if (max_speed == 0)
2615 snprintf(descp, 255,
2616 "Emulex %s %s %s",
2617 m.name, m.bus, m.function);
2618 else
2619 snprintf(descp, 255,
2620 "Emulex %s %d%s %s %s",
2621 m.name, max_speed, (GE) ? "GE" : "Gb",
2622 m.bus, m.function);
2623 }
2624}
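/**
 * lpfc_post_buffer - post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/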
2638int
2639lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2640{
2641 IOCB_t *icmd;
2642 struct lpfc_iocbq *iocb;
2643 struct lpfc_dmabuf *mp1, *mp2;
2644
2645 cnt += pring->missbufcnt;
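	/* While there are buffers to post */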
2648 while (cnt > 0) {
2649
2650 iocb = lpfc_sli_get_iocbq(phba);
2651 if (iocb == NULL) {
2652 pring->missbufcnt = cnt;
2653 return cnt;
2654 }
2655 icmd = &iocb->iocb;
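		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */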
		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2660 if (mp1)
2661 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2662 if (!mp1 || !mp1->virt) {
2663 kfree(mp1);
2664 lpfc_sli_release_iocbq(phba, iocb);
2665 pring->missbufcnt = cnt;
2666 return cnt;
2667 }
2668
2669 INIT_LIST_HEAD(&mp1->list);
2670
2671 if (cnt > 1) {
			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2673 if (mp2)
2674 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2675 &mp2->phys);
2676 if (!mp2 || !mp2->virt) {
2677 kfree(mp2);
2678 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2679 kfree(mp1);
2680 lpfc_sli_release_iocbq(phba, iocb);
2681 pring->missbufcnt = cnt;
2682 return cnt;
2683 }
2684
2685 INIT_LIST_HEAD(&mp2->list);
2686 } else {
2687 mp2 = NULL;
2688 }
2689
2690 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2691 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2692 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2693 icmd->ulpBdeCount = 1;
2694 cnt--;
2695 if (mp2) {
2696 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2697 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2698 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2699 cnt--;
2700 icmd->ulpBdeCount = 2;
2701 }
2702
2703 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2704 icmd->ulpLe = 1;
2705
2706 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2707 IOCB_ERROR) {
2708 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2709 kfree(mp1);
2710 cnt++;
2711 if (mp2) {
2712 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2713 kfree(mp2);
2714 cnt++;
2715 }
2716 lpfc_sli_release_iocbq(phba, iocb);
2717 pring->missbufcnt = cnt;
2718 return cnt;
2719 }
2720 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2721 if (mp2)
2722 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2723 }
2724 pring->missbufcnt = 0;
2725 return 0;
2726}
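/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * number of initial buffers is given by LPFC_BUF_RING0. SLI3 only.
 *
 * Return codes
 *   0 - success (currently always success)
 **/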
2739static int
2740lpfc_post_rcv_buf(struct lpfc_hba *phba)
2741{
2742 struct lpfc_sli *psli = &phba->sli;
2743
2744
2745 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2746
2747
2748 return 0;
2749}
2750
2751#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
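/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table
 * entries for the LC HBAs.
 **/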
2760static void
lpfc_sha_init(uint32_t *HashResultPointer)
2762{
2763 HashResultPointer[0] = 0x67452301;
2764 HashResultPointer[1] = 0xEFCDAB89;
2765 HashResultPointer[2] = 0x98BADCFE;
2766 HashResultPointer[3] = 0x10325476;
2767 HashResultPointer[4] = 0xC3D2E1F0;
2768}
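/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are put back into the initial hash table, returned through
 * @HashResultPointer as the result hash table.
 **/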
2780static void
lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2782{
2783 int t;
2784 uint32_t TEMP;
2785 uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
			S(1, HashWorkingPointer[t - 3] ^
			     HashWorkingPointer[t - 8] ^
			     HashWorkingPointer[t - 14] ^
			     HashWorkingPointer[t - 16]);
	} while (++t <= 79);
2794 t = 0;
2795 A = HashResultPointer[0];
2796 B = HashResultPointer[1];
2797 C = HashResultPointer[2];
2798 D = HashResultPointer[3];
2799 E = HashResultPointer[4];
2800
2801 do {
2802 if (t < 20) {
2803 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2804 } else if (t < 40) {
2805 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2806 } else if (t < 60) {
2807 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2808 } else {
2809 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2810 }
2811 TEMP += S(5, A) + E + HashWorkingPointer[t];
2812 E = D;
2813 D = C;
2814 C = S(30, B);
2815 B = A;
2816 A = TEMP;
2817 } while (++t <= 79);
2818
2819 HashResultPointer[0] += A;
2820 HashResultPointer[1] += B;
2821 HashResultPointer[2] += C;
2822 HashResultPointer[3] += D;
2823 HashResultPointer[4] += E;
2824
2825}
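/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine XORs the challenge random number into the corresponding
 * entry of the working hash array, returned by reference through
 * @HashWorking.
 **/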
2837static void
lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2839{
2840 *HashWorking = (*RandomChallenge ^ *HashWorking);
2841}
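/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization:
 * it computes a hash over the adapter's WWNN and the host challenge keys,
 * returning the result through @hbainit.
 **/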
2850void
2851lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2852{
2853 int t;
2854 uint32_t *HashWorking;
2855 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2856
2857 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2858 if (!HashWorking)
2859 return;
2860
2861 HashWorking[0] = HashWorking[78] = *pwwnn++;
2862 HashWorking[1] = HashWorking[79] = *pwwnn;
2863
2864 for (t = 0; t < 7; t++)
2865 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2866
2867 lpfc_sha_init(hbainit);
2868 lpfc_sha_iterate(hbainit, HashWorking);
2869 kfree(HashWorking);
2870}
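/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Any
 * ndlps still left on the node list after the wait below are logged as
 * errors.
 **/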
2881void
2882lpfc_cleanup(struct lpfc_vport *vport)
2883{
2884 struct lpfc_hba *phba = vport->phba;
2885 struct lpfc_nodelist *ndlp, *next_ndlp;
2886 int i = 0;
2887
2888 if (phba->link_state > LPFC_LINK_DOWN)
2889 lpfc_port_link_failure(vport);
2890
2891 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2892 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2893 ndlp->nlp_DID == Fabric_DID) {
2894
2895 lpfc_nlp_put(ndlp);
2896 continue;
2897 }
2898
2899 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2900 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2901 lpfc_nlp_put(ndlp);
2902 continue;
2903 }
2904
2905
2906
2907
2908 if (ndlp->nlp_type & NLP_FABRIC &&
2909 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
2910 lpfc_disc_state_machine(vport, ndlp, NULL,
2911 NLP_EVT_DEVICE_RECOVERY);
2912
2913 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2914 lpfc_disc_state_machine(vport, ndlp, NULL,
2915 NLP_EVT_DEVICE_RM);
2916 }
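	/* At this point, ALL ndlp's should be gone because of the previous
	 * NLP_EVT_DEVICE_RM event.  Poll here, sleeping between checks,
	 * until the node list drains or roughly 30 seconds have elapsed.
	 */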
2922 while (!list_empty(&vport->fc_nodes)) {
2923 if (i++ > 3000) {
2924 lpfc_printf_vlog(vport, KERN_ERR,
2925 LOG_TRACE_EVENT,
2926 "0233 Nodelist not empty\n");
2927 list_for_each_entry_safe(ndlp, next_ndlp,
2928 &vport->fc_nodes, nlp_listp) {
2929 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2930 LOG_TRACE_EVENT,
2931 "0282 did:x%x ndlp:x%px "
2932 "refcnt:%d xflags x%x nflag x%x\n",
2933 ndlp->nlp_DID, (void *)ndlp,
2934 kref_read(&ndlp->kref),
2935 ndlp->fc4_xpt_flags,
2936 ndlp->nlp_flag);
2937 }
2938 break;
2939 }
2940
2941
2942 msleep(10);
2943 }
2944 lpfc_cleanup_vports_rrqs(vport, NULL);
2945}
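/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/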
2955void
2956lpfc_stop_vport_timers(struct lpfc_vport *vport)
2957{
2958 del_timer_sync(&vport->els_tmofunc);
2959 del_timer_sync(&vport->delayed_disc_tmo);
2960 lpfc_can_disctmo(vport);
2961 return;
2962}
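/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/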
2971void
2972__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2973{
2974
2975 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2976
2977
2978 del_timer(&phba->fcf.redisc_wait);
2979}
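/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It also
 * clears the FCF DEAD and ACVL discovery states so that no FCF rediscovery
 * event is left pending for the worker thread.
 **/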
2990void
2991lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2992{
2993 spin_lock_irq(&phba->hbalock);
2994 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2995
2996 spin_unlock_irq(&phba->hbalock);
2997 return;
2998 }
2999 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3000
3001 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3002 spin_unlock_irq(&phba->hbalock);
3003}
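/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/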
3012void
3013lpfc_stop_hba_timers(struct lpfc_hba *phba)
3014{
3015 if (phba->pport)
3016 lpfc_stop_vport_timers(phba->pport);
3017 cancel_delayed_work_sync(&phba->eq_delay_work);
3018 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3019 del_timer_sync(&phba->sli.mbox_tmo);
3020 del_timer_sync(&phba->fabric_block_timer);
3021 del_timer_sync(&phba->eratt_poll);
3022 del_timer_sync(&phba->hb_tmofunc);
3023 if (phba->sli_rev == LPFC_SLI_REV4) {
3024 del_timer_sync(&phba->rrq_tmr);
3025 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3026 }
3027 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3028
3029 switch (phba->pci_dev_grp) {
3030 case LPFC_PCI_DEV_LP:
3031
3032 del_timer_sync(&phba->fcp_poll_timer);
3033 break;
3034 case LPFC_PCI_DEV_OC:
3035
3036 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3037 break;
3038 default:
3039 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3040 "0297 Invalid device group (x%x)\n",
3041 phba->pci_dev_grp);
3042 break;
3043 }
3044 return;
3045}
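/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine marks a HBA's management interface as blocked. Once blocked,
 * all user space access to the HBA, whether through the sysfs interface or
 * the libdfc interface, is rejected until the HBA is restored to the
 * "un-blocked" state. With LPFC_MBX_WAIT, it also waits for any active
 * mailbox command to complete.
 **/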
3058static void
3059lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3060{
3061 unsigned long iflag;
3062 uint8_t actcmd = MBX_HEARTBEAT;
3063 unsigned long timeout;
3064
3065 spin_lock_irqsave(&phba->hbalock, iflag);
3066 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3067 spin_unlock_irqrestore(&phba->hbalock, iflag);
3068 if (mbx_action == LPFC_MBX_NO_WAIT)
3069 return;
3070 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3071 spin_lock_irqsave(&phba->hbalock, iflag);
3072 if (phba->sli.mbox_active) {
3073 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3074
3075
3076
3077 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3078 phba->sli.mbox_active) * 1000) + jiffies;
3079 }
3080 spin_unlock_irqrestore(&phba->hbalock, iflag);
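	/* Wait for the outstanding mailbox command to complete */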
3083 while (phba->sli.mbox_active) {
3084
3085 msleep(2);
3086 if (time_after(jiffies, timeout)) {
3087 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3088 "2813 Mgmt IO is Blocked %x "
3089 "- mbox cmd %x still active\n",
3090 phba->sli.sli_flag, actcmd);
3091 break;
3092 }
3093 }
3094}
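/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine allocates RPIs for all active remote nodes. It is needed
 * whenever an SLI4 adapter is reset and the driver is not unloading; its
 * purpose is to fix up the temporary RPI assignments.
 **/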
3104void
3105lpfc_sli4_node_prep(struct lpfc_hba *phba)
3106{
3107 struct lpfc_nodelist *ndlp, *next_ndlp;
3108 struct lpfc_vport **vports;
3109 int i, rpi;
3110
3111 if (phba->sli_rev != LPFC_SLI_REV4)
3112 return;
3113
3114 vports = lpfc_create_vport_work_array(phba);
3115 if (vports == NULL)
3116 return;
3117
3118 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3119 if (vports[i]->load_flag & FC_UNLOADING)
3120 continue;
3121
3122 list_for_each_entry_safe(ndlp, next_ndlp,
3123 &vports[i]->fc_nodes,
3124 nlp_listp) {
3125 rpi = lpfc_sli4_alloc_rpi(phba);
3126 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3127
3128 continue;
3129 }
3130 ndlp->nlp_rpi = rpi;
3131 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3132 LOG_NODE | LOG_DISCOVERY,
3133 "0009 Assign RPI x%x to ndlp x%px "
3134 "DID:x%06x flg:x%x\n",
3135 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3136 ndlp->nlp_flag);
3137 }
3138 }
3139 lpfc_destroy_vport_work_array(phba, vports);
3140}
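/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from the lpfc_io_buf_list_put of HWQ 0
 * to the expedite pool and marks them as expedite.
 **/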
3149static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3150{
3151 struct lpfc_sli4_hdw_queue *qp;
3152 struct lpfc_io_buf *lpfc_ncmd;
3153 struct lpfc_io_buf *lpfc_ncmd_next;
3154 struct lpfc_epd_pool *epd_pool;
3155 unsigned long iflag;
3156
3157 epd_pool = &phba->epd_pool;
3158 qp = &phba->sli4_hba.hdwq[0];
3159
3160 spin_lock_init(&epd_pool->lock);
3161 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3162 spin_lock(&epd_pool->lock);
3163 INIT_LIST_HEAD(&epd_pool->list);
3164 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3165 &qp->lpfc_io_buf_list_put, list) {
3166 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3167 lpfc_ncmd->expedite = true;
3168 qp->put_io_bufs--;
3169 epd_pool->count++;
3170 if (epd_pool->count >= XRI_BATCH)
3171 break;
3172 }
3173 spin_unlock(&epd_pool->lock);
3174 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3175}
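/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns the XRIs from the expedite pool to the
 * lpfc_io_buf_list_put of HWQ 0 and clears the expedite mark.
 **/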
3184static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3185{
3186 struct lpfc_sli4_hdw_queue *qp;
3187 struct lpfc_io_buf *lpfc_ncmd;
3188 struct lpfc_io_buf *lpfc_ncmd_next;
3189 struct lpfc_epd_pool *epd_pool;
3190 unsigned long iflag;
3191
3192 epd_pool = &phba->epd_pool;
3193 qp = &phba->sli4_hba.hdwq[0];
3194
3195 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3196 spin_lock(&epd_pool->lock);
3197 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3198 &epd_pool->list, list) {
3199 list_move_tail(&lpfc_ncmd->list,
3200 &qp->lpfc_io_buf_list_put);
		lpfc_ncmd->expedite = false;
3202 qp->put_io_bufs++;
3203 epd_pool->count--;
3204 }
3205 spin_unlock(&epd_pool->lock);
3206 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3207}
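/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private XRI pools for each HWQ,
 * then moves XRIs from lpfc_io_buf_list_put to the public pool. The high
 * and low watermarks of the private pool are also calculated.
 **/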
3217void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3218{
3219 u32 i, j;
3220 u32 hwq_count;
3221 u32 count_per_hwq;
3222 struct lpfc_io_buf *lpfc_ncmd;
3223 struct lpfc_io_buf *lpfc_ncmd_next;
3224 unsigned long iflag;
3225 struct lpfc_sli4_hdw_queue *qp;
3226 struct lpfc_multixri_pool *multixri_pool;
3227 struct lpfc_pbl_pool *pbl_pool;
3228 struct lpfc_pvt_pool *pvt_pool;
3229
3230 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3231 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3232 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3233 phba->sli4_hba.io_xri_cnt);
3234
3235 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3236 lpfc_create_expedite_pool(phba);
3237
3238 hwq_count = phba->cfg_hdw_queue;
3239 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3240
3241 for (i = 0; i < hwq_count; i++) {
3242 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3243
3244 if (!multixri_pool) {
3245 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3246 "1238 Failed to allocate memory for "
3247 "multixri_pool\n");
3248
3249 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3250 lpfc_destroy_expedite_pool(phba);
3251
3252 j = 0;
3253 while (j < i) {
3254 qp = &phba->sli4_hba.hdwq[j];
3255 kfree(qp->p_multixri_pool);
3256 j++;
3257 }
3258 phba->cfg_xri_rebalancing = 0;
3259 return;
3260 }
3261
3262 qp = &phba->sli4_hba.hdwq[i];
3263 qp->p_multixri_pool = multixri_pool;
3264
3265 multixri_pool->xri_limit = count_per_hwq;
3266 multixri_pool->rrb_next_hwqid = i;
3267
3268
3269 pbl_pool = &multixri_pool->pbl_pool;
3270 spin_lock_init(&pbl_pool->lock);
3271 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3272 spin_lock(&pbl_pool->lock);
3273 INIT_LIST_HEAD(&pbl_pool->list);
3274 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3275 &qp->lpfc_io_buf_list_put, list) {
3276 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3277 qp->put_io_bufs--;
3278 pbl_pool->count++;
3279 }
3280 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3281 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3282 pbl_pool->count, i);
3283 spin_unlock(&pbl_pool->lock);
3284 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3285
3286
3287 pvt_pool = &multixri_pool->pvt_pool;
3288 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3289 pvt_pool->low_watermark = XRI_BATCH;
3290 spin_lock_init(&pvt_pool->lock);
3291 spin_lock_irqsave(&pvt_pool->lock, iflag);
3292 INIT_LIST_HEAD(&pvt_pool->list);
3293 pvt_pool->count = 0;
3294 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3295 }
3296}
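/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns the XRIs from the public and private pools back to
 * lpfc_io_buf_list_put and frees the per-HWQ pool structures.
 **/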
3304static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3305{
3306 u32 i;
3307 u32 hwq_count;
3308 struct lpfc_io_buf *lpfc_ncmd;
3309 struct lpfc_io_buf *lpfc_ncmd_next;
3310 unsigned long iflag;
3311 struct lpfc_sli4_hdw_queue *qp;
3312 struct lpfc_multixri_pool *multixri_pool;
3313 struct lpfc_pbl_pool *pbl_pool;
3314 struct lpfc_pvt_pool *pvt_pool;
3315
3316 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3317 lpfc_destroy_expedite_pool(phba);
3318
3319 if (!(phba->pport->load_flag & FC_UNLOADING))
3320 lpfc_sli_flush_io_rings(phba);
3321
3322 hwq_count = phba->cfg_hdw_queue;
3323
3324 for (i = 0; i < hwq_count; i++) {
3325 qp = &phba->sli4_hba.hdwq[i];
3326 multixri_pool = qp->p_multixri_pool;
3327 if (!multixri_pool)
3328 continue;
3329
3330 qp->p_multixri_pool = NULL;
3331
3332 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3333
3334
3335 pbl_pool = &multixri_pool->pbl_pool;
3336 spin_lock(&pbl_pool->lock);
3337
3338 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3339 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3340 pbl_pool->count, i);
3341
3342 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3343 &pbl_pool->list, list) {
3344 list_move_tail(&lpfc_ncmd->list,
3345 &qp->lpfc_io_buf_list_put);
3346 qp->put_io_bufs++;
3347 pbl_pool->count--;
3348 }
3349
3350 INIT_LIST_HEAD(&pbl_pool->list);
3351 pbl_pool->count = 0;
3352
3353 spin_unlock(&pbl_pool->lock);
3354
3355
3356 pvt_pool = &multixri_pool->pvt_pool;
3357 spin_lock(&pvt_pool->lock);
3358
3359 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3360 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3361 pvt_pool->count, i);
3362
3363 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3364 &pvt_pool->list, list) {
3365 list_move_tail(&lpfc_ncmd->list,
3366 &qp->lpfc_io_buf_list_put);
3367 qp->put_io_bufs++;
3368 pvt_pool->count--;
3369 }
3370
3371 INIT_LIST_HEAD(&pvt_pool->list);
3372 pvt_pool->count = 0;
3373
3374 spin_unlock(&pvt_pool->lock);
3375 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3376
3377 kfree(multixri_pool);
3378 }
3379}
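/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings it online. During this
 * process, the management interface is blocked to prevent user space
 * access to the HBA from interfering with the initialization; it is
 * unblocked again once the process completes.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/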
3393int
3394lpfc_online(struct lpfc_hba *phba)
3395{
3396 struct lpfc_vport *vport;
3397 struct lpfc_vport **vports;
3398 int i, error = 0;
3399 bool vpis_cleared = false;
3400
3401 if (!phba)
3402 return 0;
3403 vport = phba->pport;
3404
3405 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3406 return 0;
3407
3408 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3409 "0458 Bring Adapter online\n");
3410
3411 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3412
3413 if (phba->sli_rev == LPFC_SLI_REV4) {
3414 if (lpfc_sli4_hba_setup(phba)) {
3415 lpfc_unblock_mgmt_io(phba);
3416 return 1;
3417 }
3418 spin_lock_irq(&phba->hbalock);
3419 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3420 vpis_cleared = true;
3421 spin_unlock_irq(&phba->hbalock);
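		/* Reestablish the local initiator port.
		 * The offline process destroyed the previous lport.
		 */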
3426 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3427 !phba->nvmet_support) {
3428 error = lpfc_nvme_create_localport(phba->pport);
3429 if (error)
3430 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3431 "6132 NVME restore reg failed "
3432 "on nvmei error x%x\n", error);
3433 }
3434 } else {
3435 lpfc_sli_queue_init(phba);
3436 if (lpfc_sli_hba_setup(phba)) {
3437 lpfc_unblock_mgmt_io(phba);
3438 return 1;
3439 }
3440 }
3441
3442 vports = lpfc_create_vport_work_array(phba);
3443 if (vports != NULL) {
3444 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3445 struct Scsi_Host *shost;
3446 shost = lpfc_shost_from_vport(vports[i]);
3447 spin_lock_irq(shost->host_lock);
3448 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3449 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3450 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3451 if (phba->sli_rev == LPFC_SLI_REV4) {
3452 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3453 if ((vpis_cleared) &&
3454 (vports[i]->port_type !=
3455 LPFC_PHYSICAL_PORT))
3456 vports[i]->vpi = 0;
3457 }
3458 spin_unlock_irq(shost->host_lock);
3459 }
3460 }
3461 lpfc_destroy_vport_work_array(phba, vports);
3462
3463 if (phba->cfg_xri_rebalancing)
3464 lpfc_create_multixri_pools(phba);
3465
3466 lpfc_cpuhp_add(phba);
3467
3468 lpfc_unblock_mgmt_io(phba);
3469 return 0;
3470}
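/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface as not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * management interface is marked as not blocked, all the user space access
 * to the HBA, whether from the sysfs interface or the libdfc interface, is
 * allowed again.
 **/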
3483void
3484lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3485{
3486 unsigned long iflag;
3487
3488 spin_lock_irqsave(&phba->hbalock, iflag);
3489 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3490 spin_unlock_irqrestore(&phba->hbalock, iflag);
3491}
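/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It
 * performs unregistration login to all the nodes on all vports and flushes
 * the mailbox queue to make it ready to be brought offline.
 **/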
3502void
3503lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3504{
3505 struct lpfc_vport *vport = phba->pport;
3506 struct lpfc_nodelist *ndlp, *next_ndlp;
3507 struct lpfc_vport **vports;
3508 struct Scsi_Host *shost;
3509 int i;
3510
3511 if (vport->fc_flag & FC_OFFLINE_MODE)
3512 return;
3513
3514 lpfc_block_mgmt_io(phba, mbx_action);
3515
3516 lpfc_linkdown(phba);
3517
3518
3519 vports = lpfc_create_vport_work_array(phba);
3520 if (vports != NULL) {
3521 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3522 if (vports[i]->load_flag & FC_UNLOADING)
3523 continue;
3524 shost = lpfc_shost_from_vport(vports[i]);
3525 spin_lock_irq(shost->host_lock);
3526 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3527 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3528 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3529 spin_unlock_irq(shost->host_lock);
3530
3531 shost = lpfc_shost_from_vport(vports[i]);
3532 list_for_each_entry_safe(ndlp, next_ndlp,
3533 &vports[i]->fc_nodes,
3534 nlp_listp) {
3535 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3536
3537
3538
3539 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3540 continue;
3541 }
3542
3543 spin_lock_irq(&ndlp->lock);
3544 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3545 spin_unlock_irq(&ndlp->lock);
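				/*
				 * Whenever an SLI4 port goes offline, free
				 * the RPI.  A new RPI is obtained when the
				 * adapter port comes back online.
				 */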
3551 if (phba->sli_rev == LPFC_SLI_REV4) {
3552 lpfc_printf_vlog(vports[i], KERN_INFO,
3553 LOG_NODE | LOG_DISCOVERY,
3554 "0011 Free RPI x%x on "
3555 "ndlp: %p did x%x\n",
3556 ndlp->nlp_rpi, ndlp,
3557 ndlp->nlp_DID);
3558 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3559 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3560 }
3561 lpfc_unreg_rpi(vports[i], ndlp);
3562
3563 if (ndlp->nlp_type & NLP_FABRIC) {
3564 lpfc_disc_state_machine(vports[i], ndlp,
3565 NULL, NLP_EVT_DEVICE_RECOVERY);
3566
3567
3568
3569
3570
3571
3572 if (!(ndlp->fc4_xpt_flags &
3573 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3574 lpfc_disc_state_machine
3575 (vports[i], ndlp,
3576 NULL,
3577 NLP_EVT_DEVICE_RM);
3578 }
3579 }
3580 }
3581 }
3582 lpfc_destroy_vport_work_array(phba, vports);
3583
3584 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3585
3586 if (phba->wq)
3587 flush_workqueue(phba->wq);
3588}
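/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/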
3598void
3599lpfc_offline(struct lpfc_hba *phba)
3600{
3601 struct Scsi_Host *shost;
3602 struct lpfc_vport **vports;
3603 int i;
3604
3605 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3606 return;
3607
3608
3609 lpfc_stop_port(phba);
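	/* Tear down the local and target port registrations so the
	 * nvme transports can clean up.
	 */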
3614 lpfc_nvmet_destroy_targetport(phba);
3615 lpfc_nvme_destroy_localport(phba->pport);
3616
3617 vports = lpfc_create_vport_work_array(phba);
3618 if (vports != NULL)
3619 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3620 lpfc_stop_vport_timers(vports[i]);
3621 lpfc_destroy_vport_work_array(phba, vports);
3622 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3623 "0460 Bring Adapter offline\n");
3624
3625
3626 lpfc_sli_hba_down(phba);
3627 spin_lock_irq(&phba->hbalock);
3628 phba->work_ha = 0;
3629 spin_unlock_irq(&phba->hbalock);
3630 vports = lpfc_create_vport_work_array(phba);
3631 if (vports != NULL)
3632 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3633 shost = lpfc_shost_from_vport(vports[i]);
3634 spin_lock_irq(shost->host_lock);
3635 vports[i]->work_port_events = 0;
3636 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3637 spin_unlock_irq(shost->host_lock);
3638 }
3639 lpfc_destroy_vport_work_array(phba, vports);
3640
3641
3642
3643 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3644 __lpfc_cpuhp_remove(phba);
3645
3646 if (phba->cfg_xri_rebalancing)
3647 lpfc_destroy_multixri_pools(phba);
3648}
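/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the SCSI buffers and IOCBs from the driver lists
 * back to the kernel. It is called from lpfc_pci_remove_one to free the
 * internal resources before the device is removed from the system.
 **/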
3658static void
3659lpfc_scsi_free(struct lpfc_hba *phba)
3660{
3661 struct lpfc_io_buf *sb, *sb_next;
3662
3663 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3664 return;
3665
3666 spin_lock_irq(&phba->hbalock);
3667
3668
3669
3670 spin_lock(&phba->scsi_buf_list_put_lock);
3671 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3672 list) {
3673 list_del(&sb->list);
3674 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3675 sb->dma_handle);
3676 kfree(sb);
3677 phba->total_scsi_bufs--;
3678 }
3679 spin_unlock(&phba->scsi_buf_list_put_lock);
3680
3681 spin_lock(&phba->scsi_buf_list_get_lock);
3682 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3683 list) {
3684 list_del(&sb->list);
3685 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3686 sb->dma_handle);
3687 kfree(sb);
3688 phba->total_scsi_bufs--;
3689 }
3690 spin_unlock(&phba->scsi_buf_list_get_lock);
3691 spin_unlock_irq(&phba->hbalock);
3692}
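/**
 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the IO buffers and IOCBs from the driver lists
 * back to the kernel. It is called from lpfc_pci_remove_one to free the
 * internal resources before the device is removed from the system.
 **/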
3702void
3703lpfc_io_free(struct lpfc_hba *phba)
3704{
3705 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3706 struct lpfc_sli4_hdw_queue *qp;
3707 int idx;
3708
3709 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3710 qp = &phba->sli4_hba.hdwq[idx];
3711
3712 spin_lock(&qp->io_buf_list_put_lock);
3713 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3714 &qp->lpfc_io_buf_list_put,
3715 list) {
3716 list_del(&lpfc_ncmd->list);
3717 qp->put_io_bufs--;
3718 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3719 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3720 if (phba->cfg_xpsgl && !phba->nvmet_support)
3721 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3722 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3723 kfree(lpfc_ncmd);
3724 qp->total_io_bufs--;
3725 }
3726 spin_unlock(&qp->io_buf_list_put_lock);
3727
3728 spin_lock(&qp->io_buf_list_get_lock);
3729 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3730 &qp->lpfc_io_buf_list_get,
3731 list) {
3732 list_del(&lpfc_ncmd->list);
3733 qp->get_io_bufs--;
3734 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3735 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3736 if (phba->cfg_xpsgl && !phba->nvmet_support)
3737 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3738 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3739 kfree(lpfc_ncmd);
3740 qp->total_io_bufs--;
3741 }
3742 spin_unlock(&qp->io_buf_list_get_lock);
3743 }
3744}
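/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the size the ELS sgl list needs to be,
 * grows or shrinks the list accordingly, and then walks all ELS sgls to
 * update the physical XRIs assigned due to port function reset.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - No available memory
 **/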
3758int
3759lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3760{
3761 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3762 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3763 LIST_HEAD(els_sgl_list);
3764 int rc;
3765
3766
3767
3768
3769 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3770
3771 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3772
3773 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3774 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3775 "3157 ELS xri-sgl count increased from "
3776 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3777 els_xri_cnt);
3778
3779 for (i = 0; i < xri_cnt; i++) {
3780 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3781 GFP_KERNEL);
3782 if (sglq_entry == NULL) {
3783 lpfc_printf_log(phba, KERN_ERR,
3784 LOG_TRACE_EVENT,
3785 "2562 Failure to allocate an "
3786 "ELS sgl entry:%d\n", i);
3787 rc = -ENOMEM;
3788 goto out_free_mem;
3789 }
3790 sglq_entry->buff_type = GEN_BUFF_TYPE;
3791 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3792 &sglq_entry->phys);
3793 if (sglq_entry->virt == NULL) {
3794 kfree(sglq_entry);
3795 lpfc_printf_log(phba, KERN_ERR,
3796 LOG_TRACE_EVENT,
3797 "2563 Failure to allocate an "
3798 "ELS mbuf:%d\n", i);
3799 rc = -ENOMEM;
3800 goto out_free_mem;
3801 }
3802 sglq_entry->sgl = sglq_entry->virt;
3803 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3804 sglq_entry->state = SGL_FREED;
3805 list_add_tail(&sglq_entry->list, &els_sgl_list);
3806 }
3807 spin_lock_irq(&phba->hbalock);
3808 spin_lock(&phba->sli4_hba.sgl_list_lock);
3809 list_splice_init(&els_sgl_list,
3810 &phba->sli4_hba.lpfc_els_sgl_list);
3811 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3812 spin_unlock_irq(&phba->hbalock);
3813 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3814
3815 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3816 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3817 "3158 ELS xri-sgl count decreased from "
3818 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3819 els_xri_cnt);
3820 spin_lock_irq(&phba->hbalock);
3821 spin_lock(&phba->sli4_hba.sgl_list_lock);
3822 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3823 &els_sgl_list);
3824
3825 for (i = 0; i < xri_cnt; i++) {
3826 list_remove_head(&els_sgl_list,
3827 sglq_entry, struct lpfc_sglq, list);
3828 if (sglq_entry) {
3829 __lpfc_mbuf_free(phba, sglq_entry->virt,
3830 sglq_entry->phys);
3831 kfree(sglq_entry);
3832 }
3833 }
3834 list_splice_init(&els_sgl_list,
3835 &phba->sli4_hba.lpfc_els_sgl_list);
3836 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3837 spin_unlock_irq(&phba->hbalock);
3838 } else
3839 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3840 "3163 ELS xri-sgl count unchanged: %d\n",
3841 els_xri_cnt);
3842 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3843
3844
3845 sglq_entry = NULL;
3846 sglq_entry_next = NULL;
3847 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3848 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3849 lxri = lpfc_sli4_next_xritag(phba);
3850 if (lxri == NO_XRI) {
3851 lpfc_printf_log(phba, KERN_ERR,
3852 LOG_TRACE_EVENT,
3853 "2400 Failed to allocate xri for "
3854 "ELS sgl\n");
3855 rc = -ENOMEM;
3856 goto out_free_mem;
3857 }
3858 sglq_entry->sli4_lxritag = lxri;
3859 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3860 }
3861 return 0;
3862
3863out_free_mem:
3864 lpfc_free_els_sgl_list(phba);
3865 return rc;
3866}
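/**
 * lpfc_sli4_nvmet_sgl_update - update NVMET xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the size the NVMET sgl list needs to be,
 * grows or shrinks the list accordingly, and then walks all NVMET sgls to
 * update the physical XRIs assigned due to port function reset.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - No available memory
 **/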
3880int
3881lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3882{
3883 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3884 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3885 uint16_t nvmet_xri_cnt;
3886 LIST_HEAD(nvmet_sgl_list);
3887 int rc;
3888
3889
3890
3891
3892 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3893
3894
3895 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3896 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3897
3898 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3899 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3900 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3901 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3902
3903 for (i = 0; i < xri_cnt; i++) {
3904 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3905 GFP_KERNEL);
3906 if (sglq_entry == NULL) {
3907 lpfc_printf_log(phba, KERN_ERR,
3908 LOG_TRACE_EVENT,
3909 "6303 Failure to allocate an "
3910 "NVMET sgl entry:%d\n", i);
3911 rc = -ENOMEM;
3912 goto out_free_mem;
3913 }
3914 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3915 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3916 &sglq_entry->phys);
3917 if (sglq_entry->virt == NULL) {
3918 kfree(sglq_entry);
3919 lpfc_printf_log(phba, KERN_ERR,
3920 LOG_TRACE_EVENT,
3921 "6304 Failure to allocate an "
3922 "NVMET buf:%d\n", i);
3923 rc = -ENOMEM;
3924 goto out_free_mem;
3925 }
3926 sglq_entry->sgl = sglq_entry->virt;
3927 memset(sglq_entry->sgl, 0,
3928 phba->cfg_sg_dma_buf_size);
3929 sglq_entry->state = SGL_FREED;
3930 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3931 }
3932 spin_lock_irq(&phba->hbalock);
3933 spin_lock(&phba->sli4_hba.sgl_list_lock);
3934 list_splice_init(&nvmet_sgl_list,
3935 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3936 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3937 spin_unlock_irq(&phba->hbalock);
3938 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3939
3940 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3941 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3942 "6305 NVMET xri-sgl count decreased from "
3943 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3944 nvmet_xri_cnt);
3945 spin_lock_irq(&phba->hbalock);
3946 spin_lock(&phba->sli4_hba.sgl_list_lock);
3947 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3948 &nvmet_sgl_list);
3949
3950 for (i = 0; i < xri_cnt; i++) {
3951 list_remove_head(&nvmet_sgl_list,
3952 sglq_entry, struct lpfc_sglq, list);
3953 if (sglq_entry) {
3954 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3955 sglq_entry->phys);
3956 kfree(sglq_entry);
3957 }
3958 }
3959 list_splice_init(&nvmet_sgl_list,
3960 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3961 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3962 spin_unlock_irq(&phba->hbalock);
3963 } else
3964 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3965 "6306 NVMET xri-sgl count unchanged: %d\n",
3966 nvmet_xri_cnt);
3967 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3968
3969
3970 sglq_entry = NULL;
3971 sglq_entry_next = NULL;
3972 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3973 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3974 lxri = lpfc_sli4_next_xritag(phba);
3975 if (lxri == NO_XRI) {
3976 lpfc_printf_log(phba, KERN_ERR,
3977 LOG_TRACE_EVENT,
3978 "6307 Failed to allocate xri for "
3979 "NVMET sgl\n");
3980 rc = -ENOMEM;
3981 goto out_free_mem;
3982 }
3983 sglq_entry->sli4_lxritag = lxri;
3984 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3985 }
3986 return 0;
3987
3988out_free_mem:
3989 lpfc_free_nvmet_sgl_list(phba);
3990 return rc;
3991}
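/**
 * lpfc_io_buf_flush - Collect all HWQ IO buffers onto one sorted list
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list head to receive the IO buffers.
 *
 * This routine empties the get and put lists of every hardware queue and
 * gathers the IO buffers onto @cbuf, insertion-sorted by ascending XRI so
 * the buffers can be reposted in order.
 *
 * Return codes
 *   The number of IO buffers collected.
 **/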
3993int
3994lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3995{
3996 LIST_HEAD(blist);
3997 struct lpfc_sli4_hdw_queue *qp;
3998 struct lpfc_io_buf *lpfc_cmd;
3999 struct lpfc_io_buf *iobufp, *prev_iobufp;
4000 int idx, cnt, xri, inserted;
4001
4002 cnt = 0;
4003 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4004 qp = &phba->sli4_hba.hdwq[idx];
4005 spin_lock_irq(&qp->io_buf_list_get_lock);
4006 spin_lock(&qp->io_buf_list_put_lock);
4007
4008
4009 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4010 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4011 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4012 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4013 cnt += qp->get_io_bufs + qp->put_io_bufs;
4014 qp->get_io_bufs = 0;
4015 qp->put_io_bufs = 0;
4016 qp->total_io_bufs = 0;
4017 spin_unlock(&qp->io_buf_list_put_lock);
4018 spin_unlock_irq(&qp->io_buf_list_get_lock);
4019 }
4020
4021
4022
4023
4024
4025
4026 for (idx = 0; idx < cnt; idx++) {
4027 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4028 if (!lpfc_cmd)
4029 return cnt;
4030 if (idx == 0) {
4031 list_add_tail(&lpfc_cmd->list, cbuf);
4032 continue;
4033 }
4034 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4035 inserted = 0;
4036 prev_iobufp = NULL;
4037 list_for_each_entry(iobufp, cbuf, list) {
4038 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4039 if (prev_iobufp)
4040 list_add(&lpfc_cmd->list,
4041 &prev_iobufp->list);
4042 else
4043 list_add(&lpfc_cmd->list, cbuf);
4044 inserted = 1;
4045 break;
4046 }
4047 prev_iobufp = iobufp;
4048 }
4049 if (!inserted)
4050 list_add_tail(&lpfc_cmd->list, cbuf);
4051 }
4052 return cnt;
4053}
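/**
 * lpfc_io_buf_replenish - Redistribute IO buffers back to the HWQs
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list of IO buffers previously collected by lpfc_io_buf_flush.
 *
 * This routine hands the IO buffers on @cbuf back to the hardware queues in
 * round-robin order, rebinding each buffer to its new hardware queue.
 *
 * Return codes
 *   The number of IO buffers replenished.
 **/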
4055int
4056lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4057{
4058 struct lpfc_sli4_hdw_queue *qp;
4059 struct lpfc_io_buf *lpfc_cmd;
4060 int idx, cnt;
4061
4062 qp = phba->sli4_hba.hdwq;
4063 cnt = 0;
4064 while (!list_empty(cbuf)) {
4065 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4066 list_remove_head(cbuf, lpfc_cmd,
4067 struct lpfc_io_buf, list);
4068 if (!lpfc_cmd)
4069 return cnt;
4070 cnt++;
4071 qp = &phba->sli4_hba.hdwq[idx];
4072 lpfc_cmd->hdwq_no = idx;
4073 lpfc_cmd->hdwq = qp;
4074 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4075 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4076 spin_lock(&qp->io_buf_list_put_lock);
4077 list_add_tail(&lpfc_cmd->list,
4078 &qp->lpfc_io_buf_list_put);
4079 qp->put_io_bufs++;
4080 qp->total_io_bufs++;
4081 spin_unlock(&qp->io_buf_list_put_lock);
4082 }
4083 }
4084 return cnt;
4085}
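/**
 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first recalculates the maximum number of IO XRIs left after
 * the ELS XRIs are taken out, releases any buffers beyond that limit, and
 * then walks the remaining IO buffers to update the physical XRIs assigned
 * due to port function reset.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - No available memory
 **/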
4099int
4100lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4101{
4102 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4103 uint16_t i, lxri, els_xri_cnt;
4104 uint16_t io_xri_cnt, io_xri_max;
4105 LIST_HEAD(io_sgl_list);
4106 int rc, cnt;
4107
4108
4109
4110
4111
4112
4113 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4114 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4115 phba->sli4_hba.io_xri_max = io_xri_max;
4116
4117 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4118 "6074 Current allocated XRI sgl count:%d, "
4119 "maximum XRI count:%d\n",
4120 phba->sli4_hba.io_xri_cnt,
4121 phba->sli4_hba.io_xri_max);
4122
4123 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4124
4125 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4126
4127 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4128 phba->sli4_hba.io_xri_max;
4129
4130 for (i = 0; i < io_xri_cnt; i++) {
4131 list_remove_head(&io_sgl_list, lpfc_ncmd,
4132 struct lpfc_io_buf, list);
4133 if (lpfc_ncmd) {
4134 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4135 lpfc_ncmd->data,
4136 lpfc_ncmd->dma_handle);
4137 kfree(lpfc_ncmd);
4138 }
4139 }
4140 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4141 }
4142
4143
4144 lpfc_ncmd = NULL;
4145 lpfc_ncmd_next = NULL;
4146 phba->sli4_hba.io_xri_cnt = cnt;
4147 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4148 &io_sgl_list, list) {
4149 lxri = lpfc_sli4_next_xritag(phba);
4150 if (lxri == NO_XRI) {
4151 lpfc_printf_log(phba, KERN_ERR,
4152 LOG_TRACE_EVENT,
4153 "6075 Failed to allocate xri for "
4154 "nvme buffer\n");
4155 rc = -ENOMEM;
4156 goto out_free_mem;
4157 }
4158 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4159 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4160 }
4161 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4162 return 0;
4163
4164out_free_mem:
4165 lpfc_io_free(phba);
4166 return rc;
4167}
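/**
 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
 * @phba: pointer to lpfc hba data structure.
 * @num_to_alloc: the requested number of buffers to allocate.
 *
 * This routine allocates IO buffers for a device with the SLI-4 interface
 * spec. Each buffer contains all the information needed to initiate an I/O.
 * After allocating up to @num_to_alloc buffers and putting them on a list,
 * it posts them to the port using an SGL block post.
 *
 * Return codes
 *   The number of IO buffers allocated and posted; 0 on complete failure,
 *   less than @num_to_alloc on partial failure.
 **/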
4183int
4184lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4185{
4186 struct lpfc_io_buf *lpfc_ncmd;
4187 struct lpfc_iocbq *pwqeq;
4188 uint16_t iotag, lxri = 0;
4189 int bcnt, num_posted;
4190 LIST_HEAD(prep_nblist);
4191 LIST_HEAD(post_nblist);
4192 LIST_HEAD(nvme_nblist);
4193
4194 phba->sli4_hba.io_xri_cnt = 0;
4195 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4196 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4197 if (!lpfc_ncmd)
4198 break;
4199
4200
4201
4202
4203
4204 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4205 GFP_KERNEL,
4206 &lpfc_ncmd->dma_handle);
4207 if (!lpfc_ncmd->data) {
4208 kfree(lpfc_ncmd);
4209 break;
4210 }
4211
4212 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4213 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4214 } else {
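			/*
			 * 4K Page alignment is CRITICAL to BlockGuard, double
			 * check to be sure.
			 */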
4219 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4220 (((unsigned long)(lpfc_ncmd->data) &
4221 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4222 lpfc_printf_log(phba, KERN_ERR,
4223 LOG_TRACE_EVENT,
4224 "3369 Memory alignment err: "
4225 "addr=%lx\n",
4226 (unsigned long)lpfc_ncmd->data);
4227 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4228 lpfc_ncmd->data,
4229 lpfc_ncmd->dma_handle);
4230 kfree(lpfc_ncmd);
4231 break;
4232 }
4233 }
4234
4235 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4236
4237 lxri = lpfc_sli4_next_xritag(phba);
4238 if (lxri == NO_XRI) {
4239 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4240 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4241 kfree(lpfc_ncmd);
4242 break;
4243 }
4244 pwqeq = &lpfc_ncmd->cur_iocbq;
4245
4246
4247 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4248 if (iotag == 0) {
4249 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4250 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4251 kfree(lpfc_ncmd);
4252 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4253 "6121 Failed to allocate IOTAG for"
4254 " XRI:0x%x\n", lxri);
4255 lpfc_sli4_free_xri(phba, lxri);
4256 break;
4257 }
4258 pwqeq->sli4_lxritag = lxri;
4259 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4260 pwqeq->context1 = lpfc_ncmd;
4261
4262
4263 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4264 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4265 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4266 spin_lock_init(&lpfc_ncmd->buf_lock);
4267
4268
4269 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4270 phba->sli4_hba.io_xri_cnt++;
4271 }
4272 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4273 "6114 Allocate %d out of %d requested new NVME "
4274 "buffers\n", bcnt, num_to_alloc);
4275
4276
4277 if (!list_empty(&post_nblist))
4278 num_posted = lpfc_sli4_post_io_sgl_list(
4279 phba, &post_nblist, bcnt);
4280 else
4281 num_posted = 0;
4282
4283 return num_posted;
4284}
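/**
 * lpfc_get_wwpn - Retrieve the port's WWPN from non-volatile parameters
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a READ_NV mailbox command by polling and returns the
 * adapter's WWPN, or (uint64_t)-1 on failure.
 **/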
4286static uint64_t
4287lpfc_get_wwpn(struct lpfc_hba *phba)
4288{
4289 uint64_t wwn;
4290 int rc;
4291 LPFC_MBOXQ_t *mboxq;
4292 MAILBOX_t *mb;
4293
4294 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4295 GFP_KERNEL);
4296 if (!mboxq)
4297 return (uint64_t)-1;
4298
4299
4300 lpfc_read_nv(phba, mboxq);
4301 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4302 if (rc != MBX_SUCCESS) {
4303 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6019 Mailbox failed, mbxCmd x%x "
4305 "READ_NV, mbxStatus x%x\n",
4306 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4307 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4308 mempool_free(mboxq, phba->mbox_mem_pool);
4309 return (uint64_t) -1;
4310 }
4311 mb = &mboxq->u.mb;
4312 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4313
4314 mempool_free(mboxq, phba->mbox_mem_pool);
4315 if (phba->sli_rev == LPFC_SLI_REV4)
4316 return be64_to_cpu(wwn);
4317 else
4318 return rol64(wwn, 32);
4319}
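/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates it with the FC port before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/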
4337struct lpfc_vport *
4338lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4339{
4340 struct lpfc_vport *vport;
4341 struct Scsi_Host *shost = NULL;
4342 struct scsi_host_template *template;
4343 int error = 0;
4344 int i;
4345 uint64_t wwn;
4346 bool use_no_reset_hba = false;
4347 int rc;
4348
4349 if (lpfc_no_hba_reset_cnt) {
4350 if (phba->sli_rev < LPFC_SLI_REV4 &&
4351 dev == &phba->pcidev->dev) {
4352
4353 lpfc_sli_brdrestart(phba);
4354 rc = lpfc_sli_chipset_init(phba);
4355 if (rc)
4356 return NULL;
4357 }
4358 wwn = lpfc_get_wwpn(phba);
4359 }
4360
4361 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4362 if (wwn == lpfc_no_hba_reset[i]) {
4363 lpfc_printf_log(phba, KERN_ERR,
4364 LOG_TRACE_EVENT,
4365 "6020 Setting use_no_reset port=%llx\n",
4366 wwn);
4367 use_no_reset_hba = true;
4368 break;
4369 }
4370 }
4371
4372
4373 if (dev == &phba->pcidev->dev) {
4374 template = &phba->port_template;
4375
4376 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4377
4378 memcpy(template, &lpfc_template, sizeof(*template));
4379
4380 if (use_no_reset_hba)
4381
4382 template->eh_host_reset_handler = NULL;
4383
4384
4385 memcpy(&phba->vport_template, &lpfc_template,
4386 sizeof(*template));
4387 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4388 phba->vport_template.eh_bus_reset_handler = NULL;
4389 phba->vport_template.eh_host_reset_handler = NULL;
4390 phba->vport_template.vendor_id = 0;
4391
4392
4393 if (phba->sli_rev == LPFC_SLI_REV4) {
4394 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4395 phba->vport_template.sg_tablesize =
4396 phba->cfg_scsi_seg_cnt;
4397 } else {
4398 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4399 phba->vport_template.sg_tablesize =
4400 phba->cfg_sg_seg_cnt;
4401 }
4402
4403 } else {
4404
4405 memcpy(template, &lpfc_template_nvme,
4406 sizeof(*template));
4407 }
4408 } else {
4409 template = &phba->vport_template;
4410 }
4411
4412 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4413 if (!shost)
4414 goto out;
4415
4416 vport = (struct lpfc_vport *) shost->hostdata;
4417 vport->phba = phba;
4418 vport->load_flag |= FC_LOADING;
4419 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4420 vport->fc_rscn_flush = 0;
4421 lpfc_get_vport_cfgparam(vport);
4422
4423
4424 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4425
4426 shost->unique_id = instance;
4427 shost->max_id = LPFC_MAX_TARGET;
4428 shost->max_lun = vport->cfg_max_luns;
4429 shost->this_id = -1;
4430 shost->max_cmd_len = 16;
4431
4432 if (phba->sli_rev == LPFC_SLI_REV4) {
4433 if (!phba->cfg_fcp_mq_threshold ||
4434 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4435 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4436
4437 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4438 phba->cfg_fcp_mq_threshold);
4439
4440 shost->dma_boundary =
4441 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4442
4443 if (phba->cfg_xpsgl && !phba->nvmet_support)
4444 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4445 else
4446 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4447 } else
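		/* SLI-3 has a limited number of hardware queues (3),
		 * thus there is only one for FCP processing.
		 */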
4451 shost->nr_hw_queues = 1;
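	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */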
4458 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4459 if (dev != &phba->pcidev->dev) {
4460 shost->transportt = lpfc_vport_transport_template;
4461 vport->port_type = LPFC_NPIV_PORT;
4462 } else {
4463 shost->transportt = lpfc_transport_template;
4464 vport->port_type = LPFC_PHYSICAL_PORT;
4465 }
4466
4467 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4468 "9081 CreatePort TMPLATE type %x TBLsize %d "
4469 "SEGcnt %d/%d\n",
4470 vport->port_type, shost->sg_tablesize,
4471 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4472
4473
4474 INIT_LIST_HEAD(&vport->fc_nodes);
4475 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4476 spin_lock_init(&vport->work_port_lock);
4477
4478 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4479
4480 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4481
4482 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4483
4484 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4485 lpfc_setup_bg(phba, shost);
4486
4487 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4488 if (error)
4489 goto out_put_shost;
4490
4491 spin_lock_irq(&phba->port_list_lock);
4492 list_add_tail(&vport->listentry, &phba->port_list);
4493 spin_unlock_irq(&phba->port_list_lock);
4494 return vport;
4495
4496out_put_shost:
4497 scsi_host_put(shost);
4498out:
4499 return NULL;
4500}
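/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/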
4509void
4510destroy_port(struct lpfc_vport *vport)
4511{
4512 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4513 struct lpfc_hba *phba = vport->phba;
4514
4515 lpfc_debugfs_terminate(vport);
4516 fc_remove_host(shost);
4517 scsi_remove_host(shost);
4518
4519 spin_lock_irq(&phba->port_list_lock);
4520 list_del_init(&vport->listentry);
4521 spin_unlock_irq(&phba->port_list_lock);
4522
4523 lpfc_cleanup(vport);
4524 return;
4525}
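/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from the lpfc_hba_index pool.
 * It uses the kernel idr facility to perform the task.
 *
 * Return codes
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/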
4537int
4538lpfc_get_instance(void)
4539{
4540 int ret;
4541
4542 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4543 return ret < 0 ? -1 : ret;
4544}
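/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan of the host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/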
4561int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4562{
4563 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4564 struct lpfc_hba *phba = vport->phba;
4565 int stat = 0;
4566
4567 spin_lock_irq(shost->host_lock);
4568
4569 if (vport->load_flag & FC_UNLOADING) {
4570 stat = 1;
4571 goto finished;
4572 }
4573 if (time >= msecs_to_jiffies(30 * 1000)) {
4574 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4575 "0461 Scanning longer than 30 "
4576 "seconds. Continuing initialization\n");
4577 stat = 1;
4578 goto finished;
4579 }
4580 if (time >= msecs_to_jiffies(15 * 1000) &&
4581 phba->link_state <= LPFC_LINK_DOWN) {
4582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4583 "0465 Link down longer than 15 "
4584 "seconds. Continuing initialization\n");
4585 stat = 1;
4586 goto finished;
4587 }
4588
4589 if (vport->port_state != LPFC_VPORT_READY)
4590 goto finished;
4591 if (vport->num_disc_nodes || vport->fc_prli_sent)
4592 goto finished;
4593 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4594 goto finished;
4595 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4596 goto finished;
4597
4598 stat = 1;
4599
4600finished:
4601 spin_unlock_irq(shost->host_lock);
4602 return stat;
4603}
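/**
 * lpfc_host_supported_speeds_set - Set supported speeds of this port
 * @shost: kernel scsi host pointer.
 *
 * This routine is called by lpfc_host_attrib_init to publish the link
 * speeds supported by this port. FCoE ports report no FC speeds.
 **/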
4605static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4606{
4607 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4608 struct lpfc_hba *phba = vport->phba;
4609
4610 fc_host_supported_speeds(shost) = 0;
4611
4612
4613
4614
4615 if (phba->hba_flag & HBA_FCOE_MODE)
4616 return;
4617
4618 if (phba->lmt & LMT_128Gb)
4619 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4620 if (phba->lmt & LMT_64Gb)
4621 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4622 if (phba->lmt & LMT_32Gb)
4623 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4624 if (phba->lmt & LMT_16Gb)
4625 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4626 if (phba->lmt & LMT_10Gb)
4627 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4628 if (phba->lmt & LMT_8Gb)
4629 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4630 if (phba->lmt & LMT_4Gb)
4631 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4632 if (phba->lmt & LMT_2Gb)
4633 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4634 if (phba->lmt & LMT_1Gb)
4635 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4636}
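/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/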
4645void lpfc_host_attrib_init(struct Scsi_Host *shost)
4646{
4647 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4648 struct lpfc_hba *phba = vport->phba;
4649
4650
4651
4652
4653 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4654 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4655 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4656
4657 memset(fc_host_supported_fc4s(shost), 0,
4658 sizeof(fc_host_supported_fc4s(shost)));
4659 fc_host_supported_fc4s(shost)[2] = 1;
4660 fc_host_supported_fc4s(shost)[7] = 1;
4661
	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				      sizeof(fc_host_symbolic_name(shost)));
4664
4665 lpfc_host_supported_speeds_set(shost);
4666
4667 fc_host_maxframe_size(shost) =
4668 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4669 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4670
4671 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4672
4673
4674 memset(fc_host_active_fc4s(shost), 0,
4675 sizeof(fc_host_active_fc4s(shost)));
4676 fc_host_active_fc4s(shost)[2] = 1;
4677 fc_host_active_fc4s(shost)[7] = 1;
4678
4679 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4680 spin_lock_irq(shost->host_lock);
4681 vport->load_flag &= ~FC_LOADING;
4682 spin_unlock_irq(shost->host_lock);
4683}
4684
4685
4686
4687
4688
4689
4690
4691
4692
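/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops an SLI3 device port: it disables interrupt generation,
 * clears pending interrupts, and stops the driver's timers for the device.
 **/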
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

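/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops an SLI4 device port: it stops the driver's timers for
 * the device, clears pending work events, and marks interrupts as disabled.
 **/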
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

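/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from the
 * API jump table set up for this device, then flushes the driver workqueue
 * if one was allocated.
 **/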
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}

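/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to quiesce
 * and marks the FCF rediscovery as pending.
 **/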
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));

	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

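/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when the FCF rediscover wait timer expires. It
 * posts an FCF rediscovery event to the worker thread unless the timer has
 * already been cancelled.
 **/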
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

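/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine checks the link fault code in the SLI4 link attention ACQE
 * and logs any fault code it does not recognize.
 **/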
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
	case LPFC_ASYNC_LINK_FAULT_LR_LRR:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0398 Unknown link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		break;
	}
}

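/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine translates the SLI4 async link status into a link attention
 * type (link up, link down, or reserved) for the rest of the driver.
 **/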
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

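/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns the current link speed in Mbps, or 0 if the link is
 * down or the speed is unknown.
 **/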
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
				phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

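/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine translates the link speed code from an asynchronous event
 * into a link speed in Mbps; an unrecognized code is reported as 0.
 **/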
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			port_speed = 100000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		case LPFC_FC_LA_SPEED_32G:
			port_speed = 32000;
			break;
		case LPFC_FC_LA_SPEED_64G:
			port_speed = 64000;
			break;
		case LPFC_FC_LA_SPEED_128G:
			port_speed = 128000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}

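/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine handles the SLI4 asynchronous FCoE link event. For FC mode
 * it issues a READ_TOPOLOGY mailbox command; for FCoE mode it fakes the
 * READ_TOPOLOGY completion from the ACQE contents.
 **/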
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in the needed topology information and invoke
	 * the READ_TOPOLOGY completion routine directly, without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse and translate link fault field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

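/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine translates the async event link speed code into the
 * LPFC_LINK_SPEED_* format used by the READ_TOPOLOGY command.
 **/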
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}

	return port_speed;
}

#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"

#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

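/**
 * lpfc_update_trunk_link_status - Update link status for trunked ports
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async FC link completion queue entry.
 *
 * This routine updates the per-port trunk link states and faults from an
 * FC trunking asynchronous event, and logs the result.
 **/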
static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert link speed to READ_TOPOLOGY format */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
				phba,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	/*
	 * Trunk error codes are currently defined only up to 0xA; anything
	 * larger is reported as undefined until the driver is updated.
	 */
	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}

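/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine handles the SLI4 asynchronous FC event. It saves the link
 * state and issues a READ_TOPOLOGY mailbox command, faking the completion
 * for link-down and MDS diagnostic events.
 **/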
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
		lpfc_update_trunk_link_status(phba, acqe_fc);
		return;
	}

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_LINK_DOWN)
		phba->sli4_hba.link_state.logical_speed = 0;
	else if (!phba->sli4_hba.conf_trunk)
		phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
			phba->link_flag |= LS_MDS_LOOPBACK;
			break;
		default:
			break;
		}

		/* Initialize completion status */
		mb = &pmb->u.mb;
		mb->mbxStatus = MBX_SUCCESS;

		/* Parse and translate link fault field */
		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;

		if (phba->sli4_hba.link_state.status ==
		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
		} else {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_LINK_DOWN);
		}
		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

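/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine handles SLI4 asynchronous SLI events: temperature thresholds,
 * misconfigured optics, remote D_Port tests, FA-WWN misconfiguration, and
 * EEPROM failures.
 **/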
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int rc, i;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Type:%d, Event Data: x%08x "
			"x%08x x%08x x%08x\n", evt_type,
			acqe_sli->event_data1, acqe_sli->event_data2,
			acqe_sli->reserved, acqe_sli->trailer);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* port name is not yet configured */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d\n",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
		/*
		 * Misconfigured WWN. The SLI Port is configured to use
		 * FA-WWN, but the attached device doesn't support it.
		 * No driver action is required.
		 */
		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
			     "2699 Misconfigured FA-WWN - Attached device does "
			     "not support FA-WWN\n");
		break;
	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
		/* EEPROM failure. No driver action is required */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2518 EEPROM failure - "
				"Event Data1: x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Unrecognized SLI event, type: 0x%x\n",
				evt_type);
		break;
	}
}

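/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine processes a received Clear Virtual Link (CVL) for a vport:
 * it takes the port link down, cleans up pending mailbox commands, and
 * marks the vport as having received a CVL.
 *
 * Return the pointer to the Fabric ndlp on success, or NULL when the CVL
 * is ignored.
 **/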
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

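/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine performs the CVL processing on all the vports of the HBA.
 **/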
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

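/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * This routine handles the SLI4 asynchronous FCoE FIP events: new FCF, FCF
 * parameter modification, FCF table full, FCF dead, and CVL.
 **/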
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index,
				acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used FCF do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_TRACE_EVENT,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR,
				LOG_TRACE_EVENT,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index,
				acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
			     i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(&ndlp->lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_TRACE_EVENT,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"fail through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else {
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
			}
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

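/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine only logs the SLI4 asynchronous DCBX event; no further
 * handling is implemented.
 **/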
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

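/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine handles the SLI4 asynchronous grp5 event, which carries the
 * new logical link speed of the port.
 **/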
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}

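/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/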
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the async event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
				       iflags);

		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}

		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}

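/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process the FCF table
 * rediscovery event after the quiescent period has expired.
 **/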
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

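/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT, SCSI, SLI, and MBOX API function
 * jump tables according to the PCI device group.
 *
 * Return 0 on success, -ENODEV if any jump table setup fails.
 **/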
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

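/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine logs whether the device is running in INTx, MSI, or MSI-X
 * interrupt mode.
 **/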
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

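/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine enables the PCI device that is common to all PCI devices.
 *
 * Return 0 on success, -ENODEV on failure to enable the PCI device.
 **/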
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}

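/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine disables the PCI device that is common to all PCI devices.
 **/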
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}

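/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine resets the HBA by bringing it offline, restarting the board,
 * and bringing it back online, unless HBA resets are disabled by
 * configuration.
 **/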
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}

	/* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	} else {
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_sli_flush_io_rings(phba);
	}
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

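/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV capability to find the number of
 * virtual functions the device supports.
 *
 * Return 0 if the device does not support SR-IOV.
 **/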
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

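/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the requested number of SR-IOV virtual functions,
 * after validating it against the number the device supports.
 **/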
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)\n", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	}
	return rc;
}

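/**
 * lpfc_setup_driver_resource_phase1 - Set up phase-1 driver resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets up the driver internal resources common to all HBA
 * device types: locks, lists, wait queues, and timers.
 *
 * Return 0 on success.
 **/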
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	atomic_set(&phba->dbg_log_idx, 0);
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
			  lpfc_idle_stat_delay_work);

	return 0;
}

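/**
 * lpfc_sli_driver_resource_setup - Setup driver resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets up the driver internal resources specific to SLI3 HBA
 * device types.
 *
 * Return 0 on success, other values on failure.
 **/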
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */
	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	phba->lpfc_sg_dma_buf_pool =
		dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;

	phba->lpfc_cmd_rsp_buf_pool =
		dma_pool_create("lpfc_cmd_rsp_buf_pool",
				&phba->pcidev->dev,
				sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp),
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
}

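/**
 * lpfc_sli_driver_resource_unset - Unset driver resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases the driver internal resources set up for an SLI3
 * HBA device.
 **/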
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

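/**
 * lpfc_sli4_driver_resource_setup - Setup driver resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets up the driver internal resources specific to SLI4 HBA
 * device types.
 *
 * Return 0 on success, other values on failure.
 **/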
6570static int
6571lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6572{
6573 LPFC_MBOXQ_t *mboxq;
6574 MAILBOX_t *mb;
6575 int rc, i, max_buf_size;
6576 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
6577 struct lpfc_mqe *mqe;
6578 int longs;
6579 int extra;
6580 uint64_t wwn;
6581 u32 if_type;
6582 u32 if_fam;
6583
6584 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6585 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
6586 phba->sli4_hba.curr_disp_cpu = 0;
6587
6588
6589 lpfc_get_cfgparam(phba);
6590
6591
6592 rc = lpfc_setup_driver_resource_phase1(phba);
6593 if (rc)
6594 return -ENODEV;
6595
6596
6597 rc = lpfc_sli4_post_status_check(phba);
6598 if (rc)
6599 return -ENODEV;
6600
6601
6602
6603
6604 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6605
6606
6607
6608
6609
6610 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
6611
6612
6613 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
6614
6615
6616
6617
6618
6619 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6620 sizeof(struct lpfc_mbox_ext_buf_ctx));
6621 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6622
6623 phba->max_vpi = LPFC_MAX_VPI;
6624
6625
6626 phba->max_vports = 0;
6627
6628
6629 phba->valid_vlan = 0;
6630 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6631 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6632 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6633
6634
6635
6636
6637
6638
6639
6640
6641 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6642 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6643 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6644
6645
6646
6647
6648
6649 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6650 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
6651
6652 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6653
6654 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
6655 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6656 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6657 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6658 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
6659 }
6660
6661
6662 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6663 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6664 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
6665 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
6666
6667
6668
6669
6670
6671
6672 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6673
6674 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6675
6676 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6677
6678 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6679
6680 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6681
6682
6683 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6684 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6685 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6686 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6687
6688
6689
6690
6691 INIT_LIST_HEAD(&phba->sli.mboxq);
6692 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6693
6694
6695 phba->sli4_hba.lnk_info.optic_state = 0xff;
6696
6697
6698 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6699 if (rc)
6700 return -ENOMEM;
6701
6702
6703 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
6704 LPFC_SLI_INTF_IF_TYPE_2) {
6705 rc = lpfc_pci_function_reset(phba);
6706 if (unlikely(rc)) {
6707 rc = -ENODEV;
6708 goto out_free_mem;
6709 }
6710 phba->temp_sensor_support = 1;
6711 }
6712
6713
6714 rc = lpfc_create_bootstrap_mbox(phba);
6715 if (unlikely(rc))
6716 goto out_free_mem;
6717
6718
6719 rc = lpfc_setup_endian_order(phba);
6720 if (unlikely(rc))
6721 goto out_free_bsmbx;
6722
6723
6724 rc = lpfc_sli4_read_config(phba);
6725 if (unlikely(rc))
6726 goto out_free_bsmbx;
6727 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6728 if (unlikely(rc))
6729 goto out_free_bsmbx;
6730
6731
6732 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6733 LPFC_SLI_INTF_IF_TYPE_0) {
6734 rc = lpfc_pci_function_reset(phba);
6735 if (unlikely(rc))
6736 goto out_free_bsmbx;
6737 }
6738
6739 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6740 GFP_KERNEL);
6741 if (!mboxq) {
6742 rc = -ENOMEM;
6743 goto out_free_bsmbx;
6744 }
6745
6746
6747 phba->nvmet_support = 0;
6748 if (lpfc_enable_nvmet_cnt) {
6749
6750
6751 lpfc_read_nv(phba, mboxq);
6752 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6753 if (rc != MBX_SUCCESS) {
6754 lpfc_printf_log(phba, KERN_ERR,
6755 LOG_TRACE_EVENT,
6756 "6016 Mailbox failed , mbxCmd x%x "
6757 "READ_NV, mbxStatus x%x\n",
6758 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6759 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6760 mempool_free(mboxq, phba->mbox_mem_pool);
6761 rc = -EIO;
6762 goto out_free_bsmbx;
6763 }
6764 mb = &mboxq->u.mb;
6765 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6766 sizeof(uint64_t));
6767 wwn = cpu_to_be64(wwn);
6768 phba->sli4_hba.wwnn.u.name = wwn;
6769 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6770 sizeof(uint64_t));
6771
6772 wwn = cpu_to_be64(wwn);
6773 phba->sli4_hba.wwpn.u.name = wwn;
6774
6775
6776 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6777 if (wwn == lpfc_enable_nvmet[i]) {
6778#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
6779 if (lpfc_nvmet_mem_alloc(phba))
6780 break;
6781
6782 phba->nvmet_support = 1;
6783
6784 lpfc_printf_log(phba, KERN_ERR,
6785 LOG_TRACE_EVENT,
6786 "6017 NVME Target %016llx\n",
6787 wwn);
6788#else
6789 lpfc_printf_log(phba, KERN_ERR,
6790 LOG_TRACE_EVENT,
6791 "6021 Can't enable NVME Target."
6792 " NVME_TARGET_FC infrastructure"
6793 " is not in kernel\n");
6794#endif
6795
6796 phba->cfg_xri_rebalancing = 0;
6797 if (phba->irq_chann_mode == NHT_MODE) {
6798 phba->cfg_irq_chann =
6799 phba->sli4_hba.num_present_cpu;
6800 phba->cfg_hdw_queue =
6801 phba->sli4_hba.num_present_cpu;
6802 phba->irq_chann_mode = NORMAL_MODE;
6803 }
6804 break;
6805 }
6806 }
6807 }
6808
6809 lpfc_nvme_mod_param_dep(phba);
6810
6811
6812 lpfc_supported_pages(mboxq);
6813 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6814 if (!rc) {
6815 mqe = &mboxq->u.mqe;
6816 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6817 LPFC_MAX_SUPPORTED_PAGES);
6818 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6819 switch (pn_page[i]) {
6820 case LPFC_SLI4_PARAMETERS:
6821 phba->sli4_hba.pc_sli4_params.supported = 1;
6822 break;
6823 default:
6824 break;
6825 }
6826 }
6827
6828 if (phba->sli4_hba.pc_sli4_params.supported)
6829 rc = lpfc_pc_sli4_params_get(phba, mboxq);
6830 if (rc) {
6831 mempool_free(mboxq, phba->mbox_mem_pool);
6832 rc = -EIO;
6833 goto out_free_bsmbx;
6834 }
6835 }
6836
6837
6838
6839
6840
6841
6842 rc = lpfc_get_sli4_parameters(phba, mboxq);
6843 if (rc) {
6844 if_type = bf_get(lpfc_sli_intf_if_type,
6845 &phba->sli4_hba.sli_intf);
6846 if_fam = bf_get(lpfc_sli_intf_sli_family,
6847 &phba->sli4_hba.sli_intf);
6848 if (phba->sli4_hba.extents_in_use &&
6849 phba->sli4_hba.rpi_hdrs_in_use) {
6850 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6851 "2999 Unsupported SLI4 Parameters "
6852 "Extents and RPI headers enabled.\n");
6853 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6854 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
6855 mempool_free(mboxq, phba->mbox_mem_pool);
6856 rc = -EIO;
6857 goto out_free_bsmbx;
6858 }
6859 }
6860 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6861 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6862 mempool_free(mboxq, phba->mbox_mem_pool);
6863 rc = -EIO;
6864 goto out_free_bsmbx;
6865 }
6866 }
6867
6868
6869
6870
6871
6872 extra = 2;
6873 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6874 extra++;
6875
6876
6877
6878
6879
6880
6881 max_buf_size = (2 * SLI4_PAGE_SIZE);
6882
6883
6884
6885
6886
6887 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6888
6889
6890
6891
6892
6893
6894
6895
6896
6897
6898
6899 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6900 sizeof(struct fcp_rsp) + max_buf_size;
6901
6902
6903 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6904
6905
6906
6907
6908
6909 if (phba->cfg_enable_bg &&
6910 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6911 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6912 else
6913 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6914
6915 } else {
6916
6917
6918
6919
6920
6921 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6922 sizeof(struct fcp_rsp) +
6923 ((phba->cfg_sg_seg_cnt + extra) *
6924 sizeof(struct sli4_sge));
6925
6926
6927 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6928 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6929
6930
6931
6932
6933
6934 }
6935
6936 if (phba->cfg_xpsgl && !phba->nvmet_support)
6937 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
6938 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
6939 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6940 else
6941 phba->cfg_sg_dma_buf_size =
6942 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6943
6944 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
6945 sizeof(struct sli4_sge);
6946
6947
6948 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6949 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6950 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6951 "6300 Reducing NVME sg segment "
6952 "cnt to %d\n",
6953 LPFC_MAX_NVME_SEG_CNT);
6954 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6955 } else
6956 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6957 }
6958
6959 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6960 "9087 sg_seg_cnt:%d dmabuf_size:%d "
6961 "total:%d scsi:%d nvme:%d\n",
6962 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6963 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6964 phba->cfg_nvme_seg_cnt);
6965
6966 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
6967 i = phba->cfg_sg_dma_buf_size;
6968 else
6969 i = SLI4_PAGE_SIZE;
6970
6971 phba->lpfc_sg_dma_buf_pool =
6972 dma_pool_create("lpfc_sg_dma_buf_pool",
6973 &phba->pcidev->dev,
6974 phba->cfg_sg_dma_buf_size,
6975 i, 0);
6976 if (!phba->lpfc_sg_dma_buf_pool)
6977 goto out_free_bsmbx;
6978
6979 phba->lpfc_cmd_rsp_buf_pool =
6980 dma_pool_create("lpfc_cmd_rsp_buf_pool",
6981 &phba->pcidev->dev,
6982 sizeof(struct fcp_cmnd) +
6983 sizeof(struct fcp_rsp),
6984 i, 0);
6985 if (!phba->lpfc_cmd_rsp_buf_pool)
6986 goto out_free_sg_dma_buf;
6987
6988 mempool_free(mboxq, phba->mbox_mem_pool);
6989
6990
6991 lpfc_sli4_oas_verify(phba);
6992
6993
6994 lpfc_sli4_ras_init(phba);
6995
6996
6997 rc = lpfc_sli4_queue_verify(phba);
6998 if (rc)
6999 goto out_free_cmd_rsp_buf;
7000
7001
7002 rc = lpfc_sli4_cq_event_pool_create(phba);
7003 if (rc)
7004 goto out_free_cmd_rsp_buf;
7005
7006
7007 lpfc_init_sgl_list(phba);
7008
7009
7010 rc = lpfc_init_active_sgl_array(phba);
7011 if (rc) {
7012 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7013 "1430 Failed to initialize sgl list.\n");
7014 goto out_destroy_cq_event_pool;
7015 }
7016 rc = lpfc_sli4_init_rpi_hdrs(phba);
7017 if (rc) {
7018 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7019 "1432 Failed to initialize rpi headers.\n");
7020 goto out_free_active_sgl;
7021 }
7022
7023
7024 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
7025 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
7026 GFP_KERNEL);
7027 if (!phba->fcf.fcf_rr_bmask) {
7028 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7029 "2759 Failed allocate memory for FCF round "
7030 "robin failover bmask\n");
7031 rc = -ENOMEM;
7032 goto out_remove_rpi_hdrs;
7033 }
7034
7035 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
7036 sizeof(struct lpfc_hba_eq_hdl),
7037 GFP_KERNEL);
7038 if (!phba->sli4_hba.hba_eq_hdl) {
7039 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7040 "2572 Failed allocate memory for "
7041 "fast-path per-EQ handle array\n");
7042 rc = -ENOMEM;
7043 goto out_free_fcf_rr_bmask;
7044 }
7045
7046 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
7047 sizeof(struct lpfc_vector_map_info),
7048 GFP_KERNEL);
7049 if (!phba->sli4_hba.cpu_map) {
7050 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7051 "3327 Failed allocate memory for msi-x "
7052 "interrupt vector mapping\n");
7053 rc = -ENOMEM;
7054 goto out_free_hba_eq_hdl;
7055 }
7056
7057 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
7058 if (!phba->sli4_hba.eq_info) {
7059 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7060 "3321 Failed allocation for per_cpu stats\n");
7061 rc = -ENOMEM;
7062 goto out_free_hba_cpu_map;
7063 }
7064
7065 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
7066 sizeof(*phba->sli4_hba.idle_stat),
7067 GFP_KERNEL);
7068 if (!phba->sli4_hba.idle_stat) {
7069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7070 "3390 Failed allocation for idle_stat\n");
7071 rc = -ENOMEM;
7072 goto out_free_hba_eq_info;
7073 }
7074
7075#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7076 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
7077 if (!phba->sli4_hba.c_stat) {
7078 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7079 "3332 Failed allocating per cpu hdwq stats\n");
7080 rc = -ENOMEM;
7081 goto out_free_hba_idle_stat;
7082 }
7083#endif
7084
7085
7086
7087
7088
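	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */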
7089 if (phba->cfg_sriov_nr_virtfn > 0) {
7090 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7091 phba->cfg_sriov_nr_virtfn);
7092 if (rc) {
7093 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7094 "3020 Requested number of SR-IOV "
7095 "virtual functions (%d) is not "
7096 "supported\n",
7097 phba->cfg_sriov_nr_virtfn);
7098 phba->cfg_sriov_nr_virtfn = 0;
7099 }
7100 }
7101
7102 return 0;
7103
7104#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7105out_free_hba_idle_stat:
7106 kfree(phba->sli4_hba.idle_stat);
7107#endif
7108out_free_hba_eq_info:
7109 free_percpu(phba->sli4_hba.eq_info);
7110out_free_hba_cpu_map:
7111 kfree(phba->sli4_hba.cpu_map);
7112out_free_hba_eq_hdl:
7113 kfree(phba->sli4_hba.hba_eq_hdl);
7114out_free_fcf_rr_bmask:
7115 kfree(phba->fcf.fcf_rr_bmask);
7116out_remove_rpi_hdrs:
7117 lpfc_sli4_remove_rpi_hdrs(phba);
7118out_free_active_sgl:
7119 lpfc_free_active_sgl(phba);
7120out_destroy_cq_event_pool:
7121 lpfc_sli4_cq_event_pool_destroy(phba);
7122out_free_cmd_rsp_buf:
7123 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
7124 phba->lpfc_cmd_rsp_buf_pool = NULL;
7125out_free_sg_dma_buf:
7126 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7127 phba->lpfc_sg_dma_buf_pool = NULL;
7128out_free_bsmbx:
7129 lpfc_destroy_bootstrap_mbox(phba);
7130out_free_mem:
7131 lpfc_mem_free(phba);
7132 return rc;
7133}
7134
7135
7136
7137
7138
7139
7140
7141
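/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/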
7142static void
7143lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
7144{
7145 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7146
7147 free_percpu(phba->sli4_hba.eq_info);
7148#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7149 free_percpu(phba->sli4_hba.c_stat);
7150#endif
7151 kfree(phba->sli4_hba.idle_stat);
7152
7153
7154 kfree(phba->sli4_hba.cpu_map);
7155 phba->sli4_hba.num_possible_cpu = 0;
7156 phba->sli4_hba.num_present_cpu = 0;
7157 phba->sli4_hba.curr_disp_cpu = 0;
7158 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
7159
7160
7161 kfree(phba->sli4_hba.hba_eq_hdl);
7162
7163
7164 lpfc_sli4_remove_rpi_hdrs(phba);
7165 lpfc_sli4_remove_rpis(phba);
7166
7167
7168 kfree(phba->fcf.fcf_rr_bmask);
7169
7170
7171 lpfc_free_active_sgl(phba);
7172 lpfc_free_els_sgl_list(phba);
7173 lpfc_free_nvmet_sgl_list(phba);
7174
7175
7176 lpfc_sli4_cq_event_release_all(phba);
7177 lpfc_sli4_cq_event_pool_destroy(phba);
7178
7179
7180 lpfc_sli4_dealloc_resource_identifiers(phba);
7181
7182
7183 lpfc_destroy_bootstrap_mbox(phba);
7184
7185
7186 lpfc_mem_free_all(phba);
7187
7188
7189 list_for_each_entry_safe(conn_entry, next_conn_entry,
7190 &phba->fcf_conn_rec_list, list) {
7191 list_del_init(&conn_entry->list);
7192 kfree(conn_entry);
7193 }
7194
7195 return;
7196}
7197
7198
7199
7200
7201
7202
7203
7204
7205
7206
7207
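/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/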
7208int
7209lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7210{
7211 phba->lpfc_hba_init_link = lpfc_hba_init_link;
7212 phba->lpfc_hba_down_link = lpfc_hba_down_link;
7213 phba->lpfc_selective_reset = lpfc_selective_reset;
7214 switch (dev_grp) {
7215 case LPFC_PCI_DEV_LP:
7216 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7217 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
7218 phba->lpfc_stop_port = lpfc_stop_port_s3;
7219 break;
7220 case LPFC_PCI_DEV_OC:
7221 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7222 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
7223 phba->lpfc_stop_port = lpfc_stop_port_s4;
7224 break;
7225 default:
7226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7227 "1431 Invalid HBA PCI-device group: 0x%x\n",
7228 dev_grp);
7229 return -ENODEV;
7230 }
7231 return 0;
7232}
7233
7234
7235
7236
7237
7238
7239
7240
7241
7242
7243
7244
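/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup. It starts the kernel worker thread that
 * services the HBA.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/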
7245static int
7246lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7247{
7248 int error;
7249
7250
7251 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7252 "lpfc_worker_%d", phba->brd_no);
7253 if (IS_ERR(phba->worker_thread)) {
7254 error = PTR_ERR(phba->worker_thread);
7255 return error;
7256 }
7257
7258 return 0;
7259}
7260
7261
7262
7263
7264
7265
7266
7267
7268
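/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * after the device specific resource setup: it destroys the workqueue and
 * stops the kernel worker thread.
 **/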
7269static void
7270lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7271{
7272 if (phba->wq) {
7273 flush_workqueue(phba->wq);
7274 destroy_workqueue(phba->wq);
7275 phba->wq = NULL;
7276 }
7277
7278
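	/* Stop kernel worker thread */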
7279 if (phba->worker_thread)
7280 kthread_stop(phba->worker_thread);
7281}
7282
7283
7284
7285
7286
7287
7288
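/**
 * lpfc_free_iocb_list - Free iocb list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/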
7289void
7290lpfc_free_iocb_list(struct lpfc_hba *phba)
7291{
7292 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7293
7294 spin_lock_irq(&phba->hbalock);
7295 list_for_each_entry_safe(iocbq_entry, iocbq_next,
7296 &phba->lpfc_iocb_list, list) {
7297 list_del(&iocbq_entry->list);
7298 kfree(iocbq_entry);
7299 phba->total_iocbq_bufs--;
7300 }
7301 spin_unlock_irq(&phba->hbalock);
7302
7303 return;
7304}
7305
7306
7307
7308
7309
7310
7311
7312
7313
7314
7315
7316
7317
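/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbs
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/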
7318int
7319lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7320{
7321 struct lpfc_iocbq *iocbq_entry = NULL;
7322 uint16_t iotag;
7323 int i;
7324
7325
7326 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7327 for (i = 0; i < iocb_count; i++) {
7328 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7329 if (iocbq_entry == NULL) {
7330 printk(KERN_ERR "%s: only allocated %d iocbs of "
7331 "expected %d count. Unloading driver.\n",
7332 __func__, i, iocb_count);
7333 goto out_free_iocbq;
7334 }
7335
7336 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7337 if (iotag == 0) {
7338 kfree(iocbq_entry);
7339 printk(KERN_ERR "%s: failed to allocate IOTAG. "
7340 "Unloading driver.\n", __func__);
7341 goto out_free_iocbq;
7342 }
7343 iocbq_entry->sli4_lxritag = NO_XRI;
7344 iocbq_entry->sli4_xritag = NO_XRI;
7345
7346 spin_lock_irq(&phba->hbalock);
7347 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7348 phba->total_iocbq_bufs++;
7349 spin_unlock_irq(&phba->hbalock);
7350 }
7351
7352 return 0;
7353
7354out_free_iocbq:
7355 lpfc_free_iocb_list(phba);
7356
7357 return -ENOMEM;
7358}
7359
7360
7361
7362
7363
7364
7365
7366
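/**
 * lpfc_free_sgl_list - Free a given sgl list
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of the sgl list.
 *
 * This routine is invoked to free a given sgl list and its memory.
 **/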
7367void
7368lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7369{
7370 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7371
7372 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7373 list_del(&sglq_entry->list);
7374 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7375 kfree(sglq_entry);
7376 }
7377}
7378
7379
7380
7381
7382
7383
7384
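/**
 * lpfc_free_els_sgl_list - Free els sgl list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/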
7385static void
7386lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7387{
7388 LIST_HEAD(sglq_list);
7389
7390
7391 spin_lock_irq(&phba->hbalock);
7392 spin_lock(&phba->sli4_hba.sgl_list_lock);
7393 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7394 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7395 spin_unlock_irq(&phba->hbalock);
7396
7397
7398 lpfc_free_sgl_list(phba, &sglq_list);
7399}
7400
7401
7402
7403
7404
7405
7406
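/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/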
7407static void
7408lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7409{
7410 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7411 LIST_HEAD(sglq_list);
7412
7413
7414 spin_lock_irq(&phba->hbalock);
7415 spin_lock(&phba->sli4_hba.sgl_list_lock);
7416 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7417 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7418 spin_unlock_irq(&phba->hbalock);
7419
7420
7421 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7422 list_del(&sglq_entry->list);
7423 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7424 kfree(sglq_entry);
7425 }
7426
7427
7428
7429
7430
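	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */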
7431 phba->sli4_hba.nvmet_xri_cnt = 0;
7432}
7433
7434
7435
7436
7437
7438
7439
7440
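/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl array, sized
 * by the maximum XRI count, to hold the sglq entries for active IOs.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/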
7441static int
7442lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7443{
7444 int size;
7445 size = sizeof(struct lpfc_sglq *);
7446 size *= phba->sli4_hba.max_cfg_param.max_xri;
7447
7448 phba->sli4_hba.lpfc_sglq_active_list =
7449 kzalloc(size, GFP_KERNEL);
7450 if (!phba->sli4_hba.lpfc_sglq_active_list)
7451 return -ENOMEM;
7452 return 0;
7453}
7454
7455
7456
7457
7458
7459
7460
7461
7462
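/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's active sgl tracking array.
 **/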
7463static void
7464lpfc_free_active_sgl(struct lpfc_hba *phba)
7465{
7466 kfree(phba->sli4_hba.lpfc_sglq_active_list);
7467}
7468
7469
7470
7471
7472
7473
7474
7475
7476
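/**
 * lpfc_init_sgl_list - Initialize the host sgl lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the per-host sgl lists (ELS, aborted ELS, NVMET
 * and aborted NVMET context lists) and zeroes the ELS and IO XRI counts.
 **/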
7477static void
7478lpfc_init_sgl_list(struct lpfc_hba *phba)
7479{
7480
7481 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7482 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7483 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7484 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7485
7486
7487 phba->sli4_hba.els_xri_cnt = 0;
7488
7489
7490 phba->sli4_hba.io_xri_cnt = 0;
7491}
7492
7493
7494
7495
7496
7497
7498
7499
7500
7501
7502
7503
7504
7505
7506
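/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the port for
 * those SLI4 ports that do not support extents. Ports using extents do
 * not post rpi headers.
 *
 * Return codes
 *	0 - successful
 *	-EIO - extents are in use
 *	-ENODEV - rpi header creation failed
 **/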
7507int
7508lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7509{
7510 int rc = 0;
7511 struct lpfc_rpi_hdr *rpi_hdr;
7512
7513 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
7514 if (!phba->sli4_hba.rpi_hdrs_in_use)
7515 return rc;
7516 if (phba->sli4_hba.extents_in_use)
7517 return -EIO;
7518
7519 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7520 if (!rpi_hdr) {
7521 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7522 "0391 Error during rpi post operation\n");
7523 lpfc_sli4_remove_rpis(phba);
7524 rc = -ENODEV;
7525 }
7526
7527 return rc;
7528}
7529
7530
7531
7532
7533
7534
7535
7536
7537
7538
7539
7540
7541
7542
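/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single rpi header template memory
 * region (LPFC_HDR_TEMPLATE_SIZE bytes), link it to the hba's rpi header
 * list, and advance the next_rpi count by LPFC_RPI_HDR_COUNT.
 *
 * Returns:
 *   A valid rpi hdr on success.
 *   A NULL pointer on any failure.
 **/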
7543struct lpfc_rpi_hdr *
7544lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7545{
7546 uint16_t rpi_limit, curr_rpi_range;
7547 struct lpfc_dmabuf *dmabuf;
7548 struct lpfc_rpi_hdr *rpi_hdr;
7549
7550
7551
7552
7553
7554
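	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required; rpi resources are managed through extents on such
	 * ports and no header pages are posted.
	 */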
7555 if (!phba->sli4_hba.rpi_hdrs_in_use)
7556 return NULL;
7557 if (phba->sli4_hba.extents_in_use)
7558 return NULL;
7559
7560
7561 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
7562
7563 spin_lock_irq(&phba->hbalock);
7564
7565
7566
7567
7568
7569 curr_rpi_range = phba->sli4_hba.next_rpi;
7570 spin_unlock_irq(&phba->hbalock);
7571
7572
7573 if (curr_rpi_range == rpi_limit)
7574 return NULL;
7575
7576
7577
7578
7579
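	/*
	 * Allocate the protocol header region for the port.  The port
	 * expects a LPFC_HDR_TEMPLATE_SIZE-aligned DMA-mapped region.
	 */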
7580 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7581 if (!dmabuf)
7582 return NULL;
7583
7584 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7585 LPFC_HDR_TEMPLATE_SIZE,
7586 &dmabuf->phys, GFP_KERNEL);
7587 if (!dmabuf->virt) {
7588 rpi_hdr = NULL;
7589 goto err_free_dmabuf;
7590 }
7591
7592 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
7593 rpi_hdr = NULL;
7594 goto err_free_coherent;
7595 }
7596
7597
7598 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
7599 if (!rpi_hdr)
7600 goto err_free_coherent;
7601
7602 rpi_hdr->dmabuf = dmabuf;
7603 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7604 rpi_hdr->page_count = 1;
7605 spin_lock_irq(&phba->hbalock);
7606
7607
7608 rpi_hdr->start_rpi = curr_rpi_range;
7609 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
7610 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
7611
7612 spin_unlock_irq(&phba->hbalock);
7613 return rpi_hdr;
7614
7615 err_free_coherent:
7616 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
7617 dmabuf->virt, dmabuf->phys);
7618 err_free_dmabuf:
7619 kfree(dmabuf);
7620 return NULL;
7621}
7622
7623
7624
7625
7626
7627
7628
7629
7630
7631
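/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated to
 * support rpis on SLI4 ports not supporting extents. The caller is
 * expected to have released all rpis consumed by fabric or port logins.
 **/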
7632void
7633lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7634{
7635 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7636
7637 if (!phba->sli4_hba.rpi_hdrs_in_use)
7638 goto exit;
7639
7640 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7641 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7642 list_del(&rpi_hdr->list);
7643 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7644 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7645 kfree(rpi_hdr->dmabuf);
7646 kfree(rpi_hdr);
7647 }
7648 exit:
7649
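	/* There are no rpis available to the port now. */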
7650 phba->sli4_hba.next_rpi = 0;
7651}
7652
7653
7654
7655
7656
7657
7658
7659
7660
7661
7662
7663
7664
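/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device, set its reference to the PCI device, and assign an unused
 * board number.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/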
7665static struct lpfc_hba *
7666lpfc_hba_alloc(struct pci_dev *pdev)
7667{
7668 struct lpfc_hba *phba;
7669
7670
7671 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7672 if (!phba) {
7673 dev_err(&pdev->dev, "failed to allocate hba struct\n");
7674 return NULL;
7675 }
7676
7677
7678 phba->pcidev = pdev;
7679
7680
7681 phba->brd_no = lpfc_get_instance();
7682 if (phba->brd_no < 0) {
7683 kfree(phba);
7684 return NULL;
7685 }
7686 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7687
7688 spin_lock_init(&phba->ct_ev_lock);
7689 INIT_LIST_HEAD(&phba->ct_ev_waiters);
7690
7691 return phba;
7692}
7693
7694
7695
7696
7697
7698
7699
7700
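/**
 * lpfc_hba_free - Free driver hba data structure with a device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device, releasing the board number back to the idr.
 **/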
7701static void
7702lpfc_hba_free(struct lpfc_hba *phba)
7703{
7704 if (phba->sli_rev == LPFC_SLI_REV4)
7705 kfree(phba->sli4_hba.hdwq);
7706
7707
7708 idr_remove(&lpfc_hba_index, phba->brd_no);
7709
7710
7711 kfree(phba->sli.sli3_ring);
7712 phba->sli.sli3_ring = NULL;
7713
7714 kfree(phba);
7715 return;
7716}
7717
7718
7719
7720
7721
7722
7723
7724
7725
7726
7727
7728
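/**
 * lpfc_create_shost - Create hba physical port with associated scsi host
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the HBA physical port and associate a
 * SCSI host with it.
 *
 * Return codes
 *	0 - successful
 *	-ENODEV - port creation failed
 **/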
7729static int
7730lpfc_create_shost(struct lpfc_hba *phba)
7731{
7732 struct lpfc_vport *vport;
7733 struct Scsi_Host *shost;
7734
7735
7736 phba->fc_edtov = FF_DEF_EDTOV;
7737 phba->fc_ratov = FF_DEF_RATOV;
7738 phba->fc_altov = FF_DEF_ALTOV;
7739 phba->fc_arbtov = FF_DEF_ARBTOV;
7740
7741 atomic_set(&phba->sdev_cnt, 0);
7742 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
7743 if (!vport)
7744 return -ENODEV;
7745
7746 shost = lpfc_shost_from_vport(vport);
7747 phba->pport = vport;
7748
7749 if (phba->nvmet_support) {
7750
7751 phba->targetport = NULL;
7752 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7753 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
7754 "6076 NVME Target Found\n");
7755 }
7756
7757 lpfc_debugfs_initialize(vport);
7758
7759 pci_set_drvdata(phba->pcidev, shost);
7760
7761
7762
7763
7764
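	/*
	 * At this point we are fully registered with PSA. In addition,
	 * any initial discovery should be completed.
	 */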
7765 vport->load_flag |= FC_ALLOW_FDMI;
7766 if (phba->cfg_enable_SmartSAN ||
7767 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
7768
7769
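		/* Setup appropriate attribute masks */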
7770 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
7771 if (phba->cfg_enable_SmartSAN)
7772 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
7773 else
7774 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7775 }
7776 return 0;
7777}
7778
7779
7780
7781
7782
7783
7784
7785
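/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy the HBA physical port and the
 * associated SCSI host.
 **/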
7786static void
7787lpfc_destroy_shost(struct lpfc_hba *phba)
7788{
7789 struct lpfc_vport *vport = phba->pport;
7790
7791
7792 destroy_port(vport);
7793
7794 return;
7795}
7796
7797
7798
7799
7800
7801
7802
7803
7804
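/**
 * lpfc_setup_bg - Setup BlockGuard for the SCSI host
 * @phba: pointer to lpfc hba data structure.
 * @shost: pointer to SCSI host data structure.
 *
 * This routine sanitizes the configured BlockGuard protection mask and
 * guard type and, when valid, registers them with the SCSI midlayer.
 **/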
7805static void
7806lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7807{
7808 uint32_t old_mask;
7809 uint32_t old_guard;
7810
7811 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7812 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7813 "1478 Registering BlockGuard with the "
7814 "SCSI layer\n");
7815
7816 old_mask = phba->cfg_prot_mask;
7817 old_guard = phba->cfg_prot_guard;
7818
7819
7820 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7821 SHOST_DIX_TYPE0_PROTECTION |
7822 SHOST_DIX_TYPE1_PROTECTION);
7823 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7824 SHOST_DIX_GUARD_CRC);
7825
7826
7827 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7828 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7829
7830 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7831 if ((old_mask != phba->cfg_prot_mask) ||
7832 (old_guard != phba->cfg_prot_guard))
7833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7834 "1475 Registering BlockGuard with the "
7835 "SCSI layer: mask %d guard %d\n",
7836 phba->cfg_prot_mask,
7837 phba->cfg_prot_guard);
7838
7839 scsi_host_set_prot(shost, phba->cfg_prot_mask);
7840 scsi_host_set_guard(shost, phba->cfg_prot_guard);
7841 } else
7842 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7843 "1479 Not Registering BlockGuard with the SCSI "
7844 "layer, Bad protection parameters: %d %d\n",
7845 old_mask, old_guard);
7846 }
7847}
7848
7849
7850
7851
7852
7853
7854
7855
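/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform the necessary post initialization
 * setup: it adjusts the SCSI host queue depth, initializes host attributes,
 * and posts the adapter arrival event.
 **/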
7856static void
7857lpfc_post_init_setup(struct lpfc_hba *phba)
7858{
7859 struct Scsi_Host *shost;
7860 struct lpfc_adapter_event_header adapter_event;
7861
7862
7863 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7864
7865
7866
7867
7868
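	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */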
7869 shost = pci_get_drvdata(phba->pcidev);
7870 shost->can_queue = phba->cfg_hba_queue_depth - 10;
7871
7872 lpfc_host_attrib_init(shost);
7873
7874 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7875 spin_lock_irq(shost->host_lock);
7876 lpfc_poll_start_timer(phba);
7877 spin_unlock_irq(shost->host_lock);
7878 }
7879
7880 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7881 "0428 Perform SCSI scan\n");
7882
7883 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7884 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7885 fc_host_post_vendor_event(shost, fc_get_event_number(),
7886 sizeof(adapter_event),
7887 (char *) &adapter_event,
7888 LPFC_NL_VENDOR_ID);
7889 return;
7890}
7891
7892
7893
7894
7895
7896
7897
7898
7899
7900
7901
7902
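/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for a
 * device with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/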
7903static int
7904lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7905{
7906 struct pci_dev *pdev = phba->pcidev;
7907 unsigned long bar0map_len, bar2map_len;
7908 int i, hbq_count;
7909 void *ptr;
7910 int error;
7911
7912 if (!pdev)
7913 return -ENODEV;
7914
7915
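	/* Set the device DMA mask size */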
7916 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7917 if (error)
7918 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7919 if (error)
7920 return error;
7921 error = -ENODEV;
7922
7923
7924
7925
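	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */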
7926 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7927 bar0map_len = pci_resource_len(pdev, 0);
7928
7929 phba->pci_bar2_map = pci_resource_start(pdev, 2);
7930 bar2map_len = pci_resource_len(pdev, 2);
7931
7932
7933 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7934 if (!phba->slim_memmap_p) {
7935 dev_printk(KERN_ERR, &pdev->dev,
7936 "ioremap failed for SLIM memory.\n");
7937 goto out;
7938 }
7939
7940
7941 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7942 if (!phba->ctrl_regs_memmap_p) {
7943 dev_printk(KERN_ERR, &pdev->dev,
7944 "ioremap failed for HBA control registers.\n");
7945 goto out_iounmap_slim;
7946 }
7947
7948
7949 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7950 &phba->slim2p.phys, GFP_KERNEL);
7951 if (!phba->slim2p.virt)
7952 goto out_iounmap;
7953
7954 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7955 phba->mbox_ext = (phba->slim2p.virt +
7956 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
7957 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7958 phba->IOCBs = (phba->slim2p.virt +
7959 offsetof(struct lpfc_sli2_slim, IOCBs));
7960
7961 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7962 lpfc_sli_hbq_size(),
7963 &phba->hbqslimp.phys,
7964 GFP_KERNEL);
7965 if (!phba->hbqslimp.virt)
7966 goto out_free_slim;
7967
7968 hbq_count = lpfc_sli_hbq_count();
7969 ptr = phba->hbqslimp.virt;
7970 for (i = 0; i < hbq_count; ++i) {
7971 phba->hbqs[i].hbq_virt = ptr;
7972 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7973 ptr += (lpfc_hbq_defs[i]->entry_count *
7974 sizeof(struct lpfc_hbq_entry));
7975 }
7976 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7977 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7978
7979 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7980
7981 phba->MBslimaddr = phba->slim_memmap_p;
7982 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7983 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7984 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7985 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7986
7987 return 0;
7988
7989out_free_slim:
7990 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7991 phba->slim2p.virt, phba->slim2p.phys);
7992out_iounmap:
7993 iounmap(phba->ctrl_regs_memmap_p);
7994out_iounmap_slim:
7995 iounmap(phba->slim_memmap_p);
7996out:
7997 return error;
7998}
7999
8000
8001
8002
8003
8004
8005
8006
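/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for a
 * device with SLI-3 interface spec.
 **/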
8007static void
8008lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
8009{
8010 struct pci_dev *pdev;
8011
8012
8013 if (!phba->pcidev)
8014 return;
8015 else
8016 pdev = phba->pcidev;
8017
8018
8019 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8020 phba->hbqslimp.virt, phba->hbqslimp.phys);
8021 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8022 phba->slim2p.virt, phba->slim2p.phys);
8023
8024
8025 iounmap(phba->ctrl_regs_memmap_p);
8026 iounmap(phba->slim_memmap_p);
8027
8028 return;
8029}
8030
8031
8032
8033
8034
8035
8036
8037
8038
8039
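/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for the SLI4 device Power On Self Test
 * (POST) to complete and to check the POST status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/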
8040int
8041lpfc_sli4_post_status_check(struct lpfc_hba *phba)
8042{
8043 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
8044 struct lpfc_register reg_data;
8045 int i, port_error = 0;
8046 uint32_t if_type;
8047
8048 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
8050 if (!phba->sli4_hba.PSMPHRregaddr)
8051 return -ENODEV;
8052
8053
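	/* Wait up to 30 seconds (3000 x 10ms) for the port to report ready */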
8054 for (i = 0; i < 3000; i++) {
8055 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
8056 &portsmphr_reg.word0) ||
8057 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
8058
8059 port_error = -ENODEV;
8060 break;
8061 }
8062 if (LPFC_POST_STAGE_PORT_READY ==
8063 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
8064 break;
8065 msleep(10);
8066 }
8067
8068
8069
8070
8071
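	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */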
8072 if (port_error) {
8073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8074 "1408 Port Failed POST - portsmphr=0x%x, "
8075 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
8076 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
8077 portsmphr_reg.word0,
8078 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
8079 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
8080 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
8081 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
8082 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
8083 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
8084 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
8085 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
8086 } else {
8087 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8088 "2534 Device Info: SLIFamily=0x%x, "
8089 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
8090 "SLIHint_2=0x%x, FT=0x%x\n",
8091 bf_get(lpfc_sli_intf_sli_family,
8092 &phba->sli4_hba.sli_intf),
8093 bf_get(lpfc_sli_intf_slirev,
8094 &phba->sli4_hba.sli_intf),
8095 bf_get(lpfc_sli_intf_if_type,
8096 &phba->sli4_hba.sli_intf),
8097 bf_get(lpfc_sli_intf_sli_hint1,
8098 &phba->sli4_hba.sli_intf),
8099 bf_get(lpfc_sli_intf_sli_hint2,
8100 &phba->sli4_hba.sli_intf),
8101 bf_get(lpfc_sli_intf_func_type,
8102 &phba->sli4_hba.sli_intf));
8103
8104
8105
8106
8107
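		/*
		 * Check the Unrecoverable Error registers; the register
		 * layout depends on the SLI interface type.
		 */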
8108 if_type = bf_get(lpfc_sli_intf_if_type,
8109 &phba->sli4_hba.sli_intf);
8110 switch (if_type) {
8111 case LPFC_SLI_INTF_IF_TYPE_0:
8112 phba->sli4_hba.ue_mask_lo =
8113 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
8114 phba->sli4_hba.ue_mask_hi =
8115 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
8116 uerrlo_reg.word0 =
8117 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
8118 uerrhi_reg.word0 =
8119 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
8120 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
8121 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
8122 lpfc_printf_log(phba, KERN_ERR,
8123 LOG_TRACE_EVENT,
8124 "1422 Unrecoverable Error "
8125 "Detected during POST "
8126 "uerr_lo_reg=0x%x, "
8127 "uerr_hi_reg=0x%x, "
8128 "ue_mask_lo_reg=0x%x, "
8129 "ue_mask_hi_reg=0x%x\n",
8130 uerrlo_reg.word0,
8131 uerrhi_reg.word0,
8132 phba->sli4_hba.ue_mask_lo,
8133 phba->sli4_hba.ue_mask_hi);
8134 port_error = -ENODEV;
8135 }
8136 break;
8137 case LPFC_SLI_INTF_IF_TYPE_2:
8138 case LPFC_SLI_INTF_IF_TYPE_6:
8139
8140 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				       &reg_data.word0) ||
			    (bf_get(lpfc_sliport_status_err, &reg_data) &&
			     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
8144 phba->work_status[0] =
8145 readl(phba->sli4_hba.u.if_type2.
8146 ERR1regaddr);
8147 phba->work_status[1] =
8148 readl(phba->sli4_hba.u.if_type2.
8149 ERR2regaddr);
8150 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8151 "2888 Unrecoverable port error "
8152 "following POST: port status reg "
8153 "0x%x, port_smphr reg 0x%x, "
8154 "error 1=0x%x, error 2=0x%x\n",
8155 reg_data.word0,
8156 portsmphr_reg.word0,
8157 phba->work_status[0],
8158 phba->work_status[1]);
8159 port_error = -ENODEV;
8160 }
8161 break;
8162 case LPFC_SLI_INTF_IF_TYPE_1:
8163 default:
8164 break;
8165 }
8166 }
8167 return port_error;
8168}
8169
8170
8171
8172
8173
8174
8175
8176
8177
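/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up the SLI4 BAR0 PCI config space register
 * memory map.
 **/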
8178static void
8179lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8180{
8181 switch (if_type) {
8182 case LPFC_SLI_INTF_IF_TYPE_0:
8183 phba->sli4_hba.u.if_type0.UERRLOregaddr =
8184 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
8185 phba->sli4_hba.u.if_type0.UERRHIregaddr =
8186 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
8187 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8188 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8189 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8190 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8191 phba->sli4_hba.SLIINTFregaddr =
8192 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8193 break;
8194 case LPFC_SLI_INTF_IF_TYPE_2:
8195 phba->sli4_hba.u.if_type2.EQDregaddr =
8196 phba->sli4_hba.conf_regs_memmap_p +
8197 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8198 phba->sli4_hba.u.if_type2.ERR1regaddr =
8199 phba->sli4_hba.conf_regs_memmap_p +
8200 LPFC_CTL_PORT_ER1_OFFSET;
8201 phba->sli4_hba.u.if_type2.ERR2regaddr =
8202 phba->sli4_hba.conf_regs_memmap_p +
8203 LPFC_CTL_PORT_ER2_OFFSET;
8204 phba->sli4_hba.u.if_type2.CTRLregaddr =
8205 phba->sli4_hba.conf_regs_memmap_p +
8206 LPFC_CTL_PORT_CTL_OFFSET;
8207 phba->sli4_hba.u.if_type2.STATUSregaddr =
8208 phba->sli4_hba.conf_regs_memmap_p +
8209 LPFC_CTL_PORT_STA_OFFSET;
8210 phba->sli4_hba.SLIINTFregaddr =
8211 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8212 phba->sli4_hba.PSMPHRregaddr =
8213 phba->sli4_hba.conf_regs_memmap_p +
8214 LPFC_CTL_PORT_SEM_OFFSET;
8215 phba->sli4_hba.RQDBregaddr =
8216 phba->sli4_hba.conf_regs_memmap_p +
8217 LPFC_ULP0_RQ_DOORBELL;
8218 phba->sli4_hba.WQDBregaddr =
8219 phba->sli4_hba.conf_regs_memmap_p +
8220 LPFC_ULP0_WQ_DOORBELL;
8221 phba->sli4_hba.CQDBregaddr =
8222 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8223 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8224 phba->sli4_hba.MQDBregaddr =
8225 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8226 phba->sli4_hba.BMBXregaddr =
8227 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8228 break;
8229 case LPFC_SLI_INTF_IF_TYPE_6:
8230 phba->sli4_hba.u.if_type2.EQDregaddr =
8231 phba->sli4_hba.conf_regs_memmap_p +
8232 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8233 phba->sli4_hba.u.if_type2.ERR1regaddr =
8234 phba->sli4_hba.conf_regs_memmap_p +
8235 LPFC_CTL_PORT_ER1_OFFSET;
8236 phba->sli4_hba.u.if_type2.ERR2regaddr =
8237 phba->sli4_hba.conf_regs_memmap_p +
8238 LPFC_CTL_PORT_ER2_OFFSET;
8239 phba->sli4_hba.u.if_type2.CTRLregaddr =
8240 phba->sli4_hba.conf_regs_memmap_p +
8241 LPFC_CTL_PORT_CTL_OFFSET;
8242 phba->sli4_hba.u.if_type2.STATUSregaddr =
8243 phba->sli4_hba.conf_regs_memmap_p +
8244 LPFC_CTL_PORT_STA_OFFSET;
8245 phba->sli4_hba.PSMPHRregaddr =
8246 phba->sli4_hba.conf_regs_memmap_p +
8247 LPFC_CTL_PORT_SEM_OFFSET;
8248 phba->sli4_hba.BMBXregaddr =
8249 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8250 break;
8251 case LPFC_SLI_INTF_IF_TYPE_1:
8252 default:
8253 dev_printk(KERN_ERR, &phba->pcidev->dev,
8254 "FATAL - unsupported SLI4 interface type - %d\n",
8255 if_type);
8256 break;
8257 }
8258}
8259
8260
8261
8262
8263
8264
8265
8266
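/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up the SLI4 BAR1 register memory map.
 **/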
8267static void
8268lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8269{
8270 switch (if_type) {
8271 case LPFC_SLI_INTF_IF_TYPE_0:
8272 phba->sli4_hba.PSMPHRregaddr =
8273 phba->sli4_hba.ctrl_regs_memmap_p +
8274 LPFC_SLIPORT_IF0_SMPHR;
8275 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8276 LPFC_HST_ISR0;
8277 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8278 LPFC_HST_IMR0;
8279 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8280 LPFC_HST_ISCR0;
8281 break;
8282 case LPFC_SLI_INTF_IF_TYPE_6:
8283 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8284 LPFC_IF6_RQ_DOORBELL;
8285 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8286 LPFC_IF6_WQ_DOORBELL;
8287 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8288 LPFC_IF6_CQ_DOORBELL;
8289 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8290 LPFC_IF6_EQ_DOORBELL;
8291 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8292 LPFC_IF6_MQ_DOORBELL;
8293 break;
8294 case LPFC_SLI_INTF_IF_TYPE_2:
8295 case LPFC_SLI_INTF_IF_TYPE_1:
8296 default:
8297 dev_err(&phba->pcidev->dev,
8298 "FATAL - unsupported SLI4 interface type - %d\n",
8299 if_type);
8300 break;
8301 }
8302}
8303
8304
8305
8306
8307
8308
8309
8310
8311
8312
8313
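/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up the SLI4 BAR2 doorbell register memory
 * map based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/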
8314static int
8315lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8316{
8317 if (vf > LPFC_VIR_FUNC_MAX)
8318 return -ENODEV;
8319
8320 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8321 vf * LPFC_VFR_PAGE_SIZE +
8322 LPFC_ULP0_RQ_DOORBELL);
8323 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8324 vf * LPFC_VFR_PAGE_SIZE +
8325 LPFC_ULP0_WQ_DOORBELL);
8326 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8327 vf * LPFC_VFR_PAGE_SIZE +
8328 LPFC_EQCQ_DOORBELL);
8329 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8330 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8331 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8332 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8333 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8334 return 0;
8335}
8336
8337
8338
8339
8340
8341
8342
8343
8344
8345
8346
8347
8348
8349
8350
8351
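/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox region consistent
 * with the SLI-4 interface spec.  It allocates all memory necessary to
 * communicate mailbox commands to the port and sets up all alignment needs.
 * No locks are expected to be held when calling this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory
 **/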
8352static int
8353lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8354{
8355 uint32_t bmbx_size;
8356 struct lpfc_dmabuf *dmabuf;
8357 struct dma_address *dma_address;
8358 uint32_t pa_addr;
8359 uint64_t phys_addr;
8360
8361 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8362 if (!dmabuf)
8363 return -ENOMEM;
8364
8365
8366
8367
8368
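	/*
	 * Size the region to hold the mailbox plus slack for the 16-byte
	 * alignment requirement.
	 */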
8369 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8370 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8371 &dmabuf->phys, GFP_KERNEL);
8372 if (!dmabuf->virt) {
8373 kfree(dmabuf);
8374 return -ENOMEM;
8375 }
8376
8377
8378
8379
8380
8381
8382
8383
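	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */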
8384 phba->sli4_hba.bmbx.dmabuf = dmabuf;
8385 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8386
8387 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8388 LPFC_ALIGN_16_BYTE);
8389 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8390 LPFC_ALIGN_16_BYTE);
8391
8392
8393
8394
8395
8396
8397
8398
8399
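	/*
	 * Set the high and low physical addresses now.  The mailbox is
	 * posted to the port as two 30-bit fields: bits 63:34 of the
	 * aligned physical address in addr_hi and bits 33:4 in addr_lo,
	 * each tagged with the BMBX bit-1 marker.
	 */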
8400 dma_address = &phba->sli4_hba.bmbx.dma_address;
8401 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8402 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8403 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8404 LPFC_BMBX_BIT1_ADDR_HI);
8405
8406 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8407 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8408 LPFC_BMBX_BIT1_ADDR_LO);
8409 return 0;
8410}
8411
8412
8413
8414
8415
8416
8417
8418
8419
8420
8421
8422
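/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to tear down the bootstrap mailbox region and
 * release all host resources. The caller must ensure all mailbox commands
 * have completed, no additional mailbox commands are sent, and interrupts
 * are disabled before calling this routine.
 **/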
8423static void
8424lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8425{
8426 dma_free_coherent(&phba->pcidev->dev,
8427 phba->sli4_hba.bmbx.bmbx_size,
8428 phba->sli4_hba.bmbx.dmabuf->virt,
8429 phba->sli4_hba.bmbx.dmabuf->phys);
8430
8431 kfree(phba->sli4_hba.bmbx.dmabuf);
8432 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8433}
8434
8435static const char * const lpfc_topo_to_str[] = {
8436 "Loop then P2P",
8437 "Loopback",
8438 "P2P Only",
8439 "Unsupported",
8440 "Loop Only",
8441 "Unsupported",
8442 "P2P then Loop",
8443};
8444
8445#define LINK_FLAGS_DEF 0x0
8446#define LINK_FLAGS_P2P 0x1
8447#define LINK_FLAGS_LOOP 0x2
8448
8449
8450
8451
8452
8453
8454
8455
8456
8457
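/**
 * lpfc_map_topology - Map the topology read from READ_CONFIG
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to read config data
 *
 * This routine maps the topology values read from the READ_CONFIG mailbox
 * command. If the persistent topology feature is supported, the firmware
 * provided values override the driver parameter and HBA_PERSISTENT_TOPO
 * is set.
 **/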
8458static void
8459lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
8460{
8461 u8 ptv, tf, pt;
8462
8463 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
8464 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
8465 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
8466
8467 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8468 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
8469 ptv, tf, pt);
8470 if (!ptv) {
8471 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8472 "2019 FW does not support persistent topology "
8473 "Using driver parameter defined value [%s]",
8474 lpfc_topo_to_str[phba->cfg_topology]);
8475 return;
8476 }
8477
8478 phba->hba_flag |= HBA_PERSISTENT_TOPO;
8479 switch (phba->pcidev->device) {
8480 case PCI_DEVICE_ID_LANCER_G7_FC:
8481 case PCI_DEVICE_ID_LANCER_G6_FC:
8482 if (!tf) {
8483 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
8484 ? FLAGS_TOPOLOGY_MODE_LOOP
8485 : FLAGS_TOPOLOGY_MODE_PT_PT);
8486 } else {
8487 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
8488 }
8489 break;
8490 default:
8491 if (tf) {
8492
8493 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
8494 FLAGS_TOPOLOGY_MODE_LOOP_PT);
8495 } else {
8496 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
8497 ? FLAGS_TOPOLOGY_MODE_PT_PT
8498 : FLAGS_TOPOLOGY_MODE_LOOP);
8499 }
8500 break;
8501 }
8502 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
8503 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8504 "2020 Using persistent topology value [%s]",
8505 lpfc_topo_to_str[phba->cfg_topology]);
8506 } else {
8507 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8508 "2021 Invalid topology values from FW "
8509 "Using driver parameter defined value [%s]",
8510 lpfc_topo_to_str[phba->cfg_topology]);
8511 }
8512}
8513
8514
8515
8516
8517
8518
8519
8520
8521
8522
8523
8524
8525
8526
8527
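/**
 * lpfc_sli4_read_config - Get the config parameters
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the
 * HBA. The configuration parameters are used to set the base and maximum
 * values for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect
 * the resource allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/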
8528int
8529lpfc_sli4_read_config(struct lpfc_hba *phba)
8530{
8531 LPFC_MBOXQ_t *pmb;
8532 struct lpfc_mbx_read_config *rd_config;
8533 union lpfc_sli4_cfg_shdr *shdr;
8534 uint32_t shdr_status, shdr_add_status;
8535 struct lpfc_mbx_get_func_cfg *get_func_cfg;
8536 struct lpfc_rsrc_desc_fcfcoe *desc;
8537 char *pdesc_0;
8538 uint16_t forced_link_speed;
8539 uint32_t if_type, qmin;
8540 int length, i, rc = 0, rc2;
8541
8542 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8543 if (!pmb) {
8544 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8545 "2011 Unable to allocate memory for issuing "
8546 "SLI_CONFIG_SPECIAL mailbox command\n");
8547 return -ENOMEM;
8548 }
8549
8550 lpfc_read_config(phba, pmb);
8551
8552 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8553 if (rc != MBX_SUCCESS) {
8554 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8555 "2012 Mailbox failed , mbxCmd x%x "
8556 "READ_CONFIG, mbxStatus x%x\n",
8557 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8558 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8559 rc = -EIO;
8560 } else {
8561 rd_config = &pmb->u.mqe.un.rd_config;
8562 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8563 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8564 phba->sli4_hba.lnk_info.lnk_tp =
8565 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8566 phba->sli4_hba.lnk_info.lnk_no =
8567 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8568 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8569 "3081 lnk_type:%d, lnk_numb:%d\n",
8570 phba->sli4_hba.lnk_info.lnk_tp,
8571 phba->sli4_hba.lnk_info.lnk_no);
8572 } else
8573 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8574 "3082 Mailbox (x%x) returned ldv:x0\n",
8575 bf_get(lpfc_mqe_command, &pmb->u.mqe));
8576 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8577 phba->bbcredit_support = 1;
8578 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8579 }
8580
8581 phba->sli4_hba.conf_trunk =
8582 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
8583 phba->sli4_hba.extents_in_use =
8584 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8585 phba->sli4_hba.max_cfg_param.max_xri =
8586 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8587
8588 if (is_kdump_kernel() &&
8589 phba->sli4_hba.max_cfg_param.max_xri > 512)
8590 phba->sli4_hba.max_cfg_param.max_xri = 512;
8591 phba->sli4_hba.max_cfg_param.xri_base =
8592 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8593 phba->sli4_hba.max_cfg_param.max_vpi =
8594 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
8595
8596 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8597 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
8598 phba->sli4_hba.max_cfg_param.vpi_base =
8599 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8600 phba->sli4_hba.max_cfg_param.max_rpi =
8601 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8602 phba->sli4_hba.max_cfg_param.rpi_base =
8603 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8604 phba->sli4_hba.max_cfg_param.max_vfi =
8605 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8606 phba->sli4_hba.max_cfg_param.vfi_base =
8607 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8608 phba->sli4_hba.max_cfg_param.max_fcfi =
8609 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
8610 phba->sli4_hba.max_cfg_param.max_eq =
8611 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8612 phba->sli4_hba.max_cfg_param.max_rq =
8613 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8614 phba->sli4_hba.max_cfg_param.max_wq =
8615 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8616 phba->sli4_hba.max_cfg_param.max_cq =
8617 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8618 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8619 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8620 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8621 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
8622 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8623 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
8624 phba->max_vports = phba->max_vpi;
8625 lpfc_map_topology(phba, rd_config);
8626 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8627 "2003 cfg params Extents? %d "
8628 "XRI(B:%d M:%d), "
8629 "VPI(B:%d M:%d) "
8630 "VFI(B:%d M:%d) "
8631 "RPI(B:%d M:%d) "
8632 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
8633 phba->sli4_hba.extents_in_use,
8634 phba->sli4_hba.max_cfg_param.xri_base,
8635 phba->sli4_hba.max_cfg_param.max_xri,
8636 phba->sli4_hba.max_cfg_param.vpi_base,
8637 phba->sli4_hba.max_cfg_param.max_vpi,
8638 phba->sli4_hba.max_cfg_param.vfi_base,
8639 phba->sli4_hba.max_cfg_param.max_vfi,
8640 phba->sli4_hba.max_cfg_param.rpi_base,
8641 phba->sli4_hba.max_cfg_param.max_rpi,
8642 phba->sli4_hba.max_cfg_param.max_fcfi,
8643 phba->sli4_hba.max_cfg_param.max_eq,
8644 phba->sli4_hba.max_cfg_param.max_cq,
8645 phba->sli4_hba.max_cfg_param.max_wq,
8646 phba->sli4_hba.max_cfg_param.max_rq,
8647 phba->lmt);
8648
8649
8650
8651
8652
8653 qmin = phba->sli4_hba.max_cfg_param.max_wq;
8654 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8655 qmin = phba->sli4_hba.max_cfg_param.max_cq;
8656 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8657 qmin = phba->sli4_hba.max_cfg_param.max_eq;
8658
8659
8660
8661
8662
8663
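		/*
		 * What's left after this can go toward NVME / FCP.
		 * The minus 4 accounts for ELS, NVME LS, MBOX
		 * plus one extra. When configured for
		 * NVMET, FCP io channel WQs are not created.
		 */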
8664 qmin -= 4;
8665
8666
8667 if ((phba->cfg_irq_chann > qmin) ||
8668 (phba->cfg_hdw_queue > qmin)) {
8669 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8670 "2005 Reducing Queues - "
8671 "FW resource limitation: "
8672 "WQ %d CQ %d EQ %d: min %d: "
8673 "IRQ %d HDWQ %d\n",
8674 phba->sli4_hba.max_cfg_param.max_wq,
8675 phba->sli4_hba.max_cfg_param.max_cq,
8676 phba->sli4_hba.max_cfg_param.max_eq,
8677 qmin, phba->cfg_irq_chann,
8678 phba->cfg_hdw_queue);
8679
8680 if (phba->cfg_irq_chann > qmin)
8681 phba->cfg_irq_chann = qmin;
8682 if (phba->cfg_hdw_queue > qmin)
8683 phba->cfg_hdw_queue = qmin;
8684 }
8685 }
8686
8687 if (rc)
8688 goto read_cfg_out;
8689
8690
8691 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8692 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8693 forced_link_speed =
8694 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8695 if (forced_link_speed) {
8696 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8697
8698 switch (forced_link_speed) {
8699 case LINK_SPEED_1G:
8700 phba->cfg_link_speed =
8701 LPFC_USER_LINK_SPEED_1G;
8702 break;
8703 case LINK_SPEED_2G:
8704 phba->cfg_link_speed =
8705 LPFC_USER_LINK_SPEED_2G;
8706 break;
8707 case LINK_SPEED_4G:
8708 phba->cfg_link_speed =
8709 LPFC_USER_LINK_SPEED_4G;
8710 break;
8711 case LINK_SPEED_8G:
8712 phba->cfg_link_speed =
8713 LPFC_USER_LINK_SPEED_8G;
8714 break;
8715 case LINK_SPEED_10G:
8716 phba->cfg_link_speed =
8717 LPFC_USER_LINK_SPEED_10G;
8718 break;
8719 case LINK_SPEED_16G:
8720 phba->cfg_link_speed =
8721 LPFC_USER_LINK_SPEED_16G;
8722 break;
8723 case LINK_SPEED_32G:
8724 phba->cfg_link_speed =
8725 LPFC_USER_LINK_SPEED_32G;
8726 break;
8727 case LINK_SPEED_64G:
8728 phba->cfg_link_speed =
8729 LPFC_USER_LINK_SPEED_64G;
8730 break;
8731 case 0xffff:
8732 phba->cfg_link_speed =
8733 LPFC_USER_LINK_SPEED_AUTO;
8734 break;
8735 default:
8736 lpfc_printf_log(phba, KERN_ERR,
8737 LOG_TRACE_EVENT,
8738 "0047 Unrecognized link "
8739 "speed : %d\n",
8740 forced_link_speed);
8741 phba->cfg_link_speed =
8742 LPFC_USER_LINK_SPEED_AUTO;
8743 }
8744 }
8745 }
8746
8747
8748 length = phba->sli4_hba.max_cfg_param.max_xri -
8749 lpfc_sli4_get_els_iocb_cnt(phba);
8750 if (phba->cfg_hba_queue_depth > length) {
8751 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8752 "3361 HBA queue depth changed from %d to %d\n",
8753 phba->cfg_hba_queue_depth, length);
8754 phba->cfg_hba_queue_depth = length;
8755 }
8756
8757 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
8758 LPFC_SLI_INTF_IF_TYPE_2)
8759 goto read_cfg_out;
8760
8761
8762 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8763 sizeof(struct lpfc_sli4_cfg_mhdr));
8764 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8765 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8766 length, LPFC_SLI4_MBX_EMBED);
8767
8768 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8769 shdr = (union lpfc_sli4_cfg_shdr *)
8770 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8771 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8772 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8773 if (rc2 || shdr_status || shdr_add_status) {
8774 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8775 "3026 Mailbox failed , mbxCmd x%x "
8776 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8777 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8778 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8779 goto read_cfg_out;
8780 }
8781
8782
8783 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
8784
8785 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8786 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8787 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8788 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8789 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8790 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8791 goto read_cfg_out;
8792
8793 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8794 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
8795 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8796 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
8797 phba->sli4_hba.iov.pf_number =
8798 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8799 phba->sli4_hba.iov.vf_number =
8800 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8801 break;
8802 }
8803 }
8804
8805 if (i < LPFC_RSRC_DESC_MAX_NUM)
8806 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8807 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8808 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8809 phba->sli4_hba.iov.vf_number);
8810 else
8811 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8812 "3028 GET_FUNCTION_CONFIG: failed to find "
8813 "Resource Descriptor:x%x\n",
8814 LPFC_RSRC_DESC_TYPE_FCFCOE);
8815
8816read_cfg_out:
8817 mempool_free(pmb, phba->mbox_mem_pool);
8818 return rc;
8819}
8820
8821
8822
8823
8824
8825
8826
8827
8828
8829
8830
8831
8832
8833
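/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the port-side endian order when the
 * port if_type is 0.  It is a no-op for other if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/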
8834static int
8835lpfc_setup_endian_order(struct lpfc_hba *phba)
8836{
8837 LPFC_MBOXQ_t *mboxq;
8838 uint32_t if_type, rc = 0;
8839 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8840 HOST_ENDIAN_HIGH_WORD1};
8841
8842 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8843 switch (if_type) {
8844 case LPFC_SLI_INTF_IF_TYPE_0:
8845 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8846 GFP_KERNEL);
8847 if (!mboxq) {
8848 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8849 "0492 Unable to allocate memory for "
8850 "issuing SLI_CONFIG_SPECIAL mailbox "
8851 "command\n");
8852 return -ENOMEM;
8853 }
8854
8855
8856
8857
8858
8859 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8860 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8861 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8862 if (rc != MBX_SUCCESS) {
8863 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8864 "0493 SLI_CONFIG_SPECIAL mailbox "
8865 "failed with status x%x\n",
8866 rc);
8867 rc = -EIO;
8868 }
8869 mempool_free(mboxq, phba->mbox_mem_pool);
8870 break;
8871 case LPFC_SLI_INTF_IF_TYPE_6:
8872 case LPFC_SLI_INTF_IF_TYPE_2:
8873 case LPFC_SLI_INTF_IF_TYPE_1:
8874 default:
8875 break;
8876 }
8877 return rc;
8878}
8879
8880
8881
8882
8883
8884
8885
8886
8887
8888
8889
8890
8891
8892
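/**
 * lpfc_sli4_queue_verify - Verify and update queue settings
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine clamps the NVMET MRQ count to the hardware queue count and
 * to the supported maximum, then records the default EQ and CQ entry sizes
 * and counts.
 *
 * Return: 0 always.
 **/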
8893static int
8894lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8895{
8896
8897
8898
8899
8900
8901 if (phba->nvmet_support) {
8902 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
8903 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
8904 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8905 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8906 }
8907
8908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8909 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8910 phba->cfg_hdw_queue, phba->cfg_irq_chann,
8911 phba->cfg_nvmet_mrq);
8912
8913
8914 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8915 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8916
8917
8918 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8919 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8920 return 0;
8921}
8922
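/*
 * lpfc_alloc_io_wq_cq - Allocate one fast-path IO CQ/WQ pair for hdwq @idx,
 * using expanded queue pages when the adapter supports them.
 */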
8923static int
8924lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8925{
8926 struct lpfc_queue *qdesc;
8927 u32 wqesize;
8928 int cpu;
8929
8930 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8931
8932 if (phba->enab_exp_wqcq_pages)
8933
8934 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8935 phba->sli4_hba.cq_esize,
8936 LPFC_CQE_EXP_COUNT, cpu);
8937
8938 else
8939 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8940 phba->sli4_hba.cq_esize,
8941 phba->sli4_hba.cq_ecount, cpu);
8942 if (!qdesc) {
8943 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8944 "0499 Failed allocate fast-path IO CQ (%d)\n",
8945 idx);
8946 return 1;
8947 }
8948 qdesc->qe_valid = 1;
8949 qdesc->hdwq = idx;
8950 qdesc->chann = cpu;
8951 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
8952
8953
8954 if (phba->enab_exp_wqcq_pages) {
8955
8956 wqesize = (phba->fcp_embed_io) ?
8957 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8958 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8959 wqesize,
8960 LPFC_WQE_EXP_COUNT, cpu);
8961 } else
8962 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8963 phba->sli4_hba.wq_esize,
8964 phba->sli4_hba.wq_ecount, cpu);
8965
8966 if (!qdesc) {
8967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8968 "0503 Failed allocate fast-path IO WQ (%d)\n",
8969 idx);
8970 return 1;
8971 }
8972 qdesc->hdwq = idx;
8973 qdesc->chann = cpu;
8974 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
8975 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8976 return 0;
8977}
8978
8979
8980
8981
8982
8983
8984
8985
8986
8987
8988
8989
8990
8991
8992
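/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for HBA
 * operation: one EQ per interrupt vector, an IO CQ/WQ pair per hardware
 * queue, the slow-path mailbox and ELS CQs, MQ and WQ, the receive queue
 * pair, and, when configured, the NVME LS and NVMET queues.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/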
8993int
8994lpfc_sli4_queue_create(struct lpfc_hba *phba)
8995{
8996 struct lpfc_queue *qdesc;
8997 int idx, cpu, eqcpu;
8998 struct lpfc_sli4_hdw_queue *qp;
8999 struct lpfc_vector_map_info *cpup;
9000 struct lpfc_vector_map_info *eqcpup;
9001 struct lpfc_eq_intr_info *eqi;
9002
9003
9004
9005
9006
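	/* Record the default entry sizes and counts for each queue type */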
9007 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
9008 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
9009 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
9010 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
9011 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
9012 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
9013 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
9014 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
9015 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
9016 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
9017
9018 if (!phba->sli4_hba.hdwq) {
9019 phba->sli4_hba.hdwq = kcalloc(
9020 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
9021 GFP_KERNEL);
9022 if (!phba->sli4_hba.hdwq) {
9023 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9024 "6427 Failed allocate memory for "
9025 "fast-path Hardware Queue array\n");
9026 goto out_error;
9027 }
9028
9029 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9030 qp = &phba->sli4_hba.hdwq[idx];
9031 spin_lock_init(&qp->io_buf_list_get_lock);
9032 spin_lock_init(&qp->io_buf_list_put_lock);
9033 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
9034 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
9035 qp->get_io_bufs = 0;
9036 qp->put_io_bufs = 0;
9037 qp->total_io_bufs = 0;
9038 spin_lock_init(&qp->abts_io_buf_list_lock);
9039 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
9040 qp->abts_scsi_io_bufs = 0;
9041 qp->abts_nvme_io_bufs = 0;
9042 INIT_LIST_HEAD(&qp->sgl_list);
9043 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
9044 spin_lock_init(&qp->hdwq_lock);
9045 }
9046 }
9047
9048 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9049 if (phba->nvmet_support) {
9050 phba->sli4_hba.nvmet_cqset = kcalloc(
9051 phba->cfg_nvmet_mrq,
9052 sizeof(struct lpfc_queue *),
9053 GFP_KERNEL);
9054 if (!phba->sli4_hba.nvmet_cqset) {
9055 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9056 "3121 Fail allocate memory for "
9057 "fast-path CQ set array\n");
9058 goto out_error;
9059 }
9060 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
9061 phba->cfg_nvmet_mrq,
9062 sizeof(struct lpfc_queue *),
9063 GFP_KERNEL);
9064 if (!phba->sli4_hba.nvmet_mrq_hdr) {
9065 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9066 "3122 Fail allocate memory for "
9067 "fast-path RQ set hdr array\n");
9068 goto out_error;
9069 }
9070 phba->sli4_hba.nvmet_mrq_data = kcalloc(
9071 phba->cfg_nvmet_mrq,
9072 sizeof(struct lpfc_queue *),
9073 GFP_KERNEL);
9074 if (!phba->sli4_hba.nvmet_mrq_data) {
9075 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9076 "3124 Fail allocate memory for "
9077 "fast-path RQ set data array\n");
9078 goto out_error;
9079 }
9080 }
9081 }
9082
9083 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9084
9085
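	/*
	 * Allocate one EQ per interrupt vector: only CPUs flagged
	 * LPFC_CPU_FIRST_IRQ create an EQ here.
	 */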
9086 for_each_present_cpu(cpu) {
9087
9088
9089
9090
9091 cpup = &phba->sli4_hba.cpu_map[cpu];
9092 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9093 continue;
9094
9095
9096 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9097
9098
9099 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9100 phba->sli4_hba.eq_esize,
9101 phba->sli4_hba.eq_ecount, cpu);
9102 if (!qdesc) {
9103 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9104 "0497 Failed allocate EQ (%d)\n",
9105 cpup->hdwq);
9106 goto out_error;
9107 }
9108 qdesc->qe_valid = 1;
9109 qdesc->hdwq = cpup->hdwq;
9110 qdesc->chann = cpu;
9111 qdesc->last_cpu = qdesc->chann;
9112
9113
9114 qp->hba_eq = qdesc;
9115
9116 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
9117 list_add(&qdesc->cpu_list, &eqi->list);
9118 }
9119
9120
9121
9122
9123 for_each_present_cpu(cpu) {
9124 cpup = &phba->sli4_hba.cpu_map[cpu];
9125
9126
9127 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
9128 continue;
9129
9130
9131 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9132 if (qp->hba_eq)
9133 continue;
9134
9135
9136 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
9137 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
9138 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
9139 }
9140
9141
9142 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9143 if (lpfc_alloc_io_wq_cq(phba, idx))
9144 goto out_error;
9145 }
9146
9147 if (phba->nvmet_support) {
9148 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9149 cpu = lpfc_find_cpu_handle(phba, idx,
9150 LPFC_FIND_BY_HDWQ);
9151 qdesc = lpfc_sli4_queue_alloc(phba,
9152 LPFC_DEFAULT_PAGE_SIZE,
9153 phba->sli4_hba.cq_esize,
9154 phba->sli4_hba.cq_ecount,
9155 cpu);
9156 if (!qdesc) {
9157 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9158 "3142 Failed allocate NVME "
9159 "CQ Set (%d)\n", idx);
9160 goto out_error;
9161 }
9162 qdesc->qe_valid = 1;
9163 qdesc->hdwq = idx;
9164 qdesc->chann = cpu;
9165 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
9166 }
9167 }
9168
9169
9170
9171
9172
9173 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
9174
9175 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9176 phba->sli4_hba.cq_esize,
9177 phba->sli4_hba.cq_ecount, cpu);
9178 if (!qdesc) {
9179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9180 "0500 Failed allocate slow-path mailbox CQ\n");
9181 goto out_error;
9182 }
9183 qdesc->qe_valid = 1;
9184 phba->sli4_hba.mbx_cq = qdesc;
9185
9186
9187 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9188 phba->sli4_hba.cq_esize,
9189 phba->sli4_hba.cq_ecount, cpu);
9190 if (!qdesc) {
9191 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9192 "0501 Failed allocate slow-path ELS CQ\n");
9193 goto out_error;
9194 }
9195 qdesc->qe_valid = 1;
9196 qdesc->chann = cpu;
9197 phba->sli4_hba.els_cq = qdesc;
9198
9199
9200
9201
9202
9203
9204
9205
9206 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9207 phba->sli4_hba.mq_esize,
9208 phba->sli4_hba.mq_ecount, cpu);
9209 if (!qdesc) {
9210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9211 "0505 Failed allocate slow-path MQ\n");
9212 goto out_error;
9213 }
9214 qdesc->chann = cpu;
9215 phba->sli4_hba.mbx_wq = qdesc;
9216
9217
9218
9219
9220
9221
9222 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9223 phba->sli4_hba.wq_esize,
9224 phba->sli4_hba.wq_ecount, cpu);
9225 if (!qdesc) {
9226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9227 "0504 Failed allocate slow-path ELS WQ\n");
9228 goto out_error;
9229 }
9230 qdesc->chann = cpu;
9231 phba->sli4_hba.els_wq = qdesc;
9232 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9233
9234 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9235
9236 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9237 phba->sli4_hba.cq_esize,
9238 phba->sli4_hba.cq_ecount, cpu);
9239 if (!qdesc) {
9240 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9241 "6079 Failed allocate NVME LS CQ\n");
9242 goto out_error;
9243 }
9244 qdesc->chann = cpu;
9245 qdesc->qe_valid = 1;
9246 phba->sli4_hba.nvmels_cq = qdesc;
9247
9248
9249 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9250 phba->sli4_hba.wq_esize,
9251 phba->sli4_hba.wq_ecount, cpu);
9252 if (!qdesc) {
9253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9254 "6080 Failed allocate NVME LS WQ\n");
9255 goto out_error;
9256 }
9257 qdesc->chann = cpu;
9258 phba->sli4_hba.nvmels_wq = qdesc;
9259 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9260 }
9261
9262
9263
9264
9265
9266
9267 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9268 phba->sli4_hba.rq_esize,
9269 phba->sli4_hba.rq_ecount, cpu);
9270 if (!qdesc) {
9271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9272 "0506 Failed allocate receive HRQ\n");
9273 goto out_error;
9274 }
9275 phba->sli4_hba.hdr_rq = qdesc;
9276
9277
9278 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9279 phba->sli4_hba.rq_esize,
9280 phba->sli4_hba.rq_ecount, cpu);
9281 if (!qdesc) {
9282 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9283 "0507 Failed allocate receive DRQ\n");
9284 goto out_error;
9285 }
9286 phba->sli4_hba.dat_rq = qdesc;
9287
9288 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9289 phba->nvmet_support) {
9290 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9291 cpu = lpfc_find_cpu_handle(phba, idx,
9292 LPFC_FIND_BY_HDWQ);
9293
9294 qdesc = lpfc_sli4_queue_alloc(phba,
9295 LPFC_DEFAULT_PAGE_SIZE,
9296 phba->sli4_hba.rq_esize,
9297 LPFC_NVMET_RQE_DEF_COUNT,
9298 cpu);
9299 if (!qdesc) {
9300 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9301 "3146 Failed allocate "
9302 "receive HRQ\n");
9303 goto out_error;
9304 }
9305 qdesc->hdwq = idx;
9306 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9307
9308
9309 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9310 GFP_KERNEL,
9311 cpu_to_node(cpu));
9312 if (qdesc->rqbp == NULL) {
9313 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9314 "6131 Failed allocate "
9315 "Header RQBP\n");
9316 goto out_error;
9317 }
9318
9319
9320 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);

			/* Create NVMET Receive Queue for data */
9323 qdesc = lpfc_sli4_queue_alloc(phba,
9324 LPFC_DEFAULT_PAGE_SIZE,
9325 phba->sli4_hba.rq_esize,
9326 LPFC_NVMET_RQE_DEF_COUNT,
9327 cpu);
9328 if (!qdesc) {
9329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9330 "3156 Failed allocate "
9331 "receive DRQ\n");
9332 goto out_error;
9333 }
9334 qdesc->hdwq = idx;
9335 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9336 }
9337 }
9338
9339
9340 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9341 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9342 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9343 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9344 }
9345 }
9346
9347
9348 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9349 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9350 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9351 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9352 }
9353 }
9354
9355 return 0;
9356
9357out_error:
9358 lpfc_sli4_queue_destroy(phba);
9359 return -ENOMEM;
9360}
9361
9362static inline void
9363__lpfc_sli4_release_queue(struct lpfc_queue **qp)
9364{
9365 if (*qp != NULL) {
9366 lpfc_sli4_queue_free(*qp);
9367 *qp = NULL;
9368 }
9369}
9370
9371static inline void
9372lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9373{
9374 int idx;
9375
9376 if (*qs == NULL)
9377 return;
9378
9379 for (idx = 0; idx < max; idx++)
9380 __lpfc_sli4_release_queue(&(*qs)[idx]);
9381
9382 kfree(*qs);
9383 *qs = NULL;
9384}
9385
9386static inline void
9387lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9388{
9389 struct lpfc_sli4_hdw_queue *hdwq;
9390 struct lpfc_queue *eq;
9391 uint32_t idx;
9392
9393 hdwq = phba->sli4_hba.hdwq;

	/* Loop thru all Hardware Queues */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		/* Free the CQ/WQ corresponding to the Hardware Queue */
9398 lpfc_sli4_queue_free(hdwq[idx].io_cq);
9399 lpfc_sli4_queue_free(hdwq[idx].io_wq);
9400 hdwq[idx].hba_eq = NULL;
9401 hdwq[idx].io_cq = NULL;
9402 hdwq[idx].io_wq = NULL;
9403 if (phba->cfg_xpsgl && !phba->nvmet_support)
9404 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9405 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
9406 }
9407
	/* Loop thru all IRQ vectors */
	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		/* Free the EQ corresponding to the IRQ vector */
9410 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9411 lpfc_sli4_queue_free(eq);
9412 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9413 }
9414}
9415
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 **/
9428void
9429lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9430{
	/*
	 * Set the FREE_INIT flag before beginning to free the queues, then
	 * wait until any current users of the queues (FREE_WAIT) have
	 * drained.
	 */
9436 spin_lock_irq(&phba->hbalock);
9437 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9438 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9439 spin_unlock_irq(&phba->hbalock);
9440 msleep(20);
9441 spin_lock_irq(&phba->hbalock);
9442 }
9443 spin_unlock_irq(&phba->hbalock);
9444
9445 lpfc_sli4_cleanup_poll_list(phba);
9446
9447
9448 if (phba->sli4_hba.hdwq)
9449 lpfc_sli4_release_hdwq(phba);
9450
9451 if (phba->nvmet_support) {
9452 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9453 phba->cfg_nvmet_mrq);
9454
9455 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9456 phba->cfg_nvmet_mrq);
9457 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9458 phba->cfg_nvmet_mrq);
9459 }

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on this list has been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Done with freeing the queues */
9487 spin_lock_irq(&phba->hbalock);
9488 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9489 spin_unlock_irq(&phba->hbalock);
9490}
9491
9492int
9493lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9494{
9495 struct lpfc_rqb *rqbp;
9496 struct lpfc_dmabuf *h_buf;
9497 struct rqb_dmabuf *rqb_buffer;
9498
9499 rqbp = rq->rqbp;
9500 while (!list_empty(&rqbp->rqb_buffer_list)) {
9501 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9502 struct lpfc_dmabuf, list);
9503
9504 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9505 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9506 rqbp->buffer_count--;
9507 }
9508 return 1;
9509}
9510
9511static int
9512lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9513 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9514 int qidx, uint32_t qtype)
9515{
9516 struct lpfc_sli_ring *pring;
9517 int rc;
9518
9519 if (!eq || !cq || !wq) {
9520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9521 "6085 Fast-path %s (%d) not allocated\n",
9522 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9523 return -ENOMEM;
9524 }

	/* create the Cq first */
9527 rc = lpfc_cq_create(phba, cq, eq,
9528 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9529 if (rc) {
9530 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9531 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9532 qidx, (uint32_t)rc);
9533 return rc;
9534 }
9535
	if (qtype != LPFC_MBOX) {
		/* Setup cq_map for fast lookup */
9538 if (cq_map)
9539 *cq_map = cq->queue_id;
9540
9541 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9542 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9543 qidx, cq->queue_id, qidx, eq->queue_id);

		/* create the wq */
9546 rc = lpfc_wq_create(phba, wq, cq, qtype);
9547 if (rc) {
9548 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9549 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9550 qidx, (uint32_t)rc);
9551
9552 return rc;
9553 }
9554
9555
9556 pring = wq->pring;
9557 pring->sli.sli4.wqp = (void *)wq;
9558 cq->pring = pring;
9559
9560 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9561 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9562 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9563 } else {
9564 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9565 if (rc) {
9566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9567 "0539 Failed setup of slow-path MQ: "
9568 "rc = 0x%x\n", rc);
9569
9570 return rc;
9571 }
9572
9573 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9574 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9575 phba->sli4_hba.mbx_wq->queue_id,
9576 phba->sli4_hba.mbx_cq->queue_id);
9577 }
9578
9579 return 0;
9580}
9581
/**
 * lpfc_setup_cq_lookup - Setup the CQ lookup table
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine populates the CQ lookup table, indexed by CQ queue_id, so
 * the fast-path interrupt handler can resolve a completion queue id to its
 * lpfc_queue without walking the EQ child lists.
 **/
9589static void
9590lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9591{
9592 struct lpfc_queue *eq, *childq;
9593 int qidx;
9594
9595 memset(phba->sli4_hba.cq_lookup, 0,
9596 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9597
9598 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		/* Get the EQ corresponding to the IRQ vector */
9600 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9601 if (!eq)
9602 continue;
9603
9604 list_for_each_entry(childq, &eq->child_list, list) {
9605 if (childq->queue_id > phba->sli4_hba.cq_max)
9606 continue;
9607 if (childq->subtype == LPFC_IO)
9608 phba->sli4_hba.cq_lookup[childq->queue_id] =
9609 childq;
9610 }
9611 }
9612}
9613
/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
9626int
9627lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9628{
9629 uint32_t shdr_status, shdr_add_status;
9630 union lpfc_sli4_cfg_shdr *shdr;
9631 struct lpfc_vector_map_info *cpup;
9632 struct lpfc_sli4_hdw_queue *qp;
9633 LPFC_MBOXQ_t *mboxq;
9634 int qidx, cpu;
9635 uint32_t length, usdelay;
9636 int rc = -ENOMEM;
9637
9638
9639 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9640 if (!mboxq) {
9641 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9642 "3249 Unable to allocate memory for "
9643 "QUERY_FW_CFG mailbox command\n");
9644 return -ENOMEM;
9645 }
9646 length = (sizeof(struct lpfc_mbx_query_fw_config) -
9647 sizeof(struct lpfc_sli4_cfg_mhdr));
9648 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9649 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9650 length, LPFC_SLI4_MBX_EMBED);
9651
9652 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9653
9654 shdr = (union lpfc_sli4_cfg_shdr *)
9655 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9656 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9657 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9658 if (shdr_status || shdr_add_status || rc) {
9659 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9660 "3250 QUERY_FW_CFG mailbox failed with status "
9661 "x%x add_status x%x, mbx status x%x\n",
9662 shdr_status, shdr_add_status, rc);
9663 if (rc != MBX_TIMEOUT)
9664 mempool_free(mboxq, phba->mbox_mem_pool);
9665 rc = -ENXIO;
9666 goto out_error;
9667 }
9668
9669 phba->sli4_hba.fw_func_mode =
9670 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9671 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9672 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9673 phba->sli4_hba.physical_port =
9674 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9675 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9676 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9677 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9678 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9679
9680 if (rc != MBX_TIMEOUT)
9681 mempool_free(mboxq, phba->mbox_mem_pool);
9682
	/*
	 * Set up HBA Event Queues (EQs)
	 */
	qp = phba->sli4_hba.hdwq;

	/* Set up HBA event queue */
	if (!qp) {
9690 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9691 "3147 Fast-path EQs not allocated\n");
9692 rc = -ENOMEM;
9693 goto out_error;
9694 }
9695

	/* Loop thru all IRQ vectors */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		/* Create HBA Event Queues (EQs) in order */
		for_each_present_cpu(cpu) {
			cpup = &phba->sli4_hba.cpu_map[cpu];

			/* Look for the CPU that's using this vector with
			 * LPFC_CPU_FIRST_IRQ set.
			 */
			if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
				continue;
			if (qidx != cpup->eq)
				continue;

			/* Create an EQ for that vector */
			rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
					    phba->cfg_fcp_imax);
9713 if (rc) {
9714 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9715 "0523 Failed setup of fast-path"
9716 " EQ (%d), rc = 0x%x\n",
9717 cpup->eq, (uint32_t)rc);
9718 goto out_destroy;
9719 }

			/* Save the EQ for that vector in the hba_eq_hdl */
9722 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9723 qp[cpup->hdwq].hba_eq;
9724
9725 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9726 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9727 cpup->eq,
9728 qp[cpup->hdwq].hba_eq->queue_id);
9729 }
9730 }
9731
	/* Loop thru all Hardware Queues */
	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Create the CQ/WQ corresponding to the Hardware Queue */
		rc = lpfc_create_wq_cq(phba,
9739 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9740 qp[qidx].io_cq,
9741 qp[qidx].io_wq,
9742 &phba->sli4_hba.hdwq[qidx].io_cq_map,
9743 qidx,
9744 LPFC_IO);
9745 if (rc) {
9746 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9747 "0535 Failed to setup fastpath "
9748 "IO WQ/CQ (%d), rc = 0x%x\n",
9749 qidx, (uint32_t)rc);
9750 goto out_destroy;
9751 }
9752 }
9753
	/*
	 * Set up Slow Path Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX CQ/MQ */
9760 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
9761 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9762 "0528 %s not allocated\n",
9763 phba->sli4_hba.mbx_cq ?
9764 "Mailbox WQ" : "Mailbox CQ");
9765 rc = -ENOMEM;
9766 goto out_destroy;
9767 }
9768
9769 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9770 phba->sli4_hba.mbx_cq,
9771 phba->sli4_hba.mbx_wq,
9772 NULL, 0, LPFC_MBOX);
9773 if (rc) {
9774 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9775 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9776 (uint32_t)rc);
9777 goto out_destroy;
9778 }
9779 if (phba->nvmet_support) {
9780 if (!phba->sli4_hba.nvmet_cqset) {
9781 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9782 "3165 Fast-path NVME CQ Set "
9783 "array not allocated\n");
9784 rc = -ENOMEM;
9785 goto out_destroy;
9786 }
9787 if (phba->cfg_nvmet_mrq > 1) {
9788 rc = lpfc_cq_create_set(phba,
9789 phba->sli4_hba.nvmet_cqset,
9790 qp,
9791 LPFC_WCQ, LPFC_NVMET);
9792 if (rc) {
9793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9794 "3164 Failed setup of NVME CQ "
9795 "Set, rc = 0x%x\n",
9796 (uint32_t)rc);
9797 goto out_destroy;
9798 }
9799 } else {
9800
9801 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
9802 qp[0].hba_eq,
9803 LPFC_WCQ, LPFC_NVMET);
9804 if (rc) {
9805 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9806 "6089 Failed setup NVMET CQ: "
9807 "rc = 0x%x\n", (uint32_t)rc);
9808 goto out_destroy;
9809 }
9810 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9811
9812 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9813 "6090 NVMET CQ setup: cq-id=%d, "
9814 "parent eq-id=%d\n",
9815 phba->sli4_hba.nvmet_cqset[0]->queue_id,
9816 qp[0].hba_eq->queue_id);
9817 }
9818 }
9819
9820
9821 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9822 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9823 "0530 ELS %s not allocated\n",
9824 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9825 rc = -ENOMEM;
9826 goto out_destroy;
9827 }
9828 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9829 phba->sli4_hba.els_cq,
9830 phba->sli4_hba.els_wq,
9831 NULL, 0, LPFC_ELS);
9832 if (rc) {
9833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9834 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9835 (uint32_t)rc);
9836 goto out_destroy;
9837 }
9838 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9839 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9840 phba->sli4_hba.els_wq->queue_id,
9841 phba->sli4_hba.els_cq->queue_id);
9842
9843 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9844
9845 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9846 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9847 "6091 LS %s not allocated\n",
9848 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9849 rc = -ENOMEM;
9850 goto out_destroy;
9851 }
9852 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9853 phba->sli4_hba.nvmels_cq,
9854 phba->sli4_hba.nvmels_wq,
9855 NULL, 0, LPFC_NVME_LS);
9856 if (rc) {
9857 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0526 Failed setup of NVME LS WQ/CQ: "
					"rc = 0x%x\n", (uint32_t)rc);
			goto out_destroy;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"6096 NVME LS WQ setup: wq-id=%d, "
				"parent cq-id=%d\n",
				phba->sli4_hba.nvmels_wq->queue_id,
				phba->sli4_hba.nvmels_cq->queue_id);
9868 }
9869

	/* Set up NVMET MRQ */
9873 if (phba->nvmet_support) {
9874 if ((!phba->sli4_hba.nvmet_cqset) ||
9875 (!phba->sli4_hba.nvmet_mrq_hdr) ||
9876 (!phba->sli4_hba.nvmet_mrq_data)) {
9877 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9878 "6130 MRQ CQ Queues not "
9879 "allocated\n");
9880 rc = -ENOMEM;
9881 goto out_destroy;
9882 }
9883 if (phba->cfg_nvmet_mrq > 1) {
9884 rc = lpfc_mrq_create(phba,
9885 phba->sli4_hba.nvmet_mrq_hdr,
9886 phba->sli4_hba.nvmet_mrq_data,
9887 phba->sli4_hba.nvmet_cqset,
9888 LPFC_NVMET);
9889 if (rc) {
9890 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9891 "6098 Failed setup of NVMET "
9892 "MRQ: rc = 0x%x\n",
9893 (uint32_t)rc);
9894 goto out_destroy;
9895 }
9896
9897 } else {
9898 rc = lpfc_rq_create(phba,
9899 phba->sli4_hba.nvmet_mrq_hdr[0],
9900 phba->sli4_hba.nvmet_mrq_data[0],
9901 phba->sli4_hba.nvmet_cqset[0],
9902 LPFC_NVMET);
9903 if (rc) {
9904 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9905 "6057 Failed setup of NVMET "
9906 "Receive Queue: rc = 0x%x\n",
9907 (uint32_t)rc);
9908 goto out_destroy;
9909 }
9910
9911 lpfc_printf_log(
9912 phba, KERN_INFO, LOG_INIT,
9913 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9914 "dat-rq-id=%d parent cq-id=%d\n",
9915 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9916 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9917 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9918
9919 }
9920 }
9921
9922 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9923 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9924 "0540 Receive Queue not allocated\n");
9925 rc = -ENOMEM;
9926 goto out_destroy;
9927 }
9928
9929 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9930 phba->sli4_hba.els_cq, LPFC_USOL);
9931 if (rc) {
9932 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9933 "0541 Failed setup of Receive Queue: "
9934 "rc = 0x%x\n", (uint32_t)rc);
9935 goto out_destroy;
9936 }
9937
9938 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9939 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9940 "parent cq-id=%d\n",
9941 phba->sli4_hba.hdr_rq->queue_id,
9942 phba->sli4_hba.dat_rq->queue_id,
9943 phba->sli4_hba.els_cq->queue_id);
9944
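	/* Convert cfg_fcp_imax (interrupts per second) to an EQ delay in us */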
9945 if (phba->cfg_fcp_imax)
9946 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9947 else
9948 usdelay = 0;
9949
9950 for (qidx = 0; qidx < phba->cfg_irq_chann;
9951 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9952 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9953 usdelay);
9954
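	/* Allocate and populate the CQ lookup table for the fast-path ISR */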
9955 if (phba->sli4_hba.cq_max) {
9956 kfree(phba->sli4_hba.cq_lookup);
9957 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9958 sizeof(struct lpfc_queue *), GFP_KERNEL);
9959 if (!phba->sli4_hba.cq_lookup) {
9960 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9961 "0549 Failed setup of CQ Lookup table: "
9962 "size 0x%x\n", phba->sli4_hba.cq_max);
9963 rc = -ENOMEM;
9964 goto out_destroy;
9965 }
9966 lpfc_setup_cq_lookup(phba);
9967 }
9968 return 0;
9969
9970out_destroy:
9971 lpfc_sli4_queue_unset(phba);
9972out_error:
9973 return rc;
9974}
9975
9976
9977
9978
9979
9980
9981
9982
9983
9984
9985
9986
9987
9988void
9989lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9990{
9991 struct lpfc_sli4_hdw_queue *qp;
9992 struct lpfc_queue *eq;
9993 int qidx;
9994
9995
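	/* Unset mailbox command work queue */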
9996 if (phba->sli4_hba.mbx_wq)
9997 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9998
9999
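	/* Unset NVME LS work queue */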
10000 if (phba->sli4_hba.nvmels_wq)
10001 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
10002
10003
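	/* Unset ELS work queue */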
10004 if (phba->sli4_hba.els_wq)
10005 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
10006
10007
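	/* Unset unsolicited receive queue */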
10008 if (phba->sli4_hba.hdr_rq)
10009 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
10010 phba->sli4_hba.dat_rq);
10011
10012
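	/* Unset mailbox command complete queue */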
10013 if (phba->sli4_hba.mbx_cq)
10014 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
10015
10016
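	/* Unset ELS complete queue */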
10017 if (phba->sli4_hba.els_cq)
10018 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
10019
10020
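	/* Unset NVME LS complete queue */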
10021 if (phba->sli4_hba.nvmels_cq)
10022 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
10023
10024 if (phba->nvmet_support) {
10025
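		/* Unset NVMET MRQ queue */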
10026 if (phba->sli4_hba.nvmet_mrq_hdr) {
10027 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
10028 lpfc_rq_destroy(
10029 phba,
10030 phba->sli4_hba.nvmet_mrq_hdr[qidx],
10031 phba->sli4_hba.nvmet_mrq_data[qidx]);
10032 }
10033
10034
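		/* Unset NVMET CQ Set complete queue */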
10035 if (phba->sli4_hba.nvmet_cqset) {
10036 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
10037 lpfc_cq_destroy(
10038 phba, phba->sli4_hba.nvmet_cqset[qidx]);
10039 }
10040 }
10041
10042
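	/* Unset fast-path SLI4 CQ/WQ and EQ queues */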
10043 if (phba->sli4_hba.hdwq) {
10044
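		/* Loop thru all Hardware Queues */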
10045 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
10046
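			/* Destroy the CQ/WQ corresponding to the Hardware Queue */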
10047 qp = &phba->sli4_hba.hdwq[qidx];
10048 lpfc_wq_destroy(phba, qp->io_wq);
10049 lpfc_cq_destroy(phba, qp->io_cq);
10050 }
10051
10052 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10053
10054 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10055 lpfc_eq_destroy(phba, eq);
10056 }
10057 }
10058
10059 kfree(phba->sli4_hba.cq_lookup);
10060 phba->sli4_hba.cq_lookup = NULL;
10061 phba->sli4_hba.cq_max = 0;
10062}
10063
/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue
 * entry (CQE). For now, this pool is used for the interrupt service routine
 * to queue the following HBA completion queue events for the worker thread
 * to process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
10080static int
10081lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
10082{
10083 struct lpfc_cq_event *cq_event;
10084 int i;
10085
10086 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
10087 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
10088 if (!cq_event)
10089 goto out_pool_create_fail;
10090 list_add_tail(&cq_event->list,
10091 &phba->sli4_hba.sp_cqe_event_pool);
10092 }
10093 return 0;
10094
10095out_pool_create_fail:
10096 lpfc_sli4_cq_event_pool_destroy(phba);
10097 return -ENOMEM;
10098}
10099
/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that this routine runs in non-interrupt context,
 * so there is no need for free pool lock protection.
 **/
10110static void
10111lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
10112{
10113 struct lpfc_cq_event *cq_event, *next_cq_event;
10114
10115 list_for_each_entry_safe(cq_event, next_cq_event,
10116 &phba->sli4_hba.sp_cqe_event_pool, list) {
10117 list_del(&cq_event->list);
10118 kfree(cq_event);
10119 }
10120}
10121
/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
10132struct lpfc_cq_event *
10133__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10134{
10135 struct lpfc_cq_event *cq_event = NULL;
10136
10137 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
10138 struct lpfc_cq_event, list);
10139 return cq_event;
10140}
10141
/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
10152struct lpfc_cq_event *
10153lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10154{
10155 struct lpfc_cq_event *cq_event;
10156 unsigned long iflags;
10157
10158 spin_lock_irqsave(&phba->hbalock, iflags);
10159 cq_event = __lpfc_sli4_cq_event_alloc(phba);
10160 spin_unlock_irqrestore(&phba->hbalock, iflags);
10161 return cq_event;
10162}
10163
/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
10172void
10173__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10174 struct lpfc_cq_event *cq_event)
10175{
10176 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10177}
10178
/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
10187void
10188lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10189 struct lpfc_cq_event *cq_event)
10190{
10191 unsigned long iflags;
10192 spin_lock_irqsave(&phba->hbalock, iflags);
10193 __lpfc_sli4_cq_event_release(phba, cq_event);
10194 spin_unlock_irqrestore(&phba->hbalock, iflags);
10195}
10196
/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free all the pending completion-queue events
 * back into the free pool for device reset.
 **/
10204static void
10205lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10206{
10207 LIST_HEAD(cq_event_list);
10208 struct lpfc_cq_event *cq_event;
10209 unsigned long iflags;
10210
10211
10212
10213
10214 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10215 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10216 &cq_event_list);
10217 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10218
10219
10220 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
10221 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10222 &cq_event_list);
10223 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
10224
10225 while (!list_empty(&cq_event_list)) {
10226 list_remove_head(&cq_event_list, cq_event,
10227 struct lpfc_cq_event, list);
10228 lpfc_sli4_cq_event_release(phba, cq_event);
10229 }
10230}
10231
/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It will destroy
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
10244int
10245lpfc_pci_function_reset(struct lpfc_hba *phba)
10246{
10247 LPFC_MBOXQ_t *mboxq;
10248 uint32_t rc = 0, if_type;
10249 uint32_t shdr_status, shdr_add_status;
10250 uint32_t rdy_chk;
10251 uint32_t port_reset = 0;
10252 union lpfc_sli4_cfg_shdr *shdr;
10253 struct lpfc_register reg_data;
10254 uint16_t devid;
10255
10256 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10257 switch (if_type) {
10258 case LPFC_SLI_INTF_IF_TYPE_0:
10259 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10260 GFP_KERNEL);
10261 if (!mboxq) {
10262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10263 "0494 Unable to allocate memory for "
10264 "issuing SLI_FUNCTION_RESET mailbox "
10265 "command\n");
10266 return -ENOMEM;
10267 }
10268
10269
10270 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10271 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10272 LPFC_SLI4_MBX_EMBED);
10273 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10274 shdr = (union lpfc_sli4_cfg_shdr *)
10275 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10276 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10277 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10278 &shdr->response);
10279 if (rc != MBX_TIMEOUT)
10280 mempool_free(mboxq, phba->mbox_mem_pool);
10281 if (shdr_status || shdr_add_status || rc) {
10282 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10283 "0495 SLI_FUNCTION_RESET mailbox "
10284 "failed with status x%x add_status x%x,"
10285 " mbx status x%x\n",
10286 shdr_status, shdr_add_status, rc);
10287 rc = -ENXIO;
10288 }
10289 break;
10290 case LPFC_SLI_INTF_IF_TYPE_2:
10291 case LPFC_SLI_INTF_IF_TYPE_6:
wait:
		/*
		 * Poll the Port Status Register and wait for RDY for
		 * up to 30 seconds.  If the port doesn't respond, treat
		 * it as an error.
		 */
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				       STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10310 phba->work_status[0] = readl(
10311 phba->sli4_hba.u.if_type2.ERR1regaddr);
10312 phba->work_status[1] = readl(
10313 phba->sli4_hba.u.if_type2.ERR2regaddr);
10314 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10315 "2890 Port not ready, port status reg "
10316 "0x%x error 1=0x%x, error 2=0x%x\n",
10317 reg_data.word0,
10318 phba->work_status[0],
10319 phba->work_status[1]);
10320 rc = -ENODEV;
10321 goto out;
10322 }
10323
		if (!port_reset) {
			/*
			 * Reset the port now
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
10333 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10334 CTRLregaddr);
10335
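			/* flush */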
10336 pci_read_config_word(phba->pcidev,
10337 PCI_DEVICE_ID, &devid);
10338
10339 port_reset = 1;
10340 msleep(20);
10341 goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10343 rc = -ENODEV;
10344 goto out;
10345 }
10346 break;
10347
10348 case LPFC_SLI_INTF_IF_TYPE_1:
10349 default:
10350 break;
10351 }
10352
10353out:
	/* Catch the not-ready port failure after a port reset. */
10355 if (rc) {
10356 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10357 "3317 HBA not functional: IP Reset Failed "
10358 "try: echo fw_reset > board_mode\n");
10359 rc = -ENODEV;
10360 }
10361
10362 return rc;
10363}
10364
/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
10376static int
10377lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10378{
10379 struct pci_dev *pdev = phba->pcidev;
10380 unsigned long bar0map_len, bar1map_len, bar2map_len;
10381 int error;
10382 uint32_t if_type;
10383
10384 if (!pdev)
10385 return -ENODEV;
10386
	/* Set the device DMA mask size */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
10398 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10399 &phba->sli4_hba.sli_intf.word0)) {
10400 return -ENODEV;
10401 }
10402
10403
10404 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10405 LPFC_SLI_INTF_VALID) {
10406 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10407 "2894 SLI_INTF reg contents invalid "
10408 "sli_intf reg 0x%x\n",
10409 phba->sli4_hba.sli_intf.word0);
10410 return -ENODEV;
10411 }
10412
10413 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of the
	 * SLI4 device.
	 */
10420 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10421 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10422 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10423
10424
10425
10426
10427
10428 phba->sli4_hba.conf_regs_memmap_p =
10429 ioremap(phba->pci_bar0_map, bar0map_len);
10430 if (!phba->sli4_hba.conf_regs_memmap_p) {
10431 dev_printk(KERN_ERR, &pdev->dev,
10432 "ioremap failed for SLI4 PCI config "
10433 "registers.\n");
10434 return -ENODEV;
10435 }
10436 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10437
10438 lpfc_sli4_bar0_register_memmap(phba, if_type);
10439 } else {
10440 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10441 bar0map_len = pci_resource_len(pdev, 1);
10442 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10443 dev_printk(KERN_ERR, &pdev->dev,
10444 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10445 return -ENODEV;
10446 }
10447 phba->sli4_hba.conf_regs_memmap_p =
10448 ioremap(phba->pci_bar0_map, bar0map_len);
10449 if (!phba->sli4_hba.conf_regs_memmap_p) {
10450 dev_printk(KERN_ERR, &pdev->dev,
10451 "ioremap failed for SLI4 PCI config "
10452 "registers.\n");
10453 return -ENODEV;
10454 }
10455 lpfc_sli4_bar0_register_memmap(phba, if_type);
10456 }
10457
10458 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10459 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
			/*
			 * Map SLI4 if type 0 HBA Control Register base to a
			 * kernel virtual address and setup the registers.
			 */
10464 phba->pci_bar1_map = pci_resource_start(pdev,
10465 PCI_64BIT_BAR2);
10466 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10467 phba->sli4_hba.ctrl_regs_memmap_p =
10468 ioremap(phba->pci_bar1_map,
10469 bar1map_len);
10470 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10471 dev_err(&pdev->dev,
10472 "ioremap failed for SLI4 HBA "
10473 "control registers.\n");
10474 error = -ENOMEM;
10475 goto out_iounmap_conf;
10476 }
10477 phba->pci_bar2_memmap_p =
10478 phba->sli4_hba.ctrl_regs_memmap_p;
10479 lpfc_sli4_bar1_register_memmap(phba, if_type);
10480 } else {
10481 error = -ENOMEM;
10482 goto out_iounmap_conf;
10483 }
10484 }
10485
10486 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10487 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
		/*
		 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
10492 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10493 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10494 phba->sli4_hba.drbl_regs_memmap_p =
10495 ioremap(phba->pci_bar1_map, bar1map_len);
10496 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10497 dev_err(&pdev->dev,
10498 "ioremap failed for SLI4 HBA doorbell registers.\n");
10499 error = -ENOMEM;
10500 goto out_iounmap_conf;
10501 }
10502 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10503 lpfc_sli4_bar1_register_memmap(phba, if_type);
10504 }
10505
10506 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10507 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
			/*
			 * Map SLI4 if type 0 HBA Doorbell Register base to
			 * a kernel virtual address and setup the registers.
			 */
10512 phba->pci_bar2_map = pci_resource_start(pdev,
10513 PCI_64BIT_BAR4);
10514 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10515 phba->sli4_hba.drbl_regs_memmap_p =
10516 ioremap(phba->pci_bar2_map,
10517 bar2map_len);
10518 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10519 dev_err(&pdev->dev,
10520 "ioremap failed for SLI4 HBA"
10521 " doorbell registers.\n");
10522 error = -ENOMEM;
10523 goto out_iounmap_ctrl;
10524 }
10525 phba->pci_bar4_memmap_p =
10526 phba->sli4_hba.drbl_regs_memmap_p;
10527 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10528 if (error)
10529 goto out_iounmap_all;
10530 } else {
10531 error = -ENOMEM;
10532 goto out_iounmap_all;
10533 }
10534 }
10535
10536 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10537 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
		/*
		 * Map SLI4 if type 6 HBA DPP Register base to a kernel
		 * virtual address and setup the registers.
		 */
10542 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10543 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10544 phba->sli4_hba.dpp_regs_memmap_p =
10545 ioremap(phba->pci_bar2_map, bar2map_len);
10546 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10547 dev_err(&pdev->dev,
10548 "ioremap failed for SLI4 HBA dpp registers.\n");
10549 error = -ENOMEM;
10550 goto out_iounmap_ctrl;
10551 }
10552 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10553 }
10554
10555
10556 switch (if_type) {
10557 case LPFC_SLI_INTF_IF_TYPE_0:
10558 case LPFC_SLI_INTF_IF_TYPE_2:
10559 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10560 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10561 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10562 break;
10563 case LPFC_SLI_INTF_IF_TYPE_6:
10564 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10565 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10566 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10567 break;
10568 default:
10569 break;
10570 }
10571
10572 return 0;
10573
10574out_iounmap_all:
10575 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10576out_iounmap_ctrl:
10577 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10578out_iounmap_conf:
10579 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10580
10581 return error;
10582}
10583
/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
10591static void
10592lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10593{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10596
10597 switch (if_type) {
10598 case LPFC_SLI_INTF_IF_TYPE_0:
10599 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10600 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10601 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10602 break;
10603 case LPFC_SLI_INTF_IF_TYPE_2:
10604 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10605 break;
10606 case LPFC_SLI_INTF_IF_TYPE_6:
10607 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10608 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10609 if (phba->sli4_hba.dpp_regs_memmap_p)
10610 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
10611 break;
10612 case LPFC_SLI_INTF_IF_TYPE_1:
10613 default:
10614 dev_printk(KERN_ERR, &phba->pcidev->dev,
10615 "FATAL - unsupported SLI4 interface type - %d\n",
10616 if_type);
10617 break;
10618 }
10619}
10620
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
10632static int
10633lpfc_sli_enable_msix(struct lpfc_hba *phba)
10634{
10635 int rc;
10636 LPFC_MBOXQ_t *pmb;
10637
10638
10639 rc = pci_alloc_irq_vectors(phba->pcidev,
10640 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10641 if (rc < 0) {
10642 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10643 "0420 PCI enable MSI-X failed (%d)\n", rc);
10644 goto vec_fail_out;
10645 }

	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
10652 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
10653 &lpfc_sli_sp_intr_handler, 0,
10654 LPFC_SP_DRIVER_HANDLER_NAME, phba);
10655 if (rc) {
10656 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10657 "0421 MSI-X slow-path request_irq failed "
10658 "(%d)\n", rc);
10659 goto msi_fail_out;
10660 }
10661
	/* vector-1 is associated to fast-path handler */
10663 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
10664 &lpfc_sli_fp_intr_handler, 0,
10665 LPFC_FP_DRIVER_HANDLER_NAME, phba);
10666
10667 if (rc) {
10668 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10669 "0429 MSI-X fast-path request_irq failed "
10670 "(%d)\n", rc);
10671 goto irq_fail_out;
10672 }
10673
	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
10677 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10678
10679 if (!pmb) {
10680 rc = -ENOMEM;
10681 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10682 "0474 Unable to allocate memory for issuing "
10683 "MBOX_CONFIG_MSI command\n");
10684 goto mem_fail_out;
10685 }
10686 rc = lpfc_config_msi(phba, pmb);
10687 if (rc)
10688 goto mbx_fail_out;
10689 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10690 if (rc != MBX_SUCCESS) {
10691 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10692 "0351 Config MSI mailbox command failed, "
10693 "mbxCmd x%x, mbxStatus x%x\n",
10694 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10695 goto mbx_fail_out;
10696 }
10697
10698
10699 mempool_free(pmb, phba->mbox_mem_pool);
10700 return rc;
10701
mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);
10717
10718vec_fail_out:
10719 return rc;
10720}
10721
/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler, which
 * is done in this function.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
10736static int
10737lpfc_sli_enable_msi(struct lpfc_hba *phba)
10738{
10739 int rc;
10740
10741 rc = pci_enable_msi(phba->pcidev);
10742 if (!rc)
10743 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10744 "0462 PCI enable MSI mode success.\n");
10745 else {
10746 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10747 "0471 PCI enable MSI mode failed (%d)\n", rc);
10748 return rc;
10749 }
10750
10751 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10752 0, LPFC_DRIVER_NAME, phba);
10753 if (rc) {
10754 pci_disable_msi(phba->pcidev);
10755 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10756 "0478 MSI request_irq failed (%d)\n", rc);
10757 }
10758 return rc;
10759}
10760
/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured to the driver, the driver
 * will try to fall back from the configured interrupt mode to an interrupt
 * mode which is supported by the platform, kernel, and device, in the order
 * of: MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
10778static uint32_t
10779lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10780{
10781 uint32_t intr_mode = LPFC_INTR_ERROR;
10782 int retval;
10783
10784
10785 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10786 if (retval)
10787 return intr_mode;
10788 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
10789
	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
10819 }
10820 return intr_mode;
10821}
10822
/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
10832static void
10833lpfc_sli_disable_intr(struct lpfc_hba *phba)
10834{
10835 int nr_irqs, i;
10836
10837 if (phba->intr_type == MSIX)
10838 nr_irqs = LPFC_MSIX_VECTORS;
10839 else
10840 nr_irqs = 1;
10841
10842 for (i = 0; i < nr_irqs; i++)
10843 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10844 pci_free_irq_vectors(phba->pcidev);
10845
10846
10847 phba->intr_type = NONE;
10848 phba->sli.slistat.sli_intr = 0;
10849}
10850
/**
 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ vector index or Hardware Queue index
 * @match: LPFC_FIND_BY_EQ = match by EQ
 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
 * Return the CPU that matches the selection criteria
 **/
10859static uint16_t
10860lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10861{
10862 struct lpfc_vector_map_info *cpup;
10863 int cpu;
10864
	/* Loop through all CPUs */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* If we are matching by EQ, there may be multiple CPUs using
		 * the same vector, so select the one with
		 * LPFC_CPU_FIRST_IRQ set.
		 */
10873 if ((match == LPFC_FIND_BY_EQ) &&
10874 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10875 (cpup->eq == id))
10876 return cpu;
10877
10878
10879 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10880 return cpu;
10881 }
10882 return 0;
10883}
10884
10885#ifdef CONFIG_X86
/**
 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU map index
 * @phys_id: CPU package physical id
 * @core_id: CPU core id
 **/
10893static int
10894lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10895 uint16_t phys_id, uint16_t core_id)
10896{
10897 struct lpfc_vector_map_info *cpup;
10898 int idx;
10899
10900 for_each_present_cpu(idx) {
10901 cpup = &phba->sli4_hba.cpu_map[idx];
10902
10903 if ((cpup->phys_id == phys_id) &&
10904 (cpup->core_id == core_id) &&
10905 (cpu != idx))
10906 return 1;
10907 }
10908 return 0;
10909}
10910#endif
10911
/**
 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: index for eq and irq vector
 * @flag: flags to set for vector_map structure
 * @cpu: cpu used to index vector_map structure
 *
 * The routine assigns eq info into vector_map structure
 **/
10921static inline void
10922lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10923 unsigned int cpu)
10924{
10925 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10926 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10927
10928 cpup->eq = eqidx;
10929 cpup->flag |= flag;
10930
10931 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10932 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
10933 cpu, eqhdl->irq, cpup->eq, cpup->flag);
10934}
10935
/**
 * lpfc_cpu_map_array_init - Initialize cpu_map structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the cpu maps.
 **/
10942static void
10943lpfc_cpu_map_array_init(struct lpfc_hba *phba)
10944{
10945 struct lpfc_vector_map_info *cpup;
10946 struct lpfc_eq_intr_info *eqi;
10947 int cpu;
10948
10949 for_each_possible_cpu(cpu) {
10950 cpup = &phba->sli4_hba.cpu_map[cpu];
10951 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10952 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10953 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10954 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10955 cpup->flag = 0;
10956 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
10957 INIT_LIST_HEAD(&eqi->list);
10958 eqi->icnt = 0;
10959 }
10960}
10961
/**
 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the hba_eq_hdl array.
 **/
10968static void
10969lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
10970{
10971 struct lpfc_hba_eq_hdl *eqhdl;
10972 int i;
10973
10974 for (i = 0; i < phba->cfg_irq_chann; i++) {
10975 eqhdl = lpfc_get_eq_hdl(i);
10976 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
10977 eqhdl->phba = phba;
10978 }
10979}
10980
/**
 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of msix vectors allocated.
 *
 * The routine will figure out the CPU affinity assignment for every
 * MSI-X vector allocated for the HBA.
 * In addition, the routine will assign an EQ and a Hardware Queue to every
 * CPU so that I/O submission and completion can stay CPU-local.
 **/
10991static void
10992lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10993{
10994 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10995 int max_phys_id, min_phys_id;
10996 int max_core_id, min_core_id;
10997 struct lpfc_vector_map_info *cpup;
10998 struct lpfc_vector_map_info *new_cpup;
10999#ifdef CONFIG_X86
11000 struct cpuinfo_x86 *cpuinfo;
11001#endif
11002#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11003 struct lpfc_hdwq_stat *c_stat;
11004#endif
11005
11006 max_phys_id = 0;
11007 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
11008 max_core_id = 0;
11009 min_core_id = LPFC_VECTOR_MAP_EMPTY;
11010
11011
11012 for_each_present_cpu(cpu) {
11013 cpup = &phba->sli4_hba.cpu_map[cpu];
11014#ifdef CONFIG_X86
11015 cpuinfo = &cpu_data(cpu);
11016 cpup->phys_id = cpuinfo->phys_proc_id;
11017 cpup->core_id = cpuinfo->cpu_core_id;
11018 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
11019 cpup->flag |= LPFC_CPU_MAP_HYPER;
11020#else
11021
11022 cpup->phys_id = 0;
11023 cpup->core_id = cpu;
11024#endif
11025
11026 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11027 "3328 CPU %d physid %d coreid %d flag x%x\n",
11028 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
11029
11030 if (cpup->phys_id > max_phys_id)
11031 max_phys_id = cpup->phys_id;
11032 if (cpup->phys_id < min_phys_id)
11033 min_phys_id = cpup->phys_id;
11034
11035 if (cpup->core_id > max_core_id)
11036 max_core_id = cpup->core_id;
11037 if (cpup->core_id < min_core_id)
11038 min_core_id = cpup->core_id;
11039 }
11040
	/* After looking at each irq vector assigned to this pcidev, it's
	 * possible to see that not ALL CPUs have been accounted for.
	 * Next we will set any unassigned (unaffinitized) cpu map
	 * entries to an IRQ on the same phys_id.
	 */
	first_cpu = cpumask_first(cpu_present_mask);
	start_cpu = first_cpu;
11048
11049 for_each_present_cpu(cpu) {
11050 cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this CPU entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark CPU as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on the SAME
			 * phys_id as cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
11062 new_cpu = start_cpu;
11063 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11064 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11065 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11066 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
11067 (new_cpup->phys_id == cpup->phys_id))
11068 goto found_same;
11069 new_cpu = cpumask_next(
11070 new_cpu, cpu_present_mask);
11071 if (new_cpu == nr_cpumask_bits)
11072 new_cpu = first_cpu;
11073 }
			/* At this point, we leave the CPU as unassigned */
			continue;
found_same:
			/* We found a matching phys_id, so copy the IRQ info */
			cpup->eq = new_cpup->eq;
11079
			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
11084 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11085 if (start_cpu == nr_cpumask_bits)
11086 start_cpu = first_cpu;
11087
11088 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11089 "3337 Set Affinity: CPU %d "
11090 "eq %d from peer cpu %d same "
11091 "phys_id (%d)\n",
11092 cpu, cpup->eq, new_cpu,
11093 cpup->phys_id);
11094 }
11095 }
11096
	/* Set any remaining unassigned cpu map entries to an IRQ on ANY
	 * phys_id this time.
	 */
	start_cpu = first_cpu;
11099
11100 for_each_present_cpu(cpu) {
11101 cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark it as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on ANY phys_id
			 * as the cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
11113 new_cpu = start_cpu;
11114 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11115 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11116 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11117 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
11118 goto found_any;
11119 new_cpu = cpumask_next(
11120 new_cpu, cpu_present_mask);
11121 if (new_cpu == nr_cpumask_bits)
11122 new_cpu = first_cpu;
11123 }
			/* We should never leave an entry unassigned */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3339 Set Affinity: CPU %d "
					"eq %d UNASSIGNED\n",
					cpu, cpup->eq);
11129 continue;
found_any:
			/* We found an available entry, copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
11138 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11139 if (start_cpu == nr_cpumask_bits)
11140 start_cpu = first_cpu;
11141
11142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11143 "3338 Set Affinity: CPU %d "
11144 "eq %d from peer cpu %d (%d/%d)\n",
11145 cpu, cpup->eq, new_cpu,
11146 new_cpup->phys_id, new_cpup->core_id);
11147 }
11148 }
11149

	/* Assign hdwq indices that are unique across all cpus in the map
	 * that are also FIRST_CPUs.
	 */
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Only FIRST IRQs get a hdwq index assignment. */
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
		cpup->hdwq = idx;
		idx++;
11164 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11165 "3333 Set Affinity: CPU %d (phys %d core %d): "
11166 "hdwq %d eq %d flg x%x\n",
11167 cpu, cpup->phys_id, cpup->core_id,
11168 cpup->hdwq, cpup->eq, cpup->flag);
11169 }
11170
	/* Assign a hdwq to each remaining (non-FIRST_IRQ) cpu map entry:
	 * hand out any hdwq indices that are still unused, then share a
	 * hdwq with another CPU, preferring the same phys_id / core_id.
	 */
	next_idx = idx;
	start_cpu = 0;
	idx = 0;
11181 for_each_present_cpu(cpu) {
11182 cpup = &phba->sli4_hba.cpu_map[cpu];

		/* FIRST cpus are already mapped. */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* If the cfg_irq_chann < cfg_hdw_queue, there are still
		 * unused hdwq indices; hand those out first.
		 */
		if (next_idx < phba->cfg_hdw_queue) {
11193 cpup->hdwq = next_idx;
11194 next_idx++;
11195 continue;
11196 }

		/* Otherwise share a hdwq: look first for a CPU on the
		 * same phys_id and core_id that already has one.
		 */
		new_cpu = start_cpu;
11204 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11205 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11206 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11207 new_cpup->phys_id == cpup->phys_id &&
11208 new_cpup->core_id == cpup->core_id) {
11209 goto found_hdwq;
11210 }
11211 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11212 if (new_cpu == nr_cpumask_bits)
11213 new_cpu = first_cpu;
11214 }

		/* If we can't match both phys_id and core_id,
		 * settle for just a phys_id match.
		 */
		new_cpu = start_cpu;
11220 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11221 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11222 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11223 new_cpup->phys_id == cpup->phys_id)
11224 goto found_hdwq;
11225
11226 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11227 if (new_cpu == nr_cpumask_bits)
11228 new_cpu = first_cpu;
11229 }

		/* Otherwise just round robin on cfg_hdw_queue */
		cpup->hdwq = idx % phba->cfg_hdw_queue;
11233 idx++;
11234 goto logit;
11235 found_hdwq:
11236
11237 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11238 if (start_cpu == nr_cpumask_bits)
11239 start_cpu = first_cpu;
11240 cpup->hdwq = new_cpup->hdwq;
11241 logit:
11242 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11243 "3335 Set Affinity: CPU %d (phys %d core %d): "
11244 "hdwq %d eq %d flg x%x\n",
11245 cpu, cpup->phys_id, cpup->core_id,
11246 cpup->hdwq, cpup->eq, cpup->flag);
11247 }
11248
	/*
	 * Initialize the cpu_map slots for not-present cpus in case
	 * a cpu is hot-added. They will get a hdwq round robin.
	 */
	idx = 0;
11254 for_each_possible_cpu(cpu) {
11255 cpup = &phba->sli4_hba.cpu_map[cpu];
11256#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11257 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11258 c_stat->hdwq_no = cpup->hdwq;
11259#endif
11260 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11261 continue;
11262
11263 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
11264#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11265 c_stat->hdwq_no = cpup->hdwq;
11266#endif
11267 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11268 "3340 Set Affinity: not present "
11269 "CPU %d hdwq %d\n",
11270 cpu, cpup->hdwq);
11271 }
11272
11273
11274
11275
11276 return;
11277}
11278
11279
/**
 * lpfc_cpuhp_get_eq - Collect the EQs serviced only by the offlining cpu
 * @phba:   pointer to lpfc hba data structure.
 * @cpu:    cpu going offline
 * @eqlist: eq list to append to
 **/
11286static int
11287lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11288 struct list_head *eqlist)
11289{
11290 const struct cpumask *maskp;
11291 struct lpfc_queue *eq;
11292 struct cpumask *tmp;
11293 u16 idx;
11294
11295 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11296 if (!tmp)
11297 return -ENOMEM;
11298
11299 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11300 maskp = pci_irq_get_affinity(phba->pcidev, idx);
11301 if (!maskp)
11302 continue;

		/*
		 * If the irq is not affinitized to the cpu going offline,
		 * we don't need to poll the eq attached to it.
		 */
		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
			continue;

		/* Get the cpus that are online and are affinitized to this
		 * irq vector. If the count is more than 1, cpuhp is not going
		 * to shut down the vector, since this cpu has not gone
		 * offline yet.
		 */
		cpumask_and(tmp, maskp, cpu_online_mask);
		if (cpumask_weight(tmp) > 1)
			continue;

		/* Now that we have an irq to shut down, get the eq mapped to
		 * this irq. Note: multiple hdwqs in software can share an eq,
		 * but eventually only one eq will be mapped to this vector.
		 */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		list_add(&eq->_poll_list, eqlist);
11327 }
11328 kfree(tmp);
11329 return 0;
11330}
11331
11332static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11333{
11334 if (phba->sli_rev != LPFC_SLI_REV4)
11335 return;
11336
11337 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11338 &phba->cpuhp);
11339
	/*
	 * Unregistering the instance doesn't stop the polling
	 * timer. Wait for the poll timer to retire.
	 */
	synchronize_rcu();
11344 del_timer_sync(&phba->cpuhp_poll_timer);
11345}
11346
11347static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11348{
11349 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11350 return;
11351
11352 __lpfc_cpuhp_remove(phba);
11353}
11354
11355static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11356{
11357 if (phba->sli_rev != LPFC_SLI_REV4)
11358 return;
11359
11360 rcu_read_lock();
11361
11362 if (!list_empty(&phba->poll_list))
11363 mod_timer(&phba->cpuhp_poll_timer,
11364 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
11365
11366 rcu_read_unlock();
11367
11368 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11369 &phba->cpuhp);
11370}
11371
11372static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11373{
11374 if (phba->pport->load_flag & FC_UNLOADING) {
11375 *retval = -EAGAIN;
11376 return true;
11377 }
11378
11379 if (phba->sli_rev != LPFC_SLI_REV4) {
11380 *retval = 0;
11381 return true;
11382 }
11383
11384
11385 return false;
11386}
11387
11388
/**
 * lpfc_irq_set_aff - set IRQ affinity
 * @eqhdl: EQ handle
 * @cpu: cpu to set affinity
 **/
11394static inline void
11395lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
11396{
11397 cpumask_clear(&eqhdl->aff_mask);
11398 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
11399 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11400 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11401}
11402
11403
/**
 * lpfc_irq_clear_aff - clear IRQ affinity
 * @eqhdl: EQ handle
 **/
11408static inline void
11409lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
11410{
11411 cpumask_clear(&eqhdl->aff_mask);
11412 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11413}
11414
11415
/**
 * lpfc_irq_rebalance - rebalance IRQ affinity according to cpuhp event
 * @phba: pointer to HBA context object.
 * @cpu: cpu going offline/online
 * @offline: true, cpu is going offline. false, cpu is coming online.
 *
 * If cpu is going offline, we'll try our best effort to find the next
 * online cpu on the phba's original mask and migrate all offlining IRQ
 * affinities.
 *
 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
 *
 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
 *       PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
 **/
11431static void
11432lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
11433{
11434 struct lpfc_vector_map_info *cpup;
11435 struct cpumask *aff_mask;
11436 unsigned int cpu_select, cpu_next, idx;
11437 const struct cpumask *orig_mask;
11438
11439 if (phba->irq_chann_mode == NORMAL_MODE)
11440 return;
11441
11442 orig_mask = &phba->sli4_hba.irq_aff_mask;
11443
11444 if (!cpumask_test_cpu(cpu, orig_mask))
11445 return;
11446
11447 cpup = &phba->sli4_hba.cpu_map[cpu];
11448
11449 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11450 return;
11451
	if (offline) {
		/* Find next online CPU on original mask */
		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);

		/* Found a valid CPU */
		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
			/* Go through each eqhdl and ensure the offlining
			 * cpu's aff_mask is migrated
			 */
11462 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11463 aff_mask = lpfc_get_aff_mask(idx);
11464
11465
11466 if (cpumask_test_cpu(cpu, aff_mask))
11467 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
11468 cpu_select);
11469 }
11470 } else {
11471
11472 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
11473 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
11474 }
11475 } else {
11476
11477 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
11478 }
11479}
11480
11481static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11482{
11483 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11484 struct lpfc_queue *eq, *next;
11485 LIST_HEAD(eqlist);
11486 int retval;
11487
11488 if (!phba) {
11489 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11490 return 0;
11491 }
11492
11493 if (__lpfc_cpuhp_checks(phba, &retval))
11494 return retval;
11495
11496 lpfc_irq_rebalance(phba, cpu, true);
11497
11498 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11499 if (retval)
11500 return retval;
11501
	/* start polling on these eq's */
	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11504 list_del_init(&eq->_poll_list);
11505 lpfc_sli4_start_polling(eq);
11506 }
11507
11508 return 0;
11509}
11510
11511static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11512{
11513 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11514 struct lpfc_queue *eq, *next;
11515 unsigned int n;
11516 int retval;
11517
11518 if (!phba) {
11519 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11520 return 0;
11521 }
11522
11523 if (__lpfc_cpuhp_checks(phba, &retval))
11524 return retval;
11525
11526 lpfc_irq_rebalance(phba, cpu, false);
11527
11528 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11529 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11530 if (n == cpu)
11531 lpfc_sli4_stop_polling(eq);
11532 }
11533
11534 return 0;
11535}
11536
11537
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. It also allocates the MSI-X vectors and maps
 * them to cpu affinity. Depending on the irq_chann_mode, the vectors are
 * either spread across all present CPUs by the kernel (PCI_IRQ_AFFINITY)
 * or restricted to the CPUs in phba->sli4_hba.irq_aff_mask, in which case
 * the driver assigns the affinity itself.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
11565static int
11566lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11567{
11568 int vectors, rc, index;
11569 char *name;
11570 const struct cpumask *aff_mask = NULL;
11571 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
11572 struct lpfc_vector_map_info *cpup;
11573 struct lpfc_hba_eq_hdl *eqhdl;
11574 const struct cpumask *maskp;
11575 unsigned int flags = PCI_IRQ_MSIX;
11576
11577
11578 vectors = phba->cfg_irq_chann;
11579
11580 if (phba->irq_chann_mode != NORMAL_MODE)
11581 aff_mask = &phba->sli4_hba.irq_aff_mask;
11582
11583 if (aff_mask) {
11584 cpu_cnt = cpumask_weight(aff_mask);
11585 vectors = min(phba->cfg_irq_chann, cpu_cnt);

		/* Start at the first CPU in aff_mask; cpu_select tracks the
		 * next online CPU that will receive IRQ affinity.
		 */
11590 cpu = cpumask_first(aff_mask);
11591 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11592 } else {
11593 flags |= PCI_IRQ_AFFINITY;
11594 }
11595
11596 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
11597 if (rc < 0) {
11598 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11599 "0484 PCI enable MSI-X failed (%d)\n", rc);
11600 goto vec_fail_out;
11601 }
11602 vectors = rc;
11603
	/* Assign MSI-X vectors to interrupt handlers */
11605 for (index = 0; index < vectors; index++) {
11606 eqhdl = lpfc_get_eq_hdl(index);
11607 name = eqhdl->handler_name;
11608 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11609 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
11610 LPFC_DRIVER_HANDLER_NAME"%d", index);
11611
11612 eqhdl->idx = index;
11613 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11614 &lpfc_sli4_hba_intr_handler, 0,
11615 name, eqhdl);
11616 if (rc) {
11617 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11618 "0486 MSI-X fast-path (%d) "
11619 "request_irq failed (%d)\n", index, rc);
11620 goto cfg_fail_out;
11621 }
11622
11623 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
11624
		if (aff_mask) {
			/* If found a neighboring online cpu, set affinity */
			if (cpu_select < nr_cpu_ids)
				lpfc_irq_set_aff(eqhdl, cpu_select);

			/* Assign EQ to cpu_map */
			lpfc_assign_eq_map_info(phba, index,
						LPFC_CPU_FIRST_IRQ,
						cpu);

			/* Iterate to next offline or online cpu in aff_mask */
			cpu = cpumask_next(cpu, aff_mask);

			/* Find next online cpu in aff_mask to set affinity */
			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11640 } else if (vectors == 1) {
11641 cpu = cpumask_first(cpu_present_mask);
11642 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11643 cpu);
11644 } else {
			maskp = pci_irq_get_affinity(phba->pcidev, index);

			/* Loop through all CPUs associated with vector index */
			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
				cpup = &phba->sli4_hba.cpu_map[cpu];

				/* If this is the first CPU that's assigned to
				 * this vector, set LPFC_CPU_FIRST_IRQ.
				 *
				 * With certain platforms it's possible that irq
				 * vectors are affinitized to all the cpus.
				 * This can result in each cpu_map.eq being set
				 * to the last vector, resulting in overwrite
				 * of all the previous cpu_map.eq.  Ensure that
				 * each vector receives a place in cpu_map.
				 * A later call to lpfc_cpu_affinity_check will
				 * ensure we are nicely balanced out.
				 */
				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
11664 continue;
11665 lpfc_assign_eq_map_info(phba, index,
11666 LPFC_CPU_FIRST_IRQ,
11667 cpu);
11668 break;
11669 }
11670 }
11671 }
11672
11673 if (vectors != phba->cfg_irq_chann) {
11674 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11675 "3238 Reducing IO channels to match number of "
11676 "MSI-X vectors, requested %d got %d\n",
11677 phba->cfg_irq_chann, vectors);
11678 if (phba->cfg_irq_chann > vectors)
11679 phba->cfg_irq_chann = vectors;
11680 }
11681
11682 return rc;
11683
cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 0; index--) {
		eqhdl = lpfc_get_eq_hdl(index);
		lpfc_irq_clear_aff(eqhdl);
		irq_set_affinity_hint(eqhdl->irq, NULL);
		free_irq(eqhdl->irq, eqhdl);
	}

	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);
11695
11696vec_fail_out:
11697 return rc;
11698}
11699
11700
11701
11702
11703
11704
11705
11706
11707
11708
11709
11710
11711
11712
11713
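/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec.  pci_alloc_irq_vectors() is called to enable the
 * single MSI vector, and request_irq() registers the driver's interrupt
 * handler for it.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/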
11714static int
11715lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11716{
11717 int rc, index;
11718 unsigned int cpu;
11719 struct lpfc_hba_eq_hdl *eqhdl;
11720
11721 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11722 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11723 if (rc > 0)
11724 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11725 "0487 PCI enable MSI mode success.\n");
11726 else {
11727 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11728 "0488 PCI enable MSI mode failed (%d)\n", rc);
11729 return rc ? rc : -1;
11730 }
11731
11732 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11733 0, LPFC_DRIVER_NAME, phba);
11734 if (rc) {
11735 pci_free_irq_vectors(phba->pcidev);
11736 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11737 "0490 MSI request_irq failed (%d)\n", rc);
11738 return rc;
11739 }
11740
11741 eqhdl = lpfc_get_eq_hdl(0);
11742 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11743
11744 cpu = cpumask_first(cpu_present_mask);
11745 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11746
11747 for (index = 0; index < phba->cfg_irq_chann; index++) {
11748 eqhdl = lpfc_get_eq_hdl(index);
11749 eqhdl->idx = index;
11750 }
11751
11752 return 0;
11753}
11754
11771
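/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (0, 1, or 2).
 *
 * This routine is invoked to enable device interrupt and associate the
 * driver's interrupt handler(s) with interrupt vector(s) on a device with
 * SLI-4 interface spec.  Depending on the configured interrupt mode, the
 * driver falls back to a mode supported by the platform, kernel, and device
 * in the order: MSI-X -> MSI -> IRQ.
 *
 * Return codes
 * Interrupt mode (2, 1, 0) - successful
 * LPFC_INTR_ERROR - error
 **/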
11772static uint32_t
11773lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11774{
11775 uint32_t intr_mode = LPFC_INTR_ERROR;
11776 int retval, idx;
11777
	if (cfg_mode == 2) {
		/* First, try to enable MSI-X interrupt mode */
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}
11791
11792
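	/* Fallback to MSI if MSI-X initialization failed */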
11793 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11794 retval = lpfc_sli4_enable_msi(phba);
11795 if (!retval) {
11796
11797 phba->intr_type = MSI;
11798 intr_mode = 1;
11799 }
11800 }
11801
11802
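	/* Fallback to INTx if both MSI-X/MSI initialization failed */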
11803 if (phba->intr_type == NONE) {
11804 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11805 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11806 if (!retval) {
11807 struct lpfc_hba_eq_hdl *eqhdl;
11808 unsigned int cpu;
11809
11810
11811 phba->intr_type = INTx;
11812 intr_mode = 0;
11813
11814 eqhdl = lpfc_get_eq_hdl(0);
11815 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11816
11817 cpu = cpumask_first(cpu_present_mask);
11818 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11819 cpu);
11820 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11821 eqhdl = lpfc_get_eq_hdl(idx);
11822 eqhdl->idx = idx;
11823 }
11824 }
11825 }
11826 return intr_mode;
11827}
11828
11829
11830
11831
11832
11833
11834
11835
11836
11837
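/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) on a device
 * with SLI-4 interface spec.  Depending on the interrupt mode, it frees
 * the registered IRQ(s) and releases the allocated interrupt vectors.
 **/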
11838static void
11839lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11840{
11841
11842 if (phba->intr_type == MSIX) {
11843 int index;
11844 struct lpfc_hba_eq_hdl *eqhdl;
11845
11846
11847 for (index = 0; index < phba->cfg_irq_chann; index++) {
11848 eqhdl = lpfc_get_eq_hdl(index);
11849 lpfc_irq_clear_aff(eqhdl);
11850 irq_set_affinity_hint(eqhdl->irq, NULL);
11851 free_irq(eqhdl->irq, eqhdl);
11852 }
11853 } else {
11854 free_irq(phba->pcidev->irq, phba);
11855 }
11856
11857 pci_free_irq_vectors(phba->pcidev);
11858
11859
11860 phba->intr_type = NONE;
11861 phba->sli.slistat.sli_intr = 0;
11862}
11863
11864
11865
11866
11867
11868
11869
11870
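/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps on
 * a device with SLI-3 interface spec: it marks the port as unloading, stops
 * the HBA timers, brings the SLI layer down, restarts the board, and
 * disables the device interrupt.
 **/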
11871static void
11872lpfc_unset_hba(struct lpfc_hba *phba)
11873{
11874 struct lpfc_vport *vport = phba->pport;
11875 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11876
11877 spin_lock_irq(shost->host_lock);
11878 vport->load_flag |= FC_UNLOADING;
11879 spin_unlock_irq(shost->host_lock);
11880
11881 kfree(phba->vpi_bmask);
11882 kfree(phba->vpi_ids);
11883
11884 lpfc_stop_hba_timers(phba);
11885
11886 phba->pport->work_port_events = 0;
11887
11888 lpfc_sli_hba_down(phba);
11889
11890 lpfc_sli_brdrestart(phba);
11891
11892 lpfc_sli_disable_intr(phba);
11893
11894 return;
11895}
11896
11897
11898
11899
11900
11901
11902
11903
11904
11905
11906
11907
11908
11909
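/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of the device's XRI exchange busy I/Os.  It polls the abort lists for
 * outstanding ELS, IO, and NVMET XRIs, sleeping LPFC_XRI_EXCH_BUSY_WAIT_T1
 * between checks at first and LPFC_XRI_EXCH_BUSY_WAIT_T2 once
 * LPFC_XRI_EXCH_BUSY_WAIT_TMO has elapsed, logging each list that is still
 * busy past the timeout.
 **/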
11910static void
11911lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11912{
11913 struct lpfc_sli4_hdw_queue *qp;
11914 int idx, ccnt;
11915 int wait_time = 0;
11916 int io_xri_cmpl = 1;
11917 int nvmet_xri_cmpl = 1;
11918 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11919
11920
11921
11922
11923
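	/* Driver just aborted IOs during the hba_unset process.  Pause
	 * here to give the HBA time to complete the IO and get entries
	 * into the abts lists.
	 */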
11924 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11925
11926
11927 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11928 lpfc_nvme_wait_for_io_drain(phba);
11929
11930 ccnt = 0;
11931 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11932 qp = &phba->sli4_hba.hdwq[idx];
11933 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11934 if (!io_xri_cmpl)
11935 ccnt++;
11936 }
11937 if (ccnt)
11938 io_xri_cmpl = 0;
11939
11940 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11941 nvmet_xri_cmpl =
11942 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11943 }
11944
11945 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
11946 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
11947 if (!nvmet_xri_cmpl)
11948 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11949 "6424 NVMET XRI exchange busy "
11950 "wait time: %d seconds.\n",
11951 wait_time/1000);
11952 if (!io_xri_cmpl)
11953 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11954 "6100 IO XRI exchange busy "
11955 "wait time: %d seconds.\n",
11956 wait_time/1000);
11957 if (!els_xri_cmpl)
11958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11959 "2878 ELS XRI exchange busy "
11960 "wait time: %d seconds.\n",
11961 wait_time/1000);
11962 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
11963 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
11964 } else {
11965 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
11966 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
11967 }
11968
11969 ccnt = 0;
11970 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11971 qp = &phba->sli4_hba.hdwq[idx];
11972 io_xri_cmpl = list_empty(
11973 &qp->lpfc_abts_io_buf_list);
11974 if (!io_xri_cmpl)
11975 ccnt++;
11976 }
11977 if (ccnt)
11978 io_xri_cmpl = 0;
11979
11980 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11981 nvmet_xri_cmpl = list_empty(
11982 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11983 }
11984 els_xri_cmpl =
11985 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11986
11987 }
11988}
11989
11990
11991
11992
11993
11994
11995
11996
11997
11998
11999
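/**
 * lpfc_sli4_hba_unset - Unset the SLI4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to unset the HBA.  It blocks
 * and flushes any outstanding mailbox command, aborts outstanding IOCBs,
 * waits for XRI exchange busy to complete, tears down the queues, resets the
 * PCI function, and releases the remaining HBA resources.
 **/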
12000static void
12001lpfc_sli4_hba_unset(struct lpfc_hba *phba)
12002{
12003 int wait_cnt = 0;
12004 LPFC_MBOXQ_t *mboxq;
12005 struct pci_dev *pdev = phba->pcidev;
12006
12007 lpfc_stop_hba_timers(phba);
12008 if (phba->pport)
12009 phba->sli4_hba.intr_enable = 0;
12010
12011
12012
12013
12014
12015
12016
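	/* Gracefully wait out any potential outstanding asynchronous
	 * mailbox command, then block any further async mailbox commands
	 * from being posted.
	 */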
12017 spin_lock_irq(&phba->hbalock);
12018 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12019 spin_unlock_irq(&phba->hbalock);
12020
12021 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
12022 msleep(10);
12023 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
12024 break;
12025 }
12026
12027 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
12028 spin_lock_irq(&phba->hbalock);
12029 mboxq = phba->sli.mbox_active;
12030 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
12031 __lpfc_mbox_cmpl_put(phba, mboxq);
12032 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12033 phba->sli.mbox_active = NULL;
12034 spin_unlock_irq(&phba->hbalock);
12035 }
12036
12037
12038 lpfc_sli_hba_iocb_abort(phba);
12039
12040
12041 lpfc_sli4_xri_exchange_busy_wait(phba);
12042
12043
12044 if (phba->pport)
12045 lpfc_cpuhp_remove(phba);
12046
12047
12048 lpfc_sli4_disable_intr(phba);
12049
12050
12051 if (phba->cfg_sriov_nr_virtfn)
12052 pci_disable_sriov(pdev);
12053
12054
12055 kthread_stop(phba->worker_thread);
12056
12057
12058 lpfc_ras_stop_fwlog(phba);
12059
12060
12061
12062
12063 lpfc_sli4_queue_unset(phba);
12064 lpfc_sli4_queue_destroy(phba);
12065
12066
12067 lpfc_pci_function_reset(phba);
12068
12069
12070 if (phba->ras_fwlog.ras_enabled)
12071 lpfc_sli4_ras_dma_free(phba);
12072
12073
12074 if (phba->pport)
12075 phba->pport->work_port_events = 0;
12076}
12077
12078
12079
12080
12081
12082
12083
12084
12085
12086
12087
12088
12089
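/**
 * lpfc_pc_sli4_params_get - Get the SLI4 parameters port capabilities.
 * @phba: Pointer to lpfc hba data structure.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's SLI4
 * capabilities via a mailbox command and cache them in pc_sli4_params.
 * It may be called from any context that can block-wait for completion;
 * the expected caller is the port probe.
 *
 * Return codes
 * 0 - successful
 * 1 - the mailbox command failed
 **/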
12090int
12091lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12092{
12093 int rc;
12094 struct lpfc_mqe *mqe;
12095 struct lpfc_pc_sli4_params *sli4_params;
12096 uint32_t mbox_tmo;
12097
12098 rc = 0;
12099 mqe = &mboxq->u.mqe;
12100
12101
12102 lpfc_pc_sli4_params(mboxq);
12103 if (!phba->sli4_hba.intr_enable)
12104 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12105 else {
12106 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12107 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12108 }
12109
12110 if (unlikely(rc))
12111 return 1;
12112
12113 sli4_params = &phba->sli4_hba.pc_sli4_params;
12114 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
12115 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
12116 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
12117 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
12118 &mqe->un.sli4_params);
12119 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
12120 &mqe->un.sli4_params);
12121 sli4_params->proto_types = mqe->un.sli4_params.word3;
12122 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
12123 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
12124 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
12125 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
12126 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
12127 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
12128 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
12129 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
12130 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
12131 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
12132 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
12133 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
12134 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
12135 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
12136 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
12137 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
12138 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
12139 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
12140 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
12141 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
12142
12143
12144 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12145 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12146
12147 return rc;
12148}
12149
12150
12151
12152
12153
12154
12155
12156
12157
12158
12159
12160
12161
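/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to lpfc hba data structure.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function issues the GET_SLI4_PARAMETERS mailbox command and caches
 * the result in pc_sli4_params.  It also derives several feature settings
 * (NVME support, PBDE, suppress response, embedded FCP IU, MDS diagnostics,
 * and MI) from the returned parameters.
 *
 * Return codes
 * 0 - successful
 * non-zero - mailbox or configuration failure
 **/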
12162int
12163lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12164{
12165 int rc;
12166 struct lpfc_mqe *mqe = &mboxq->u.mqe;
12167 struct lpfc_pc_sli4_params *sli4_params;
12168 uint32_t mbox_tmo;
12169 int length;
12170 bool exp_wqcq_pages = true;
12171 struct lpfc_sli4_parameters *mbx_sli4_parameters;
12172
12173
12174
12175
12176
12177
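	/* By default, the driver assumes the SLI4 port requires RPI
	 * header postings.  The SLI4_PARAM response will correct this
	 * assumption.
	 */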
12178 phba->sli4_hba.rpi_hdrs_in_use = 1;
12179
12180
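	/* Read the port's SLI4 Config Parameters */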
12181 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
12182 sizeof(struct lpfc_sli4_cfg_mhdr));
12183 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
12184 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
12185 length, LPFC_SLI4_MBX_EMBED);
12186 if (!phba->sli4_hba.intr_enable)
12187 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12188 else {
12189 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12190 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12191 }
12192 if (unlikely(rc))
12193 return rc;
12194 sli4_params = &phba->sli4_hba.pc_sli4_params;
12195 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
12196 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
12197 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
12198 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
12199 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
12200 mbx_sli4_parameters);
12201 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
12202 mbx_sli4_parameters);
12203 if (bf_get(cfg_phwq, mbx_sli4_parameters))
12204 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
12205 else
12206 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
12207 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
12208 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
12209 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
12210 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
12211 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
12212 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
12213 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
12214 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
12215 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
12216 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
12217 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
12218 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
12219 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
12220 mbx_sli4_parameters);
12221 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
12222 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
12223 mbx_sli4_parameters);
12224 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
12225 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
12226
12227
12228 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
12229
12230
12231 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
12232 bf_get(cfg_xib, mbx_sli4_parameters));
12233
12234 if (rc) {
12235
12236 sli4_params->nvme = 1;
12237
12238
12239 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
12240 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12241 "6133 Disabling NVME support: "
12242 "FC4 type not supported: x%x\n",
12243 phba->cfg_enable_fc4_type);
12244 goto fcponly;
12245 }
12246 } else {
12247
12248 sli4_params->nvme = 0;
12249 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12250 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
12251 "6101 Disabling NVME support: Not "
12252 "supported by firmware (%d %d) x%x\n",
12253 bf_get(cfg_nvme, mbx_sli4_parameters),
12254 bf_get(cfg_xib, mbx_sli4_parameters),
12255 phba->cfg_enable_fc4_type);
12256fcponly:
12257 phba->nvme_support = 0;
12258 phba->nvmet_support = 0;
12259 phba->cfg_nvmet_mrq = 0;
12260 phba->cfg_nvme_seg_cnt = 0;
12261
12262
12263 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
12264 return -ENODEV;
12265 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12266 }
12267 }
12268
12269
12270
12271
12272 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12273 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
12274
12275
12276 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
12277 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
12278 phba->cfg_enable_pbde = 0;
12279
12280
12281
12282
12283
12284
12285
12286
12287
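	/* To support the Suppress Response feature, three conditions must
	 * hold: the lpfc_suppress_rsp module parameter must be set (the
	 * default), and in the SLI4-Parameters descriptor Extended Inline
	 * Buffers (XIB) must be supported while Suppress Response IU Not
	 * Supported (NOSR) must not be set.
	 */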
12288 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
12289 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
12290 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
12291 else
12292 phba->cfg_suppress_rsp = 0;
12293
12294 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
12295 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
12296
12297
12298 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12299 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12300
12301
12302
12303
12304
12305
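	/* Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands.
	 */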
12306 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
12307 phba->fcp_embed_io = 1;
12308 else
12309 phba->fcp_embed_io = 0;
12310
12311 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12312 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
12313 bf_get(cfg_xib, mbx_sli4_parameters),
12314 phba->cfg_enable_pbde,
12315 phba->fcp_embed_io, phba->nvme_support,
12316 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
12317
12318 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
12319 LPFC_SLI_INTF_IF_TYPE_2) &&
12320 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
12321 LPFC_SLI_INTF_FAMILY_LNCR_A0))
12322 exp_wqcq_pages = false;
12323
12324 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
12325 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
12326 exp_wqcq_pages &&
12327 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
12328 phba->enab_exp_wqcq_pages = 1;
12329 else
12330 phba->enab_exp_wqcq_pages = 0;
12331
12332
12333
12334 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
12335 phba->mds_diags_support = 1;
12336 else
12337 phba->mds_diags_support = 0;
12338
12339
12340
12341
12342 if (bf_get(cfg_nsler, mbx_sli4_parameters))
12343 phba->nsler = 1;
12344 else
12345 phba->nsler = 0;
12346
12347
12348 sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
12349 sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
12350 sli4_params->mib_size = mbx_sli4_parameters->mib_size;
12351 sli4_params->mi_value = LPFC_DFLT_MIB_VAL;
12352
12353
12354 if (sli4_params->mi_ver && phba->cfg_enable_mi)
12355 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
12356
12357 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12358 "6461 MIB attr %d enable %d FDMI %d buf %d:%d\n",
12359 sli4_params->mi_ver, phba->cfg_enable_mi,
12360 sli4_params->mi_value, sli4_params->mib_bde_cnt,
12361 sli4_params->mib_size);
12362 return 0;
12363}
12364
12381
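/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is invoked by the PCI subsystem to attach a device with
 * SLI-3 interface spec.  If this routine determines it can claim the HBA,
 * it does all the initialization needed to handle the HBA properly: PCI
 * and driver resource setup, SCSI host creation, interrupt configuration,
 * and HBA bring-up.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/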
12382static int
12383lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
12384{
12385 struct lpfc_hba *phba;
12386 struct lpfc_vport *vport = NULL;
12387 struct Scsi_Host *shost = NULL;
12388 int error;
12389 uint32_t cfg_mode, intr_mode;
12390
12391
12392 phba = lpfc_hba_alloc(pdev);
12393 if (!phba)
12394 return -ENOMEM;
12395
12396
12397 error = lpfc_enable_pci_dev(phba);
12398 if (error)
12399 goto out_free_phba;
12400
12401
12402 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
12403 if (error)
12404 goto out_disable_pci_dev;
12405
12406
12407 error = lpfc_sli_pci_mem_setup(phba);
12408 if (error) {
12409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12410 "1402 Failed to set up pci memory space.\n");
12411 goto out_disable_pci_dev;
12412 }
12413
12414
12415 error = lpfc_sli_driver_resource_setup(phba);
12416 if (error) {
12417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12418 "1404 Failed to set up driver resource.\n");
12419 goto out_unset_pci_mem_s3;
12420 }
12421
12422
12423
12424 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
12425 if (error) {
12426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12427 "1405 Failed to initialize iocb list.\n");
12428 goto out_unset_driver_resource_s3;
12429 }
12430
12431
12432 error = lpfc_setup_driver_resource_phase2(phba);
12433 if (error) {
12434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12435 "1406 Failed to set up driver resource.\n");
12436 goto out_free_iocb_list;
12437 }
12438
12439
12440 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
12441
12442
12443 error = lpfc_create_shost(phba);
12444 if (error) {
12445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12446 "1407 Failed to create scsi host.\n");
12447 goto out_unset_driver_resource;
12448 }
12449
12450
12451 vport = phba->pport;
12452 error = lpfc_alloc_sysfs_attr(vport);
12453 if (error) {
12454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12455 "1476 Failed to allocate sysfs attr\n");
12456 goto out_destroy_shost;
12457 }
12458
12459 shost = lpfc_shost_from_vport(vport);
12460
12461 cfg_mode = phba->cfg_use_msi;
12462 while (true) {
12463
12464 lpfc_stop_port(phba);
12465
12466 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
12467 if (intr_mode == LPFC_INTR_ERROR) {
12468 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12469 "0431 Failed to enable interrupt.\n");
12470 error = -ENODEV;
12471 goto out_free_sysfs_attr;
12472 }
12473
12474 if (lpfc_sli_hba_setup(phba)) {
12475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12476 "1477 Failed to set up hba\n");
12477 error = -ENODEV;
12478 goto out_remove_device;
12479 }
12480
12481
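		/* Wait 50ms for the interrupts of previous mailbox commands */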
12482 msleep(50);
12483
12484 if (intr_mode == 0 ||
12485 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
12486
12487 phba->intr_mode = intr_mode;
12488 lpfc_log_intr_mode(phba, intr_mode);
12489 break;
12490 } else {
12491 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12492 "0447 Configure interrupt mode (%d) "
12493 "failed active interrupt test.\n",
12494 intr_mode);
12495
12496 lpfc_sli_disable_intr(phba);
12497
12498 cfg_mode = --intr_mode;
12499 }
12500 }
12501
12502
12503 lpfc_post_init_setup(phba);
12504
12505
12506 lpfc_create_static_vport(phba);
12507
12508 return 0;
12509
12510out_remove_device:
12511 lpfc_unset_hba(phba);
12512out_free_sysfs_attr:
12513 lpfc_free_sysfs_attr(vport);
12514out_destroy_shost:
12515 lpfc_destroy_shost(phba);
12516out_unset_driver_resource:
12517 lpfc_unset_driver_resource_phase2(phba);
12518out_free_iocb_list:
12519 lpfc_free_iocb_list(phba);
12520out_unset_driver_resource_s3:
12521 lpfc_sli_driver_resource_unset(phba);
12522out_unset_pci_mem_s3:
12523 lpfc_sli_pci_mem_unset(phba);
12524out_disable_pci_dev:
12525 lpfc_disable_pci_dev(phba);
12526 if (shost)
12527 scsi_host_put(shost);
12528out_free_phba:
12529 lpfc_hba_free(phba);
12530 return error;
12531}
12532
12533
12534
12535
12536
12537
12538
12539
12540
12541
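/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is invoked by the PCI subsystem to detach a device with
 * SLI-3 interface spec.  It performs all the necessary cleanup, including
 * terminating vports, removing the SCSI host, bringing the SLI layer down,
 * and releasing driver, DMA, and PCI resources.
 **/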
12542static void
12543lpfc_pci_remove_one_s3(struct pci_dev *pdev)
12544{
12545 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12546 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12547 struct lpfc_vport **vports;
12548 struct lpfc_hba *phba = vport->phba;
12549 int i;
12550
12551 spin_lock_irq(&phba->hbalock);
12552 vport->load_flag |= FC_UNLOADING;
12553 spin_unlock_irq(&phba->hbalock);
12554
12555 lpfc_free_sysfs_attr(vport);
12556
12557
12558 vports = lpfc_create_vport_work_array(phba);
12559 if (vports != NULL)
12560 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12561 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12562 continue;
12563 fc_vport_terminate(vports[i]->fc_vport);
12564 }
12565 lpfc_destroy_vport_work_array(phba, vports);
12566
12567
12568 fc_remove_host(shost);
12569 scsi_remove_host(shost);
12570
12571
12572 lpfc_cleanup(vport);
12573
12574
12575
12576
12577
12578
12579
12580
12581 lpfc_sli_hba_down(phba);
12582
12583 kthread_stop(phba->worker_thread);
12584
12585 lpfc_sli_brdrestart(phba);
12586
12587 kfree(phba->vpi_bmask);
12588 kfree(phba->vpi_ids);
12589
12590 lpfc_stop_hba_timers(phba);
12591 spin_lock_irq(&phba->port_list_lock);
12592 list_del_init(&vport->listentry);
12593 spin_unlock_irq(&phba->port_list_lock);
12594
12595 lpfc_debugfs_terminate(vport);
12596
12597
12598 if (phba->cfg_sriov_nr_virtfn)
12599 pci_disable_sriov(pdev);
12600
12601
12602 lpfc_sli_disable_intr(phba);
12603
12604 scsi_host_put(shost);
12605
12606
12607
12608
12609
12610 lpfc_scsi_free(phba);
12611 lpfc_free_iocb_list(phba);
12612
12613 lpfc_mem_free_all(phba);
12614
12615 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
12616 phba->hbqslimp.virt, phba->hbqslimp.phys);
12617
12618
12619 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
12620 phba->slim2p.virt, phba->slim2p.phys);
12621
12622
12623 iounmap(phba->ctrl_regs_memmap_p);
12624 iounmap(phba->slim_memmap_p);
12625
12626 lpfc_hba_free(phba);
12627
12628 pci_release_mem_regions(pdev);
12629 pci_disable_device(pdev);
12630}
12631
12651
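/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-3 interface spec.  When PM
 * invokes this method, it quiesces the device by bringing it offline,
 * stopping the driver's worker thread, and disabling the device interrupt.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/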
12652static int __maybe_unused
12653lpfc_pci_suspend_one_s3(struct device *dev_d)
12654{
12655 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12656 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12657
12658 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12659 "0473 PCI device Power Management suspend.\n");
12660
12661
12662 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12663 lpfc_offline(phba);
12664 kthread_stop(phba->worker_thread);
12665
12666
12667 lpfc_sli_disable_intr(phba);
12668
12669 return 0;
12670}
12671
12690
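/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-3 interface spec.  When PM
 * invokes this method, it restarts the driver's worker thread, re-enables
 * the device interrupt, and restarts the SLI interface to bring the device
 * back online.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/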
12691static int __maybe_unused
12692lpfc_pci_resume_one_s3(struct device *dev_d)
12693{
12694 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12695 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12696 uint32_t intr_mode;
12697 int error;
12698
12699 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12700 "0452 PCI device Power Management resume.\n");
12701
12702
12703 phba->worker_thread = kthread_run(lpfc_do_work, phba,
12704 "lpfc_worker_%d", phba->brd_no);
12705 if (IS_ERR(phba->worker_thread)) {
12706 error = PTR_ERR(phba->worker_thread);
12707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12708 "0434 PM resume failed to start worker "
12709 "thread: error=x%x.\n", error);
12710 return error;
12711 }
12712
12713
12714 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12715 if (intr_mode == LPFC_INTR_ERROR) {
12716 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12717 "0430 PM resume Failed to enable interrupt\n");
12718 return -EIO;
12719 } else
12720 phba->intr_mode = intr_mode;
12721
12722
12723 lpfc_sli_brdrestart(phba);
12724 lpfc_online(phba);
12725
12726
12727 lpfc_log_intr_mode(phba, phba->intr_mode);
12728
12729 return 0;
12730}
12731
12732
12733
12734
12735
12736
12737
12738
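/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/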
12739static void
12740lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12741{
12742 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12743 "2723 PCI channel I/O abort preparing for recovery\n");
12744
12745
12746
12747
12748
12749 lpfc_sli_abort_fcp_rings(phba);
12750}
12751
12752
12753
12754
12755
12756
12757
12758
12759
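/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset.  It
 * blocks management and SCSI traffic, flushes the I/O rings, stops the HBA
 * timers, and disables the device interrupt and PCI device.
 **/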
12760static void
12761lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12762{
12763 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12764 "2710 PCI channel disable preparing for reset\n");
12765
12766
12767 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
12768
12769
12770 lpfc_scsi_dev_block(phba);
12771
12772
12773 lpfc_sli_flush_io_rings(phba);
12774
12775
12776 lpfc_stop_hba_timers(phba);
12777
12778
12779 lpfc_sli_disable_intr(phba);
12780 pci_disable_device(phba->pcidev);
12781}
12782
12783
12784
12785
12786
12787
12788
12789
12790
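/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for permanently
 * disabling the PCI slot.  It blocks the SCSI transport layer traffic,
 * stops the HBA timers, and flushes the pending I/Os.
 **/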
12791static void
12792lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12793{
12794 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12795 "2711 PCI channel permanent disable for failure\n");
12796
12797 lpfc_scsi_dev_block(phba);
12798
12799
12800 lpfc_stop_hba_timers(phba);
12801
12802
12803 lpfc_sli_flush_io_rings(phba);
12804}
12805
12806
12807
12808
12809
12810
12811
12812
12813
12814
12815
12816
12817
12818
12819
12820
12821
12822
12823
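/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling on a
 * device with SLI-3 interface spec.  Depending on the reported channel
 * state, it prepares the device for recovery, for slot reset, or for
 * permanent disablement.
 *
 * Return codes
 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered without reset
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/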
12824static pci_ers_result_t
12825lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12826{
12827 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12828 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12829
12830 switch (state) {
12831 case pci_channel_io_normal:
12832
12833 lpfc_sli_prep_dev_for_recover(phba);
12834 return PCI_ERS_RESULT_CAN_RECOVER;
12835 case pci_channel_io_frozen:
12836
12837 lpfc_sli_prep_dev_for_reset(phba);
12838 return PCI_ERS_RESULT_NEED_RESET;
12839 case pci_channel_io_perm_failure:
12840
12841 lpfc_sli_prep_dev_for_perm_failure(phba);
12842 return PCI_ERS_RESULT_DISCONNECT;
12843 default:
12844
12845 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12846 "0472 Unknown PCI error state: x%x\n", state);
12847 lpfc_sli_prep_dev_for_reset(phba);
12848 return PCI_ERS_RESULT_NEED_RESET;
12849 }
12850}
12851
12852
12869
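/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.  It
 * re-enables the device, restores its PCI config space, re-enables the
 * interrupt, and restarts the SLI interface.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/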
12870static pci_ers_result_t
12871lpfc_io_slot_reset_s3(struct pci_dev *pdev)
12872{
12873 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12874 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12875 struct lpfc_sli *psli = &phba->sli;
12876 uint32_t intr_mode;
12877
12878 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
12879 if (pci_enable_device_mem(pdev)) {
12880 printk(KERN_ERR "lpfc: Cannot re-enable "
12881 "PCI device after reset.\n");
12882 return PCI_ERS_RESULT_DISCONNECT;
12883 }
12884
12885 pci_restore_state(pdev);
12886
12887
12888
12889
12890
12891 pci_save_state(pdev);
12892
12893 if (pdev->is_busmaster)
12894 pci_set_master(pdev);
12895
12896 spin_lock_irq(&phba->hbalock);
12897 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
12898 spin_unlock_irq(&phba->hbalock);
12899
12900
12901 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12902 if (intr_mode == LPFC_INTR_ERROR) {
12903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12904 "0427 Cannot re-enable interrupt after "
12905 "slot reset.\n");
12906 return PCI_ERS_RESULT_DISCONNECT;
12907 } else
12908 phba->intr_mode = intr_mode;
12909
12910
12911 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12912 lpfc_offline(phba);
12913 lpfc_sli_brdrestart(phba);
12914
12915
12916 lpfc_log_intr_mode(phba, phba->intr_mode);
12917
12918 return PCI_ERS_RESULT_RECOVERED;
12919}
12920
12921
12922
12923
12924
12925
12926
12927
12928
12929
12930
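/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called when kernel error recovery tells the lpfc driver
 * that it is ok to resume normal PCI operation after PCI bus error recovery.
 * It brings the device back online; after this call, traffic can start to
 * flow from this device again.
 **/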
12931static void
12932lpfc_io_resume_s3(struct pci_dev *pdev)
12933{
12934 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12935 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12936
12937
12938 lpfc_online(phba);
12939}
12940
12941
12942
12943
12944
12945
12946
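/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve, scaled by the maximum
 * number of XRIs configured for the SLI4 port; 0 for non-SLI4 ports.
 **/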
12947int
12948lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12949{
12950 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12951
12952 if (phba->sli_rev == LPFC_SLI_REV4) {
12953 if (max_xri <= 100)
12954 return 10;
12955 else if (max_xri <= 256)
12956 return 25;
12957 else if (max_xri <= 512)
12958 return 50;
12959 else if (max_xri <= 1024)
12960 return 100;
12961 else if (max_xri <= 1536)
12962 return 150;
12963 else if (max_xri <= 2048)
12964 return 200;
12965 else
12966 return 250;
12967 } else
12968 return 0;
12969}
12970
12971
12972
12973
12974
12975
12976
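/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve, plus the NVMET buffer
 * post count when NVMET is supported.
 **/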
12977int
12978lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12979{
12980 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12981
12982 if (phba->nvmet_support)
12983 max_xri += LPFC_NVMET_BUF_POST;
12984 return max_xri;
12985}
12986
12987
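/**
 * lpfc_log_write_firmware_error - Decode and log a firmware write failure
 * @phba: pointer to lpfc hba data structure.
 * @offset: add status returned by the failed write.
 * @magic_number: magic number from the firmware image header.
 * @ftype: file type from the firmware image header.
 * @fid: file id from the firmware image header.
 * @fsize: file size from the firmware image header.
 * @fw: pointer to the firmware image.
 *
 * Logs an unmaskable message describing why the firmware write failed and
 * returns a matching negative errno.
 **/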
12988static int
12989lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12990 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12991 const struct firmware *fw)
12992{
12993 int rc;
12994
12995
12996
12997
12998
12999
13000
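	/* Three cases: (1) the firmware is not supported on the detected
	 * adapter, (2) firmware downloads have been administratively locked
	 * out, or (3) some other error occurred during the download.  In
	 * each case an unmaskable message is logged for admin diagnosis.
	 */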
13001 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
13002 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
13003 magic_number != MAGIC_NUMBER_G6) ||
13004 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
13005 magic_number != MAGIC_NUMBER_G7)) {
13006 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13007 "3030 This firmware version is not supported on"
13008 " this HBA model. Device:%x Magic:%x Type:%x "
13009 "ID:%x Size %d %zd\n",
13010 phba->pcidev->device, magic_number, ftype, fid,
13011 fsize, fw->size);
13012 rc = -EINVAL;
13013 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
13014 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13015 "3021 Firmware downloads have been prohibited "
13016 "by a system configuration setting on "
13017 "Device:%x Magic:%x Type:%x ID:%x Size %d "
13018 "%zd\n",
13019 phba->pcidev->device, magic_number, ftype, fid,
13020 fsize, fw->size);
13021 rc = -EACCES;
13022 } else {
13023 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13024 "3022 FW Download failed. Add Status x%x "
13025 "Device:%x Magic:%x Type:%x ID:%x Size %d "
13026 "%zd\n",
13027 offset, phba->pcidev->device, magic_number,
13028 ftype, fid, fsize, fw->size);
13029 rc = -EIO;
13030 }
13031 return rc;
13032}
13033
13034
13035
13036
13037
13038
13039
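/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure (passed as void *).
 *
 * If the image revision differs from the running firmware revision, the
 * image is copied into DMA buffers one SLI4 page at a time and written to
 * the port with lpfc_wr_object(); otherwise the update is skipped.
 **/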
13040static void
13041lpfc_write_firmware(const struct firmware *fw, void *context)
13042{
13043 struct lpfc_hba *phba = (struct lpfc_hba *)context;
13044 char fwrev[FW_REV_STR_SIZE];
13045 struct lpfc_grp_hdr *image;
13046 struct list_head dma_buffer_list;
13047 int i, rc = 0;
13048 struct lpfc_dmabuf *dmabuf, *next;
13049 uint32_t offset = 0, temp_offset = 0;
13050 uint32_t magic_number, ftype, fid, fsize;
13051
13052
13053 if (!fw) {
13054 rc = -ENXIO;
13055 goto out;
13056 }
13057 image = (struct lpfc_grp_hdr *)fw->data;
13058
13059 magic_number = be32_to_cpu(image->magic_number);
13060 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
13061 fid = bf_get_be32(lpfc_grp_hdr_id, image);
13062 fsize = be32_to_cpu(image->size);
13063
13064 INIT_LIST_HEAD(&dma_buffer_list);
13065 lpfc_decode_firmware_rev(phba, fwrev, 1);
13066 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
13067 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13068 "3023 Updating Firmware, Current Version:%s "
13069 "New Version:%s\n",
13070 fwrev, image->revision);
13071 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
13072 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
13073 GFP_KERNEL);
13074 if (!dmabuf) {
13075 rc = -ENOMEM;
13076 goto release_out;
13077 }
13078 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
13079 SLI4_PAGE_SIZE,
13080 &dmabuf->phys,
13081 GFP_KERNEL);
13082 if (!dmabuf->virt) {
13083 kfree(dmabuf);
13084 rc = -ENOMEM;
13085 goto release_out;
13086 }
13087 list_add_tail(&dmabuf->list, &dma_buffer_list);
13088 }
13089 while (offset < fw->size) {
13090 temp_offset = offset;
13091 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
13092 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
13093 memcpy(dmabuf->virt,
13094 fw->data + temp_offset,
13095 fw->size - temp_offset);
13096 temp_offset = fw->size;
13097 break;
13098 }
13099 memcpy(dmabuf->virt, fw->data + temp_offset,
13100 SLI4_PAGE_SIZE);
13101 temp_offset += SLI4_PAGE_SIZE;
13102 }
13103 rc = lpfc_wr_object(phba, &dma_buffer_list,
13104 (fw->size - offset), &offset);
13105 if (rc) {
13106 rc = lpfc_log_write_firmware_error(phba, offset,
13107 magic_number,
13108 ftype,
13109 fid,
13110 fsize,
13111 fw);
13112 goto release_out;
13113 }
13114 }
13115 rc = offset;
13116 } else
13117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13118 "3029 Skipped Firmware update, Current "
13119 "Version:%s New Version:%s\n",
13120 fwrev, image->revision);
13121
13122release_out:
13123 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
13124 list_del(&dmabuf->list);
13125 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
13126 dmabuf->virt, dmabuf->phys);
13127 kfree(dmabuf);
13128 }
13129 release_firmware(fw);
13130out:
13131 if (rc < 0)
13132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13133 "3062 Firmware update error, status %d.\n", rc);
13134 else
13135 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13136 "3024 Firmware update success: size %d.\n", rc);
13137}
13138
13139
13140
13141
13142
13143
13144
13145
13146
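/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware to update: INT_FW_UPGRADE or RUN_FW_UPGRADE.
 *
 * Requests the "<ModelName>.grp" firmware image from the kernel firmware
 * loader, asynchronously for INT_FW_UPGRADE or synchronously for
 * RUN_FW_UPGRADE, and writes it to the port.
 *
 * Return code
 * 0 - successful, otherwise - failure
 **/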
13147int
13148lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
13149{
13150 uint8_t file_name[ELX_MODEL_NAME_SIZE];
13151 int ret;
13152 const struct firmware *fw;
13153
13154
13155 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
13156 LPFC_SLI_INTF_IF_TYPE_2)
13157 return -EPERM;
13158
13159 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
13160
13161 if (fw_upgrade == INT_FW_UPGRADE) {
13162 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
13163 file_name, &phba->pcidev->dev,
13164 GFP_KERNEL, (void *)phba,
13165 lpfc_write_firmware);
13166 } else if (fw_upgrade == RUN_FW_UPGRADE) {
13167 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
13168 if (!ret)
13169 lpfc_write_firmware(fw, (void *)phba);
13170 } else {
13171 ret = -EINVAL;
13172 }
13173
13174 return ret;
13175}
13176
13194
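/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is invoked by the PCI subsystem to attach a device with
 * SLI-4 interface spec.  If this routine determines it can claim the HBA,
 * it does all the initialization needed to handle the HBA properly: PCI
 * and driver resource setup, interrupt configuration, SCSI host creation,
 * HBA bring-up, and NVMe local port registration.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/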
13195static int
13196lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
13197{
13198 struct lpfc_hba *phba;
13199 struct lpfc_vport *vport = NULL;
13200 struct Scsi_Host *shost = NULL;
13201 int error;
13202 uint32_t cfg_mode, intr_mode;
13203
13204
13205 phba = lpfc_hba_alloc(pdev);
13206 if (!phba)
13207 return -ENOMEM;
13208
13209
13210 error = lpfc_enable_pci_dev(phba);
13211 if (error)
13212 goto out_free_phba;
13213
13214
13215 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
13216 if (error)
13217 goto out_disable_pci_dev;
13218
13219
13220 error = lpfc_sli4_pci_mem_setup(phba);
13221 if (error) {
13222 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13223 "1410 Failed to set up pci memory space.\n");
13224 goto out_disable_pci_dev;
13225 }
13226
13227
13228 error = lpfc_sli4_driver_resource_setup(phba);
13229 if (error) {
13230 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13231 "1412 Failed to set up driver resource.\n");
13232 goto out_unset_pci_mem_s4;
13233 }
13234
13235 INIT_LIST_HEAD(&phba->active_rrq_list);
13236 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
13237
13238
13239 error = lpfc_setup_driver_resource_phase2(phba);
13240 if (error) {
13241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13242 "1414 Failed to set up driver resource.\n");
13243 goto out_unset_driver_resource_s4;
13244 }
13245
13246
13247 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13248
13249
13250 cfg_mode = phba->cfg_use_msi;
13251
13252
13253 phba->pport = NULL;
13254 lpfc_stop_port(phba);
13255
13256
13257 lpfc_cpu_map_array_init(phba);
13258
13259
13260 lpfc_hba_eq_hdl_array_init(phba);
13261
13262
13263 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
13264 if (intr_mode == LPFC_INTR_ERROR) {
13265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13266 "0426 Failed to enable interrupt.\n");
13267 error = -ENODEV;
13268 goto out_unset_driver_resource;
13269 }
13270
13271 if (phba->intr_type != MSIX) {
13272 phba->cfg_irq_chann = 1;
13273 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13274 if (phba->nvmet_support)
13275 phba->cfg_nvmet_mrq = 1;
13276 }
13277 }
13278 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
13279
13280
13281 error = lpfc_create_shost(phba);
13282 if (error) {
13283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13284 "1415 Failed to create scsi host.\n");
13285 goto out_disable_intr;
13286 }
13287 vport = phba->pport;
13288 shost = lpfc_shost_from_vport(vport);
13289
13290
13291 error = lpfc_alloc_sysfs_attr(vport);
13292 if (error) {
13293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13294 "1416 Failed to allocate sysfs attr\n");
13295 goto out_destroy_shost;
13296 }
13297
13298
13299 if (lpfc_sli4_hba_setup(phba)) {
13300 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13301 "1421 Failed to set up hba\n");
13302 error = -ENODEV;
13303 goto out_free_sysfs_attr;
13304 }
13305
13306
13307 phba->intr_mode = intr_mode;
13308 lpfc_log_intr_mode(phba, intr_mode);
13309
13310
13311 lpfc_post_init_setup(phba);
13312
13313
13314
13315
13316 if (phba->nvmet_support == 0) {
13317 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13318
13319
13320
13321
13322
13323 error = lpfc_nvme_create_localport(vport);
13324 if (error) {
13325 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13326 "6004 NVME registration "
13327 "failed, error x%x\n",
13328 error);
13329 }
13330 }
13331 }
13332
13333
13334 if (phba->cfg_request_firmware_upgrade)
13335 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
13336
13337
13338 lpfc_create_static_vport(phba);
13339
13340
13341 lpfc_sli4_ras_setup(phba);
13342
13343 INIT_LIST_HEAD(&phba->poll_list);
13344 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
13345 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
13346
13347 return 0;
13348
13349out_free_sysfs_attr:
13350 lpfc_free_sysfs_attr(vport);
13351out_destroy_shost:
13352 lpfc_destroy_shost(phba);
13353out_disable_intr:
13354 lpfc_sli4_disable_intr(phba);
13355out_unset_driver_resource:
13356 lpfc_unset_driver_resource_phase2(phba);
13357out_unset_driver_resource_s4:
13358 lpfc_sli4_driver_resource_unset(phba);
13359out_unset_pci_mem_s4:
13360 lpfc_sli4_pci_mem_unset(phba);
13361out_disable_pci_dev:
13362 lpfc_disable_pci_dev(phba);
13363 if (shost)
13364 scsi_host_put(shost);
13365out_free_phba:
13366 lpfc_hba_free(phba);
13367 return error;
13368}
13369
13370
13371
13372
13373
13374
13375
13376
13377
13378
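/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is invoked by the PCI subsystem to detach a device with
 * SLI-4 interface spec.  It performs all the necessary cleanup, including
 * terminating vports, removing the SCSI host, destroying the NVMe target
 * and local ports, unsetting the HBA, and releasing driver and PCI
 * resources.
 **/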
13379static void
13380lpfc_pci_remove_one_s4(struct pci_dev *pdev)
13381{
13382 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13383 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13384 struct lpfc_vport **vports;
13385 struct lpfc_hba *phba = vport->phba;
13386 int i;
13387
13388
13389 spin_lock_irq(&phba->hbalock);
13390 vport->load_flag |= FC_UNLOADING;
13391 spin_unlock_irq(&phba->hbalock);
13392
13393 lpfc_free_sysfs_attr(vport);
13394
13395
13396 vports = lpfc_create_vport_work_array(phba);
13397 if (vports != NULL)
13398 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13399 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13400 continue;
13401 fc_vport_terminate(vports[i]->fc_vport);
13402 }
13403 lpfc_destroy_vport_work_array(phba, vports);
13404
13405
13406 fc_remove_host(shost);
13407 scsi_remove_host(shost);
13408
13409
13410
13411
13412 lpfc_cleanup(vport);
13413 lpfc_nvmet_destroy_targetport(phba);
13414 lpfc_nvme_destroy_localport(vport);
13415
13416
13417 if (phba->cfg_xri_rebalancing)
13418 lpfc_destroy_multixri_pools(phba);
13419
13420
13421
13422
13423
13424
13425 lpfc_debugfs_terminate(vport);
13426
13427 lpfc_stop_hba_timers(phba);
13428 spin_lock_irq(&phba->port_list_lock);
13429 list_del_init(&vport->listentry);
13430 spin_unlock_irq(&phba->port_list_lock);
13431
13432
13433
13434
13435 lpfc_io_free(phba);
13436 lpfc_free_iocb_list(phba);
13437 lpfc_sli4_hba_unset(phba);
13438
13439 lpfc_unset_driver_resource_phase2(phba);
13440 lpfc_sli4_driver_resource_unset(phba);
13441
13442
13443 lpfc_sli4_pci_mem_unset(phba);
13444
13445
13446 scsi_host_put(shost);
13447 lpfc_disable_pci_dev(phba);
13448
13449
13450 lpfc_hba_free(phba);
13451
13452 return;
13453}
13454
13455
13474
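/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec.  When PM
 * invokes this method, it quiesces the device by bringing it offline,
 * stopping the driver's worker thread, disabling the device interrupt, and
 * destroying the queues.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/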
13475static int __maybe_unused
13476lpfc_pci_suspend_one_s4(struct device *dev_d)
13477{
13478 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13479 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13480
13481 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13482 "2843 PCI device Power Management suspend.\n");
13483
13484
13485 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13486 lpfc_offline(phba);
13487 kthread_stop(phba->worker_thread);
13488
13489
13490 lpfc_sli4_disable_intr(phba);
13491 lpfc_sli4_queue_destroy(phba);
13492
13493 return 0;
13494}
13495
13514
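/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec.  When PM
 * invokes this method, it restarts the driver's worker thread, re-enables
 * the device interrupt, and restarts the SLI interface to bring the device
 * back online.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/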
13515static int __maybe_unused
13516lpfc_pci_resume_one_s4(struct device *dev_d)
13517{
13518 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13519 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13520 uint32_t intr_mode;
13521 int error;
13522
13523 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13524 "0292 PCI device Power Management resume.\n");
13525
13526
13527 phba->worker_thread = kthread_run(lpfc_do_work, phba,
13528 "lpfc_worker_%d", phba->brd_no);
13529 if (IS_ERR(phba->worker_thread)) {
13530 error = PTR_ERR(phba->worker_thread);
13531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13532 "0293 PM resume failed to start worker "
13533 "thread: error=x%x.\n", error);
13534 return error;
13535 }
13536
13537
13538 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13539 if (intr_mode == LPFC_INTR_ERROR) {
13540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13541 "0294 PM resume Failed to enable interrupt\n");
13542 return -EIO;
13543 } else
13544 phba->intr_mode = intr_mode;
13545
13546
13547 lpfc_sli_brdrestart(phba);
13548 lpfc_online(phba);
13549
13550
13551 lpfc_log_intr_mode(phba, phba->intr_mode);
13552
13553 return 0;
13554}
13555
13556
13557
13558
13559
13560
13561
13562
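/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/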
13563static void
13564lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
13565{
13566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13567 "2828 PCI channel I/O abort preparing for recovery\n");
13568
13569
13570
13571
13572 lpfc_sli_abort_fcp_rings(phba);
13573}
13574
13575
13576
13577
13578
13579
13580
13581
13582
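/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset.  It
 * blocks management and SCSI traffic, flushes the I/O rings, stops the HBA
 * timers, disables the device interrupt, destroys the queues, and disables
 * the PCI device.
 **/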
13583static void
13584lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
13585{
13586 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13587 "2826 PCI channel disable preparing for reset\n");
13588
13589
13590 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
13591
13592
13593 lpfc_scsi_dev_block(phba);
13594
13595
13596 lpfc_sli_flush_io_rings(phba);
13597
13598
13599 lpfc_stop_hba_timers(phba);
13600
13601
13602 lpfc_sli4_disable_intr(phba);
13603 lpfc_sli4_queue_destroy(phba);
13604 pci_disable_device(phba->pcidev);
13605}
13606
13607
13608
13609
13610
13611
13612
13613
13614
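/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for permanently
 * disabling the PCI slot.  It blocks the SCSI transport layer traffic,
 * stops the HBA timers, and flushes the pending I/Os.
 **/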
13615static void
13616lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
13617{
13618 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13619 "2827 PCI channel permanent disable for failure\n");
13620
13621
13622 lpfc_scsi_dev_block(phba);
13623
13624
13625 lpfc_stop_hba_timers(phba);
13626
13627
13628 lpfc_sli_flush_io_rings(phba);
13629}
13630
13631
13632
13633
13634
13635
13636
13637
13638
13639
13640
13641
13642
13643
13644
13645
13646
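/**
 * lpfc_io_error_detected_s4 - Method for handling SLI-4 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling on a
 * device with SLI-4 interface spec.  Depending on the reported channel
 * state, it prepares the device for recovery, for slot reset, or for
 * permanent disablement.
 *
 * Return codes
 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered without reset
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/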
13647static pci_ers_result_t
13648lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13649{
13650 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13651 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13652
13653 switch (state) {
13654 case pci_channel_io_normal:
13655
13656 lpfc_sli4_prep_dev_for_recover(phba);
13657 return PCI_ERS_RESULT_CAN_RECOVER;
13658 case pci_channel_io_frozen:
13659
13660 lpfc_sli4_prep_dev_for_reset(phba);
13661 return PCI_ERS_RESULT_NEED_RESET;
13662 case pci_channel_io_perm_failure:
13663
13664 lpfc_sli4_prep_dev_for_perm_failure(phba);
13665 return PCI_ERS_RESULT_DISCONNECT;
13666 default:
13667
13668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13669 "2825 Unknown PCI error state: x%x\n", state);
13670 lpfc_sli4_prep_dev_for_reset(phba);
13671 return PCI_ERS_RESULT_NEED_RESET;
13672 }
13673}
13674
13675
13692
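/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.  It
 * re-enables the device, restores its PCI config space, and re-enables the
 * interrupt; the SLI interface restart is deferred to the io_resume phase.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/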
13693static pci_ers_result_t
13694lpfc_io_slot_reset_s4(struct pci_dev *pdev)
13695{
13696 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13697 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13698 struct lpfc_sli *psli = &phba->sli;
13699 uint32_t intr_mode;
13700
13701 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13702 if (pci_enable_device_mem(pdev)) {
13703 printk(KERN_ERR "lpfc: Cannot re-enable "
13704 "PCI device after reset.\n");
13705 return PCI_ERS_RESULT_DISCONNECT;
13706 }
13707
13708 pci_restore_state(pdev);
13709
13710
13711
13712
13713
13714 pci_save_state(pdev);
13715
13716 if (pdev->is_busmaster)
13717 pci_set_master(pdev);
13718
13719 spin_lock_irq(&phba->hbalock);
13720 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13721 spin_unlock_irq(&phba->hbalock);
13722
13723
13724 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13725 if (intr_mode == LPFC_INTR_ERROR) {
13726 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13727 "2824 Cannot re-enable interrupt after "
13728 "slot reset.\n");
13729 return PCI_ERS_RESULT_DISCONNECT;
13730 } else
13731 phba->intr_mode = intr_mode;
13732
13733
13734 lpfc_log_intr_mode(phba, phba->intr_mode);
13735
13736 return PCI_ERS_RESULT_RECOVERED;
13737}
13738
13739
13740
13741
13742
13743
13744
13745
13746
13747
13748
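/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called when kernel error recovery tells the lpfc driver
 * that it is ok to resume normal PCI operation after PCI bus error recovery.
 * If the SLI interface was left inactive by the slot reset, it takes the
 * device offline, restarts the board, and brings the device back online.
 **/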
13749static void
13750lpfc_io_resume_s4(struct pci_dev *pdev)
13751{
13752 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13753 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13754
13755
13756
13757
13758
13759
13760
13761 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13762
13763 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13764 lpfc_offline(phba);
13765 lpfc_sli_brdrestart(phba);
13766
13767 lpfc_online(phba);
13768 }
13769}
13770
13771
13772
13773
13774
13775
13776
13777
13778
13779
13780
13781
13782
13783
13784
13785
13786
13787
13788
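/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is registered to the kernel's PCI subsystem.  When an Emulex
 * HBA is presented on the PCI bus, it reads the SLI interface register to
 * determine the interface revision and dispatches to the proper SLI-3 or
 * SLI-4 device probing routine.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/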
13789static int
13790lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13791{
13792 int rc;
13793 struct lpfc_sli_intf intf;
13794
13795 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13796 return -ENODEV;
13797
13798 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13799 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13800 rc = lpfc_pci_probe_one_s4(pdev, pid);
13801 else
13802 rc = lpfc_pci_probe_one_s3(pdev, pid);
13803
13804 return rc;
13805}
13806
13807
13808
13809
13810
13811
13812
13813
13814
13815
13816
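/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the kernel's PCI subsystem.  When an Emulex
 * HBA is removed from the PCI bus, it dispatches the action to the proper
 * SLI-3 or SLI-4 device removal routine.
 **/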
13817static void
13818lpfc_pci_remove_one(struct pci_dev *pdev)
13819{
13820 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13821 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13822
13823 switch (phba->pci_dev_grp) {
13824 case LPFC_PCI_DEV_LP:
13825 lpfc_pci_remove_one_s3(pdev);
13826 break;
13827 case LPFC_PCI_DEV_OC:
13828 lpfc_pci_remove_one_s4(pdev);
13829 break;
13830 default:
13831 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13832 "1424 Invalid PCI device group: 0x%x\n",
13833 phba->pci_dev_grp);
13834 break;
13835 }
13836 return;
13837}
13838
13839
13840
13841
13842
13843
13844
13845
13846
13847
13848
13849
13850
13851
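/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @dev: pointer to device
 *
 * This routine is registered with the kernel's PM subsystem to support
 * system Power Management (PM).  When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/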
13852static int __maybe_unused
13853lpfc_pci_suspend_one(struct device *dev)
13854{
13855 struct Scsi_Host *shost = dev_get_drvdata(dev);
13856 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13857 int rc = -ENODEV;
13858
13859 switch (phba->pci_dev_grp) {
13860 case LPFC_PCI_DEV_LP:
13861 rc = lpfc_pci_suspend_one_s3(dev);
13862 break;
13863 case LPFC_PCI_DEV_OC:
13864 rc = lpfc_pci_suspend_one_s4(dev);
13865 break;
13866 default:
13867 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13868 "1425 Invalid PCI device group: 0x%x\n",
13869 phba->pci_dev_grp);
13870 break;
13871 }
13872 return rc;
13873}
13874
13875
13876
13877
13878
13879
13880
13881
13882
13883
13884
13885
13886
13887
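/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @dev: pointer to device
 *
 * This routine is registered with the kernel's PM subsystem to support
 * system Power Management (PM).  When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/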
13888static int __maybe_unused
13889lpfc_pci_resume_one(struct device *dev)
13890{
13891 struct Scsi_Host *shost = dev_get_drvdata(dev);
13892 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13893 int rc = -ENODEV;
13894
13895 switch (phba->pci_dev_grp) {
13896 case LPFC_PCI_DEV_LP:
13897 rc = lpfc_pci_resume_one_s3(dev);
13898 break;
13899 case LPFC_PCI_DEV_OC:
13900 rc = lpfc_pci_resume_one_s4(dev);
13901 break;
13902 default:
13903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13904 "1426 Invalid PCI device group: 0x%x\n",
13905 phba->pci_dev_grp);
13906 break;
13907 }
13908 return rc;
13909}
13910
13925
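/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling.  It
 * is called by the PCI subsystem after a PCI bus error affecting this
 * device has been detected, and dispatches the action to the proper SLI-3
 * or SLI-4 error detected handling routine.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/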
13926static pci_ers_result_t
13927lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13928{
13929 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13930 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13931 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13932
13933 switch (phba->pci_dev_grp) {
13934 case LPFC_PCI_DEV_LP:
13935 rc = lpfc_io_error_detected_s3(pdev, state);
13936 break;
13937 case LPFC_PCI_DEV_OC:
13938 rc = lpfc_io_error_detected_s4(pdev, state);
13939 break;
13940 default:
13941 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13942 "1427 Invalid PCI device group: 0x%x\n",
13943 phba->pci_dev_grp);
13944 break;
13945 }
13946 return rc;
13947}
13948
13949
13950
13951
13952
13953
13954
13955
13956
13957
13958
13959
13960
13961
13962
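/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling.  It
 * is called after the PCI bus has been reset to restart the PCI card from
 * scratch, as if from a cold-boot, and dispatches the action to the proper
 * SLI-3 or SLI-4 device reset handling routine.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/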
13963static pci_ers_result_t
13964lpfc_io_slot_reset(struct pci_dev *pdev)
13965{
13966 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13967 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13968 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13969
13970 switch (phba->pci_dev_grp) {
13971 case LPFC_PCI_DEV_LP:
13972 rc = lpfc_io_slot_reset_s3(pdev);
13973 break;
13974 case LPFC_PCI_DEV_OC:
13975 rc = lpfc_io_slot_reset_s4(pdev);
13976 break;
13977 default:
13978 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13979 "1428 Invalid PCI device group: 0x%x\n",
13980 phba->pci_dev_grp);
13981 break;
13982 }
13983 return rc;
13984}
13985
13986
13987
13988
13989
13990
13991
13992
13993
13994
13995
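/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling.  It
 * is called when kernel error recovery tells the lpfc driver that it is ok
 * to resume normal PCI operation after PCI bus error recovery, and
 * dispatches the action to the proper SLI-3 or SLI-4 device io_resume
 * routine.
 **/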
13996static void
13997lpfc_io_resume(struct pci_dev *pdev)
13998{
13999 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14000 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14001
14002 switch (phba->pci_dev_grp) {
14003 case LPFC_PCI_DEV_LP:
14004 lpfc_io_resume_s3(pdev);
14005 break;
14006 case LPFC_PCI_DEV_OC:
14007 lpfc_io_resume_s4(pdev);
14008 break;
14009 default:
14010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14011 "1429 Invalid PCI device group: 0x%x\n",
14012 phba->pci_dev_grp);
14013 break;
14014 }
14015 return;
14016}
14017
14018
14019
14020
14021
14022
14023
14024
14025
14026
14027
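/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether OAS is supported by this adapter.  If
 * supported, the configure Flash Optimized Fabric flag is set; otherwise
 * the flag is cleared and the pool created for OAS device data is
 * destroyed.
 **/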
14028static void
14029lpfc_sli4_oas_verify(struct lpfc_hba *phba)
14030{
14031
14032 if (!phba->cfg_EnableXLane)
14033 return;
14034
14035 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
14036 phba->cfg_fof = 1;
14037 } else {
14038 phba->cfg_fof = 0;
14039 mempool_destroy(phba->device_data_mem_pool);
14040 phba->device_data_mem_pool = NULL;
14041 }
14042
14043 return;
14044}
14045
14046
14047
14048
14049
14050
14051
14052
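/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether RAS is supported by the adapter and, if so,
 * whether firmware logging is enabled for this PCI function via the
 * configured logging function and buffer size.
 **/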
14053void
14054lpfc_sli4_ras_init(struct lpfc_hba *phba)
14055{
14056 switch (phba->pcidev->device) {
14057 case PCI_DEVICE_ID_LANCER_G6_FC:
14058 case PCI_DEVICE_ID_LANCER_G7_FC:
14059 phba->ras_fwlog.ras_hwsupport = true;
14060 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
14061 phba->cfg_ras_fwlog_buffsize)
14062 phba->ras_fwlog.ras_enabled = true;
14063 else
14064 phba->ras_fwlog.ras_enabled = false;
14065 break;
14066 default:
14067 phba->ras_fwlog.ras_hwsupport = false;
14068 }
14069}
14070
14071
14072MODULE_DEVICE_TABLE(pci, lpfc_id_table);
14073
14074static const struct pci_error_handlers lpfc_err_handler = {
14075 .error_detected = lpfc_io_error_detected,
14076 .slot_reset = lpfc_io_slot_reset,
14077 .resume = lpfc_io_resume,
14078};
14079
14080static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
14081 lpfc_pci_suspend_one,
14082 lpfc_pci_resume_one);
14083
14084static struct pci_driver lpfc_driver = {
14085 .name = LPFC_DRIVER_NAME,
14086 .id_table = lpfc_id_table,
14087 .probe = lpfc_pci_probe_one,
14088 .remove = lpfc_pci_remove_one,
14089 .shutdown = lpfc_pci_remove_one,
14090 .driver.pm = &lpfc_pci_pm_ops_one,
14091 .err_handler = &lpfc_err_handler,
14092};
14093
14094static const struct file_operations lpfc_mgmt_fop = {
14095 .owner = THIS_MODULE,
14096};
14097
14098static struct miscdevice lpfc_mgmt_dev = {
14099 .minor = MISC_DYNAMIC_MINOR,
14100 .name = "lpfcmgmt",
14101 .fops = &lpfc_mgmt_fop,
14102};
14103
14104
14105
14106
14107
14108
14109
14110
14111
14112
14113
14114
14115
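/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is invoked when the lpfc module is loaded into the kernel.
 * The special kernel macro module_init() is used to indicate the role of
 * this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 * 0 - successful
 * -ENOMEM - FC attach transport failed
 * all others - failed
 **/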
14116static int __init
14117lpfc_init(void)
14118{
14119 int error = 0;
14120
14121 pr_info(LPFC_MODULE_DESC "\n");
14122 pr_info(LPFC_COPYRIGHT "\n");
14123
14124 error = misc_register(&lpfc_mgmt_dev);
14125 if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
		       "misc_register returned with status %d\n", error);
14128
14129 error = -ENOMEM;
14130 lpfc_transport_functions.vport_create = lpfc_vport_create;
14131 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
14132 lpfc_transport_template =
14133 fc_attach_transport(&lpfc_transport_functions);
14134 if (lpfc_transport_template == NULL)
14135 goto unregister;
14136 lpfc_vport_transport_template =
14137 fc_attach_transport(&lpfc_vport_transport_functions);
14138 if (lpfc_vport_transport_template == NULL) {
14139 fc_release_transport(lpfc_transport_template);
14140 goto unregister;
14141 }
14142 lpfc_wqe_cmd_template();
14143 lpfc_nvmet_cmd_template();
14144
14145
14146 lpfc_present_cpu = num_present_cpus();
14147
14148 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
14149 "lpfc/sli4:online",
14150 lpfc_cpu_online, lpfc_cpu_offline);
14151 if (error < 0)
14152 goto cpuhp_failure;
14153 lpfc_cpuhp_state = error;
14154
14155 error = pci_register_driver(&lpfc_driver);
14156 if (error)
14157 goto unwind;
14158
14159 return error;
14160
14161unwind:
14162 cpuhp_remove_multi_state(lpfc_cpuhp_state);
14163cpuhp_failure:
14164 fc_release_transport(lpfc_transport_template);
14165 fc_release_transport(lpfc_vport_transport_template);
14166unregister:
14167 misc_deregister(&lpfc_mgmt_dev);
14168
14169 return error;
14170}
14171
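
/**
 * lpfc_dmp_dbg - Dump the driver's internal debug-log ring buffer
 * @phba: pointer to lpfc hba data structure.
 *
 * Replays any buffered debug messages through dev_info(). The dump is
 * skipped when log_verbose is set on the physical port or on any vport,
 * since messages are then logged as they occur rather than buffered.
 */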
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
        unsigned int start_idx;
        unsigned int dbg_cnt;
        unsigned int temp_idx;
        int i;
        int j = 0;
        unsigned long rem_nsec;
        struct lpfc_vport **vports;

        /* Don't dump messages if we explicitly set log_verbose for the
         * physical port or any vport
         */
        if (phba->cfg_log_verbose)
                return;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL) {
                for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
                        if (vports[i]->cfg_log_verbose) {
                                lpfc_destroy_vport_work_array(phba, vports);
                                return;
                        }
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);

        /* Only one dump may run at a time; bail if one is in progress */
        if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
                return;

        start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
        dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
        if (!dbg_cnt)
                goto out;
        temp_idx = start_idx;
        if (dbg_cnt >= DBG_LOG_SZ) {
                /* Buffer has wrapped: dump all DBG_LOG_SZ entries */
                dbg_cnt = DBG_LOG_SZ;
                temp_idx -= 1;
        } else {
                if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
                        temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
                } else {
                        if (start_idx < dbg_cnt)
                                start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
                        else
                                start_idx -= dbg_cnt;
                }
        }
        dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
                 start_idx, temp_idx, dbg_cnt);

        for (i = 0; i < dbg_cnt; i++) {
                if ((start_idx + i) < DBG_LOG_SZ)
                        temp_idx = (start_idx + i) % DBG_LOG_SZ;
                else
                        temp_idx = j++;
                rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
                dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
                         temp_idx,
                         (unsigned long)phba->dbg_log[temp_idx].t_ns,
                         rem_nsec / 1000,
                         phba->dbg_log[temp_idx].log);
        }
out:
        atomic_set(&phba->dbg_log_cnt, 0);
        atomic_set(&phba->dbg_log_dmping, 0);
}
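
/**
 * lpfc_dbg_print - Record a message in the driver's debug-log ring buffer
 * @phba: pointer to lpfc hba data structure.
 * @fmt: printf-style format string.
 *
 * Messages are normally buffered in phba->dbg_log. While a dump of the
 * buffer is in progress, new messages are printed directly instead so
 * that pending dump entries are not overwritten.
 */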
__printf(2, 3)
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
        unsigned int idx;
        va_list args;
        int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
        struct va_format vaf;

        va_start(args, fmt);
        if (unlikely(dbg_dmping)) {
                /* Dump in progress: print directly instead of buffering */
                vaf.fmt = fmt;
                vaf.va = &args;
                dev_info(&phba->pcidev->dev, "%pV", &vaf);
                va_end(args);
                return;
        }
        idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
                DBG_LOG_SZ;

        atomic_inc(&phba->dbg_log_cnt);

        vscnprintf(phba->dbg_log[idx].log,
                   sizeof(phba->dbg_log[idx].log), fmt, args);
        va_end(args);

        phba->dbg_log[idx].t_ns = local_clock();
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
        misc_deregister(&lpfc_mgmt_dev);
        pci_unregister_driver(&lpfc_driver);
        cpuhp_remove_multi_state(lpfc_cpuhp_state);
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
        idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);