1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/idr.h>
28#include <linux/interrupt.h>
29#include <linux/module.h>
30#include <linux/kthread.h>
31#include <linux/pci.h>
32#include <linux/spinlock.h>
33#include <linux/ctype.h>
34#include <linux/aer.h>
35#include <linux/slab.h>
36#include <linux/firmware.h>
37#include <linux/miscdevice.h>
38#include <linux/percpu.h>
39#include <linux/msi.h>
40#include <linux/irq.h>
41#include <linux/bitops.h>
42#include <linux/crash_dump.h>
43#include <linux/cpu.h>
44#include <linux/cpuhotplug.h>
45
46#include <scsi/scsi.h>
47#include <scsi/scsi_device.h>
48#include <scsi/scsi_host.h>
49#include <scsi/scsi_transport_fc.h>
50#include <scsi/scsi_tcq.h>
51#include <scsi/fc/fc_fs.h>
52
53#include "lpfc_hw4.h"
54#include "lpfc_hw.h"
55#include "lpfc_sli.h"
56#include "lpfc_sli4.h"
57#include "lpfc_nl.h"
58#include "lpfc_disc.h"
59#include "lpfc.h"
60#include "lpfc_scsi.h"
61#include "lpfc_nvme.h"
62#include "lpfc_logmsg.h"
63#include "lpfc_crtn.h"
64#include "lpfc_vport.h"
65#include "lpfc_version.h"
66#include "lpfc_ids.h"
67
/* Dynamic cpuhp state slot allocated for lpfc online/offline callbacks */
static enum cpuhp_state lpfc_cpuhp_state;
/* Cached count of present CPUs (set at module init; see lpfc_cpu usage) */
static uint32_t lpfc_present_cpu;

/* Forward declarations for file-local helpers defined later in this file */
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

/* FC transport templates registered with the SCSI midlayer */
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
/* IDR used to hand out unique HBA (brd_no) indexes */
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues READ_NVPARM (on light-pulse HBAs only, after byte-swapping the
 * license key exactly once), then READ_REV to populate @phba->vpd with the
 * adapter revision data, and finally dumps and parses the adapter's VPD
 * region.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   -ENOMEM - could not allocate memory for the mailbox command.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		/* Byte-swap the license key to big-endian only on first use;
		 * init_key/licensed are static, so this survives reloads of
		 * the function but not of the module.
		 */
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		/* Save the node/port names returned in non-volatile params */
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which may already have been set.
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * A zero rr bit means the firmware did not fill in the revision
	 * fields; treat that as a fatal init error and request a restart.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* SLI-3 was requested but the firmware gave no SLI-3 response */
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save the READ_REV information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* Firmware feature level below 9 requires the vport-teardown
	 * option bit. NOTE(review): exact firmware semantics of
	 * feaLevelHigh < 9 are not visible here - confirm against SLI spec.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information, one DUMP_MEM chunk at a time */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* A zero word_cnt means the dump is finished (or errored
		 * above); either way we are done collecting VPD.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		/* Clamp the copy so we never overrun lpfc_vpd_data */
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
273
274
275
276
277
278
279
280
281
282
283
284static void
285lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
286{
287 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
288 phba->temp_sensor_support = 1;
289 else
290 phba->temp_sensor_support = 0;
291 mempool_free(pmboxq, phba->mbox_mem_pool);
292 return;
293}
294
295
296
297
298
299
300
301
302
303
304
/**
 * lpfc_dump_wakeup_param_cmpl - dump wakeup parameters mailbox completion
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Decodes mailbox varWords[7] through a struct prog_id overlay and formats
 * it into phba->OptionROMVersion. The mailbox is freed on every path.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character lookup used for decoding the dist field (values 0-3) */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	/* prg aliases prog_id_word so the bitfields can be read below */
	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option ROM version word into a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
338
339
340
341
342
343
344
345
346
347
/**
 * lpfc_update_vport_wwn - Update the vport's node and port names
 * @vport: pointer to lpfc vport data structure.
 *
 * Reconciles the vport's fc_nodename/fc_portname with the service
 * parameters just read from the adapter, honoring soft (user-set) WWNs
 * and fabric-assigned WWPNs (FA-WWPN), and keeps the service parameters
 * and driver copies in sync in both directions.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If a soft WWNN/WWPN is configured, write it into the service
	 * parameters first so the copies below pick it up.
	 */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the driver's node name is still unset, or a soft WWNN exists,
	 * adopt the service-parameter name; otherwise push the driver's
	 * name back into the service parameters.
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name already exists but differs from the one just
	 * read, flag the change so upper layers can react.
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	/* Adopt the service-parameter port name when it is unset, soft-set,
	 * vendor fabric-assigned (FA-WWPN key match), or previously flagged;
	 * FAWWPN_SET is re-derived from the vendor key each time.
	 */
	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
396
397
398
399
400
401
402
403
404
405
406
407
408
409
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Runs after CONFIG_PORT completes: reads the service parameters and
 * adapter configuration, synthesizes a serial number if VPD had none,
 * enables host-controller interrupts, starts the driver timers, optionally
 * (re)initializes or downs the link, and posts the async-event-enable and
 * dump-wakeup-param mailboxes (whose completion handlers free them).
 *
 * Return codes
 *   0 - success.
 *   -ENOMEM - mailbox allocation failed.
 *   -EIO - a mailbox command or register read failed.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If config port completed correctly the HBA is no longer
	 * over heated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Read the port's service parameters */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		/* Also release the DMA buffer attached to the mailbox */
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* Copy the returned service parameters and free the DMA buffer */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with the new WWNs */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If VPD supplied no serial number, synthesize one by hex-encoding
	 * the 6-byte IEEE portion of the WWNN ('0'-'9', 'a'-'f').
	 */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Clamp the HBA queue depth to the adapter's max XRI count */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCB events on rings whose command ring is set up */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings (non-SLI-3 only) */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if in
	 * MSI-X mode.
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts in the Host Control register */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	/* Suppress ring-0 interrupts when FCP ring polling is configured */
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer: twice the resource allocation timeout */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* Enable async events; mailbox freed by its completion handler */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option ROM version; mailbox freed by its completion handler */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677static int
678lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
679{
680 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
681}
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with a given topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT.
 *
 * Validates the configured link speed against the adapter's link mode
 * table (resetting it to auto if unsupported), then issues the INIT_LINK
 * mailbox command to bring up the link.
 *
 * Return codes
 *   0 - success
 *   -ENOMEM - mailbox allocation failed
 *   -EIO - INIT_LINK mailbox command failed
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	/* Reject any requested speed the adapter's link mode table (lmt)
	 * does not advertise, falling back to auto-negotiation.
	 */
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		/* MBX_BUSY in NOWAIT mode means the mailbox is still queued
		 * and will be freed by the default completion handler.
		 */
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	/* In NOWAIT mode the completion handler frees the mailbox */
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
768
769
770
771
772
773
774
775
776
777
778
779
780
781
/**
 * lpfc_hba_down_link - Bring down the SLI link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT.
 *
 * Issues the DOWN_LINK mailbox command to the HBA. In NOWAIT mode a
 * successfully issued mailbox is freed by lpfc_sli_def_mbox_cmpl; in
 * POLL mode (or on failure) it is freed here.
 *
 * Return codes
 *   0 - success
 *   -ENOMEM - mailbox allocation failed
 *   -EIO - DOWN_LINK mailbox command failed
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
812
813
814
815
816
817
818
819
820
821
822
823
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Disables host-controller interrupts (SLI-3 and earlier) and cleans up
 * discovery resources - for just the physical port when unloading, else
 * for every active vport.
 *
 * Return codes
 *   0 - always returns 0.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		/* Work array is NULL-terminated and sized for
		 * max_vports + 1 entries, hence the <= bound.
		 */
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
848
849
850
851
852
853
854
855
856
857
858
859
860
861
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 * @phba: pointer to lpfc hba data structure.
 *
 * Drains the SLI4 slow-path queue-event list, releasing the resource
 * behind each event: the response iocb for WQE completions, or the
 * receive DMA buffer for RECEIVE CQEs. Clears HBA_SP_QUEUE_EVT first
 * so no new processing is signalled.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		/* The cq_event is embedded in a larger object; recover it
		 * via container_of according to the CQE code. Other CQE
		 * codes are intentionally ignored here.
		 */
		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
894
895
896
897
898
899
900
901
902
903
904
905
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Reclaims posted receive buffers: frees all HBQ buffers when HBQs are
 * enabled, otherwise splices the ELS ring's postbufq off under the lock
 * and frees each DMA buffer, then adjusts the queue count.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		/* Free the spliced-off buffers without holding the lock */
		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
937
938
939
940
941
942
943
944
945
946
947
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Flushes every ring's txcmplq: outstanding commands are collected under
 * the appropriate lock, aborted via lpfc_sli_abort_iocb_ring, and then
 * cancelled with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. SLI-3 uses the
 * global hbalock per ring; SLI-4 walks the WQ list using each ring's own
 * ring_lock and clears the on-txcmplq flag per iocb.
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-3 variant of the HBA-down post-processing: reclaim the posted
 * receive buffers, then flush and abort everything on the txcmplqs.
 *
 * Return codes
 *   0 - always returns 0.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	/* Release posted receive buffers first, then outstanding cmds. */
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);

	return 0;
}
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-4 variant of HBA-down post-processing: frees HBQ buffers, flushes
 * the txcmplqs, returns aborted ELS SGLs and aborted I/O buffers to their
 * free lists, posts back aborted NVMET contexts, and drains deferred
 * slow-path events.
 *
 * Return codes
 *   number of aborted I/O buffers returned to the put lists.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* After an HBA reset nothing on the abort lists can complete;
	 * everything can be reclaimed immediately.
	 */

	/* Mark every SGL on the aborted-ELS list freed and splice the whole
	 * list back onto the ELS free list, all under sgl_list_lock.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* For each hardware queue, move the aborted I/O buffers back to the
	 * put list and fold the abort counters into put_io_bufs.
	 * Lock order: hbalock -> abts_io_buf_list_lock -> io_buf_list_put_lock.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Re-post aborted NVMET exchange contexts with their busy/abort
	 * flags cleared (NVME target mode only).
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116int
1117lpfc_hba_down_post(struct lpfc_hba *phba)
1118{
1119 return (*phba->lpfc_hba_down_post)(phba);
1120}
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134static void
1135lpfc_hb_timeout(struct timer_list *t)
1136{
1137 struct lpfc_hba *phba;
1138 uint32_t tmo_posted;
1139 unsigned long iflag;
1140
1141 phba = from_timer(phba, t, hb_tmofunc);
1142
1143
1144 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1145 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1146 if (!tmo_posted)
1147 phba->pport->work_port_events |= WORKER_HB_TMO;
1148 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1149
1150
1151 if (!tmo_posted)
1152 lpfc_worker_wake_up(phba);
1153 return;
1154}
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168static void
1169lpfc_rrq_timeout(struct timer_list *t)
1170{
1171 struct lpfc_hba *phba;
1172 unsigned long iflag;
1173
1174 phba = from_timer(phba, t, rrq_tmr);
1175 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1176 if (!(phba->pport->load_flag & FC_UNLOADING))
1177 phba->hba_flag |= HBA_RRQ_ACTIVE;
1178 else
1179 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1180 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1181
1182 if (!(phba->pport->load_flag & FC_UNLOADING))
1183 lpfc_worker_wake_up(phba);
1184}
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202static void
1203lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1204{
1205 unsigned long drvr_flag;
1206
1207 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1208 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
1209 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1210
1211
1212 mempool_free(pmboxq, phba->mbox_mem_pool);
1213 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1214 !(phba->link_state == LPFC_HBA_ERROR) &&
1215 !(phba->pport->load_flag & FC_UNLOADING))
1216 mod_timer(&phba->hb_tmofunc,
1217 jiffies +
1218 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1219 return;
1220}
1221
1222
1223
1224
1225
1226
1227
1228
1229
/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the embedded delayed_work structure.
 *
 * Periodic worker: for each present CPU owning an io_cq, computes the CPU
 * busy percentage since the last sample and switches the CQ between
 * LPFC_QUEUE_WORK (busy < 15%) and LPFC_IRQ_POLL polling modes. Requeues
 * itself unless the driver is unloading (deliberately no requeue then).
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* Compute deltas of wall-clock and idle time since the last
		 * sample; busy_time is their difference (clamped at zero in
		 * case idle outran wall, which can happen with the
		 * io_schedule accounting get_cpu_idle_time(..., 1) uses).
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		/* Busy CPU (idle < 15%)? Use deferred-work completion;
		 * otherwise poll from IRQ context.
		 */
		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
1290
/**
 * lpfc_hb_eq_delay_work - Auto EQ-delay worker
 * @work: pointer to the embedded delayed_work structure.
 *
 * Periodic worker that auto-tunes event-queue interrupt coalescing:
 * marks CPUs whose EQs showed activity, derives a per-CPU delay from the
 * interrupt count (eqi->icnt), migrates EQs to the per-CPU list of their
 * current last_cpu, and programs the new delay where it changed. Requeues
 * itself unless auto_imax is off or the driver is unloading.
 **/
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	/* One enable-delay flag per possible CPU, zero-initialized */
	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	/* Flag the CPU of every EQ that is coalescing or was marked active */
	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			/* Scale delay with interrupt count, capped at max */
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			/* EQ moved to another CPU: migrate it to that CPU's
			 * per-CPU list and handle it there next pass.
			 */
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
1356
1357
1358
1359
1360
1361
1362
1363
1364static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1365{
1366 u32 i;
1367 u32 hwq_count;
1368
1369 hwq_count = phba->cfg_hdw_queue;
1370 for (i = 0; i < hwq_count; i++) {
1371
1372 lpfc_adjust_pvt_pool_count(phba, i);
1373
1374
1375 lpfc_adjust_high_watermark(phba, i);
1376
1377#ifdef LPFC_MXP_STAT
1378
1379 lpfc_snapshot_mxp(phba, i);
1380#endif
1381 }
1382}
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392int
1393lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1394{
1395 LPFC_MBOXQ_t *pmboxq;
1396 int retval;
1397
1398
1399 if (phba->hba_flag & HBA_HBEAT_INP)
1400 return 0;
1401
1402 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1403 if (!pmboxq)
1404 return -ENOMEM;
1405
1406 lpfc_heart_beat(phba, pmboxq);
1407 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1408 pmboxq->vport = phba->pport;
1409 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1410
1411 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1412 mempool_free(pmboxq, phba->mbox_mem_pool);
1413 return -ENXIO;
1414 }
1415 phba->hba_flag |= HBA_HBEAT_INP;
1416
1417 return 0;
1418}
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430void
1431lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1432{
1433 if (phba->cfg_enable_hba_heartbeat)
1434 return;
1435 phba->hba_flag |= HBA_HBEAT_TMO;
1436}
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Runs from the worker thread when the heartbeat timer (hb_tmofunc) fires.
 * Performs periodic housekeeping: XRI pool rebalancing, per-vport sequence
 * timeout and FDMI change checks, and reclaim of stale posted ELS buffers.
 * Then, when cfg_enable_hba_heartbeat is set, issues or polices the
 * MBX_HEARTBEAT mailbox; otherwise services a deferred HBA_HBEAT_TMO
 * request.  Every path that does not bail out re-arms the timer with the
 * interval computed in @tmo.
 */
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	/* Per-vport periodic checks; array is max_vports + 1 entries */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Dead, unloading or offline port: do not re-arm the timer */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	/* If the posted ELS buffer count is unchanged since the last tick,
	 * those buffers are stale: splice them off under the lock and free.
	 */
	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heartbeat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If I/Os are completing recently, skip the heartbeat */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
			       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			       jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * A heartbeat mailbox is still in flight; give it a
			 * chance to complete (or be declared timed out) on
			 * the next expiry instead of issuing another one.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0459 Adapter heartbeat still outstanding: "
				"last compl time was %d ms.\n",
				jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			/* Only issue when the mailbox subsystem is idle */
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				/* Heartbeat was skipped and nothing has
				 * completed since: warn, but keep waiting.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				/* Mailbox busy: remember when we skipped */
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Heartbeat disabled: only act on an explicit TMO request
		 * queued by lpfc_issue_hb_tmo().
		 */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
1566
1567
1568
1569
1570
1571
1572
1573
1574static void
1575lpfc_offline_eratt(struct lpfc_hba *phba)
1576{
1577 struct lpfc_sli *psli = &phba->sli;
1578
1579 spin_lock_irq(&phba->hbalock);
1580 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1581 spin_unlock_irq(&phba->hbalock);
1582 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1583
1584 lpfc_offline(phba);
1585 lpfc_reset_barrier(phba);
1586 spin_lock_irq(&phba->hbalock);
1587 lpfc_sli_brdreset(phba);
1588 spin_unlock_irq(&phba->hbalock);
1589 lpfc_hba_down_post(phba);
1590 lpfc_sli_brdready(phba, HS_MBRDY);
1591 lpfc_unblock_mgmt_io(phba);
1592 phba->link_state = LPFC_HBA_ERROR;
1593 return;
1594}
1595
1596
1597
1598
1599
1600
1601
1602
/**
 * lpfc_sli4_offline_eratt - Bring the SLI4 HBA offline on hardware error
 * @phba: pointer to lpfc hba data structure.
 *
 * Marks the port as errored, flushes every outstanding I/O on the rings,
 * takes the port offline, posts the down events, and re-enables management
 * I/O.  The ordering of these calls is significant.
 */
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	/* Fail the link state first so no new work is accepted */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	/* Complete all outstanding ring I/O with error status */
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
/**
 * lpfc_handle_deferred_eratt - The SLI3 HBA deferred hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread when a deferred error attention
 * (DEFER_ERATT) is pending.  Aborts outstanding FCP I/O, takes the port
 * offline, then polls the host status register until the HS_FFER1 restart
 * condition clears (or the PCI device vanishes / the driver unloads).
 * Finally captures the latest error status words from SLIM for the caller.
 */
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	/* Stop accepting SLI commands while in the deferred error state */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered an error attention.  In deferred
	 * error state the I/O rings are stalled; abort all outstanding FCP
	 * I/O so upper layers can recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/* Take the port offline; wait (LPFC_MBX_WAIT) for mailbox commands
	 * already in progress to drain.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the restart-required bit (HS_FFER1) to clear */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			/* Register read failed: device was unplugged */
			phba->work_hs = UNPLUG_ERR ;
			break;
		}
		/* If driver unload is in progress, stop waiting */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * Firmware may have cleared all error bits while we waited.  If we
	 * are not unloading, restore the original status (minus HS_FFER1)
	 * so the subsequent error handling still sees the original cause.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	/* Capture the latest error status words from SLIM */
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
1695
1696static void
1697lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1698{
1699 struct lpfc_board_event_header board_event;
1700 struct Scsi_Host *shost;
1701
1702 board_event.event_type = FC_REG_BOARD_EVENT;
1703 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1704 shost = lpfc_shost_from_vport(phba->pport);
1705 fc_host_post_vendor_event(shost, fc_get_event_number(),
1706 sizeof(board_event),
1707 (char *) &board_event,
1708 LPFC_NL_VENDOR_ID);
1709}
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread to handle SLI3 hardware error attention.
 * Dispatch on the saved host status (work_hs):
 *  - HS_FFER6 / HS_FFER8: attempt board restart and re-online;
 *  - HS_CRIT_TEMP: post a temperature event and take the port offline;
 *  - anything else: report a dump event and take the port offline.
 * Deferred errors (DEFER_ERATT) are handled first via the deferred path.
 */
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it if possible.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6.  Do not call the
		 * offline twice.  This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
/**
 * lpfc_sli4_port_sta_fn_reset - Recover a SLI4 port via function-level reset
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT for the offline prep.
 * @en_rn_msg: set to true to log the "Reset Needed" recovery message.
 *
 * Takes the port offline, restarts the board, re-enables interrupts with
 * the previously active mode, and brings the port back online.
 *
 * Return: 0 on success; a negative errno or the failing subroutine's return
 * code otherwise.
 */
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt a port recovery with a (RegLogin) reset */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, thus we should clear LPFC_SLI_ACTIVE flag.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	/* Error all outstanding I/O before the restart */
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt with the same mode as before */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
1907
1908
1909
1910
1911
1912
1913
1914
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread on SLI4 hardware error attention.
 * Behavior depends on the SLI interface type:
 *  - IF_TYPE_0: reads the unrecoverable-error registers and, when the port
 *    semaphore advertises a recoverable UE, polls for port readiness and
 *    attempts a function reset; otherwise takes the port offline.
 *  - IF_TYPE_2/6: decodes the port status/error registers, handles the
 *    over-temperature case, then attempts a function reset if enabled.
 * Unless recovery succeeds, a dump event is reported to the upper layer.
 */
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli4_offline_eratt(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");

		/* Poll (1s per iteration, ue_to_sr is in ms) for the port
		 * semaphore to report a recoverable UE.
		 */
		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/*Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE: wait (up to 20s) for port ready, then
		 * attempt a function-level reset to recover.
		 */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* Over-temperature: report, flag, and go offline */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		/* Decode the error-register pair to classify the port-down
		 * cause; firmware update suppresses the "Reset Needed" log.
		 */
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3152 Unrecoverable error\n");
		phba->link_state = LPFC_HBA_ERROR;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107void
2108lpfc_handle_eratt(struct lpfc_hba *phba)
2109{
2110 (*phba->lpfc_handle_eratt)(phba);
2111}
2112
2113
2114
2115
2116
2117
2118
2119
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the worker thread to handle a host-attention link event.
 * Flushes pending ELS commands and issues a READ_TOPOLOGY mailbox whose
 * completion (lpfc_mbx_cmpl_read_topology) finishes the link-event work.
 * On any failure, unwinds the allocations, re-enables link attention, and
 * forces a link-down / LPFC_HBA_ERROR state.
 */
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;		/* encodes which step failed, for the log */

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
/**
 * lpfc_parse_vpd - Parse the adapter's Vital Product Data image
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the VPD byte stream read from the adapter.
 * @len: number of bytes in @vpd.
 *
 * Walks the VPD descriptors.  Tag 0x82/0x91 descriptors are skipped by
 * their embedded length; tag 0x90 (read-only data) is scanned for the
 * keyword fields SN, V1 (model description), V2 (model name), V3 (program
 * type) and V4 (port), which are copied into the corresponding phba
 * buffers; tag 0x78 terminates the parse.
 *
 * NOTE(review): the field loops trust the length bytes embedded in the
 * image itself; a malformed VPD image could advance @index past @len in
 * the inner copy loops — confirm the image source is trusted (it comes
 * from adapter firmware) or add explicit bounds checks.
 *
 * Return: 0 if @vpd is NULL, 1 otherwise.
 */
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:	/* ASCII identifier string descriptor */
		case 0x91:	/* skipped: only the length matters */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;	/* skip the descriptor payload */
			break;
		case 0x90:	/* read-only data descriptor: scan keywords */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;	/* clamp to buffer */
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)	/* buffer is 32 bytes */
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)	/* buffer is 256 bytes */
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)	/* buffer is 80 bytes */
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)	/* buffer is 256 bytes */
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					/* On SLI4 with PPNAME_GET pending, the
					 * port name comes from the firmware
					 * query instead; skip the VPD bytes.
					 */
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)	/* buffer is 20 bytes */
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				/* Unrecognized keyword: skip its payload */
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
		}
		finished = 0;
		break;
		case 0x78:	/* end tag */
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the buffer for the model name (filled if empty; the
 *       snprintf bound implies at least 80 bytes — sized by the caller).
 * @descp: pointer to the buffer for the model description (filled if
 *         empty; the snprintf bound implies at least 256 bytes).
 *
 * Maps the PCI device ID to a marketing name, bus type, and function
 * string, then formats @mdp and @descp.  Buffers already containing text
 * are left untouched.  @max_speed is derived from the link-speed mask
 * (lmt); @GE marks Gigabit-Ethernet (FCoE) speed units; @oneConnect
 * selects the OneConnect description format.
 */
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	/* Both buffers already populated: nothing to do */
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	/* Highest supported link speed, from the link-mask technology bits */
	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	/* Map PCI device ID -> { name, bus, function } */
	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;	/* speed reported in GE (Ethernet) units */
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe",
				"Obsolete, Unsupported FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7_FC:
		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of receive buffers to post to the ring.
 *
 * Posts @cnt receive buffers (plus any shortfall recorded in
 * pring->missbufcnt from earlier attempts) to the given ring, packing two
 * buffers per QUE_RING_BUF64 IOCB where possible.
 *
 * Return: the number of buffers that could NOT be posted (0 on complete
 * success).  The shortfall is also saved in pring->missbufcnt so a later
 * call can retry.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	/* Include buffers missed on previous attempts. */
	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* Two buffers can be posted per command; allocate the
		 * first DMA buffer descriptor + payload.
		 */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);

		/* Allocate a second buffer if more than one is still needed */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		/* Describe the buffer(s) in the IOCB's 64-bit BDEs. */
		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		/* On issue failure, undo the allocations, count the buffers
		 * back into the shortfall and stop posting.
		 */
		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		/* Record posted buffers so completions can look them up. */
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740static int
2741lpfc_post_rcv_buf(struct lpfc_hba *phba)
2742{
2743 struct lpfc_sli *psli = &phba->sli;
2744
2745
2746 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2747
2748
2749 return 0;
2750}
2751
2752#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2753
2754
2755
2756
2757
2758
2759
2760
/**
 * lpfc_sha_init - Set up initial SHA-1 hash state
 * @HashResultPointer: pointer to a 5-word hash state array.
 *
 * Loads the five FIPS 180-1 initial hash values H0..H4.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	/* Standard SHA-1 initialization constants. */
	HashResultPointer[4] = 0xC3D2E1F0;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[0] = 0x67452301;
}
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
/**
 * lpfc_sha_iterate - Run one SHA-1 style compression over a working block
 * @HashResultPointer: 5-word hash state (seeded by lpfc_sha_init), updated
 *                     in place.
 * @HashWorkingPointer: 80-word working array; words 0..15 hold the message
 *                      block, words 16..79 are expanded here in place.
 *
 * Expands the message schedule with a rotate-left-1 (per FIPS 180-1), runs
 * the 80 rounds using the four stage functions and round constants, then
 * folds the round variables back into the hash state.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	/* W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) for t = 16..79 */
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
								     8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		/* Four stages of 20 rounds each, with their SHA-1 constants */
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	/* Add this block's result into the running hash state. */
	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
/**
 * lpfc_challenge_key - Fold a challenge word into the hash working buffer
 * @RandomChallenge: pointer to the challenge word to mix in.
 * @HashWorking: pointer to the working-buffer word, updated in place.
 *
 * XORs one challenge word into one word of the working buffer.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking ^= *RandomChallenge;
}
2843
2844
2845
2846
2847
2848
2849
2850
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to a 5-word array receiving the digest.
 *
 * Builds an 80-word working buffer seeded from the adapter WWNN, XORs the
 * first 7 words of the adapter's random challenge data into it, then runs
 * the SHA-1 style digest (lpfc_sha_init / lpfc_sha_iterate) into @hbainit.
 * Silently returns if the working buffer cannot be allocated.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	/* 80-word message schedule, zero-initialized. */
	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	/* Seed the first and last word pairs with the two WWNN words. */
	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Drops the reference on every node attached to @vport, driving each node
 * through the discovery state machine where required, then polls until the
 * vport's node list drains (bounded wait) before releasing RRQ resources.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	/* Clean up VMID resources */
	if (lpfc_is_vmid_enabled(phba))
		lpfc_vmid_vport_cleanup(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_DID == Fabric_Cntl_DID &&
		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Unmapped fabric nodes first get a DEVICE_RECOVERY event to
		 * wind down their state.
		 */
		if (ndlp->nlp_type & NLP_FABRIC &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);
	}

	/* At this point all ndlps should be gone because of the preceding
	 * NLP_EVT_DEVICE_RM events.  Wait (up to ~30s in 10ms steps) for
	 * outstanding references to drop.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			/* Timed out: dump the stragglers and give up. */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						 LOG_TRACE_EVENT,
						 "0282 did:x%x ndlp:x%px "
						 "refcnt:%d xflags x%x nflag x%x\n",
						 ndlp->nlp_DID, (void *)ndlp,
						 kref_read(&ndlp->kref),
						 ndlp->fc4_xpt_flags,
						 ndlp->nlp_flag);
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
2951
2952
2953
2954
2955
2956
2957
2958
2959
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Synchronously deletes the vport's ELS and delayed-discovery timers and
 * cancels the discovery timeout via lpfc_can_disctmo().
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}
2968
2969
2970
2971
2972
2973
2974
2975
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Clears the pending-rediscovery flag and deletes the rediscovery wait
 * timer.  Uses the non-sync del_timer(); the caller is expected to hold
 * phba->hbalock (see lpfc_sli4_stop_fcf_redisc_wait_timer).
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Under the host lock: if an FCF rediscovery wait is pending, stops the
 * wait timer and clears the dead-FCF / changed-VLAN discovery flags.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already removed or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover-in-progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}
3009
3010
3011
3012
3013
3014
3015
3016
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops vport-level timers, delayed work items and HBA-level timers, then
 * the PCI-device-group specific timers (FCP poll timer for LightPulse,
 * FCF rediscovery wait timer for OneConnect).
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	if (phba->pport)
		lpfc_stop_vport_timers(phba->pport);
	cancel_delayed_work_sync(&phba->eq_delay_work);
	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
/**
 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_NO_WAIT to return right after setting the flag,
 *              LPFC_MBX_WAIT to also wait out any active mailbox command.
 *
 * Sets LPFC_BLOCK_MGMT_IO so new management-path commands are held off.
 * With LPFC_MBX_WAIT it then polls (every 2 ms) until the in-flight
 * mailbox command, if any, completes or its timeout expires, logging an
 * error on timeout.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
3100
3101
3102
3103
3104
3105
3106
3107
3108
/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks every vport that is not unloading and allocates a fresh SLI-4 RPI
 * for each node on its node list.  Nodes for which allocation fails are
 * skipped (left without an RPI).  No-op on non-SLI-4 HBAs.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i, rpi;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (vports[i]->load_flag & FC_UNLOADING)
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vports[i]->fc_nodes,
					 nlp_listp) {
			rpi = lpfc_sli4_alloc_rpi(phba);
			if (rpi == LPFC_RPI_ALLOC_ERROR) {
				/* No RPI available; leave node without one */
				continue;
			}
			ndlp->nlp_rpi = rpi;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "0009 Assign RPI x%x to ndlp x%px "
					 "DID:x%06x flg:x%x\n",
					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
3146
3147
3148
3149
3150
3151
3152
3153
/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Moves up to XRI_BATCH IO buffers from hardware queue 0's PUT list onto
 * the global expedite pool list, setting each buffer's expedite marking.
 * Lock order: queue PUT-list lock outside, pool lock inside (matches
 * lpfc_destroy_expedite_pool).
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
3181
3182
3183
3184
3185
3186
3187
3188
3189static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3190{
3191 struct lpfc_sli4_hdw_queue *qp;
3192 struct lpfc_io_buf *lpfc_ncmd;
3193 struct lpfc_io_buf *lpfc_ncmd_next;
3194 struct lpfc_epd_pool *epd_pool;
3195 unsigned long iflag;
3196
3197 epd_pool = &phba->epd_pool;
3198 qp = &phba->sli4_hba.hdwq[0];
3199
3200 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3201 spin_lock(&epd_pool->lock);
3202 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3203 &epd_pool->list, list) {
3204 list_move_tail(&lpfc_ncmd->list,
3205 &qp->lpfc_io_buf_list_put);
3206 lpfc_ncmd->flags = false;
3207 qp->put_io_bufs++;
3208 epd_pool->count--;
3209 }
3210 spin_unlock(&epd_pool->lock);
3211 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3212}
3213
3214
3215
3216
3217
3218
3219
3220
3221
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * For every hardware queue, allocates a multixri_pool holding a public
 * pool (seeded from the queue's PUT list) and an empty private pool with
 * high/low watermarks.  On allocation failure, all pools created so far
 * (and the expedite pool, if NVME is enabled) are released and XRI
 * rebalancing is disabled (phba->cfg_xri_rebalancing = 0).
 **/
void lpfc_create_multixri_pools(struct lpfc_hba *phba)
{
	u32 i, j;
	u32 hwq_count;
	u32 count_per_hwq;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
			phba->sli4_hba.io_xri_cnt);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_create_expedite_pool(phba);

	hwq_count = phba->cfg_hdw_queue;
	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;

	for (i = 0; i < hwq_count; i++) {
		multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);

		if (!multixri_pool) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"1238 Failed to allocate memory for "
					"multixri_pool\n");

			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
				lpfc_destroy_expedite_pool(phba);

			/* Unwind pools created on earlier iterations. */
			j = 0;
			while (j < i) {
				qp = &phba->sli4_hba.hdwq[j];
				kfree(qp->p_multixri_pool);
				j++;
			}
			phba->cfg_xri_rebalancing = 0;
			return;
		}

		qp = &phba->sli4_hba.hdwq[i];
		qp->p_multixri_pool = multixri_pool;

		multixri_pool->xri_limit = count_per_hwq;
		multixri_pool->rrb_next_hwqid = i;

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock_init(&pbl_pool->lock);
		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
		spin_lock(&pbl_pool->lock);
		INIT_LIST_HEAD(&pbl_pool->list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put, list) {
			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
			qp->put_io_bufs--;
			pbl_pool->count++;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
				pbl_pool->count, i);
		spin_unlock(&pbl_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
		pvt_pool->low_watermark = XRI_BATCH;
		spin_lock_init(&pvt_pool->lock);
		spin_lock_irqsave(&pvt_pool->lock, iflag);
		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
	}
}
3302
3303
3304
3305
3306
3307
3308
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * Tears down the expedite pool (when NVME is enabled), flushes the IO
 * rings when the port is not unloading, then for every hardware queue
 * returns all public- and private-pool buffers to the queue's PUT list
 * and frees the multixri_pool itself.
 **/
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_destroy_expedite_pool(phba);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_sli_flush_io_rings(phba);

	hwq_count = phba->cfg_hdw_queue;

	for (i = 0; i < hwq_count; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		multixri_pool = qp->p_multixri_pool;
		if (!multixri_pool)
			continue;

		qp->p_multixri_pool = NULL;

		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock(&pbl_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
				pbl_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pbl_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pbl_pool->count--;
		}

		INIT_LIST_HEAD(&pbl_pool->list);
		pbl_pool->count = 0;

		spin_unlock(&pbl_pool->lock);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		spin_lock(&pvt_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
				pvt_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pvt_pool->count--;
		}

		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;

		spin_unlock(&pvt_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		kfree(multixri_pool);
	}
}
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
/**
 * lpfc_online - Initialize and bring an HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * Brings an offline HBA online: blocks management I/O, performs SLI-3 or
 * SLI-4 HBA setup, re-creates the NVME localport when NVME is enabled,
 * refreshes per-vport flags (resetting vport VPIs if firmware cleared
 * them), re-creates the multi-XRI pools when rebalancing is configured,
 * registers the CPU hotplug callback, and unblocks management I/O.
 *
 * Return: 0 on success or if the port was already online; 1 if HBA setup
 * failed (management I/O is unblocked before returning).
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i, error = 0;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) {
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);

		/* Reestablish the NVME localport after HBA setup, since the
		 * offline path destroys it.
		 */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
		    !phba->nvmet_support) {
			error = lpfc_nvme_create_localport(phba->pport);
			if (error)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6132 NVME restore reg failed "
					"on nvmei error x%x\n", error);
		}
	} else {
		lpfc_sli_queue_init(phba);
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->cfg_xri_rebalancing)
		lpfc_create_multixri_pools(phba);

	lpfc_cpuhp_add(phba);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488void
3489lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3490{
3491 unsigned long iflag;
3492
3493 spin_lock_irqsave(&phba->hbalock, iflag);
3494 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3495 spin_unlock_irqrestore(&phba->hbalock, iflag);
3496}
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
/**
 * lpfc_offline_prep - Prepare an HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: action to take on the mailbox subsystem (wait / no wait).
 *
 * Blocks management I/O, brings the link down, unregisters the RPI of
 * every node on every vport (freeing SLI-4 RPIs), drives fabric nodes
 * through recovery/removal, shuts down the mailbox subsystem and flushes
 * the driver workqueue.  No-op if the port is already offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			shost =	lpfc_shost_from_vport(vports[i]);
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {

				spin_lock_irq(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(&ndlp->lock);

				/* On SLI-4, free the RPI now and mark the
				 * node so a fresh RPI is allocated when the
				 * port comes back (see lpfc_sli4_node_prep).
				 */
				if (phba->sli_rev == LPFC_SLI_REV4) {
					lpfc_printf_vlog(vports[i], KERN_INFO,
						 LOG_NODE | LOG_DISCOVERY,
						 "0011 Free RPI x%x on "
						 "ndlp: x%px did x%x\n",
						 ndlp->nlp_rpi, ndlp,
						 ndlp->nlp_DID);
					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
				}
				lpfc_unreg_rpi(vports[i], ndlp);

				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);

					/* Remove the node unless a transport
					 * (SCSI or NVME) still holds a
					 * registration on it.
					 */
					if (!(ndlp->fc4_xpt_flags &
					      (NVME_XPT_REGD | SCSI_XPT_REGD)))
						lpfc_disc_state_machine
							(vports[i], ndlp,
							 NULL,
							 NLP_EVT_DEVICE_RM);
				}
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
3587
3588
3589
3590
3591
3592
3593
3594
3595
/**
 * lpfc_offline - Bring an HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the HBA, tears down the NVME target and local ports, stops all
 * vport timers, brings down the SLI layer, marks every vport with
 * FC_OFFLINE_MODE, removes the CPU hotplug callback and destroys the
 * multi-XRI pools.  No-op if already offline.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);

	/* Tear down the local and target port registrations.  The
	 * nvme transports need to cleanup.
	 */
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(phba->pport);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup.  The HBA is offline
	 * now.
	 */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
	 * in hba_unset
	 */
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		__lpfc_cpuhp_remove(phba);

	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);
}
3647
3648
3649
3650
3651
3652
3653
3654
3655
/**
 * lpfc_scsi_free - Free all the SCSI buffers from the driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases every SCSI buffer on the driver's put and get lists, returning
 * the DMA data buffers to the SG DMA pool.  No-op when the FCP protocol
 * is not enabled.  Takes hbalock with each list lock nested inside it.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *sb, *sb_next;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);
	spin_unlock_irq(&phba->hbalock);
}
3691
3692
3693
3694
3695
3696
3697
3698
3699
/**
 * lpfc_io_free - Free all the IO buffers from the driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * For every hardware queue, releases all IO buffers on both the put and
 * get lists: returns DMA data buffers to the SG DMA pool and frees any
 * per-hdwq sgl and cmd/rsp buffers attached to each IO buffer.
 **/
void
lpfc_io_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	struct lpfc_sli4_hdw_queue *qp;
	int idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		/* Release all the IO buffers maintained by this queue. */
		spin_lock(&qp->io_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->put_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_put_lock);

		spin_lock(&qp->io_buf_list_get_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_get,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->get_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_get_lock);
	}
}
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Grows or shrinks the driver's ELS sgl list to match the ELS XRI count
 * currently required by the device, then walks the whole list assigning a
 * fresh xri tag to every sgl entry.
 *
 * Return: 0 on success; -ENOMEM on sgl/mbuf/xri allocation failure, in
 * which case the entire ELS sgl list is released.
 **/
int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	LIST_HEAD(els_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrunk: release the extra sgls */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				__lpfc_mbuf_free(phba, sglq_entry->virt,
						 sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	return rc;
}
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Sizes the NVMET sgl list to the XRIs left over after the ELS
 * allocation (max_xri - els_xri_cnt), growing or shrinking the list as
 * needed, then assigns a fresh xri tag to every sgl entry on the list.
 *
 * Return: 0 on success; -ENOMEM on sgl/buffer/xri allocation failure, in
 * which case the entire NVMET sgl list is released.
 **/
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);
	int rc;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		/* allocate the additional nvmet sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6303 Failure to allocate an "
						"NVMET sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl shrunk: release the extra sgls */
		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
		/* release extra nvmet sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&nvmet_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
						    sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

	/* update xris to nvmet sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6307 Failed to allocate xri for "
					"NVMET sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_nvmet_sgl_list(phba);
	return rc;
}
3986
/**
 * lpfc_io_buf_flush - Pull all IO buffers off the hardware queue free lists
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list head that receives the buffers, sorted by XRI.
 *
 * Empties every hardware queue's get and put free lists, zeroes the
 * per-queue buffer counters, and rebuilds the buffers on @cbuf in
 * ascending sli4_xritag order via insertion sort.
 *
 * Return: total number of buffers removed from the hardware queues.
 */
int
lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
{
	LIST_HEAD(blist);
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_io_buf *iobufp, *prev_iobufp;
	int idx, cnt, xri, inserted;

	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		/* get lock taken with IRQs off first, then nested put lock */
		spin_lock_irq(&qp->io_buf_list_get_lock);
		spin_lock(&qp->io_buf_list_put_lock);

		/* Take everything off both free lists onto a local list */
		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
		list_splice(&qp->lpfc_io_buf_list_put, &blist);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
		cnt += qp->get_io_bufs + qp->put_io_bufs;
		qp->get_io_bufs = 0;
		qp->put_io_bufs = 0;
		qp->total_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock_irq(&qp->io_buf_list_get_lock);
	}

	/*
	 * Move the buffers from the local list to @cbuf in ascending
	 * sli4_xritag order (simple insertion sort: for each buffer,
	 * walk the sorted list to find its slot).
	 */
	for (idx = 0; idx < cnt; idx++) {
		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
		if (!lpfc_cmd)
			return cnt;	/* list ran dry early; stop here */
		if (idx == 0) {
			/* first buffer seeds the sorted list */
			list_add_tail(&lpfc_cmd->list, cbuf);
			continue;
		}
		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
		inserted = 0;
		prev_iobufp = NULL;
		list_for_each_entry(iobufp, cbuf, list) {
			if (xri < iobufp->cur_iocbq.sli4_xritag) {
				/* insert before the first larger XRI */
				if (prev_iobufp)
					list_add(&lpfc_cmd->list,
						 &prev_iobufp->list);
				else
					list_add(&lpfc_cmd->list, cbuf);
				inserted = 1;
				break;
			}
			prev_iobufp = iobufp;
		}
		if (!inserted)
			list_add_tail(&lpfc_cmd->list, cbuf);
	}
	return cnt;
}
4048
4049int
4050lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4051{
4052 struct lpfc_sli4_hdw_queue *qp;
4053 struct lpfc_io_buf *lpfc_cmd;
4054 int idx, cnt;
4055
4056 qp = phba->sli4_hba.hdwq;
4057 cnt = 0;
4058 while (!list_empty(cbuf)) {
4059 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4060 list_remove_head(cbuf, lpfc_cmd,
4061 struct lpfc_io_buf, list);
4062 if (!lpfc_cmd)
4063 return cnt;
4064 cnt++;
4065 qp = &phba->sli4_hba.hdwq[idx];
4066 lpfc_cmd->hdwq_no = idx;
4067 lpfc_cmd->hdwq = qp;
4068 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4069 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4070 spin_lock(&qp->io_buf_list_put_lock);
4071 list_add_tail(&lpfc_cmd->list,
4072 &qp->lpfc_io_buf_list_put);
4073 qp->put_io_bufs++;
4074 qp->total_io_bufs++;
4075 spin_unlock(&qp->io_buf_list_put_lock);
4076 }
4077 }
4078 return cnt;
4079}
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
/**
 * lpfc_sli4_io_sgl_update - Rebalance IO XRI/SGL resources after config change
 * @phba: pointer to lpfc hba data structure.
 *
 * Recomputes the maximum number of XRIs available for IO buffers (total
 * XRIs minus those reserved for ELS), frees any surplus IO buffers, then
 * assigns a fresh XRI to every remaining buffer and hands them back to
 * the hardware queues.
 *
 * Return: 0 on success, -ENOMEM if XRI assignment fails (all IO buffers
 * are released in that case).
 */
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc, cnt;

	/*
	 * Determine how many XRIs remain for IO buffers once the ELS
	 * IOCB reservation is accounted for.
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.io_xri_max = io_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated XRI sgl count:%d, "
			"maximum XRI count:%d\n",
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max);

	/* Pull every IO buffer off the hardware queues, sorted by XRI */
	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
		/* max xri count shrank below the allocated buffer count */
		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
			     phba->sli4_hba.io_xri_max;
		/* release the surplus buffers */
		for (i = 0; i < io_xri_cnt; i++) {
			list_remove_head(&io_sgl_list, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}

	/* Assign a fresh XRI to every remaining IO buffer */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	phba->sli4_hba.io_xri_cnt = cnt;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &io_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	/* Hand the retagged buffers back to the hardware queues */
	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	lpfc_io_free(phba);
	return rc;
}
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
/**
 * lpfc_new_io_buf - Allocate and post new SLI4 IO buffers
 * @phba: pointer to lpfc hba data structure.
 * @num_to_alloc: requested number of IO buffers.
 *
 * For each buffer: allocates the lpfc_io_buf structure, a DMA-able SGL
 * region from the sg DMA pool, an XRI, and an IOTAG; then posts the
 * whole batch of SGLs to the firmware. Any allocation failure stops the
 * loop and the already-built buffers are still posted.
 *
 * Return: number of buffers successfully posted (may be less than
 * @num_to_alloc).
 */
int
lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(post_nblist);
	LIST_HEAD(nvme_nblist);

	phba->sli4_hba.io_xri_cnt = 0;
	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;
		/*
		 * Get memory from the DMA pool to map the virt space to
		 * PCI bus space for an I/O. The pool element is zeroed.
		 */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						  GFP_KERNEL,
						  &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}

		if (phba->cfg_xpsgl && !phba->nvmet_support) {
			INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
		} else {
			/*
			 * Page alignment is CRITICAL to BlockGuard;
			 * double check the pool really honored it.
			 */
			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    (((unsigned long)(lpfc_ncmd->data) &
			    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"3369 Memory alignment err: "
						"addr=%lx\n",
						(unsigned long)lpfc_ncmd->data);
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
				break;
			}
		}

		INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);

		/* Reserve an XRI for this buffer */
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq = &lpfc_ncmd->cur_iocbq;

		/* Allocate an iotag for lpfc_ncmd->cur_iocbq */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6121 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			/* give the reserved XRI back */
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		pwqeq->sli4_lxritag = lxri;
		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		pwqeq->context1 = lpfc_ncmd;

		/* Initialize local short-hand pointers into the DMA region */
		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
		spin_lock_init(&lpfc_ncmd->buf_lock);

		/* queue the buffer for the SGL post below */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		phba->sli4_hba.io_xri_cnt++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocate %d out of %d requested new NVME "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of IO buffer sgls to the port, if any were built */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_sli4_post_io_sgl_list(
				phba, &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
4279
4280static uint64_t
4281lpfc_get_wwpn(struct lpfc_hba *phba)
4282{
4283 uint64_t wwn;
4284 int rc;
4285 LPFC_MBOXQ_t *mboxq;
4286 MAILBOX_t *mb;
4287
4288 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4289 GFP_KERNEL);
4290 if (!mboxq)
4291 return (uint64_t)-1;
4292
4293
4294 lpfc_read_nv(phba, mboxq);
4295 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4296 if (rc != MBX_SUCCESS) {
4297 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4298 "6019 Mailbox failed , mbxCmd x%x "
4299 "READ_NV, mbxStatus x%x\n",
4300 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4301 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4302 mempool_free(mboxq, phba->mbox_mem_pool);
4303 return (uint64_t) -1;
4304 }
4305 mb = &mboxq->u.mb;
4306 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4307
4308 mempool_free(mboxq, phba->mbox_mem_pool);
4309 if (phba->sli_rev == LPFC_SLI_REV4)
4310 return be64_to_cpu(wwn);
4311 else
4312 return rol64(wwn, 32);
4313}
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326static int
4327lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4328{
4329
4330 if (phba->sli_rev == LPFC_SLI_REV3) {
4331 phba->cfg_vmid_app_header = 0;
4332 phba->cfg_vmid_priority_tagging = 0;
4333 }
4334
4335 if (lpfc_is_vmid_enabled(phba)) {
4336 vport->vmid =
4337 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4338 GFP_KERNEL);
4339 if (!vport->vmid)
4340 return -ENOMEM;
4341
4342 rwlock_init(&vport->vmid_lock);
4343
4344
4345 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4346 vport->vmid_inactivity_timeout =
4347 phba->cfg_vmid_inactivity_timeout;
4348 vport->max_vmid = phba->cfg_max_vmid;
4349 vport->cur_vmid_cnt = 0;
4350
4351 vport->vmid_priority_range = bitmap_zalloc
4352 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4353
4354 if (!vport->vmid_priority_range) {
4355 kfree(vport->vmid);
4356 return -ENOMEM;
4357 }
4358
4359 hash_init(vport->hash_table);
4360 }
4361 return 0;
4362}
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380struct lpfc_vport *
4381lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4382{
4383 struct lpfc_vport *vport;
4384 struct Scsi_Host *shost = NULL;
4385 struct scsi_host_template *template;
4386 int error = 0;
4387 int i;
4388 uint64_t wwn;
4389 bool use_no_reset_hba = false;
4390 int rc;
4391
4392 if (lpfc_no_hba_reset_cnt) {
4393 if (phba->sli_rev < LPFC_SLI_REV4 &&
4394 dev == &phba->pcidev->dev) {
4395
4396 lpfc_sli_brdrestart(phba);
4397 rc = lpfc_sli_chipset_init(phba);
4398 if (rc)
4399 return NULL;
4400 }
4401 wwn = lpfc_get_wwpn(phba);
4402 }
4403
4404 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4405 if (wwn == lpfc_no_hba_reset[i]) {
4406 lpfc_printf_log(phba, KERN_ERR,
4407 LOG_TRACE_EVENT,
4408 "6020 Setting use_no_reset port=%llx\n",
4409 wwn);
4410 use_no_reset_hba = true;
4411 break;
4412 }
4413 }
4414
4415
4416 if (dev == &phba->pcidev->dev) {
4417 template = &phba->port_template;
4418
4419 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4420
4421 memcpy(template, &lpfc_template, sizeof(*template));
4422
4423 if (use_no_reset_hba)
4424
4425 template->eh_host_reset_handler = NULL;
4426
4427
4428 memcpy(&phba->vport_template, &lpfc_template,
4429 sizeof(*template));
4430 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4431 phba->vport_template.eh_bus_reset_handler = NULL;
4432 phba->vport_template.eh_host_reset_handler = NULL;
4433 phba->vport_template.vendor_id = 0;
4434
4435
4436 if (phba->sli_rev == LPFC_SLI_REV4) {
4437 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4438 phba->vport_template.sg_tablesize =
4439 phba->cfg_scsi_seg_cnt;
4440 } else {
4441 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4442 phba->vport_template.sg_tablesize =
4443 phba->cfg_sg_seg_cnt;
4444 }
4445
4446 } else {
4447
4448 memcpy(template, &lpfc_template_nvme,
4449 sizeof(*template));
4450 }
4451 } else {
4452 template = &phba->vport_template;
4453 }
4454
4455 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4456 if (!shost)
4457 goto out;
4458
4459 vport = (struct lpfc_vport *) shost->hostdata;
4460 vport->phba = phba;
4461 vport->load_flag |= FC_LOADING;
4462 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4463 vport->fc_rscn_flush = 0;
4464 lpfc_get_vport_cfgparam(vport);
4465
4466
4467 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4468
4469 shost->unique_id = instance;
4470 shost->max_id = LPFC_MAX_TARGET;
4471 shost->max_lun = vport->cfg_max_luns;
4472 shost->this_id = -1;
4473 shost->max_cmd_len = 16;
4474
4475 if (phba->sli_rev == LPFC_SLI_REV4) {
4476 if (!phba->cfg_fcp_mq_threshold ||
4477 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4478 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4479
4480 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4481 phba->cfg_fcp_mq_threshold);
4482
4483 shost->dma_boundary =
4484 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4485
4486 if (phba->cfg_xpsgl && !phba->nvmet_support)
4487 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4488 else
4489 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4490 } else
4491
4492
4493
4494 shost->nr_hw_queues = 1;
4495
4496
4497
4498
4499
4500
4501 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4502 if (dev != &phba->pcidev->dev) {
4503 shost->transportt = lpfc_vport_transport_template;
4504 vport->port_type = LPFC_NPIV_PORT;
4505 } else {
4506 shost->transportt = lpfc_transport_template;
4507 vport->port_type = LPFC_PHYSICAL_PORT;
4508 }
4509
4510 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4511 "9081 CreatePort TMPLATE type %x TBLsize %d "
4512 "SEGcnt %d/%d\n",
4513 vport->port_type, shost->sg_tablesize,
4514 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4515
4516
4517 rc = lpfc_vmid_res_alloc(phba, vport);
4518
4519 if (rc)
4520 goto out;
4521
4522
4523 INIT_LIST_HEAD(&vport->fc_nodes);
4524 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4525 spin_lock_init(&vport->work_port_lock);
4526
4527 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4528
4529 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4530
4531 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4532
4533 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4534 lpfc_setup_bg(phba, shost);
4535
4536 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4537 if (error)
4538 goto out_put_shost;
4539
4540 spin_lock_irq(&phba->port_list_lock);
4541 list_add_tail(&vport->listentry, &phba->port_list);
4542 spin_unlock_irq(&phba->port_list_lock);
4543 return vport;
4544
4545out_put_shost:
4546 kfree(vport->vmid);
4547 bitmap_free(vport->vmid_priority_range);
4548 scsi_host_put(shost);
4549out:
4550 return NULL;
4551}
4552
4553
4554
4555
4556
4557
4558
4559
4560void
4561destroy_port(struct lpfc_vport *vport)
4562{
4563 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4564 struct lpfc_hba *phba = vport->phba;
4565
4566 lpfc_debugfs_terminate(vport);
4567 fc_remove_host(shost);
4568 scsi_remove_host(shost);
4569
4570 spin_lock_irq(&phba->port_list_lock);
4571 list_del_init(&vport->listentry);
4572 spin_unlock_irq(&phba->port_list_lock);
4573
4574 lpfc_cleanup(vport);
4575 return;
4576}
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588int
4589lpfc_get_instance(void)
4590{
4591 int ret;
4592
4593 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4594 return ret < 0 ? -1 : ret;
4595}
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4613{
4614 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4615 struct lpfc_hba *phba = vport->phba;
4616 int stat = 0;
4617
4618 spin_lock_irq(shost->host_lock);
4619
4620 if (vport->load_flag & FC_UNLOADING) {
4621 stat = 1;
4622 goto finished;
4623 }
4624 if (time >= msecs_to_jiffies(30 * 1000)) {
4625 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4626 "0461 Scanning longer than 30 "
4627 "seconds. Continuing initialization\n");
4628 stat = 1;
4629 goto finished;
4630 }
4631 if (time >= msecs_to_jiffies(15 * 1000) &&
4632 phba->link_state <= LPFC_LINK_DOWN) {
4633 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4634 "0465 Link down longer than 15 "
4635 "seconds. Continuing initialization\n");
4636 stat = 1;
4637 goto finished;
4638 }
4639
4640 if (vport->port_state != LPFC_VPORT_READY)
4641 goto finished;
4642 if (vport->num_disc_nodes || vport->fc_prli_sent)
4643 goto finished;
4644 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4645 goto finished;
4646 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4647 goto finished;
4648
4649 stat = 1;
4650
4651finished:
4652 spin_unlock_irq(shost->host_lock);
4653 return stat;
4654}
4655
4656static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4657{
4658 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4659 struct lpfc_hba *phba = vport->phba;
4660
4661 fc_host_supported_speeds(shost) = 0;
4662
4663
4664
4665
4666 if (phba->hba_flag & HBA_FCOE_MODE)
4667 return;
4668
4669 if (phba->lmt & LMT_128Gb)
4670 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4671 if (phba->lmt & LMT_64Gb)
4672 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4673 if (phba->lmt & LMT_32Gb)
4674 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4675 if (phba->lmt & LMT_16Gb)
4676 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4677 if (phba->lmt & LMT_10Gb)
4678 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4679 if (phba->lmt & LMT_8Gb)
4680 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4681 if (phba->lmt & LMT_4Gb)
4682 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4683 if (phba->lmt & LMT_2Gb)
4684 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4685 if (phba->lmt & LMT_1Gb)
4686 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4687}
4688
4689
4690
4691
4692
4693
4694
4695
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes
 * @shost: pointer to SCSI host data structure.
 *
 * Publishes the vport's names, supported classes/FC-4 types/speeds and
 * frame size through the FC transport attributes, then clears the
 * vport's FC_LOADING flag to mark initialization complete.
 */
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
	 */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	/* bytes 2 and 7 of the FC-4 bitmap set — presumably FCP and the
	 * NVMe FC-4 type; mirrors the active map below. TODO confirm. */
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	lpfc_host_supported_speeds_set(shost);

	/* max frame size from the common service parameters (MSB:LSB) */
	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	/* initialization is complete — clear the LOADING flag */
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
4735
4736
4737
4738
4739
4740
4741
4742
4743
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Disables and clears device interrupts (register write followed by a
 * readback flush), then stops the HBA timers and clears queued port
 * work events.
 */
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
4758
4759
4760
4761
4762
4763
4764
4765
4766
/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the HBA timers, clears queued port work events (if a physical
 * port exists yet), and marks SLI4 interrupts disabled.
 */
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
4776
4777
4778
4779
4780
4781
4782
4783
4784void
4785lpfc_stop_port(struct lpfc_hba *phba)
4786{
4787 phba->lpfc_stop_port(phba);
4788
4789 if (phba->wq)
4790 flush_workqueue(phba->wq);
4791}
4792
4793
4794
4795
4796
4797
4798
4799void
4800lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4801{
4802 unsigned long fcf_redisc_wait_tmo =
4803 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4804
4805 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4806 spin_lock_irq(&phba->hbalock);
4807
4808 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4809
4810 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4811 spin_unlock_irq(&phba->hbalock);
4812}
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824static void
4825lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4826{
4827 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4828
4829
4830 spin_lock_irq(&phba->hbalock);
4831 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4832 spin_unlock_irq(&phba->hbalock);
4833 return;
4834 }
4835
4836 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4837
4838 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4839 spin_unlock_irq(&phba->hbalock);
4840 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4841 "2776 FCF rediscover quiescent timer expired\n");
4842
4843 lpfc_worker_wake_up(phba);
4844}
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855static void
4856lpfc_vmid_poll(struct timer_list *t)
4857{
4858 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
4859 u32 wake_up = 0;
4860
4861
4862 if (phba->pport->vmid_priority_tagging) {
4863 wake_up = 1;
4864 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
4865 }
4866
4867
4868 if (phba->pport->vmid_inactivity_timeout ||
4869 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
4870 wake_up = 1;
4871 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
4872 }
4873
4874 if (wake_up)
4875 lpfc_worker_wake_up(phba);
4876
4877
4878 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
4879 LPFC_VMID_TIMER));
4880}
4881
4882
4883
4884
4885
4886
4887
4888
4889static void
4890lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4891 struct lpfc_acqe_link *acqe_link)
4892{
4893 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4894 case LPFC_ASYNC_LINK_FAULT_NONE:
4895 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4896 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4897 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4898 break;
4899 default:
4900 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4901 "0398 Unknown link fault code: x%x\n",
4902 bf_get(lpfc_acqe_link_fault, acqe_link));
4903 break;
4904 }
4905}
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917static uint8_t
4918lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4919 struct lpfc_acqe_link *acqe_link)
4920{
4921 uint8_t att_type;
4922
4923 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4924 case LPFC_ASYNC_LINK_STATUS_DOWN:
4925 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4926 att_type = LPFC_ATT_LINK_DOWN;
4927 break;
4928 case LPFC_ASYNC_LINK_STATUS_UP:
4929
4930 att_type = LPFC_ATT_RESERVED;
4931 break;
4932 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4933 att_type = LPFC_ATT_LINK_UP;
4934 break;
4935 default:
4936 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4937 "0399 Invalid link attention type: x%x\n",
4938 bf_get(lpfc_acqe_link_status, acqe_link));
4939 att_type = LPFC_ATT_RESERVED;
4940 break;
4941 }
4942 return att_type;
4943}
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953uint32_t
4954lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4955{
4956 uint32_t link_speed;
4957
4958 if (!lpfc_is_link_up(phba))
4959 return 0;
4960
4961 if (phba->sli_rev <= LPFC_SLI_REV3) {
4962 switch (phba->fc_linkspeed) {
4963 case LPFC_LINK_SPEED_1GHZ:
4964 link_speed = 1000;
4965 break;
4966 case LPFC_LINK_SPEED_2GHZ:
4967 link_speed = 2000;
4968 break;
4969 case LPFC_LINK_SPEED_4GHZ:
4970 link_speed = 4000;
4971 break;
4972 case LPFC_LINK_SPEED_8GHZ:
4973 link_speed = 8000;
4974 break;
4975 case LPFC_LINK_SPEED_10GHZ:
4976 link_speed = 10000;
4977 break;
4978 case LPFC_LINK_SPEED_16GHZ:
4979 link_speed = 16000;
4980 break;
4981 default:
4982 link_speed = 0;
4983 }
4984 } else {
4985 if (phba->sli4_hba.link_state.logical_speed)
4986 link_speed =
4987 phba->sli4_hba.link_state.logical_speed;
4988 else
4989 link_speed = phba->sli4_hba.link_state.speed;
4990 }
4991 return link_speed;
4992}
4993
4994
4995
4996
4997
4998
4999
5000
5001
5002
5003
5004
5005static uint32_t
5006lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5007 uint8_t speed_code)
5008{
5009 uint32_t port_speed;
5010
5011 switch (evt_code) {
5012 case LPFC_TRAILER_CODE_LINK:
5013 switch (speed_code) {
5014 case LPFC_ASYNC_LINK_SPEED_ZERO:
5015 port_speed = 0;
5016 break;
5017 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5018 port_speed = 10;
5019 break;
5020 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5021 port_speed = 100;
5022 break;
5023 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5024 port_speed = 1000;
5025 break;
5026 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5027 port_speed = 10000;
5028 break;
5029 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5030 port_speed = 20000;
5031 break;
5032 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5033 port_speed = 25000;
5034 break;
5035 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5036 port_speed = 40000;
5037 break;
5038 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5039 port_speed = 100000;
5040 break;
5041 default:
5042 port_speed = 0;
5043 }
5044 break;
5045 case LPFC_TRAILER_CODE_FC:
5046 switch (speed_code) {
5047 case LPFC_FC_LA_SPEED_UNKNOWN:
5048 port_speed = 0;
5049 break;
5050 case LPFC_FC_LA_SPEED_1G:
5051 port_speed = 1000;
5052 break;
5053 case LPFC_FC_LA_SPEED_2G:
5054 port_speed = 2000;
5055 break;
5056 case LPFC_FC_LA_SPEED_4G:
5057 port_speed = 4000;
5058 break;
5059 case LPFC_FC_LA_SPEED_8G:
5060 port_speed = 8000;
5061 break;
5062 case LPFC_FC_LA_SPEED_10G:
5063 port_speed = 10000;
5064 break;
5065 case LPFC_FC_LA_SPEED_16G:
5066 port_speed = 16000;
5067 break;
5068 case LPFC_FC_LA_SPEED_32G:
5069 port_speed = 32000;
5070 break;
5071 case LPFC_FC_LA_SPEED_64G:
5072 port_speed = 64000;
5073 break;
5074 case LPFC_FC_LA_SPEED_128G:
5075 port_speed = 128000;
5076 break;
5077 default:
5078 port_speed = 0;
5079 }
5080 break;
5081 default:
5082 port_speed = 0;
5083 }
5084 return port_speed;
5085}
5086
5087
5088
5089
5090
5091
5092
5093
5094static void
5095lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5096 struct lpfc_acqe_link *acqe_link)
5097{
5098 struct lpfc_dmabuf *mp;
5099 LPFC_MBOXQ_t *pmb;
5100 MAILBOX_t *mb;
5101 struct lpfc_mbx_read_top *la;
5102 uint8_t att_type;
5103 int rc;
5104
5105 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5106 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5107 return;
5108 phba->fcoe_eventtag = acqe_link->event_tag;
5109 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5110 if (!pmb) {
5111 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5112 "0395 The mboxq allocation failed\n");
5113 return;
5114 }
5115 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5116 if (!mp) {
5117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5118 "0396 The lpfc_dmabuf allocation failed\n");
5119 goto out_free_pmb;
5120 }
5121 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5122 if (!mp->virt) {
5123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5124 "0397 The mbuf allocation failed\n");
5125 goto out_free_dmabuf;
5126 }
5127
5128
5129 lpfc_els_flush_all_cmd(phba);
5130
5131
5132 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5133
5134
5135 phba->sli.slistat.link_event++;
5136
5137
5138 lpfc_read_topology(phba, pmb, mp);
5139 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5140 pmb->vport = phba->pport;
5141
5142
5143 phba->sli4_hba.link_state.speed =
5144 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5145 bf_get(lpfc_acqe_link_speed, acqe_link));
5146 phba->sli4_hba.link_state.duplex =
5147 bf_get(lpfc_acqe_link_duplex, acqe_link);
5148 phba->sli4_hba.link_state.status =
5149 bf_get(lpfc_acqe_link_status, acqe_link);
5150 phba->sli4_hba.link_state.type =
5151 bf_get(lpfc_acqe_link_type, acqe_link);
5152 phba->sli4_hba.link_state.number =
5153 bf_get(lpfc_acqe_link_number, acqe_link);
5154 phba->sli4_hba.link_state.fault =
5155 bf_get(lpfc_acqe_link_fault, acqe_link);
5156 phba->sli4_hba.link_state.logical_speed =
5157 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5158
5159 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5160 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5161 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5162 "Logical speed:%dMbps Fault:%d\n",
5163 phba->sli4_hba.link_state.speed,
5164 phba->sli4_hba.link_state.topology,
5165 phba->sli4_hba.link_state.status,
5166 phba->sli4_hba.link_state.type,
5167 phba->sli4_hba.link_state.number,
5168 phba->sli4_hba.link_state.logical_speed,
5169 phba->sli4_hba.link_state.fault);
5170
5171
5172
5173
5174 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5175 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5176 if (rc == MBX_NOT_FINISHED)
5177 goto out_free_dmabuf;
5178 return;
5179 }
5180
5181
5182
5183
5184
5185
5186 mb = &pmb->u.mb;
5187 mb->mbxStatus = MBX_SUCCESS;
5188
5189
5190 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5191
5192
5193 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5194 la->eventTag = acqe_link->event_tag;
5195 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5196 bf_set(lpfc_mbx_read_top_link_spd, la,
5197 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5198
5199
5200 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5201 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5202 bf_set(lpfc_mbx_read_top_il, la, 0);
5203 bf_set(lpfc_mbx_read_top_pb, la, 0);
5204 bf_set(lpfc_mbx_read_top_fa, la, 0);
5205 bf_set(lpfc_mbx_read_top_mm, la, 0);
5206
5207
5208 lpfc_mbx_cmpl_read_topology(phba, pmb);
5209
5210 return;
5211
5212out_free_dmabuf:
5213 kfree(mp);
5214out_free_pmb:
5215 mempool_free(pmb, phba->mbox_mem_pool);
5216}
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229static uint8_t
5230lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5231{
5232 uint8_t port_speed;
5233
5234 switch (speed_code) {
5235 case LPFC_FC_LA_SPEED_1G:
5236 port_speed = LPFC_LINK_SPEED_1GHZ;
5237 break;
5238 case LPFC_FC_LA_SPEED_2G:
5239 port_speed = LPFC_LINK_SPEED_2GHZ;
5240 break;
5241 case LPFC_FC_LA_SPEED_4G:
5242 port_speed = LPFC_LINK_SPEED_4GHZ;
5243 break;
5244 case LPFC_FC_LA_SPEED_8G:
5245 port_speed = LPFC_LINK_SPEED_8GHZ;
5246 break;
5247 case LPFC_FC_LA_SPEED_16G:
5248 port_speed = LPFC_LINK_SPEED_16GHZ;
5249 break;
5250 case LPFC_FC_LA_SPEED_32G:
5251 port_speed = LPFC_LINK_SPEED_32GHZ;
5252 break;
5253 case LPFC_FC_LA_SPEED_64G:
5254 port_speed = LPFC_LINK_SPEED_64GHZ;
5255 break;
5256 case LPFC_FC_LA_SPEED_128G:
5257 port_speed = LPFC_LINK_SPEED_128GHZ;
5258 break;
5259 case LPFC_FC_LA_SPEED_256G:
5260 port_speed = LPFC_LINK_SPEED_256GHZ;
5261 break;
5262 default:
5263 port_speed = 0;
5264 break;
5265 }
5266
5267 return port_speed;
5268}
5269
/* Helpers for the trunking log message below; both expand against the
 * local variables acqe_fc, phba and port_fault in the caller's scope.
 */
#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Port fault: "YES"/"NO" for configured ports, "NA" otherwise */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

/**
 * lpfc_update_trunk_link_status - Update per-port trunk link state
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async FC link completion queue entry.
 *
 * Decodes an FC trunking ACQE: records the aggregate speed and logical
 * speed, then updates the up/down state and fault code of each of the
 * four possible trunk member ports that are configured. Logs the
 * resulting trunk state, and any port faults.
 */
static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* topology is unchanged by a trunk event; keep fc_linkspeed current */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
				phba,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI-4: We have only 0xA error codes
				 * defined as of now. print an appropriate
				 * message in case driver needs to be updated.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356static void
5357lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5358{
5359 struct lpfc_dmabuf *mp;
5360 LPFC_MBOXQ_t *pmb;
5361 MAILBOX_t *mb;
5362 struct lpfc_mbx_read_top *la;
5363 int rc;
5364
5365 if (bf_get(lpfc_trailer_type, acqe_fc) !=
5366 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
5367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5368 "2895 Non FC link Event detected.(%d)\n",
5369 bf_get(lpfc_trailer_type, acqe_fc));
5370 return;
5371 }
5372
5373 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5374 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5375 lpfc_update_trunk_link_status(phba, acqe_fc);
5376 return;
5377 }
5378
5379
5380 phba->sli4_hba.link_state.speed =
5381 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5382 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5383 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5384 phba->sli4_hba.link_state.topology =
5385 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5386 phba->sli4_hba.link_state.status =
5387 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5388 phba->sli4_hba.link_state.type =
5389 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5390 phba->sli4_hba.link_state.number =
5391 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5392 phba->sli4_hba.link_state.fault =
5393 bf_get(lpfc_acqe_link_fault, acqe_fc);
5394
5395 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5396 LPFC_FC_LA_TYPE_LINK_DOWN)
5397 phba->sli4_hba.link_state.logical_speed = 0;
5398 else if (!phba->sli4_hba.conf_trunk)
5399 phba->sli4_hba.link_state.logical_speed =
5400 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5401
5402 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5403 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5404 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5405 "%dMbps Fault:%d\n",
5406 phba->sli4_hba.link_state.speed,
5407 phba->sli4_hba.link_state.topology,
5408 phba->sli4_hba.link_state.status,
5409 phba->sli4_hba.link_state.type,
5410 phba->sli4_hba.link_state.number,
5411 phba->sli4_hba.link_state.logical_speed,
5412 phba->sli4_hba.link_state.fault);
5413 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5414 if (!pmb) {
5415 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5416 "2897 The mboxq allocation failed\n");
5417 return;
5418 }
5419 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5420 if (!mp) {
5421 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5422 "2898 The lpfc_dmabuf allocation failed\n");
5423 goto out_free_pmb;
5424 }
5425 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5426 if (!mp->virt) {
5427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5428 "2899 The mbuf allocation failed\n");
5429 goto out_free_dmabuf;
5430 }
5431
5432
5433 lpfc_els_flush_all_cmd(phba);
5434
5435
5436 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5437
5438
5439 phba->sli.slistat.link_event++;
5440
5441
5442 lpfc_read_topology(phba, pmb, mp);
5443 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5444 pmb->vport = phba->pport;
5445
5446 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
5447 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5448
5449 switch (phba->sli4_hba.link_state.status) {
5450 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5451 phba->link_flag |= LS_MDS_LINK_DOWN;
5452 break;
5453 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5454 phba->link_flag |= LS_MDS_LOOPBACK;
5455 break;
5456 default:
5457 break;
5458 }
5459
5460
5461 mb = &pmb->u.mb;
5462 mb->mbxStatus = MBX_SUCCESS;
5463
5464
5465 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
5466
5467
5468 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5469 la->eventTag = acqe_fc->event_tag;
5470
5471 if (phba->sli4_hba.link_state.status ==
5472 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5473 bf_set(lpfc_mbx_read_top_att_type, la,
5474 LPFC_FC_LA_TYPE_UNEXP_WWPN);
5475 } else {
5476 bf_set(lpfc_mbx_read_top_att_type, la,
5477 LPFC_FC_LA_TYPE_LINK_DOWN);
5478 }
5479
5480 lpfc_mbx_cmpl_read_topology(phba, pmb);
5481
5482 return;
5483 }
5484
5485 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5486 if (rc == MBX_NOT_FINISHED)
5487 goto out_free_dmabuf;
5488 return;
5489
5490out_free_dmabuf:
5491 kfree(mp);
5492out_free_pmb:
5493 mempool_free(pmb, phba->mbox_mem_pool);
5494}
5495
5496
5497
5498
5499
5500
5501
5502
/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * Dispatches on the SLI event type: over/normal temperature events are
 * forwarded to the FC transport as vendor events, misconfigured-optics
 * events update the cached optic state and refresh the supported speeds
 * on every shost, and the remaining types are logged only.
 */
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int rc, i;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Type:%d, Event Data: x%08x "
			"x%08x x%08x x%08x\n", evt_type,
			acqe_sli->event_data1, acqe_sli->event_data2,
			acqe_sli->reserved, acqe_sli->trailer);

	/* Use '?' when the adapter has no port name programmed */
	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* get port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		/* Translate the status into a human-readable message.
		 * message[128] is large enough for every fixed string below.
		 */
		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Re-read the config so the supported speeds reflect the
		 * newly reported optics; clear lmt if the read fails.
		 */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
		/* Misconfigured WWN. Reports that the SLI Port is configured
		 * to use FA-WWN, but the attached device doesn't support it.
		 */
		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
			     "2699 Misconfigured FA-WWN - Attached device does "
			     "not support FA-WWN\n");
		break;
	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
		/* EEPROM failure. No driver action is required */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			     "2518 EEPROM failure - "
			     "Event Data1: x%08x Event Data2: x%08x\n",
			     acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Unrecognized SLI event, type: 0x%x",
				evt_type);
		break;
	}
}
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
5706static struct lpfc_nodelist *
5707lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5708{
5709 struct lpfc_nodelist *ndlp;
5710 struct Scsi_Host *shost;
5711 struct lpfc_hba *phba;
5712
5713 if (!vport)
5714 return NULL;
5715 phba = vport->phba;
5716 if (!phba)
5717 return NULL;
5718 ndlp = lpfc_findnode_did(vport, Fabric_DID);
5719 if (!ndlp) {
5720
5721 ndlp = lpfc_nlp_init(vport, Fabric_DID);
5722 if (!ndlp)
5723 return 0;
5724
5725 ndlp->nlp_type |= NLP_FABRIC;
5726
5727 lpfc_enqueue_node(vport, ndlp);
5728 }
5729 if ((phba->pport->port_state < LPFC_FLOGI) &&
5730 (phba->pport->port_state != LPFC_VPORT_FAILED))
5731 return NULL;
5732
5733 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5734 && (vport->port_state != LPFC_VPORT_FAILED))
5735 return NULL;
5736 shost = lpfc_shost_from_vport(vport);
5737 if (!shost)
5738 return NULL;
5739 lpfc_linkdown_port(vport);
5740 lpfc_cleanup_pending_mbox(vport);
5741 spin_lock_irq(shost->host_lock);
5742 vport->fc_flag |= FC_VPORT_CVL_RCVD;
5743 spin_unlock_irq(shost->host_lock);
5744
5745 return ndlp;
5746}
5747
5748
5749
5750
5751
5752
5753
5754
5755static void
5756lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5757{
5758 struct lpfc_vport **vports;
5759 int i;
5760
5761 vports = lpfc_create_vport_work_array(phba);
5762 if (vports)
5763 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5764 lpfc_sli4_perform_vport_cvl(vports[i]);
5765 lpfc_destroy_vport_work_array(phba, vports);
5766}
5767
5768
5769
5770
5771
5772
5773
5774
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * Handles FIP events: new/modified FCF records trigger an FCF table scan
 * unless discovery is already in progress or complete; FCF_DEAD and CVL
 * events start the FCF fast-failover rediscovery machinery.  Flag checks
 * and updates on phba->fcf.fcf_flag are serialized under hbalock.
 */
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2549 FCF (x%x) disconnected from network, "
				 "tag:x%x\n", acqe_fip->index,
				 acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_TRACE_EVENT,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this as a
			 * link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR,
				LOG_TRACE_EVENT,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
					active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(&ndlp->lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_TRACE_EVENT,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * the current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
6018
6019
6020
6021
6022
6023
6024
6025
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * Records the event tag and logs that DCBX events are not handled yet;
 * no further processing is performed.
 */
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
6035
6036
6037
6038
6039
6040
6041
6042
6043
6044
/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * Group5 events report a logical link speed change.  Records the event
 * tags, updates link_state.logical_speed (event reports units of
 * 10 Mbps), and logs the old and new values.
 */
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	/* Capture the previous speed before it is overwritten, for the log */
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}
6061
6062
6063
6064
6065
6066
6067
6068
/**
 * lpfc_sli4_async_event_proc - Process the asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * Drains the slow-path async event work queue, dispatching each queued
 * completion queue entry to the handler for its trailer code.  The list
 * lock is dropped while each event is handled and re-taken to fetch the
 * next entry, so handlers may sleep or take other locks.
 */
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the async event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
				       iflags);

		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}

		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}
6125
6126
6127
6128
6129
6130
6131
6132
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * Converts a pending FCF-rediscovery event into a fresh FCF table scan:
 * under hbalock the REDISC_EVT flag is cleared, the failover record is
 * reset, and REDISC_FOV is set; then a scan for the first FCF record is
 * issued.
 */
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}
6155
6156
6157
6158
6159
6160
6161
6162
6163
6164
6165
6166int
6167lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6168{
6169 int rc;
6170
6171
6172 phba->pci_dev_grp = dev_grp;
6173
6174
6175 if (dev_grp == LPFC_PCI_DEV_OC)
6176 phba->sli_rev = LPFC_SLI_REV4;
6177
6178
6179 rc = lpfc_init_api_table_setup(phba, dev_grp);
6180 if (rc)
6181 return -ENODEV;
6182
6183 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
6184 if (rc)
6185 return -ENODEV;
6186
6187 rc = lpfc_sli_api_table_setup(phba, dev_grp);
6188 if (rc)
6189 return -ENODEV;
6190
6191 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
6192 if (rc)
6193 return -ENODEV;
6194
6195 return 0;
6196}
6197
6198
6199
6200
6201
6202
6203
6204
6205
6206static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
6207{
6208 switch (intr_mode) {
6209 case 0:
6210 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6211 "0470 Enable INTx interrupt mode.\n");
6212 break;
6213 case 1:
6214 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6215 "0481 Enabled MSI interrupt mode.\n");
6216 break;
6217 case 2:
6218 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6219 "0480 Enabled MSI-X interrupt mode.\n");
6220 break;
6221 default:
6222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6223 "0482 Illegal interrupt mode.\n");
6224 break;
6225 }
6226 return;
6227}
6228
6229
6230
6231
6232
6233
6234
6235
6236
6237
6238
6239
6240static int
6241lpfc_enable_pci_dev(struct lpfc_hba *phba)
6242{
6243 struct pci_dev *pdev;
6244
6245
6246 if (!phba->pcidev)
6247 goto out_error;
6248 else
6249 pdev = phba->pcidev;
6250
6251 if (pci_enable_device_mem(pdev))
6252 goto out_error;
6253
6254 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
6255 goto out_disable_device;
6256
6257 pci_set_master(pdev);
6258 pci_try_set_mwi(pdev);
6259 pci_save_state(pdev);
6260
6261
6262 if (pci_is_pcie(pdev))
6263 pdev->needs_freset = 1;
6264
6265 return 0;
6266
6267out_disable_device:
6268 pci_disable_device(pdev);
6269out_error:
6270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6271 "1401 Failed to enable pci device\n");
6272 return -ENODEV;
6273}
6274
6275
6276
6277
6278
6279
6280
6281
6282static void
6283lpfc_disable_pci_dev(struct lpfc_hba *phba)
6284{
6285 struct pci_dev *pdev;
6286
6287
6288 if (!phba->pcidev)
6289 return;
6290 else
6291 pdev = phba->pcidev;
6292
6293 pci_release_mem_regions(pdev);
6294 pci_disable_device(pdev);
6295
6296 return;
6297}
6298
6299
6300
6301
6302
6303
6304
6305
6306
6307
6308void
6309lpfc_reset_hba(struct lpfc_hba *phba)
6310{
6311
6312 if (!phba->cfg_enable_hba_reset) {
6313 phba->link_state = LPFC_HBA_ERROR;
6314 return;
6315 }
6316
6317
6318 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
6319 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6320 } else {
6321 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
6322 lpfc_sli_flush_io_rings(phba);
6323 }
6324 lpfc_offline(phba);
6325 lpfc_sli_brdrestart(phba);
6326 lpfc_online(phba);
6327 lpfc_unblock_mgmt_io(phba);
6328}
6329
6330
6331
6332
6333
6334
6335
6336
6337
6338
6339
6340uint16_t
6341lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6342{
6343 struct pci_dev *pdev = phba->pcidev;
6344 uint16_t nr_virtfn;
6345 int pos;
6346
6347 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6348 if (pos == 0)
6349 return 0;
6350
6351 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6352 return nr_virtfn;
6353}
6354
6355
6356
6357
6358
6359
6360
6361
6362
6363
6364
6365
6366int
6367lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6368{
6369 struct pci_dev *pdev = phba->pcidev;
6370 uint16_t max_nr_vfn;
6371 int rc;
6372
6373 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6374 if (nr_vfn > max_nr_vfn) {
6375 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6376 "3057 Requested vfs (%d) greater than "
6377 "supported vfs (%d)", nr_vfn, max_nr_vfn);
6378 return -EINVAL;
6379 }
6380
6381 rc = pci_enable_sriov(pdev, nr_vfn);
6382 if (rc) {
6383 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6384 "2806 Failed to enable sriov on this device "
6385 "with vfn number nr_vf:%d, rc:%d\n",
6386 nr_vfn, rc);
6387 } else
6388 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6389 "2807 Successful enable sriov on this device "
6390 "with vfn number nr_vf:%d\n", nr_vfn);
6391 return rc;
6392}
6393
6394
6395
6396
6397
6398
6399
6400
6401
6402
6403
6404
/**
 * lpfc_setup_driver_resource_phase1 - Set up driver internal resources, phase1
 * @phba: pointer to lpfc hba data structure.
 *
 * Initializes the internal resources common to all HBA device types:
 * atomics, locks, list heads, wait queues and timers.  Called before the
 * SLI-rev-specific phase-2 setup.
 *
 * Return: always 0 (kept as int for symmetry with the phase-2 setups).
 */
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	atomic_set(&phba->dbg_log_idx, 0);
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
			  lpfc_idle_stat_delay_work);

	return 0;
}
6472
6473
6474
6475
6476
6477
6478
6479
6480
6481
6482
6483
/**
 * lpfc_sli_driver_resource_setup - Set up SLI-3 driver internal resources
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the driver internal resources specific to SLI-3 HBAs: timers,
 * ring structures, scatter-gather sizing (with or without BlockGuard),
 * memory/DMA pools, and optional SR-IOV virtual functions.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENODEV when
 * phase-1 setup fails.
 */
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	/* Set up phase-1 common device driver resources */

	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Sice we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accomidate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	phba->lpfc_sg_dma_buf_pool =
		dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;

	phba->lpfc_cmd_rsp_buf_pool =
			dma_pool_create("lpfc_cmd_rsp_buf_pool",
					&phba->pcidev->dev,
					sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp),
					BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
}
6629
6630
6631
6632
6633
6634
6635
6636
/**
 * lpfc_sli_driver_resource_unset - Release driver resources for an SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases all memory pools that were allocated for driver-internal use on
 * an SLI-3 HBA by handing the whole set back via lpfc_mem_free_all().
 */
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free the driver's memory pool set for this HBA */
	lpfc_mem_free_all(phba);
}
6645
6646
6647
6648
6649
6650
6651
6652
6653
6654
6655
6656
/**
 * lpfc_sli4_driver_resource_setup - Set up driver internal resources for an
 *                                   SLI-4 HBA device
 * @phba: pointer to lpfc hba data structure.
 *
 * Performs all SLI-4 specific driver-internal setup: reads configuration
 * parameters, verifies POST status, initializes timers, locks and list
 * heads, creates the bootstrap mailbox, reads port configuration, sizes the
 * scatter-gather DMA buffers, creates DMA pools, and allocates the per-CPU /
 * per-EQ bookkeeping arrays. Resources acquired earlier are unwound through
 * the goto-cleanup labels at the bottom on any failure.
 *
 * Return codes:
 *	0 - successful
 *	-ENOMEM / -ENODEV / -EIO - error
 */
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;
	int rc, i, max_buf_size;
	int longs;
	int extra;
	uint64_t wwn;
	u32 if_type;
	u32 if_fam;

	/* Record present/possible CPU counts for later queue/vector sizing */
	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	/* Before proceeding, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/* Allocate the driver workqueue.
	 * NOTE(review): the allocation result is not checked here -- confirm
	 * all users of phba->wq tolerate NULL.
	 */
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);

	/* RRQ (reinstate recovery qualifier) timeout timer */
	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);

	/* FCF rediscovery wait timer */
	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);

	/* Clear the mailbox extension buffer context and its dma-buf list */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/* Initialize the ELS HBQ buffer list and its alloc/free handlers */
	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/* VMID inactivity poll timer, only when VMID support is enabled */
	if (lpfc_is_vmid_enabled(phba))
		timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);

	/* Aborted SCSI/NVME IO buffer tracking */
	spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* NVME-target specific abort/context tracking */
		spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
		spin_lock_init(&phba->sli4_hba.t_active_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
	}

	/* Locks protecting SGL, async event and ELS-XRI-abort lists */
	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
	spin_lock_init(&phba->sli4_hba.asynce_list_lock);
	spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);

	/* Slow-path completion-queue event pool and work queues */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);

	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);

	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);

	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);

	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Resource-identifier (RPI/XRI/VFI/VPI) block lists */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Pending and completed mailbox command lists */
	INIT_LIST_HEAD(&phba->sli.mboxq);
	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);

	/* Initialize the optics state to "unknown" (0xff) */
	phba->sli4_hba.lnk_info.optic_state = 0xff;

	/* Allocate driver memory pools (SGL aligned) */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc)) {
			rc = -ENODEV;
			goto out_free_mem;
		}
		phba->temp_sensor_support = 1;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;
	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Check for NVMET being configured: compare this port's WWPN
	 * against the module's list of NVME-target enabled WWPNs.
	 */
	phba->nvmet_support = 0;
	if (lpfc_enable_nvmet_cnt) {
		/* First get WWN of HBA instance */
		lpfc_read_nv(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6016 Mailbox failed , mbxCmd x%x "
					"READ_NV, mbxStatus x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
		mb = &mboxq->u.mb;
		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(uint64_t));
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwnn.u.name = wwn;
		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
		       sizeof(uint64_t));
		/* WWN is in big-endian (network) byte order */
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwpn.u.name = wwn;

		/* Check to see if it matches any module parameter */
		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
			if (wwn == lpfc_enable_nvmet[i]) {
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
				if (lpfc_nvmet_mem_alloc(phba))
					break;

				phba->nvmet_support = 1; /* a match */

				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6017 NVME Target %016llx\n",
						wwn);
#else
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6021 Can't enable NVME Target."
						" NVME_TARGET_FC infrastructure"
						" is not in kernel\n");
#endif
				/* Not supported for NVMET */
				phba->cfg_xri_rebalancing = 0;
				if (phba->irq_chann_mode == NHT_MODE) {
					phba->cfg_irq_chann =
						phba->sli4_hba.num_present_cpu;
					phba->cfg_hdw_queue =
						phba->sli4_hba.num_present_cpu;
					phba->irq_chann_mode = NORMAL_MODE;
				}
				break;
			}
		}
	}

	lpfc_nvme_mod_param_dep(phba);

	/*
	 * Get sli4 parameters that override parameters from Port
	 * capabilities. If a FW failure occurred here, fall back only
	 * for BE2-family IF Type 0 ports; all other combinations fail.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if_fam = bf_get(lpfc_sli_intf_sli_family,
				&phba->sli4_hba.sli_intf);
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2999 Unsupported SLI4 Parameters "
					"Extents and RPI headers enabled.\n");
			if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
			    if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
				mempool_free(mboxq, phba->mbox_mem_pool);
				rc = -EIO;
				goto out_free_bsmbx;
			}
		}
		if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
		      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * Extra SGEs reserved per IO beyond the data segments:
	 * 2 for FCP cmd/rsp, plus 1 more when NVME is enabled.
	 */
	extra = 2;
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		extra++;

	/* Upper bound on the embedded SGL portion of the DMA buffer */
	max_buf_size = (2 * SLI4_PAGE_SIZE);

	/*
	 * Size the FCP command/response DMA buffer.  With BlockGuard the
	 * SGL portion is capped at max_buf_size; otherwise it is computed
	 * from the configured segment count.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		/* Both cfg_enable_bg and cfg_external_dif code paths */

		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		/*
		 * If supporting DIF, reduce the seg count for scsi to
		 * allow room for the DIF sges.
		 */
		if (phba->cfg_enable_bg &&
		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
		else
			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;

	} else {
		/*
		 * The scsi_buf for a regular I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE for each.  Sice we have no
		 * control over how many fragments come in.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) +
				((phba->cfg_sg_seg_cnt + extra) *
				sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
		phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
	}

	/* Final sizing: XPSGL override, minimum floor, or page-align */
	if (phba->cfg_xpsgl && !phba->nvmet_support)
		phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
	else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
				SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	phba->border_sge_num = phba->cfg_sg_dma_buf_size /
			       sizeof(struct sli4_sge);

	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6300 Reducing NVME sg segment "
					"cnt to %d\n",
					LPFC_MAX_NVME_SEG_CNT);
			phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
		} else
			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_seg_cnt:%d dmabuf_size:%d "
			"total:%d scsi:%d nvme:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
			phba->cfg_nvme_seg_cnt);

	/* DMA pool alignment: buffer size itself when sub-page, else a page */
	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
		i = phba->cfg_sg_dma_buf_size;
	else
		i = SLI4_PAGE_SIZE;

	phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					i, 0);
	if (!phba->lpfc_sg_dma_buf_pool)
		goto out_free_bsmbx;

	phba->lpfc_cmd_rsp_buf_pool =
			dma_pool_create("lpfc_cmd_rsp_buf_pool",
					&phba->pcidev->dev,
					sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp),
					i, 0);
	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto out_free_sg_dma_buf;

	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);

	/* Verify RAS support on adapter */
	lpfc_sli4_ras_init(phba);

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
					    sizeof(struct lpfc_hba_eq_hdl),
					    GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
					sizeof(struct lpfc_vector_map_info),
					GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_hdl;
	}

	phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
	if (!phba->sli4_hba.eq_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3321 Failed allocation for per_cpu stats\n");
		rc = -ENOMEM;
		goto out_free_hba_cpu_map;
	}

	phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
					   sizeof(*phba->sli4_hba.idle_stat),
					   GFP_KERNEL);
	if (!phba->sli4_hba.idle_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3390 Failed allocation for idle_stat\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_info;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
	if (!phba->sli4_hba.c_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3332 Failed allocating per cpu hdwq stats\n");
		rc = -ENOMEM;
		goto out_free_hba_idle_stat;
	}
#endif

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.  A failure here is non-fatal.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

	/* Unwind, in reverse order of acquisition */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
out_free_hba_idle_stat:
	kfree(phba->sli4_hba.idle_stat);
#endif
out_free_hba_eq_info:
	free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map:
	kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
	kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_cmd_rsp_buf:
	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;
out_free_sg_dma_buf:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}
7197
7198
7199
7200
7201
7202
7203
7204
/**
 * lpfc_sli4_driver_resource_unset - Release driver resources for an SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases everything lpfc_sli4_driver_resource_setup() acquired, in an
 * order that roughly mirrors the reverse of acquisition: per-CPU stats,
 * CPU/EQ maps, RPI headers, SGL lists, CQE event pool, resource
 * identifiers, the bootstrap mailbox, driver memory pools, and finally the
 * FCF connection record list.
 */
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	free_percpu(phba->sli4_hba.eq_info);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	free_percpu(phba->sli4_hba.c_stat);
#endif
	kfree(phba->sli4_hba.idle_stat);

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_possible_cpu = 0;
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;
	cpumask_clear(&phba->sli4_hba.irq_aff_mask);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}
7260
7261
7262
7263
7264
7265
7266
7267
7268
7269
7270
7271int
7272lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7273{
7274 phba->lpfc_hba_init_link = lpfc_hba_init_link;
7275 phba->lpfc_hba_down_link = lpfc_hba_down_link;
7276 phba->lpfc_selective_reset = lpfc_selective_reset;
7277 switch (dev_grp) {
7278 case LPFC_PCI_DEV_LP:
7279 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7280 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
7281 phba->lpfc_stop_port = lpfc_stop_port_s3;
7282 break;
7283 case LPFC_PCI_DEV_OC:
7284 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7285 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
7286 phba->lpfc_stop_port = lpfc_stop_port_s4;
7287 break;
7288 default:
7289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7290 "1431 Invalid HBA PCI-device group: 0x%x\n",
7291 dev_grp);
7292 return -ENODEV;
7293 }
7294 return 0;
7295}
7296
7297
7298
7299
7300
7301
7302
7303
7304
7305
7306
7307
7308static int
7309lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7310{
7311 int error;
7312
7313
7314 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7315 "lpfc_worker_%d", phba->brd_no);
7316 if (IS_ERR(phba->worker_thread)) {
7317 error = PTR_ERR(phba->worker_thread);
7318 return error;
7319 }
7320
7321 return 0;
7322}
7323
7324
7325
7326
7327
7328
7329
7330
7331
7332static void
7333lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7334{
7335 if (phba->wq) {
7336 flush_workqueue(phba->wq);
7337 destroy_workqueue(phba->wq);
7338 phba->wq = NULL;
7339 }
7340
7341
7342 if (phba->worker_thread)
7343 kthread_stop(phba->worker_thread);
7344}
7345
7346
7347
7348
7349
7350
7351
7352void
7353lpfc_free_iocb_list(struct lpfc_hba *phba)
7354{
7355 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7356
7357 spin_lock_irq(&phba->hbalock);
7358 list_for_each_entry_safe(iocbq_entry, iocbq_next,
7359 &phba->lpfc_iocb_list, list) {
7360 list_del(&iocbq_entry->list);
7361 kfree(iocbq_entry);
7362 phba->total_iocbq_bufs--;
7363 }
7364 spin_unlock_irq(&phba->hbalock);
7365
7366 return;
7367}
7368
7369
7370
7371
7372
7373
7374
7375
7376
7377
7378
7379
7380
7381int
7382lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7383{
7384 struct lpfc_iocbq *iocbq_entry = NULL;
7385 uint16_t iotag;
7386 int i;
7387
7388
7389 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7390 for (i = 0; i < iocb_count; i++) {
7391 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7392 if (iocbq_entry == NULL) {
7393 printk(KERN_ERR "%s: only allocated %d iocbs of "
7394 "expected %d count. Unloading driver.\n",
7395 __func__, i, iocb_count);
7396 goto out_free_iocbq;
7397 }
7398
7399 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7400 if (iotag == 0) {
7401 kfree(iocbq_entry);
7402 printk(KERN_ERR "%s: failed to allocate IOTAG. "
7403 "Unloading driver.\n", __func__);
7404 goto out_free_iocbq;
7405 }
7406 iocbq_entry->sli4_lxritag = NO_XRI;
7407 iocbq_entry->sli4_xritag = NO_XRI;
7408
7409 spin_lock_irq(&phba->hbalock);
7410 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7411 phba->total_iocbq_bufs++;
7412 spin_unlock_irq(&phba->hbalock);
7413 }
7414
7415 return 0;
7416
7417out_free_iocbq:
7418 lpfc_free_iocb_list(phba);
7419
7420 return -ENOMEM;
7421}
7422
7423
7424
7425
7426
7427
7428
7429
7430void
7431lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7432{
7433 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7434
7435 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7436 list_del(&sglq_entry->list);
7437 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7438 kfree(sglq_entry);
7439 }
7440}
7441
7442
7443
7444
7445
7446
7447
7448static void
7449lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7450{
7451 LIST_HEAD(sglq_list);
7452
7453
7454 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
7455 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7456 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
7457
7458
7459 lpfc_free_sgl_list(phba, &sglq_list);
7460}
7461
7462
7463
7464
7465
7466
7467
7468static void
7469lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7470{
7471 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7472 LIST_HEAD(sglq_list);
7473
7474
7475 spin_lock_irq(&phba->hbalock);
7476 spin_lock(&phba->sli4_hba.sgl_list_lock);
7477 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7478 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7479 spin_unlock_irq(&phba->hbalock);
7480
7481
7482 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7483 list_del(&sglq_entry->list);
7484 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7485 kfree(sglq_entry);
7486 }
7487
7488
7489
7490
7491
7492 phba->sli4_hba.nvmet_xri_cnt = 0;
7493}
7494
7495
7496
7497
7498
7499
7500
7501
7502static int
7503lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7504{
7505 int size;
7506 size = sizeof(struct lpfc_sglq *);
7507 size *= phba->sli4_hba.max_cfg_param.max_xri;
7508
7509 phba->sli4_hba.lpfc_sglq_active_list =
7510 kzalloc(size, GFP_KERNEL);
7511 if (!phba->sli4_hba.lpfc_sglq_active_list)
7512 return -ENOMEM;
7513 return 0;
7514}
7515
7516
7517
7518
7519
7520
7521
7522
7523
7524static void
7525lpfc_free_active_sgl(struct lpfc_hba *phba)
7526{
7527 kfree(phba->sli4_hba.lpfc_sglq_active_list);
7528}
7529
7530
7531
7532
7533
7534
7535
7536
7537
7538static void
7539lpfc_init_sgl_list(struct lpfc_hba *phba)
7540{
7541
7542 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7543 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7544 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7545 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7546
7547
7548 phba->sli4_hba.els_xri_cnt = 0;
7549
7550
7551 phba->sli4_hba.io_xri_cnt = 0;
7552}
7553
7554
7555
7556
7557
7558
7559
7560
7561
7562
7563
7564
7565
7566
7567
7568int
7569lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7570{
7571 int rc = 0;
7572 struct lpfc_rpi_hdr *rpi_hdr;
7573
7574 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
7575 if (!phba->sli4_hba.rpi_hdrs_in_use)
7576 return rc;
7577 if (phba->sli4_hba.extents_in_use)
7578 return -EIO;
7579
7580 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7581 if (!rpi_hdr) {
7582 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7583 "0391 Error during rpi post operation\n");
7584 lpfc_sli4_remove_rpis(phba);
7585 rc = -ENODEV;
7586 }
7587
7588 return rc;
7589}
7590
7591
7592
7593
7594
7595
7596
7597
7598
7599
7600
7601
7602
7603
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates one rpi header template region (a DMA-coherent buffer of
 * LPFC_HDR_TEMPLATE_SIZE bytes plus its tracking structure) and links it on
 * the hba's rpi header list.  The DMA buffer must come back aligned to its
 * own size; an unaligned allocation is treated as failure.
 *
 * Returns: pointer to the new rpi header, or NULL when headers are not in
 * use, extents are in use, the rpi space is exhausted, or an allocation
 * fails.
 */
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Don't post.  Likewise if headers are simply not in use.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI for this new header region.
	 * NOTE(review): next_rpi is sampled under the lock here but not
	 * advanced until the header is linked below -- confirm no concurrent
	 * caller can observe the same range.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range; no more headers can be created. */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	/* The port requires the region to be size-aligned. */
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
7683
7684
7685
7686
7687
7688
7689
7690
7691
7692
7693void
7694lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7695{
7696 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7697
7698 if (!phba->sli4_hba.rpi_hdrs_in_use)
7699 goto exit;
7700
7701 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7702 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7703 list_del(&rpi_hdr->list);
7704 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7705 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7706 kfree(rpi_hdr->dmabuf);
7707 kfree(rpi_hdr);
7708 }
7709 exit:
7710
7711 phba->sli4_hba.next_rpi = 0;
7712}
7713
7714
7715
7716
7717
7718
7719
7720
7721
7722
7723
7724
7725
7726static struct lpfc_hba *
7727lpfc_hba_alloc(struct pci_dev *pdev)
7728{
7729 struct lpfc_hba *phba;
7730
7731
7732 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7733 if (!phba) {
7734 dev_err(&pdev->dev, "failed to allocate hba struct\n");
7735 return NULL;
7736 }
7737
7738
7739 phba->pcidev = pdev;
7740
7741
7742 phba->brd_no = lpfc_get_instance();
7743 if (phba->brd_no < 0) {
7744 kfree(phba);
7745 return NULL;
7746 }
7747 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7748
7749 spin_lock_init(&phba->ct_ev_lock);
7750 INIT_LIST_HEAD(&phba->ct_ev_waiters);
7751
7752 return phba;
7753}
7754
7755
7756
7757
7758
7759
7760
7761
7762static void
7763lpfc_hba_free(struct lpfc_hba *phba)
7764{
7765 if (phba->sli_rev == LPFC_SLI_REV4)
7766 kfree(phba->sli4_hba.hdwq);
7767
7768
7769 idr_remove(&lpfc_hba_index, phba->brd_no);
7770
7771
7772 kfree(phba->sli.sli3_ring);
7773 phba->sli.sli3_ring = NULL;
7774
7775 kfree(phba);
7776 return;
7777}
7778
7779
7780
7781
7782
7783
7784
7785
7786
7787
7788
7789
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * Initializes the FC timing defaults, creates the physical port (and its
 * SCSI host), records it as phba->pport, applies NVME-target adjustments
 * when nvmet_support is set, initializes debugfs for the vport, and sets up
 * the FDMI masks.
 *
 * Return codes:
 *	0 - successful
 *	-ENODEV - port creation failed
 */
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC timeout default values */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* NVME target mode: no local targetport yet, NVME only */
		phba->targetport = NULL;
		phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
				"6076 NVME Target Found\n");
	}

	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * Allow FDMI on this port; choose the HBA/port attribute masks
	 * based on SmartSAN / FDMI configuration.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}
7839
7840
7841
7842
7843
7844
7845
7846
7847static void
7848lpfc_destroy_shost(struct lpfc_hba *phba)
7849{
7850 struct lpfc_vport *vport = phba->pport;
7851
7852
7853 destroy_port(vport);
7854
7855 return;
7856}
7857
7858
7859
7860
7861
7862
7863
7864
7865
7866static void
7867lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7868{
7869 uint32_t old_mask;
7870 uint32_t old_guard;
7871
7872 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7873 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7874 "1478 Registering BlockGuard with the "
7875 "SCSI layer\n");
7876
7877 old_mask = phba->cfg_prot_mask;
7878 old_guard = phba->cfg_prot_guard;
7879
7880
7881 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7882 SHOST_DIX_TYPE0_PROTECTION |
7883 SHOST_DIX_TYPE1_PROTECTION);
7884 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7885 SHOST_DIX_GUARD_CRC);
7886
7887
7888 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7889 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7890
7891 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7892 if ((old_mask != phba->cfg_prot_mask) ||
7893 (old_guard != phba->cfg_prot_guard))
7894 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7895 "1475 Registering BlockGuard with the "
7896 "SCSI layer: mask %d guard %d\n",
7897 phba->cfg_prot_mask,
7898 phba->cfg_prot_guard);
7899
7900 scsi_host_set_prot(shost, phba->cfg_prot_mask);
7901 scsi_host_set_guard(shost, phba->cfg_prot_guard);
7902 } else
7903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7904 "1479 Not Registering BlockGuard with the SCSI "
7905 "layer, Bad protection parameters: %d %d\n",
7906 old_mask, old_guard);
7907 }
7908}
7909
7910
7911
7912
7913
7914
7915
7916
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * Fills in the model name/description, sizes the SCSI host queue depth,
 * initializes host attributes, starts the FCP polling timer when polling
 * mode is configured, and posts the adapter-arrival event to user space.
 */
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.  The "- 10" leaves headroom --
	 * NOTE(review): presumably reserved for internal commands; confirm.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
7952
7953
7954
7955
7956
7957
7958
7959
7960
7961
7962
7963
/**
 * lpfc_sli_pci_mem_setup - Setup SLI-3 host memory mapped PCI space.
 * @phba: pointer to lpfc hba data structure.
 *
 * Configures the DMA mask (64-bit with 32-bit fallback), maps BAR 0 (SLIM)
 * and BAR 2 (control registers), allocates the SLIM2 and HBQ DMA regions,
 * carves the HBQ region into per-HBQ slots, and records the memory-mapped
 * register addresses.  Each failure unwinds through the goto labels in
 * reverse order of acquisition.
 *
 * Return codes:
 *	0 - successful
 *	other values - error
 */
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size: try 64-bit, fall back to 32-bit */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = -ENODEV;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
					       &phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	/* Carve the SLIM2 region into mailbox / PCB / IOCB sub-areas */
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	/* Divide the HBQ slim region among the configured HBQs */
	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	/* Record the memory-mapped SLIM and control register addresses */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
8060
8061
8062
8063
8064
8065
8066
8067
8068static void
8069lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
8070{
8071 struct pci_dev *pdev;
8072
8073
8074 if (!phba->pcidev)
8075 return;
8076 else
8077 pdev = phba->pcidev;
8078
8079
8080 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8081 phba->hbqslimp.virt, phba->hbqslimp.phys);
8082 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8083 phba->slim2p.virt, phba->slim2p.phys);
8084
8085
8086 iounmap(phba->ctrl_regs_memmap_p);
8087 iounmap(phba->slim_memmap_p);
8088
8089 return;
8090}
8091
8092
8093
8094
8095
8096
8097
8098
8099
8100
8101int
8102lpfc_sli4_post_status_check(struct lpfc_hba *phba)
8103{
8104 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
8105 struct lpfc_register reg_data;
8106 int i, port_error = 0;
8107 uint32_t if_type;
8108
8109 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
8110 memset(®_data, 0, sizeof(reg_data));
8111 if (!phba->sli4_hba.PSMPHRregaddr)
8112 return -ENODEV;
8113
8114
8115 for (i = 0; i < 3000; i++) {
8116 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
8117 &portsmphr_reg.word0) ||
8118 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
8119
8120 port_error = -ENODEV;
8121 break;
8122 }
8123 if (LPFC_POST_STAGE_PORT_READY ==
8124 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
8125 break;
8126 msleep(10);
8127 }
8128
8129
8130
8131
8132
8133 if (port_error) {
8134 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8135 "1408 Port Failed POST - portsmphr=0x%x, "
8136 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
8137 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
8138 portsmphr_reg.word0,
8139 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
8140 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
8141 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
8142 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
8143 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
8144 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
8145 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
8146 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
8147 } else {
8148 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8149 "2534 Device Info: SLIFamily=0x%x, "
8150 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
8151 "SLIHint_2=0x%x, FT=0x%x\n",
8152 bf_get(lpfc_sli_intf_sli_family,
8153 &phba->sli4_hba.sli_intf),
8154 bf_get(lpfc_sli_intf_slirev,
8155 &phba->sli4_hba.sli_intf),
8156 bf_get(lpfc_sli_intf_if_type,
8157 &phba->sli4_hba.sli_intf),
8158 bf_get(lpfc_sli_intf_sli_hint1,
8159 &phba->sli4_hba.sli_intf),
8160 bf_get(lpfc_sli_intf_sli_hint2,
8161 &phba->sli4_hba.sli_intf),
8162 bf_get(lpfc_sli_intf_func_type,
8163 &phba->sli4_hba.sli_intf));
8164
8165
8166
8167
8168
8169 if_type = bf_get(lpfc_sli_intf_if_type,
8170 &phba->sli4_hba.sli_intf);
8171 switch (if_type) {
8172 case LPFC_SLI_INTF_IF_TYPE_0:
8173 phba->sli4_hba.ue_mask_lo =
8174 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
8175 phba->sli4_hba.ue_mask_hi =
8176 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
8177 uerrlo_reg.word0 =
8178 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
8179 uerrhi_reg.word0 =
8180 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
8181 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
8182 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
8183 lpfc_printf_log(phba, KERN_ERR,
8184 LOG_TRACE_EVENT,
8185 "1422 Unrecoverable Error "
8186 "Detected during POST "
8187 "uerr_lo_reg=0x%x, "
8188 "uerr_hi_reg=0x%x, "
8189 "ue_mask_lo_reg=0x%x, "
8190 "ue_mask_hi_reg=0x%x\n",
8191 uerrlo_reg.word0,
8192 uerrhi_reg.word0,
8193 phba->sli4_hba.ue_mask_lo,
8194 phba->sli4_hba.ue_mask_hi);
8195 port_error = -ENODEV;
8196 }
8197 break;
8198 case LPFC_SLI_INTF_IF_TYPE_2:
8199 case LPFC_SLI_INTF_IF_TYPE_6:
8200
8201 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8202 ®_data.word0) ||
8203 (bf_get(lpfc_sliport_status_err, ®_data) &&
8204 !bf_get(lpfc_sliport_status_rn, ®_data))) {
8205 phba->work_status[0] =
8206 readl(phba->sli4_hba.u.if_type2.
8207 ERR1regaddr);
8208 phba->work_status[1] =
8209 readl(phba->sli4_hba.u.if_type2.
8210 ERR2regaddr);
8211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8212 "2888 Unrecoverable port error "
8213 "following POST: port status reg "
8214 "0x%x, port_smphr reg 0x%x, "
8215 "error 1=0x%x, error 2=0x%x\n",
8216 reg_data.word0,
8217 portsmphr_reg.word0,
8218 phba->work_status[0],
8219 phba->work_status[1]);
8220 port_error = -ENODEV;
8221 }
8222 break;
8223 case LPFC_SLI_INTF_IF_TYPE_1:
8224 default:
8225 break;
8226 }
8227 }
8228 return port_error;
8229}
8230
8231
8232
8233
8234
8235
8236
8237
8238
8239static void
8240lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8241{
8242 switch (if_type) {
8243 case LPFC_SLI_INTF_IF_TYPE_0:
8244 phba->sli4_hba.u.if_type0.UERRLOregaddr =
8245 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
8246 phba->sli4_hba.u.if_type0.UERRHIregaddr =
8247 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
8248 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8249 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8250 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8251 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8252 phba->sli4_hba.SLIINTFregaddr =
8253 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8254 break;
8255 case LPFC_SLI_INTF_IF_TYPE_2:
8256 phba->sli4_hba.u.if_type2.EQDregaddr =
8257 phba->sli4_hba.conf_regs_memmap_p +
8258 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8259 phba->sli4_hba.u.if_type2.ERR1regaddr =
8260 phba->sli4_hba.conf_regs_memmap_p +
8261 LPFC_CTL_PORT_ER1_OFFSET;
8262 phba->sli4_hba.u.if_type2.ERR2regaddr =
8263 phba->sli4_hba.conf_regs_memmap_p +
8264 LPFC_CTL_PORT_ER2_OFFSET;
8265 phba->sli4_hba.u.if_type2.CTRLregaddr =
8266 phba->sli4_hba.conf_regs_memmap_p +
8267 LPFC_CTL_PORT_CTL_OFFSET;
8268 phba->sli4_hba.u.if_type2.STATUSregaddr =
8269 phba->sli4_hba.conf_regs_memmap_p +
8270 LPFC_CTL_PORT_STA_OFFSET;
8271 phba->sli4_hba.SLIINTFregaddr =
8272 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8273 phba->sli4_hba.PSMPHRregaddr =
8274 phba->sli4_hba.conf_regs_memmap_p +
8275 LPFC_CTL_PORT_SEM_OFFSET;
8276 phba->sli4_hba.RQDBregaddr =
8277 phba->sli4_hba.conf_regs_memmap_p +
8278 LPFC_ULP0_RQ_DOORBELL;
8279 phba->sli4_hba.WQDBregaddr =
8280 phba->sli4_hba.conf_regs_memmap_p +
8281 LPFC_ULP0_WQ_DOORBELL;
8282 phba->sli4_hba.CQDBregaddr =
8283 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8284 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8285 phba->sli4_hba.MQDBregaddr =
8286 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8287 phba->sli4_hba.BMBXregaddr =
8288 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8289 break;
8290 case LPFC_SLI_INTF_IF_TYPE_6:
8291 phba->sli4_hba.u.if_type2.EQDregaddr =
8292 phba->sli4_hba.conf_regs_memmap_p +
8293 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8294 phba->sli4_hba.u.if_type2.ERR1regaddr =
8295 phba->sli4_hba.conf_regs_memmap_p +
8296 LPFC_CTL_PORT_ER1_OFFSET;
8297 phba->sli4_hba.u.if_type2.ERR2regaddr =
8298 phba->sli4_hba.conf_regs_memmap_p +
8299 LPFC_CTL_PORT_ER2_OFFSET;
8300 phba->sli4_hba.u.if_type2.CTRLregaddr =
8301 phba->sli4_hba.conf_regs_memmap_p +
8302 LPFC_CTL_PORT_CTL_OFFSET;
8303 phba->sli4_hba.u.if_type2.STATUSregaddr =
8304 phba->sli4_hba.conf_regs_memmap_p +
8305 LPFC_CTL_PORT_STA_OFFSET;
8306 phba->sli4_hba.PSMPHRregaddr =
8307 phba->sli4_hba.conf_regs_memmap_p +
8308 LPFC_CTL_PORT_SEM_OFFSET;
8309 phba->sli4_hba.BMBXregaddr =
8310 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8311 break;
8312 case LPFC_SLI_INTF_IF_TYPE_1:
8313 default:
8314 dev_printk(KERN_ERR, &phba->pcidev->dev,
8315 "FATAL - unsupported SLI4 interface type - %d\n",
8316 if_type);
8317 break;
8318 }
8319}
8320
8321
8322
8323
8324
8325
8326
8327
8328static void
8329lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8330{
8331 switch (if_type) {
8332 case LPFC_SLI_INTF_IF_TYPE_0:
8333 phba->sli4_hba.PSMPHRregaddr =
8334 phba->sli4_hba.ctrl_regs_memmap_p +
8335 LPFC_SLIPORT_IF0_SMPHR;
8336 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8337 LPFC_HST_ISR0;
8338 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8339 LPFC_HST_IMR0;
8340 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8341 LPFC_HST_ISCR0;
8342 break;
8343 case LPFC_SLI_INTF_IF_TYPE_6:
8344 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8345 LPFC_IF6_RQ_DOORBELL;
8346 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8347 LPFC_IF6_WQ_DOORBELL;
8348 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8349 LPFC_IF6_CQ_DOORBELL;
8350 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8351 LPFC_IF6_EQ_DOORBELL;
8352 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8353 LPFC_IF6_MQ_DOORBELL;
8354 break;
8355 case LPFC_SLI_INTF_IF_TYPE_2:
8356 case LPFC_SLI_INTF_IF_TYPE_1:
8357 default:
8358 dev_err(&phba->pcidev->dev,
8359 "FATAL - unsupported SLI4 interface type - %d\n",
8360 if_type);
8361 break;
8362 }
8363}
8364
8365
8366
8367
8368
8369
8370
8371
8372
8373
8374
8375static int
8376lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8377{
8378 if (vf > LPFC_VIR_FUNC_MAX)
8379 return -ENODEV;
8380
8381 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8382 vf * LPFC_VFR_PAGE_SIZE +
8383 LPFC_ULP0_RQ_DOORBELL);
8384 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8385 vf * LPFC_VFR_PAGE_SIZE +
8386 LPFC_ULP0_WQ_DOORBELL);
8387 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8388 vf * LPFC_VFR_PAGE_SIZE +
8389 LPFC_EQCQ_DOORBELL);
8390 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8391 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8392 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8393 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8394 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8395 return 0;
8396}
8397
8398
8399
8400
8401
8402
8403
8404
8405
8406
8407
8408
8409
8410
8411
8412
/**
 * lpfc_create_bootstrap_mbox - Set up the SLI4 bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates a coherent DMA region for the bootstrap mailbox, records a
 * 16-byte aligned virtual/physical view of it, and pre-computes the two
 * 32-bit register values (addr_hi/addr_lo) the hardware expects for the
 * aligned physical address.
 *
 * Return: 0 on success, -ENOMEM when either allocation fails.
 */
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Over-allocate by (alignment - 1) bytes so a 16-byte aligned
	 * sub-region always fits, wherever the allocation lands.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Keep the raw allocation for later freeing, and derive the aligned
	 * virtual and physical addresses actually handed to the hardware.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Split the aligned physical address into the two register words:
	 * addr_hi carries bits 63:34 and addr_lo carries bits 33:4, each
	 * shifted up 2 and OR'd with its BIT1 marker. Bits 3:0 are dropped,
	 * which is safe because the address is 16-byte aligned.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
8472
8473
8474
8475
8476
8477
8478
8479
8480
8481
8482
8483
8484static void
8485lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8486{
8487 dma_free_coherent(&phba->pcidev->dev,
8488 phba->sli4_hba.bmbx.bmbx_size,
8489 phba->sli4_hba.bmbx.dmabuf->virt,
8490 phba->sli4_hba.bmbx.dmabuf->phys);
8491
8492 kfree(phba->sli4_hba.bmbx.dmabuf);
8493 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8494}
8495
/*
 * Human-readable names for topology modes, indexed by phba->cfg_topology
 * (see lpfc_map_topology() log messages). "Unsupported" slots pad the
 * unused index values.
 */
static const char * const lpfc_topo_to_str[] = {
	"Loop then P2P",
	"Loopback",
	"P2P Only",
	"Unsupported",
	"Loop Only",
	"Unsupported",
	"P2P then Loop",
};
8505
8506#define LINK_FLAGS_DEF 0x0
8507#define LINK_FLAGS_P2P 0x1
8508#define LINK_FLAGS_LOOP 0x2
8509
8510
8511
8512
8513
8514
8515
8516
8517
8518
8519static void
8520lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
8521{
8522 u8 ptv, tf, pt;
8523
8524 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
8525 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
8526 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
8527
8528 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8529 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
8530 ptv, tf, pt);
8531 if (!ptv) {
8532 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8533 "2019 FW does not support persistent topology "
8534 "Using driver parameter defined value [%s]",
8535 lpfc_topo_to_str[phba->cfg_topology]);
8536 return;
8537 }
8538
8539 phba->hba_flag |= HBA_PERSISTENT_TOPO;
8540 switch (phba->pcidev->device) {
8541 case PCI_DEVICE_ID_LANCER_G7_FC:
8542 case PCI_DEVICE_ID_LANCER_G6_FC:
8543 if (!tf) {
8544 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
8545 ? FLAGS_TOPOLOGY_MODE_LOOP
8546 : FLAGS_TOPOLOGY_MODE_PT_PT);
8547 } else {
8548 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
8549 }
8550 break;
8551 default:
8552 if (tf) {
8553
8554 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
8555 FLAGS_TOPOLOGY_MODE_LOOP_PT);
8556 } else {
8557 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
8558 ? FLAGS_TOPOLOGY_MODE_PT_PT
8559 : FLAGS_TOPOLOGY_MODE_LOOP);
8560 }
8561 break;
8562 }
8563 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
8564 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8565 "2020 Using persistent topology value [%s]",
8566 lpfc_topo_to_str[phba->cfg_topology]);
8567 } else {
8568 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8569 "2021 Invalid topology values from FW "
8570 "Using driver parameter defined value [%s]",
8571 lpfc_topo_to_str[phba->cfg_topology]);
8572 }
8573}
8574
8575
8576
8577
8578
8579
8580
8581
8582
8583
8584
8585
8586
8587
8588
/**
 * lpfc_sli4_read_config - Issue READ_CONFIG and capture port resource limits
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues the READ_CONFIG mailbox command by polling and records the port's
 * resource extents/counts (XRI, VPI, RPI, VFI, FCFI, EQ/CQ/WQ/RQ limits),
 * maps the persistent topology, clamps queue/IRQ counts to firmware limits,
 * applies any factory forced link speed, and finally (if_type >= 2) issues
 * GET_FUNCTION_CONFIG to learn the PF/VF numbers.
 *
 * Return: 0 on success, -ENOMEM if no mailbox memory, -EIO if READ_CONFIG
 * fails. GET_FUNCTION_CONFIG failures are logged but do not change rc.
 */
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type, qmin;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	/* READ_CONFIG is issued synchronously during init */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2012 Mailbox failed , mbxCmd x%x "
			"READ_CONFIG, mbxStatus x%x\n",
			bf_get(lpfc_mqe_command, &pmb->u.mqe),
			bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		/* Record link type/number only when the FW marks them valid */
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		/* BB credit recovery support advertised by the port */
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		phba->sli4_hba.conf_trunk =
			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		/* Keep memory footprint small in a kdump (crash) kernel */
		if (is_kdump_kernel() &&
		    phba->sli4_hba.max_cfg_param.max_xri > 512)
			phba->sli4_hba.max_cfg_param.max_xri = 512;
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* The driver never supports more than LPFC_MAX_VPORTS VPIs */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		/* One VPI is consumed by the physical port itself */
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_map_topology(phba, rd_config);
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq,
				phba->lmt);

		/*
		 * Clamp IRQ channels and hardware queues to the smallest of
		 * the firmware WQ/CQ/EQ limits.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;

		/*
		 * Reserve headroom out of qmin for the slow-path queues
		 * before comparing against the fast-path queue counts.
		 */
		qmin -= 4;

		/* Scale back the configured queue counts if over the limit */
		if ((phba->cfg_irq_chann > qmin) ||
		    (phba->cfg_hdw_queue > qmin)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2005 Reducing Queues - "
					"FW resource limitation: "
					"WQ %d CQ %d EQ %d: min %d: "
					"IRQ %d HDWQ %d\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->sli4_hba.max_cfg_param.max_eq,
					qmin, phba->cfg_irq_chann,
					phba->cfg_hdw_queue);

			if (phba->cfg_irq_chann > qmin)
				phba->cfg_irq_chann = qmin;
			if (phba->cfg_hdw_queue > qmin)
				phba->cfg_hdw_queue = qmin;
		}
	}

	if (rc)
		goto read_cfg_out;

	/* A factory-forced link speed overrides the configured speed */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case 0xffff:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Cap the queue depth to the XRIs left after the ELS reservation */
	length = phba->sli4_hba.max_cfg_param.max_xri -
		 lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	/* GET_FUNCTION_CONFIG is only defined for if_type >= 2 */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* Reuse the same mailbox for GET_FUNCTION_CONFIG */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	/* NOTE: rc2 is intentionally separate; its failure leaves rc == 0 */
	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* Walk the resource descriptors looking for the FCFCoE entry */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	/* V0 descriptors report a reserved length; use the fixed V0 size */
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
8881
8882
8883
8884
8885
8886
8887
8888
8889
8890
8891
8892
8893
8894
8895static int
8896lpfc_setup_endian_order(struct lpfc_hba *phba)
8897{
8898 LPFC_MBOXQ_t *mboxq;
8899 uint32_t if_type, rc = 0;
8900 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8901 HOST_ENDIAN_HIGH_WORD1};
8902
8903 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8904 switch (if_type) {
8905 case LPFC_SLI_INTF_IF_TYPE_0:
8906 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8907 GFP_KERNEL);
8908 if (!mboxq) {
8909 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8910 "0492 Unable to allocate memory for "
8911 "issuing SLI_CONFIG_SPECIAL mailbox "
8912 "command\n");
8913 return -ENOMEM;
8914 }
8915
8916
8917
8918
8919
8920 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8921 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8922 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8923 if (rc != MBX_SUCCESS) {
8924 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8925 "0493 SLI_CONFIG_SPECIAL mailbox "
8926 "failed with status x%x\n",
8927 rc);
8928 rc = -EIO;
8929 }
8930 mempool_free(mboxq, phba->mbox_mem_pool);
8931 break;
8932 case LPFC_SLI_INTF_IF_TYPE_6:
8933 case LPFC_SLI_INTF_IF_TYPE_2:
8934 case LPFC_SLI_INTF_IF_TYPE_1:
8935 default:
8936 break;
8937 }
8938 return rc;
8939}
8940
8941
8942
8943
8944
8945
8946
8947
8948
8949
8950
8951
8952
8953
8954static int
8955lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8956{
8957
8958
8959
8960
8961
8962 if (phba->nvmet_support) {
8963 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
8964 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
8965 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8966 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8967 }
8968
8969 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8970 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8971 phba->cfg_hdw_queue, phba->cfg_irq_chann,
8972 phba->cfg_nvmet_mrq);
8973
8974
8975 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8976 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8977
8978
8979 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8980 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8981 return 0;
8982}
8983
8984static int
8985lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8986{
8987 struct lpfc_queue *qdesc;
8988 u32 wqesize;
8989 int cpu;
8990
8991 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8992
8993 if (phba->enab_exp_wqcq_pages)
8994
8995 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8996 phba->sli4_hba.cq_esize,
8997 LPFC_CQE_EXP_COUNT, cpu);
8998
8999 else
9000 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9001 phba->sli4_hba.cq_esize,
9002 phba->sli4_hba.cq_ecount, cpu);
9003 if (!qdesc) {
9004 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9005 "0499 Failed allocate fast-path IO CQ (%d)\n",
9006 idx);
9007 return 1;
9008 }
9009 qdesc->qe_valid = 1;
9010 qdesc->hdwq = idx;
9011 qdesc->chann = cpu;
9012 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
9013
9014
9015 if (phba->enab_exp_wqcq_pages) {
9016
9017 wqesize = (phba->fcp_embed_io) ?
9018 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
9019 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
9020 wqesize,
9021 LPFC_WQE_EXP_COUNT, cpu);
9022 } else
9023 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9024 phba->sli4_hba.wq_esize,
9025 phba->sli4_hba.wq_ecount, cpu);
9026
9027 if (!qdesc) {
9028 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9029 "0503 Failed allocate fast-path IO WQ (%d)\n",
9030 idx);
9031 return 1;
9032 }
9033 qdesc->hdwq = idx;
9034 qdesc->chann = cpu;
9035 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
9036 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9037 return 0;
9038}
9039
9040
9041
9042
9043
9044
9045
9046
9047
9048
9049
9050
9051
9052
9053
9054int
9055lpfc_sli4_queue_create(struct lpfc_hba *phba)
9056{
9057 struct lpfc_queue *qdesc;
9058 int idx, cpu, eqcpu;
9059 struct lpfc_sli4_hdw_queue *qp;
9060 struct lpfc_vector_map_info *cpup;
9061 struct lpfc_vector_map_info *eqcpup;
9062 struct lpfc_eq_intr_info *eqi;
9063
9064
9065
9066
9067
9068 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
9069 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
9070 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
9071 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
9072 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
9073 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
9074 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
9075 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
9076 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
9077 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
9078
9079 if (!phba->sli4_hba.hdwq) {
9080 phba->sli4_hba.hdwq = kcalloc(
9081 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
9082 GFP_KERNEL);
9083 if (!phba->sli4_hba.hdwq) {
9084 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9085 "6427 Failed allocate memory for "
9086 "fast-path Hardware Queue array\n");
9087 goto out_error;
9088 }
9089
9090 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9091 qp = &phba->sli4_hba.hdwq[idx];
9092 spin_lock_init(&qp->io_buf_list_get_lock);
9093 spin_lock_init(&qp->io_buf_list_put_lock);
9094 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
9095 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
9096 qp->get_io_bufs = 0;
9097 qp->put_io_bufs = 0;
9098 qp->total_io_bufs = 0;
9099 spin_lock_init(&qp->abts_io_buf_list_lock);
9100 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
9101 qp->abts_scsi_io_bufs = 0;
9102 qp->abts_nvme_io_bufs = 0;
9103 INIT_LIST_HEAD(&qp->sgl_list);
9104 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
9105 spin_lock_init(&qp->hdwq_lock);
9106 }
9107 }
9108
9109 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9110 if (phba->nvmet_support) {
9111 phba->sli4_hba.nvmet_cqset = kcalloc(
9112 phba->cfg_nvmet_mrq,
9113 sizeof(struct lpfc_queue *),
9114 GFP_KERNEL);
9115 if (!phba->sli4_hba.nvmet_cqset) {
9116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9117 "3121 Fail allocate memory for "
9118 "fast-path CQ set array\n");
9119 goto out_error;
9120 }
9121 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
9122 phba->cfg_nvmet_mrq,
9123 sizeof(struct lpfc_queue *),
9124 GFP_KERNEL);
9125 if (!phba->sli4_hba.nvmet_mrq_hdr) {
9126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9127 "3122 Fail allocate memory for "
9128 "fast-path RQ set hdr array\n");
9129 goto out_error;
9130 }
9131 phba->sli4_hba.nvmet_mrq_data = kcalloc(
9132 phba->cfg_nvmet_mrq,
9133 sizeof(struct lpfc_queue *),
9134 GFP_KERNEL);
9135 if (!phba->sli4_hba.nvmet_mrq_data) {
9136 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9137 "3124 Fail allocate memory for "
9138 "fast-path RQ set data array\n");
9139 goto out_error;
9140 }
9141 }
9142 }
9143
9144 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9145
9146
9147 for_each_present_cpu(cpu) {
9148
9149
9150
9151
9152 cpup = &phba->sli4_hba.cpu_map[cpu];
9153 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9154 continue;
9155
9156
9157 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9158
9159
9160 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9161 phba->sli4_hba.eq_esize,
9162 phba->sli4_hba.eq_ecount, cpu);
9163 if (!qdesc) {
9164 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9165 "0497 Failed allocate EQ (%d)\n",
9166 cpup->hdwq);
9167 goto out_error;
9168 }
9169 qdesc->qe_valid = 1;
9170 qdesc->hdwq = cpup->hdwq;
9171 qdesc->chann = cpu;
9172 qdesc->last_cpu = qdesc->chann;
9173
9174
9175 qp->hba_eq = qdesc;
9176
9177 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
9178 list_add(&qdesc->cpu_list, &eqi->list);
9179 }
9180
9181
9182
9183
9184 for_each_present_cpu(cpu) {
9185 cpup = &phba->sli4_hba.cpu_map[cpu];
9186
9187
9188 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
9189 continue;
9190
9191
9192 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9193 if (qp->hba_eq)
9194 continue;
9195
9196
9197 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
9198 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
9199 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
9200 }
9201
9202
9203 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9204 if (lpfc_alloc_io_wq_cq(phba, idx))
9205 goto out_error;
9206 }
9207
9208 if (phba->nvmet_support) {
9209 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9210 cpu = lpfc_find_cpu_handle(phba, idx,
9211 LPFC_FIND_BY_HDWQ);
9212 qdesc = lpfc_sli4_queue_alloc(phba,
9213 LPFC_DEFAULT_PAGE_SIZE,
9214 phba->sli4_hba.cq_esize,
9215 phba->sli4_hba.cq_ecount,
9216 cpu);
9217 if (!qdesc) {
9218 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9219 "3142 Failed allocate NVME "
9220 "CQ Set (%d)\n", idx);
9221 goto out_error;
9222 }
9223 qdesc->qe_valid = 1;
9224 qdesc->hdwq = idx;
9225 qdesc->chann = cpu;
9226 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
9227 }
9228 }
9229
9230
9231
9232
9233
9234 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
9235
9236 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9237 phba->sli4_hba.cq_esize,
9238 phba->sli4_hba.cq_ecount, cpu);
9239 if (!qdesc) {
9240 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9241 "0500 Failed allocate slow-path mailbox CQ\n");
9242 goto out_error;
9243 }
9244 qdesc->qe_valid = 1;
9245 phba->sli4_hba.mbx_cq = qdesc;
9246
9247
9248 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9249 phba->sli4_hba.cq_esize,
9250 phba->sli4_hba.cq_ecount, cpu);
9251 if (!qdesc) {
9252 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9253 "0501 Failed allocate slow-path ELS CQ\n");
9254 goto out_error;
9255 }
9256 qdesc->qe_valid = 1;
9257 qdesc->chann = cpu;
9258 phba->sli4_hba.els_cq = qdesc;
9259
9260
9261
9262
9263
9264
9265
9266
9267 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9268 phba->sli4_hba.mq_esize,
9269 phba->sli4_hba.mq_ecount, cpu);
9270 if (!qdesc) {
9271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9272 "0505 Failed allocate slow-path MQ\n");
9273 goto out_error;
9274 }
9275 qdesc->chann = cpu;
9276 phba->sli4_hba.mbx_wq = qdesc;
9277
9278
9279
9280
9281
9282
9283 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9284 phba->sli4_hba.wq_esize,
9285 phba->sli4_hba.wq_ecount, cpu);
9286 if (!qdesc) {
9287 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9288 "0504 Failed allocate slow-path ELS WQ\n");
9289 goto out_error;
9290 }
9291 qdesc->chann = cpu;
9292 phba->sli4_hba.els_wq = qdesc;
9293 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9294
9295 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9296
9297 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9298 phba->sli4_hba.cq_esize,
9299 phba->sli4_hba.cq_ecount, cpu);
9300 if (!qdesc) {
9301 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9302 "6079 Failed allocate NVME LS CQ\n");
9303 goto out_error;
9304 }
9305 qdesc->chann = cpu;
9306 qdesc->qe_valid = 1;
9307 phba->sli4_hba.nvmels_cq = qdesc;
9308
9309
9310 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9311 phba->sli4_hba.wq_esize,
9312 phba->sli4_hba.wq_ecount, cpu);
9313 if (!qdesc) {
9314 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9315 "6080 Failed allocate NVME LS WQ\n");
9316 goto out_error;
9317 }
9318 qdesc->chann = cpu;
9319 phba->sli4_hba.nvmels_wq = qdesc;
9320 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9321 }
9322
9323
9324
9325
9326
9327
9328 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9329 phba->sli4_hba.rq_esize,
9330 phba->sli4_hba.rq_ecount, cpu);
9331 if (!qdesc) {
9332 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9333 "0506 Failed allocate receive HRQ\n");
9334 goto out_error;
9335 }
9336 phba->sli4_hba.hdr_rq = qdesc;
9337
9338
9339 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9340 phba->sli4_hba.rq_esize,
9341 phba->sli4_hba.rq_ecount, cpu);
9342 if (!qdesc) {
9343 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9344 "0507 Failed allocate receive DRQ\n");
9345 goto out_error;
9346 }
9347 phba->sli4_hba.dat_rq = qdesc;
9348
9349 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9350 phba->nvmet_support) {
9351 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9352 cpu = lpfc_find_cpu_handle(phba, idx,
9353 LPFC_FIND_BY_HDWQ);
9354
9355 qdesc = lpfc_sli4_queue_alloc(phba,
9356 LPFC_DEFAULT_PAGE_SIZE,
9357 phba->sli4_hba.rq_esize,
9358 LPFC_NVMET_RQE_DEF_COUNT,
9359 cpu);
9360 if (!qdesc) {
9361 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9362 "3146 Failed allocate "
9363 "receive HRQ\n");
9364 goto out_error;
9365 }
9366 qdesc->hdwq = idx;
9367 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9368
9369
9370 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9371 GFP_KERNEL,
9372 cpu_to_node(cpu));
9373 if (qdesc->rqbp == NULL) {
9374 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9375 "6131 Failed allocate "
9376 "Header RQBP\n");
9377 goto out_error;
9378 }
9379
9380
9381 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9382
9383
9384 qdesc = lpfc_sli4_queue_alloc(phba,
9385 LPFC_DEFAULT_PAGE_SIZE,
9386 phba->sli4_hba.rq_esize,
9387 LPFC_NVMET_RQE_DEF_COUNT,
9388 cpu);
9389 if (!qdesc) {
9390 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9391 "3156 Failed allocate "
9392 "receive DRQ\n");
9393 goto out_error;
9394 }
9395 qdesc->hdwq = idx;
9396 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9397 }
9398 }
9399
9400
9401 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9402 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9403 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9404 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9405 }
9406 }
9407
9408
9409 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9410 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9411 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9412 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9413 }
9414 }
9415
9416 return 0;
9417
9418out_error:
9419 lpfc_sli4_queue_destroy(phba);
9420 return -ENOMEM;
9421}
9422
9423static inline void
9424__lpfc_sli4_release_queue(struct lpfc_queue **qp)
9425{
9426 if (*qp != NULL) {
9427 lpfc_sli4_queue_free(*qp);
9428 *qp = NULL;
9429 }
9430}
9431
9432static inline void
9433lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9434{
9435 int idx;
9436
9437 if (*qs == NULL)
9438 return;
9439
9440 for (idx = 0; idx < max; idx++)
9441 __lpfc_sli4_release_queue(&(*qs)[idx]);
9442
9443 kfree(*qs);
9444 *qs = NULL;
9445}
9446
/**
 * lpfc_sli4_release_hdwq - release hardware-queue memory
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the per-hardware-queue IO CQ/WQ pairs and per-hdwq buffer
 * pools, then frees the event queues tracked in hba_eq_hdl.  Each
 * hdwq's hba_eq pointer is only cleared (not freed) here: EQs may be
 * shared by several hardware queues and are owned by the hba_eq_hdl
 * array, so they are freed once in the second loop.
 */
static inline void
lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *eq;
	uint32_t idx;

	hdwq = phba->sli4_hba.hdwq;

	/* Loop thru all Hardware Queues */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		/* Free the CQ/WQ corresponding to the Hardware Queue */
		lpfc_sli4_queue_free(hdwq[idx].io_cq);
		lpfc_sli4_queue_free(hdwq[idx].io_wq);
		hdwq[idx].hba_eq = NULL;
		hdwq[idx].io_cq = NULL;
		hdwq[idx].io_wq = NULL;
		if (phba->cfg_xpsgl && !phba->nvmet_support)
			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
	}
	/* Loop thru all IRQ vectors */
	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		/* Free the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		lpfc_sli4_queue_free(eq);
		phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
	}
}
9476
9477
9478
9479
9480
9481
9482
9483
9484
9485
9486
9487
9488
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases all SLI4 queue memory: the per-hardware-queue resources,
 * the NVMET queue arrays (when nvmet is supported), and the slow-path
 * MBOX/ELS/NVME-LS WQ/CQ pairs and unsolicited receive queues.  The
 * LPFC_QUEUE_FREE_INIT/LPFC_QUEUE_FREE_WAIT flags in sli_flag are used
 * to serialize against concurrent queue users while freeing.
 */
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	/*
	 * Set FREE_INIT to announce that queue freeing has started, then
	 * poll (dropping hbalock while sleeping) until any concurrent
	 * users holding FREE_WAIT have drained.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli4_cleanup_poll_list(phba);

	/* Release HBA eqs */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue pair */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on the WQ tracking list has now been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Done with freeing the queues */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
	spin_unlock_irq(&phba->hbalock);
}
9552
/**
 * lpfc_free_rq_buffer - free all posted buffers on a receive queue
 * @phba: pointer to lpfc hba data structure.
 * @rq: receive queue whose posted buffer list is to be drained.
 *
 * Walks the RQ's posted-buffer list, releasing each buffer through the
 * RQ's registered rqb_free_buffer callback and decrementing the posted
 * buffer count.
 *
 * Return: always 1.
 */
int
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_dmabuf *h_buf;
	struct rqb_dmabuf *rqb_buffer;

	rqbp = rq->rqbp;
	while (!list_empty(&rqbp->rqb_buffer_list)) {
		/* Detach the head buffer from the posted list */
		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
				 struct lpfc_dmabuf, list);

		/* Recover the full rqb_dmabuf from its embedded hbuf */
		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
		rqbp->buffer_count--;
	}
	return 1;
}
9571
/**
 * lpfc_create_wq_cq - Create a WQ (or MQ) and its parent CQ on an EQ
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue for the new CQ.
 * @cq: completion queue structure to create on the port.
 * @wq: work queue (or mailbox queue when @qtype is LPFC_MBOX) to create.
 * @cq_map: optional out-parameter; receives the CQ's queue id (non-MBOX only).
 * @qidx: queue index, used for logging.
 * @qtype: queue subtype (LPFC_MBOX, LPFC_ELS, LPFC_NVME_LS, LPFC_IO, ...).
 *
 * Creates the CQ first (an MCQ for mailbox, WCQ otherwise), then creates
 * the WQ or MQ bound to it.  On WQ/MQ create failure the CQ is left for
 * the caller to tear down.
 *
 * Return: 0 on success, -ENOMEM if any queue structure is missing, or
 * the error code from the underlying create routine.
 */
static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
	struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
	int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	/* All three queue structures must have been allocated already */
	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6085 Fast-path %s (%d) not allocated\n",
			((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* create the Cq first */
	rc = lpfc_cq_create(phba, cq, eq,
			(qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6086 Failed setup of CQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
		return rc;
	}

	if (qtype != LPFC_MBOX) {
		/* Setup cq_map for fast lookup */
		if (cq_map)
			*cq_map = cq->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
			qidx, cq->queue_id, qidx, eq->queue_id);

		/* create the wq */
		rc = lpfc_wq_create(phba, wq, cq, qtype);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		/* Bind this CQ/WQ to the SLI ring */
		pring = wq->pring;
		pring->sli.sli4.wqp = (void *)wq;
		cq->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
	} else {
		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0539 Failed setup of slow-path MQ: "
					"rc = 0x%x\n", rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);
	}

	return 0;
}
9642
9643
9644
9645
9646
9647
9648
9649
9650static void
9651lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9652{
9653 struct lpfc_queue *eq, *childq;
9654 int qidx;
9655
9656 memset(phba->sli4_hba.cq_lookup, 0,
9657 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9658
9659 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9660
9661 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9662 if (!eq)
9663 continue;
9664
9665 list_for_each_entry(childq, &eq->child_list, list) {
9666 if (childq->queue_id > phba->sli4_hba.cq_max)
9667 continue;
9668 if (childq->subtype == LPFC_IO)
9669 phba->sli4_hba.cq_lookup[childq->queue_id] =
9670 childq;
9671 }
9672 }
9673}
9674
9675
9676
9677
9678
9679
9680
9681
9682
9683
9684
9685
9686
/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues on the port
 * @phba: pointer to lpfc hba data structure.
 *
 * Queries the firmware configuration, then registers with the port,
 * in order: the per-vector EQs, the per-hardware-queue IO CQ/WQ pairs,
 * the mailbox MQ/CQ, the NVMET CQ set (target mode), the ELS and NVME
 * LS WQ/CQ pairs, the NVMET MRQs (target mode), and the unsolicited
 * receive queue pair.  Finally programs EQ delay and builds the CQ
 * lookup table.  On any failure everything set up so far is unset.
 *
 * Return: 0 on success; -ENOMEM on missing allocations; -ENXIO on
 * mailbox failure; otherwise the error from a queue create routine.
 */
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_sli4_hdw_queue *qp;
	LPFC_MBOXQ_t *mboxq;
	int qidx, cpu;
	uint32_t length, usdelay;
	int rc = -ENOMEM;

	/* Query the firmware function configuration */
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3249 Unable to allocate memory for "
				"QUERY_FW_CFG mailbox command\n");
		return -ENOMEM;
	}
	length = (sizeof(struct lpfc_mbx_query_fw_config) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3250 QUERY_FW_CFG mailbox failed with status "
				"x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -ENXIO;
		goto out_error;
	}

	/* Cache the function/ULP mode reported by the firmware */
	phba->sli4_hba.fw_func_mode =
	    mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
	phba->sli4_hba.physical_port =
	    mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);

	mempool_free(mboxq, phba->mbox_mem_pool);

	/*
	 * Set up HBA Event Queues (EQs)
	 */
	qp = phba->sli4_hba.hdwq;

	/* Set up HBA event queue */
	if (!qp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_error;
	}

	/* Loop thru all IRQ vectors */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		/* Create HBA Event Queues (EQs) in order */
		for_each_present_cpu(cpu) {
			cpup = &phba->sli4_hba.cpu_map[cpu];

			/* Look for the CPU that owns this vector
			 * (flagged LPFC_CPU_FIRST_IRQ) and matches qidx.
			 */
			if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
				continue;
			if (qidx != cpup->eq)
				continue;

			/* Create an EQ for that vector */
			rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
					    phba->cfg_fcp_imax);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"0523 Failed setup of fast-path"
						" EQ (%d), rc = 0x%x\n",
						cpup->eq, (uint32_t)rc);
				goto out_destroy;
			}

			/* Save the EQ for that vector in the hba_eq_hdl */
			phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
				qp[cpup->hdwq].hba_eq;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2584 HBA EQ setup: queue[%d]-id=%d\n",
					cpup->eq,
					qp[cpup->hdwq].hba_eq->queue_id);
		}
	}

	/* Loop thru all Hardware Queues */
	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Create the CQ/WQ corresponding to the Hardware Queue */
		rc = lpfc_create_wq_cq(phba,
				       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
				       qp[qidx].io_cq,
				       qp[qidx].io_wq,
				       &phba->sli4_hba.hdwq[qidx].io_cq_map,
				       qidx,
				       LPFC_IO);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0535 Failed to setup fastpath "
					"IO WQ/CQ (%d), rc = 0x%x\n",
					qidx, (uint32_t)rc);
			goto out_destroy;
		}
	}

	/*
	 * Set up Slow Path Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX CQ/MQ */

	if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0528 %s not allocated\n",
				phba->sli4_hba.mbx_cq ?
				"Mailbox WQ" : "Mailbox CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
			       phba->sli4_hba.mbx_cq,
			       phba->sli4_hba.mbx_wq,
			       NULL, 0, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
			(uint32_t)rc);
		goto out_destroy;
	}
	if (phba->nvmet_support) {
		if (!phba->sli4_hba.nvmet_cqset) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3165 Fast-path NVME CQ Set "
					"array not allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_cq_create_set(phba,
					phba->sli4_hba.nvmet_cqset,
					qp,
					LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3164 Failed setup of NVME CQ "
						"Set, rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}
		} else {
			/* Set up NVMET Receive Complete Queue */
			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
					    qp[0].hba_eq,
					    LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6089 Failed setup NVMET CQ: "
						"rc = 0x%x\n", (uint32_t)rc);
				goto out_destroy;
			}
			phba->sli4_hba.nvmet_cqset[0]->chann = 0;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"6090 NVMET CQ setup: cq-id=%d, "
					"parent eq-id=%d\n",
					phba->sli4_hba.nvmet_cqset[0]->queue_id,
					qp[0].hba_eq->queue_id);
		}
	}

	/* Set up slow-path ELS WQ/CQ */
	if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0530 ELS %s not allocated\n",
				phba->sli4_hba.els_cq ? "WQ" : "CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}
	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
			       phba->sli4_hba.els_cq,
			       phba->sli4_hba.els_wq,
			       NULL, 0, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
				(uint32_t)rc);
		goto out_destroy;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Set up NVME LS Complete Queue */
		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6091 LS %s not allocated\n",
					phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}
		rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
				       phba->sli4_hba.nvmels_cq,
				       phba->sli4_hba.nvmels_wq,
				       NULL, 0, LPFC_NVME_LS);
		if (rc) {
			/* NOTE(review): "NVVME" below is a typo for "NVME";
			 * kept as-is since log text with message IDs is a
			 * grep target - confirm before changing.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0526 Failed setup of NVVME LS WQ/CQ: "
					"rc = 0x%x\n", (uint32_t)rc);
			goto out_destroy;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"6096 ELS WQ setup: wq-id=%d, "
				"parent cq-id=%d\n",
				phba->sli4_hba.nvmels_wq->queue_id,
				phba->sli4_hba.nvmels_cq->queue_id);
	}

	/*
	 * Create NVMET Receive Queue (RQ)
	 */
	if (phba->nvmet_support) {
		if ((!phba->sli4_hba.nvmet_cqset) ||
		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
		    (!phba->sli4_hba.nvmet_mrq_data)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6130 MRQ CQ Queues not "
					"allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_mrq_create(phba,
					     phba->sli4_hba.nvmet_mrq_hdr,
					     phba->sli4_hba.nvmet_mrq_data,
					     phba->sli4_hba.nvmet_cqset,
					     LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6098 Failed setup of NVMET "
						"MRQ: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

		} else {
			rc = lpfc_rq_create(phba,
					    phba->sli4_hba.nvmet_mrq_hdr[0],
					    phba->sli4_hba.nvmet_mrq_data[0],
					    phba->sli4_hba.nvmet_cqset[0],
					    LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6057 Failed setup of NVMET "
						"Receive Queue: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

			lpfc_printf_log(
				phba, KERN_INFO, LOG_INIT,
				"6099 NVMET RQ setup: hdr-rq-id=%d, "
				"dat-rq-id=%d parent cq-id=%d\n",
				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
				phba->sli4_hba.nvmet_cqset[0]->queue_id);

		}
	}

	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	/* Program EQ delay from the configured interrupt coalescing max */
	if (phba->cfg_fcp_imax)
		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
	else
		usdelay = 0;

	for (qidx = 0; qidx < phba->cfg_irq_chann;
	     qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
					 usdelay);

	/* (Re)build the CQID-indexed fast lookup table */
	if (phba->sli4_hba.cq_max) {
		kfree(phba->sli4_hba.cq_lookup);
		phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
			sizeof(struct lpfc_queue *), GFP_KERNEL);
		if (!phba->sli4_hba.cq_lookup) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0549 Failed setup of CQ Lookup table: "
					"size 0x%x\n", phba->sli4_hba.cq_max);
			rc = -ENOMEM;
			goto out_destroy;
		}
		lpfc_setup_cq_lookup(phba);
	}
	return 0;

out_destroy:
	lpfc_sli4_queue_unset(phba);
out_error:
	return rc;
}
10034
10035
10036
10037
10038
10039
10040
10041
10042
10043
10044
10045
10046
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Unregisters from the port every queue set up by
 * lpfc_sli4_queue_setup(): work/mailbox queues first, then receive
 * queues, then completion queues, then the fast-path CQ/WQ pairs and
 * EQs.  The CQ lookup table is freed and cq_max reset.
 */
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;
	int qidx;

	/* Unset mailbox command work queue */
	if (phba->sli4_hba.mbx_wq)
		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Unset NVME LS work queue */
	if (phba->sli4_hba.nvmels_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);

	/* Unset ELS work queue */
	if (phba->sli4_hba.els_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);

	/* Unset unsolicited receive queue pair */
	if (phba->sli4_hba.hdr_rq)
		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
				phba->sli4_hba.dat_rq);

	/* Unset mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);

	/* Unset ELS complete queue */
	if (phba->sli4_hba.els_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);

	/* Unset NVME LS complete queue */
	if (phba->sli4_hba.nvmels_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);

	if (phba->nvmet_support) {
		/* Unset NVMET MRQ queue pairs */
		if (phba->sli4_hba.nvmet_mrq_hdr) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_rq_destroy(
					phba,
					phba->sli4_hba.nvmet_mrq_hdr[qidx],
					phba->sli4_hba.nvmet_mrq_data[qidx]);
		}

		/* Unset NVMET CQ Set complete queues */
		if (phba->sli4_hba.nvmet_cqset) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_cq_destroy(
					phba, phba->sli4_hba.nvmet_cqset[qidx]);
		}
	}

	/* Unset fast-path SLI4 queues */
	if (phba->sli4_hba.hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			/* Destroy the CQ/WQ corresponding to Hardware Queue */
			qp = &phba->sli4_hba.hdwq[qidx];
			lpfc_wq_destroy(phba, qp->io_wq);
			lpfc_cq_destroy(phba, qp->io_cq);
		}
		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			/* Destroy the EQ corresponding to the IRQ vector */
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			lpfc_eq_destroy(phba, eq);
		}
	}

	kfree(phba->sli4_hba.cq_lookup);
	phba->sli4_hba.cq_lookup = NULL;
	phba->sli4_hba.cq_max = 0;
}
10122
10123
10124
10125
10126
10127
10128
10129
10130
10131
10132
10133
10134
10135
10136
10137
10138
10139static int
10140lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
10141{
10142 struct lpfc_cq_event *cq_event;
10143 int i;
10144
10145 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
10146 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
10147 if (!cq_event)
10148 goto out_pool_create_fail;
10149 list_add_tail(&cq_event->list,
10150 &phba->sli4_hba.sp_cqe_event_pool);
10151 }
10152 return 0;
10153
10154out_pool_create_fail:
10155 lpfc_sli4_cq_event_pool_destroy(phba);
10156 return -ENOMEM;
10157}
10158
10159
10160
10161
10162
10163
10164
10165
10166
10167
10168
10169static void
10170lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
10171{
10172 struct lpfc_cq_event *cq_event, *next_cq_event;
10173
10174 list_for_each_entry_safe(cq_event, next_cq_event,
10175 &phba->sli4_hba.sp_cqe_event_pool, list) {
10176 list_del(&cq_event->list);
10177 kfree(cq_event);
10178 }
10179}
10180
10181
10182
10183
10184
10185
10186
10187
10188
10189
10190
/**
 * __lpfc_sli4_cq_event_alloc - Get a cq event from the free pool (unlocked)
 * @phba: pointer to lpfc hba data structure.
 *
 * Removes and returns the head of the pre-allocated sp_cqe_event_pool.
 * Callers are expected to hold phba->hbalock (the locked wrapper
 * lpfc_sli4_cq_event_alloc() does so).
 *
 * Return: the event, or NULL when the pool is empty.
 */
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	/* list_remove_head leaves cq_event NULL when the pool is empty */
	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}
10200
10201
10202
10203
10204
10205
10206
10207
10208
10209
10210
/**
 * lpfc_sli4_cq_event_alloc - Get a cq event from the free pool (locked)
 * @phba: pointer to lpfc hba data structure.
 *
 * IRQ-safe locked wrapper around __lpfc_sli4_cq_event_alloc().
 *
 * Return: the event, or NULL when the pool is empty.
 */
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}
10222
10223
10224
10225
10226
10227
10228
10229
10230
/**
 * __lpfc_sli4_cq_event_release - Return a cq event to the free pool (unlocked)
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to release.
 *
 * Appends the event back onto sp_cqe_event_pool.  Callers are expected
 * to hold phba->hbalock (the locked wrapper
 * lpfc_sli4_cq_event_release() does so).
 */
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}
10237
10238
10239
10240
10241
10242
10243
10244
10245
/**
 * lpfc_sli4_cq_event_release - Return a cq event to the free pool (locked)
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to release.
 *
 * IRQ-safe locked wrapper around __lpfc_sli4_cq_event_release().
 */
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
10255
10256
10257
10258
10259
10260
10261
10262
/**
 * lpfc_sli4_cq_event_release_all - Release all pending cq events to the pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Splices every pending event off the ELS XRI aborted and async event
 * work queues (each under its own IRQ-safe lock) onto a private list,
 * then returns each event to the sp_cqe_event_pool free pool.
 */
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cq_event_list);
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */

	/* Get all the pending ELS XRI aborted events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);

	/* Get all the pending asynchronous events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	/* Return each drained event back to the free pool */
	while (!list_empty(&cq_event_list)) {
		list_remove_head(&cq_event_list, cq_event,
				 struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
10290
10291
10292
10293
10294
10295
10296
10297
10298
10299
10300
10301
10302
10303int
10304lpfc_pci_function_reset(struct lpfc_hba *phba)
10305{
10306 LPFC_MBOXQ_t *mboxq;
10307 uint32_t rc = 0, if_type;
10308 uint32_t shdr_status, shdr_add_status;
10309 uint32_t rdy_chk;
10310 uint32_t port_reset = 0;
10311 union lpfc_sli4_cfg_shdr *shdr;
10312 struct lpfc_register reg_data;
10313 uint16_t devid;
10314
10315 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10316 switch (if_type) {
10317 case LPFC_SLI_INTF_IF_TYPE_0:
10318 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10319 GFP_KERNEL);
10320 if (!mboxq) {
10321 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10322 "0494 Unable to allocate memory for "
10323 "issuing SLI_FUNCTION_RESET mailbox "
10324 "command\n");
10325 return -ENOMEM;
10326 }
10327
10328
10329 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10330 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10331 LPFC_SLI4_MBX_EMBED);
10332 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10333 shdr = (union lpfc_sli4_cfg_shdr *)
10334 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10335 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10336 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10337 &shdr->response);
10338 mempool_free(mboxq, phba->mbox_mem_pool);
10339 if (shdr_status || shdr_add_status || rc) {
10340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10341 "0495 SLI_FUNCTION_RESET mailbox "
10342 "failed with status x%x add_status x%x,"
10343 " mbx status x%x\n",
10344 shdr_status, shdr_add_status, rc);
10345 rc = -ENXIO;
10346 }
10347 break;
10348 case LPFC_SLI_INTF_IF_TYPE_2:
10349 case LPFC_SLI_INTF_IF_TYPE_6:
10350wait:
10351
10352
10353
10354
10355
10356 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10357 if (lpfc_readl(phba->sli4_hba.u.if_type2.
10358 STATUSregaddr, ®_data.word0)) {
10359 rc = -ENODEV;
10360 goto out;
10361 }
10362 if (bf_get(lpfc_sliport_status_rdy, ®_data))
10363 break;
10364 msleep(20);
10365 }
10366
10367 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) {
10368 phba->work_status[0] = readl(
10369 phba->sli4_hba.u.if_type2.ERR1regaddr);
10370 phba->work_status[1] = readl(
10371 phba->sli4_hba.u.if_type2.ERR2regaddr);
10372 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10373 "2890 Port not ready, port status reg "
10374 "0x%x error 1=0x%x, error 2=0x%x\n",
10375 reg_data.word0,
10376 phba->work_status[0],
10377 phba->work_status[1]);
10378 rc = -ENODEV;
10379 goto out;
10380 }
10381
10382 if (!port_reset) {
10383
10384
10385
10386 reg_data.word0 = 0;
10387 bf_set(lpfc_sliport_ctrl_end, ®_data,
10388 LPFC_SLIPORT_LITTLE_ENDIAN);
10389 bf_set(lpfc_sliport_ctrl_ip, ®_data,
10390 LPFC_SLIPORT_INIT_PORT);
10391 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10392 CTRLregaddr);
10393
10394 pci_read_config_word(phba->pcidev,
10395 PCI_DEVICE_ID, &devid);
10396
10397 port_reset = 1;
10398 msleep(20);
10399 goto wait;
10400 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) {
10401 rc = -ENODEV;
10402 goto out;
10403 }
10404 break;
10405
10406 case LPFC_SLI_INTF_IF_TYPE_1:
10407 default:
10408 break;
10409 }
10410
10411out:
10412
10413 if (rc) {
10414 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10415 "3317 HBA not functional: IP Reset Failed "
10416 "try: echo fw_reset > board_mode\n");
10417 rc = -ENODEV;
10418 }
10419
10420 return rc;
10421}
10422
10423
10424
10425
10426
10427
10428
10429
10430
10431
10432
10433
10434static int
10435lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10436{
10437 struct pci_dev *pdev = phba->pcidev;
10438 unsigned long bar0map_len, bar1map_len, bar2map_len;
10439 int error;
10440 uint32_t if_type;
10441
10442 if (!pdev)
10443 return -ENODEV;
10444
10445
10446 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10447 if (error)
10448 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10449 if (error)
10450 return error;
10451
10452
10453
10454
10455
10456 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10457 &phba->sli4_hba.sli_intf.word0)) {
10458 return -ENODEV;
10459 }
10460
10461
10462 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10463 LPFC_SLI_INTF_VALID) {
10464 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10465 "2894 SLI_INTF reg contents invalid "
10466 "sli_intf reg 0x%x\n",
10467 phba->sli4_hba.sli_intf.word0);
10468 return -ENODEV;
10469 }
10470
10471 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10472
10473
10474
10475
10476
10477
10478 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10479 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10480 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10481
10482
10483
10484
10485
10486 phba->sli4_hba.conf_regs_memmap_p =
10487 ioremap(phba->pci_bar0_map, bar0map_len);
10488 if (!phba->sli4_hba.conf_regs_memmap_p) {
10489 dev_printk(KERN_ERR, &pdev->dev,
10490 "ioremap failed for SLI4 PCI config "
10491 "registers.\n");
10492 return -ENODEV;
10493 }
10494 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10495
10496 lpfc_sli4_bar0_register_memmap(phba, if_type);
10497 } else {
10498 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10499 bar0map_len = pci_resource_len(pdev, 1);
10500 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10501 dev_printk(KERN_ERR, &pdev->dev,
10502 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10503 return -ENODEV;
10504 }
10505 phba->sli4_hba.conf_regs_memmap_p =
10506 ioremap(phba->pci_bar0_map, bar0map_len);
10507 if (!phba->sli4_hba.conf_regs_memmap_p) {
10508 dev_printk(KERN_ERR, &pdev->dev,
10509 "ioremap failed for SLI4 PCI config "
10510 "registers.\n");
10511 return -ENODEV;
10512 }
10513 lpfc_sli4_bar0_register_memmap(phba, if_type);
10514 }
10515
10516 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10517 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10518
10519
10520
10521
10522 phba->pci_bar1_map = pci_resource_start(pdev,
10523 PCI_64BIT_BAR2);
10524 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10525 phba->sli4_hba.ctrl_regs_memmap_p =
10526 ioremap(phba->pci_bar1_map,
10527 bar1map_len);
10528 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10529 dev_err(&pdev->dev,
10530 "ioremap failed for SLI4 HBA "
10531 "control registers.\n");
10532 error = -ENOMEM;
10533 goto out_iounmap_conf;
10534 }
10535 phba->pci_bar2_memmap_p =
10536 phba->sli4_hba.ctrl_regs_memmap_p;
10537 lpfc_sli4_bar1_register_memmap(phba, if_type);
10538 } else {
10539 error = -ENOMEM;
10540 goto out_iounmap_conf;
10541 }
10542 }
10543
10544 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10545 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10546
10547
10548
10549
10550 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10551 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10552 phba->sli4_hba.drbl_regs_memmap_p =
10553 ioremap(phba->pci_bar1_map, bar1map_len);
10554 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10555 dev_err(&pdev->dev,
10556 "ioremap failed for SLI4 HBA doorbell registers.\n");
10557 error = -ENOMEM;
10558 goto out_iounmap_conf;
10559 }
10560 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10561 lpfc_sli4_bar1_register_memmap(phba, if_type);
10562 }
10563
10564 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10565 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10566
10567
10568
10569
10570 phba->pci_bar2_map = pci_resource_start(pdev,
10571 PCI_64BIT_BAR4);
10572 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10573 phba->sli4_hba.drbl_regs_memmap_p =
10574 ioremap(phba->pci_bar2_map,
10575 bar2map_len);
10576 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10577 dev_err(&pdev->dev,
10578 "ioremap failed for SLI4 HBA"
10579 " doorbell registers.\n");
10580 error = -ENOMEM;
10581 goto out_iounmap_ctrl;
10582 }
10583 phba->pci_bar4_memmap_p =
10584 phba->sli4_hba.drbl_regs_memmap_p;
10585 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10586 if (error)
10587 goto out_iounmap_all;
10588 } else {
10589 error = -ENOMEM;
10590 goto out_iounmap_all;
10591 }
10592 }
10593
10594 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10595 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10596
10597
10598
10599
10600 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10601 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10602 phba->sli4_hba.dpp_regs_memmap_p =
10603 ioremap(phba->pci_bar2_map, bar2map_len);
10604 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10605 dev_err(&pdev->dev,
10606 "ioremap failed for SLI4 HBA dpp registers.\n");
10607 error = -ENOMEM;
10608 goto out_iounmap_ctrl;
10609 }
10610 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10611 }
10612
10613
10614 switch (if_type) {
10615 case LPFC_SLI_INTF_IF_TYPE_0:
10616 case LPFC_SLI_INTF_IF_TYPE_2:
10617 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10618 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10619 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10620 break;
10621 case LPFC_SLI_INTF_IF_TYPE_6:
10622 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10623 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10624 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10625 break;
10626 default:
10627 break;
10628 }
10629
10630 return 0;
10631
10632out_iounmap_all:
10633 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10634out_iounmap_ctrl:
10635 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10636out_iounmap_conf:
10637 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10638
10639 return error;
10640}
10641
10642
10643
10644
10645
10646
10647
10648
10649static void
10650lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10651{
10652 uint32_t if_type;
10653 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10654
10655 switch (if_type) {
10656 case LPFC_SLI_INTF_IF_TYPE_0:
10657 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10658 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10659 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10660 break;
10661 case LPFC_SLI_INTF_IF_TYPE_2:
10662 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10663 break;
10664 case LPFC_SLI_INTF_IF_TYPE_6:
10665 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10666 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10667 if (phba->sli4_hba.dpp_regs_memmap_p)
10668 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
10669 break;
10670 case LPFC_SLI_INTF_IF_TYPE_1:
10671 default:
10672 dev_printk(KERN_ERR, &phba->pcidev->dev,
10673 "FATAL - unsupported SLI4 interface type - %d\n",
10674 if_type);
10675 break;
10676 }
10677}
10678
10679
10680
10681
10682
10683
10684
10685
10686
10687
10688
10689
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates LPFC_MSIX_VECTORS MSI-X vectors (vector 0 = slow path,
 * vector 1 = fast path), registers an interrupt handler on each and then
 * issues the CONFIG_MSI mailbox command so the firmware routes attention
 * conditions to the right message.  Each failure path unwinds the steps
 * already completed, in reverse order.
 *
 * Return: 0 on success; otherwise a negative errno or, if the mailbox
 * command itself fails, its non-zero mailbox status.
 */
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors (exactly LPFC_MSIX_VECTORS) */
	rc = pci_alloc_irq_vectors(phba->pcidev,
			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}

	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated with the slow-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated with the fast-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);

	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for the mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for the mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the fast-path irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the slow-path irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure the MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
10779
10780
10781
10782
10783
10784
10785
10786
10787
10788
10789
10790
10791
10792
10793
10794static int
10795lpfc_sli_enable_msi(struct lpfc_hba *phba)
10796{
10797 int rc;
10798
10799 rc = pci_enable_msi(phba->pcidev);
10800 if (!rc)
10801 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10802 "0462 PCI enable MSI mode success.\n");
10803 else {
10804 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10805 "0471 PCI enable MSI mode failed (%d)\n", rc);
10806 return rc;
10807 }
10808
10809 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10810 0, LPFC_DRIVER_NAME, phba);
10811 if (rc) {
10812 pci_disable_msi(phba->pcidev);
10813 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10814 "0478 MSI request_irq failed (%d)\n", rc);
10815 }
10816 return rc;
10817}
10818
10819
10820
10821
10822
10823
10824
10825
10826
10827
10828
10829
10830
10831
10832
10833
10834
10835
/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 * Configures the SLI port first, then tries interrupt modes in
 * descending capability order bounded by @cfg_mode: MSI-X, then MSI,
 * then shared INTx.  phba->intr_type records the mode that succeeded.
 *
 * Return: the interrupt mode enabled (0, 1 or 2), or LPFC_INTR_ERROR if
 * none could be enabled.
 */
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
	retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
	if (retval)
		return intr_mode;
	phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;

	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}
10880
10881
10882
10883
10884
10885
10886
10887
10888
10889
10890static void
10891lpfc_sli_disable_intr(struct lpfc_hba *phba)
10892{
10893 int nr_irqs, i;
10894
10895 if (phba->intr_type == MSIX)
10896 nr_irqs = LPFC_MSIX_VECTORS;
10897 else
10898 nr_irqs = 1;
10899
10900 for (i = 0; i < nr_irqs; i++)
10901 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10902 pci_free_irq_vectors(phba->pcidev);
10903
10904
10905 phba->intr_type = NONE;
10906 phba->sli.slistat.sli_intr = 0;
10907}
10908
10909
10910
10911
10912
10913
10914
10915
10916
10917static uint16_t
10918lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10919{
10920 struct lpfc_vector_map_info *cpup;
10921 int cpu;
10922
10923
10924 for_each_present_cpu(cpu) {
10925 cpup = &phba->sli4_hba.cpu_map[cpu];
10926
10927
10928
10929
10930
10931 if ((match == LPFC_FIND_BY_EQ) &&
10932 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10933 (cpup->eq == id))
10934 return cpu;
10935
10936
10937 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10938 return cpu;
10939 }
10940 return 0;
10941}
10942
10943#ifdef CONFIG_X86
10944
10945
10946
10947
10948
10949
10950
10951static int
10952lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10953 uint16_t phys_id, uint16_t core_id)
10954{
10955 struct lpfc_vector_map_info *cpup;
10956 int idx;
10957
10958 for_each_present_cpu(idx) {
10959 cpup = &phba->sli4_hba.cpu_map[idx];
10960
10961 if ((cpup->phys_id == phys_id) &&
10962 (cpup->core_id == core_id) &&
10963 (cpu != idx))
10964 return 1;
10965 }
10966 return 0;
10967}
10968#endif
10969
10970
10971
10972
10973
10974
10975
10976
10977
10978
/**
 * lpfc_assign_eq_map_info - Record an EQ assignment in a CPU's map entry.
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: index of the EQ (and its IRQ vector) being assigned.
 * @flag: flag bits to OR into the CPU's map entry
 *        (e.g. LPFC_CPU_FIRST_IRQ).
 * @cpu: CPU whose cpu_map entry is updated.
 */
static inline void
lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
			unsigned int cpu)
{
	struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);

	cpup->eq = eqidx;
	cpup->flag |= flag;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
			cpu, eqhdl->irq, cpup->eq, cpup->flag);
}
10993
10994
10995
10996
10997
10998
10999
11000static void
11001lpfc_cpu_map_array_init(struct lpfc_hba *phba)
11002{
11003 struct lpfc_vector_map_info *cpup;
11004 struct lpfc_eq_intr_info *eqi;
11005 int cpu;
11006
11007 for_each_possible_cpu(cpu) {
11008 cpup = &phba->sli4_hba.cpu_map[cpu];
11009 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
11010 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
11011 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
11012 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
11013 cpup->flag = 0;
11014 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
11015 INIT_LIST_HEAD(&eqi->list);
11016 eqi->icnt = 0;
11017 }
11018}
11019
11020
11021
11022
11023
11024
11025
11026static void
11027lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
11028{
11029 struct lpfc_hba_eq_hdl *eqhdl;
11030 int i;
11031
11032 for (i = 0; i < phba->cfg_irq_chann; i++) {
11033 eqhdl = lpfc_get_eq_hdl(i);
11034 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
11035 eqhdl->phba = phba;
11036 }
11037}
11038
11039
11040
11041
11042
11043
11044
11045
11046
11047
11048
11049static void
11050lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
11051{
11052 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
11053 int max_phys_id, min_phys_id;
11054 int max_core_id, min_core_id;
11055 struct lpfc_vector_map_info *cpup;
11056 struct lpfc_vector_map_info *new_cpup;
11057#ifdef CONFIG_X86
11058 struct cpuinfo_x86 *cpuinfo;
11059#endif
11060#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11061 struct lpfc_hdwq_stat *c_stat;
11062#endif
11063
11064 max_phys_id = 0;
11065 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
11066 max_core_id = 0;
11067 min_core_id = LPFC_VECTOR_MAP_EMPTY;
11068
11069
11070 for_each_present_cpu(cpu) {
11071 cpup = &phba->sli4_hba.cpu_map[cpu];
11072#ifdef CONFIG_X86
11073 cpuinfo = &cpu_data(cpu);
11074 cpup->phys_id = cpuinfo->phys_proc_id;
11075 cpup->core_id = cpuinfo->cpu_core_id;
11076 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
11077 cpup->flag |= LPFC_CPU_MAP_HYPER;
11078#else
11079
11080 cpup->phys_id = 0;
11081 cpup->core_id = cpu;
11082#endif
11083
11084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11085 "3328 CPU %d physid %d coreid %d flag x%x\n",
11086 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
11087
11088 if (cpup->phys_id > max_phys_id)
11089 max_phys_id = cpup->phys_id;
11090 if (cpup->phys_id < min_phys_id)
11091 min_phys_id = cpup->phys_id;
11092
11093 if (cpup->core_id > max_core_id)
11094 max_core_id = cpup->core_id;
11095 if (cpup->core_id < min_core_id)
11096 min_core_id = cpup->core_id;
11097 }
11098
11099
11100
11101
11102
11103
11104 first_cpu = cpumask_first(cpu_present_mask);
11105 start_cpu = first_cpu;
11106
11107 for_each_present_cpu(cpu) {
11108 cpup = &phba->sli4_hba.cpu_map[cpu];
11109
11110
11111 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11112
11113 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11114
11115
11116
11117
11118
11119
11120 new_cpu = start_cpu;
11121 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11122 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11123 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11124 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
11125 (new_cpup->phys_id == cpup->phys_id))
11126 goto found_same;
11127 new_cpu = cpumask_next(
11128 new_cpu, cpu_present_mask);
11129 if (new_cpu == nr_cpumask_bits)
11130 new_cpu = first_cpu;
11131 }
11132
11133 continue;
11134found_same:
11135
11136 cpup->eq = new_cpup->eq;
11137
11138
11139
11140
11141
11142 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11143 if (start_cpu == nr_cpumask_bits)
11144 start_cpu = first_cpu;
11145
11146 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11147 "3337 Set Affinity: CPU %d "
11148 "eq %d from peer cpu %d same "
11149 "phys_id (%d)\n",
11150 cpu, cpup->eq, new_cpu,
11151 cpup->phys_id);
11152 }
11153 }
11154
11155
11156 start_cpu = first_cpu;
11157
11158 for_each_present_cpu(cpu) {
11159 cpup = &phba->sli4_hba.cpu_map[cpu];
11160
11161
11162 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11163
11164 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11165
11166
11167
11168
11169
11170
11171 new_cpu = start_cpu;
11172 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11173 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11174 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11175 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
11176 goto found_any;
11177 new_cpu = cpumask_next(
11178 new_cpu, cpu_present_mask);
11179 if (new_cpu == nr_cpumask_bits)
11180 new_cpu = first_cpu;
11181 }
11182
11183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11184 "3339 Set Affinity: CPU %d "
11185 "eq %d UNASSIGNED\n",
11186 cpup->hdwq, cpup->eq);
11187 continue;
11188found_any:
11189
11190 cpup->eq = new_cpup->eq;
11191
11192
11193
11194
11195
11196 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11197 if (start_cpu == nr_cpumask_bits)
11198 start_cpu = first_cpu;
11199
11200 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11201 "3338 Set Affinity: CPU %d "
11202 "eq %d from peer cpu %d (%d/%d)\n",
11203 cpu, cpup->eq, new_cpu,
11204 new_cpup->phys_id, new_cpup->core_id);
11205 }
11206 }
11207
11208
11209
11210
11211 idx = 0;
11212 for_each_present_cpu(cpu) {
11213 cpup = &phba->sli4_hba.cpu_map[cpu];
11214
11215
11216 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11217 continue;
11218
11219
11220 cpup->hdwq = idx;
11221 idx++;
11222 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11223 "3333 Set Affinity: CPU %d (phys %d core %d): "
11224 "hdwq %d eq %d flg x%x\n",
11225 cpu, cpup->phys_id, cpup->core_id,
11226 cpup->hdwq, cpup->eq, cpup->flag);
11227 }
11228
11229
11230
11231
11232
11233
11234
11235
11236 next_idx = idx;
11237 start_cpu = 0;
11238 idx = 0;
11239 for_each_present_cpu(cpu) {
11240 cpup = &phba->sli4_hba.cpu_map[cpu];
11241
11242
11243 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
11244 continue;
11245
11246
11247
11248
11249
11250 if (next_idx < phba->cfg_hdw_queue) {
11251 cpup->hdwq = next_idx;
11252 next_idx++;
11253 continue;
11254 }
11255
11256
11257
11258
11259
11260
11261 new_cpu = start_cpu;
11262 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11263 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11264 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11265 new_cpup->phys_id == cpup->phys_id &&
11266 new_cpup->core_id == cpup->core_id) {
11267 goto found_hdwq;
11268 }
11269 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11270 if (new_cpu == nr_cpumask_bits)
11271 new_cpu = first_cpu;
11272 }
11273
11274
11275
11276
11277 new_cpu = start_cpu;
11278 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11279 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11280 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11281 new_cpup->phys_id == cpup->phys_id)
11282 goto found_hdwq;
11283
11284 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11285 if (new_cpu == nr_cpumask_bits)
11286 new_cpu = first_cpu;
11287 }
11288
11289
11290 cpup->hdwq = idx % phba->cfg_hdw_queue;
11291 idx++;
11292 goto logit;
11293 found_hdwq:
11294
11295 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11296 if (start_cpu == nr_cpumask_bits)
11297 start_cpu = first_cpu;
11298 cpup->hdwq = new_cpup->hdwq;
11299 logit:
11300 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11301 "3335 Set Affinity: CPU %d (phys %d core %d): "
11302 "hdwq %d eq %d flg x%x\n",
11303 cpu, cpup->phys_id, cpup->core_id,
11304 cpup->hdwq, cpup->eq, cpup->flag);
11305 }
11306
11307
11308
11309
11310
11311 idx = 0;
11312 for_each_possible_cpu(cpu) {
11313 cpup = &phba->sli4_hba.cpu_map[cpu];
11314#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11315 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11316 c_stat->hdwq_no = cpup->hdwq;
11317#endif
11318 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11319 continue;
11320
11321 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
11322#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11323 c_stat->hdwq_no = cpup->hdwq;
11324#endif
11325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11326 "3340 Set Affinity: not present "
11327 "CPU %d hdwq %d\n",
11328 cpu, cpup->hdwq);
11329 }
11330
11331
11332
11333
11334 return;
11335}
11336
11337
11338
11339
11340
11341
11342
11343
/**
 * lpfc_cpuhp_get_eq - Collect EQs whose only online servicing CPU is @cpu.
 * @phba: pointer to lpfc hba data structure.
 * @cpu: the CPU going offline.
 * @eqlist: output list of EQs that must fall back to software polling.
 *
 * Return: 0 on success, -ENOMEM if the scratch cpumask cannot be
 * allocated.
 */
static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
		  struct list_head *eqlist)
{
	const struct cpumask *maskp;
	struct lpfc_queue *eq;
	struct cpumask *tmp;
	u16 idx;

	/* Scratch mask for the affinity intersections below. */
	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/* If this irq is not affinitized to the cpu going offline,
		 * we don't need to poll the eq attached to it.
		 */
		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
			continue;
		/* Get the cpus that are online and affinitized to this
		 * irq vector.  If more than one such cpu remains, cpuhp
		 * is not going to shut down the vector (this cpu has not
		 * gone offline yet, so it is still counted).
		 */
		cpumask_and(tmp, maskp, cpu_online_mask);
		if (cpumask_weight(tmp) > 1)
			continue;

		/* This vector will be shut down: collect the eq mapped to
		 * it so the caller can switch it to polling.
		 */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		list_add(&eq->_poll_list, eqlist);
	}
	kfree(tmp);
	return 0;
}
11389
/* Unregister this HBA from CPU hotplug and quiesce the EQ poll timer
 * (SLI4 only).
 */
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
					    &phba->cpuhp);
	/*
	 * Unregistering the instance doesn't stop the polling timer;
	 * wait for any in-flight timer users, then cancel it.
	 */
	synchronize_rcu();
	del_timer_sync(&phba->cpuhp_poll_timer);
}
11404
11405static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11406{
11407 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11408 return;
11409
11410 __lpfc_cpuhp_remove(phba);
11411}
11412
/* Register this HBA with CPU hotplug and restart the EQ poll timer if any
 * EQs are currently being polled (SLI4 only).
 */
static void lpfc_cpuhp_add(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	rcu_read_lock();

	/* Re-arm the poll heartbeat if there are EQs left on the list. */
	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();

	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
					 &phba->cpuhp);
}
11429
11430static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11431{
11432 if (phba->pport->load_flag & FC_UNLOADING) {
11433 *retval = -EAGAIN;
11434 return true;
11435 }
11436
11437 if (phba->sli_rev != LPFC_SLI_REV4) {
11438 *retval = 0;
11439 return true;
11440 }
11441
11442
11443 return false;
11444}
11445
11446
11447
11448
11449
11450
11451
/**
 * lpfc_irq_set_aff - Pin an EQ's IRQ to a single CPU.
 * @eqhdl: EQ handle owning the IRQ.
 * @cpu: CPU to affinitize the IRQ to.
 *
 * Rebuilds the handle's affinity mask to contain only @cpu, excludes the
 * IRQ from balancing, and applies the affinity hint.
 */
static inline void
lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
{
	cpumask_clear(&eqhdl->aff_mask);
	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
	irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
}
11460
11461
11462
11463
11464
11465
/**
 * lpfc_irq_clear_aff - Release an EQ IRQ's CPU pinning.
 * @eqhdl: EQ handle owning the IRQ.
 *
 * Empties the handle's affinity mask and makes the IRQ eligible for
 * balancing again.
 */
static inline void
lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
	cpumask_clear(&eqhdl->aff_mask);
	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
}
11472
11473
11474
11475
11476
11477
11478
11479
11480
11481
11482
11483
11484
11485
11486
11487
11488
/**
 * lpfc_irq_rebalance - Rebalance IRQ affinity on a CPU hotplug event.
 * @phba: pointer to lpfc hba data structure.
 * @cpu: the CPU going offline or coming online.
 * @offline: true when @cpu is going offline, false when coming online.
 *
 * Only acts when irq_chann_mode is not NORMAL_MODE (i.e. the driver
 * manages affinity itself rather than relying on PCI_IRQ_AFFINITY) and
 * @cpu belongs to the driver's affinity mask and owns a first-IRQ
 * mapping.  On offline, IRQs pinned to @cpu are migrated to the next
 * online CPU in the mask, or unpinned entirely if no other online CPU
 * remains.  On online, the CPU's EQ IRQ is re-pinned to it.
 */
static void
lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
{
	struct lpfc_vector_map_info *cpup;
	struct cpumask *aff_mask;
	unsigned int cpu_select, cpu_next, idx;
	const struct cpumask *orig_mask;

	/* In NORMAL_MODE the kernel manages affinity for us. */
	if (phba->irq_chann_mode == NORMAL_MODE)
		return;

	orig_mask = &phba->sli4_hba.irq_aff_mask;

	/* CPUs outside the driver's affinity mask are irrelevant here. */
	if (!cpumask_test_cpu(cpu, orig_mask))
		return;

	cpup = &phba->sli4_hba.cpu_map[cpu];

	/* Only CPUs that own an EQ's IRQ need rebalancing. */
	if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
		return;

	if (offline) {
		/* Find the next online CPU in the mask, wrapping past @cpu. */
		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);

		/* Found another online CPU to take over the IRQs? */
		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
			/* Re-pin every IRQ currently affinitized to the
			 * offlining CPU onto the selected CPU.
			 */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				aff_mask = lpfc_get_aff_mask(idx);

				/* Any IRQs affinitized to this cpu move. */
				if (cpumask_test_cpu(cpu, aff_mask))
					lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
							 cpu_select);
			}
		} else {
			/* No online CPU left in the mask: unpin everything. */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++)
				lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
		}
	} else {
		/* CPU coming online: re-pin its EQ's IRQ back to it. */
		lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
	}
}
11538
/* cpuhp offline callback: rebalance IRQ affinity away from @cpu and
 * switch any EQ whose only online servicing CPU was @cpu to software
 * polling.
 */
static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	LIST_HEAD(eqlist);
	int retval;

	if (!phba) {
		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
		return 0;
	}

	/* Bail out while unloading or on non-SLI4 adapters. */
	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, true);

	retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
	if (retval)
		return retval;

	/* Start polling on the collected eq's */
	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
		list_del_init(&eq->_poll_list);
		lpfc_sli4_start_polling(eq);
	}

	return 0;
}
11568
/* cpuhp online callback: re-pin IRQ affinity to @cpu and stop software
 * polling on any EQ whose hdwq is handled by @cpu.
 */
static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	unsigned int n;
	int retval;

	if (!phba) {
		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
		return 0;
	}

	/* Bail out while unloading or on non-SLI4 adapters. */
	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, false);

	/* Interrupts can service these EQs again: stop polling them. */
	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
		n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
		if (n == cpu)
			lpfc_sli4_stop_polling(eq);
	}

	return 0;
}
11594
11595
11596
11597
11598
11599
11600
11601
11602
11603
11604
11605
11606
11607
11608
11609
11610
11611
11612
11613
11614
11615
11616
11617
11618
11619
11620
11621
11622
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates up to cfg_irq_chann MSI-X vectors, requests an interrupt
 * handler for each, records each vector's IRQ in its EQ handle, and
 * seeds the cpu_map with the first CPU servicing each vector.  When the
 * driver manages affinity itself (irq_chann_mode != NORMAL_MODE), the
 * vectors are pinned across the driver's affinity mask; otherwise
 * PCI_IRQ_AFFINITY lets the kernel spread them.  If fewer vectors are
 * granted than requested, cfg_irq_chann is reduced to match.
 *
 * Return: 0 on success, otherwise a negative errno (all requested IRQs
 * and vectors are released on failure).
 */
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;
	char *name;
	const struct cpumask *aff_mask = NULL;
	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_hba_eq_hdl *eqhdl;
	const struct cpumask *maskp;
	unsigned int flags = PCI_IRQ_MSIX;

	/* Set up MSI-X multi-message vectors */
	vectors = phba->cfg_irq_chann;

	if (phba->irq_chann_mode != NORMAL_MODE)
		aff_mask = &phba->sli4_hba.irq_aff_mask;

	if (aff_mask) {
		cpu_cnt = cpumask_weight(aff_mask);
		vectors = min(phba->cfg_irq_chann, cpu_cnt);

		/* cpu: iterates over aff_mask, including offline or online.
		 * cpu_select: iterates over online cpus in aff_mask to set
		 * affinity.
		 */
		cpu = cpumask_first(aff_mask);
		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
	} else {
		flags |= PCI_IRQ_AFFINITY;
	}

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	vectors = rc;

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		name = eqhdl->handler_name;
		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
			 LPFC_DRIVER_HANDLER_NAME"%d", index);

		eqhdl->idx = index;
		rc = request_irq(pci_irq_vector(phba->pcidev, index),
			 &lpfc_sli4_hba_intr_handler, 0,
			 name, eqhdl);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}

		eqhdl->irq = pci_irq_vector(phba->pcidev, index);

		if (aff_mask) {
			/* If found a neighboring online cpu, set affinity */
			if (cpu_select < nr_cpu_ids)
				lpfc_irq_set_aff(eqhdl, cpu_select);

			/* Assign EQ to cpu_map */
			lpfc_assign_eq_map_info(phba, index,
						LPFC_CPU_FIRST_IRQ,
						cpu);

			/* Iterate to next offline or online cpu in aff_mask */
			cpu = cpumask_next(cpu, aff_mask);

			/* Find next online cpu in aff_mask to set affinity */
			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
		} else if (vectors == 1) {
			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
						cpu);
		} else {
			maskp = pci_irq_get_affinity(phba->pcidev, index);

			/* Loop through all CPUs associated with vector index */
			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
				cpup = &phba->sli4_hba.cpu_map[cpu];

				/* On some platforms irq vectors may be
				 * affinitized to all cpus, which could set
				 * every cpu_map.eq to the last vector and
				 * overwrite earlier assignments.  Only take
				 * the first CPU not yet mapped to a vector
				 * so every vector gets a place in cpu_map;
				 * lpfc_cpu_affinity_check balances the rest
				 * later.
				 */
				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
					continue;
				lpfc_assign_eq_map_info(phba, index,
							LPFC_CPU_FIRST_IRQ,
							cpu);
				break;
			}
		}
	}

	/* Got fewer vectors than requested: shrink the channel count. */
	if (vectors != phba->cfg_irq_chann) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->cfg_irq_chann, vectors);
		if (phba->cfg_irq_chann > vectors)
			phba->cfg_irq_chann = vectors;
	}

	return rc;

cfg_fail_out:
	/* free the irqs already requested */
	for (--index; index >= 0; index--) {
		eqhdl = lpfc_get_eq_hdl(index);
		lpfc_irq_clear_aff(eqhdl);
		irq_set_affinity_hint(eqhdl->irq, NULL);
		free_irq(eqhdl->irq, eqhdl);
	}

	/* Unconfigure the MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
11757
11758
11759
11760
11761
11762
11763
11764
11765
11766
11767
11768
11769
11770
11771
11772static int
11773lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11774{
11775 int rc, index;
11776 unsigned int cpu;
11777 struct lpfc_hba_eq_hdl *eqhdl;
11778
11779 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11780 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11781 if (rc > 0)
11782 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11783 "0487 PCI enable MSI mode success.\n");
11784 else {
11785 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11786 "0488 PCI enable MSI mode failed (%d)\n", rc);
11787 return rc ? rc : -1;
11788 }
11789
11790 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11791 0, LPFC_DRIVER_NAME, phba);
11792 if (rc) {
11793 pci_free_irq_vectors(phba->pcidev);
11794 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11795 "0490 MSI request_irq failed (%d)\n", rc);
11796 return rc;
11797 }
11798
11799 eqhdl = lpfc_get_eq_hdl(0);
11800 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11801
11802 cpu = cpumask_first(cpu_present_mask);
11803 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11804
11805 for (index = 0; index < phba->cfg_irq_chann; index++) {
11806 eqhdl = lpfc_get_eq_hdl(index);
11807 eqhdl->idx = index;
11808 }
11809
11810 return 0;
11811}
11812
11813
11814
11815
11816
11817
11818
11819
11820
11821
11822
11823
11824
11825
11826
11827
11828
11829
11830static uint32_t
11831lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11832{
11833 uint32_t intr_mode = LPFC_INTR_ERROR;
11834 int retval, idx;
11835
11836 if (cfg_mode == 2) {
11837
11838 retval = 0;
11839 if (!retval) {
11840
11841 retval = lpfc_sli4_enable_msix(phba);
11842 if (!retval) {
11843
11844 phba->intr_type = MSIX;
11845 intr_mode = 2;
11846 }
11847 }
11848 }
11849
11850
11851 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11852 retval = lpfc_sli4_enable_msi(phba);
11853 if (!retval) {
11854
11855 phba->intr_type = MSI;
11856 intr_mode = 1;
11857 }
11858 }
11859
11860
11861 if (phba->intr_type == NONE) {
11862 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11863 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11864 if (!retval) {
11865 struct lpfc_hba_eq_hdl *eqhdl;
11866 unsigned int cpu;
11867
11868
11869 phba->intr_type = INTx;
11870 intr_mode = 0;
11871
11872 eqhdl = lpfc_get_eq_hdl(0);
11873 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11874
11875 cpu = cpumask_first(cpu_present_mask);
11876 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11877 cpu);
11878 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11879 eqhdl = lpfc_get_eq_hdl(idx);
11880 eqhdl->idx = idx;
11881 }
11882 }
11883 }
11884 return intr_mode;
11885}
11886
11887
11888
11889
11890
11891
11892
11893
11894
11895
11896static void
11897lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11898{
11899
11900 if (phba->intr_type == MSIX) {
11901 int index;
11902 struct lpfc_hba_eq_hdl *eqhdl;
11903
11904
11905 for (index = 0; index < phba->cfg_irq_chann; index++) {
11906 eqhdl = lpfc_get_eq_hdl(index);
11907 lpfc_irq_clear_aff(eqhdl);
11908 irq_set_affinity_hint(eqhdl->irq, NULL);
11909 free_irq(eqhdl->irq, eqhdl);
11910 }
11911 } else {
11912 free_irq(phba->pcidev->irq, phba);
11913 }
11914
11915 pci_free_irq_vectors(phba->pcidev);
11916
11917
11918 phba->intr_type = NONE;
11919 phba->sli.slistat.sli_intr = 0;
11920}
11921
11922
11923
11924
11925
11926
11927
11928
11929static void
11930lpfc_unset_hba(struct lpfc_hba *phba)
11931{
11932 struct lpfc_vport *vport = phba->pport;
11933 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11934
11935 spin_lock_irq(shost->host_lock);
11936 vport->load_flag |= FC_UNLOADING;
11937 spin_unlock_irq(shost->host_lock);
11938
11939 kfree(phba->vpi_bmask);
11940 kfree(phba->vpi_ids);
11941
11942 lpfc_stop_hba_timers(phba);
11943
11944 phba->pport->work_port_events = 0;
11945
11946 lpfc_sli_hba_down(phba);
11947
11948 lpfc_sli_brdrestart(phba);
11949
11950 lpfc_sli_disable_intr(phba);
11951
11952 return;
11953}
11954
11955
11956
11957
11958
11959
11960
11961
11962
11963
11964
11965
11966
11967
11968static void
11969lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11970{
11971 struct lpfc_sli4_hdw_queue *qp;
11972 int idx, ccnt;
11973 int wait_time = 0;
11974 int io_xri_cmpl = 1;
11975 int nvmet_xri_cmpl = 1;
11976 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11977
11978
11979
11980
11981
11982 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11983
11984
11985 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11986 lpfc_nvme_wait_for_io_drain(phba);
11987
11988 ccnt = 0;
11989 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11990 qp = &phba->sli4_hba.hdwq[idx];
11991 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11992 if (!io_xri_cmpl)
11993 ccnt++;
11994 }
11995 if (ccnt)
11996 io_xri_cmpl = 0;
11997
11998 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11999 nvmet_xri_cmpl =
12000 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
12001 }
12002
12003 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
12004 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
12005 if (!nvmet_xri_cmpl)
12006 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12007 "6424 NVMET XRI exchange busy "
12008 "wait time: %d seconds.\n",
12009 wait_time/1000);
12010 if (!io_xri_cmpl)
12011 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12012 "6100 IO XRI exchange busy "
12013 "wait time: %d seconds.\n",
12014 wait_time/1000);
12015 if (!els_xri_cmpl)
12016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12017 "2878 ELS XRI exchange busy "
12018 "wait time: %d seconds.\n",
12019 wait_time/1000);
12020 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
12021 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
12022 } else {
12023 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
12024 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
12025 }
12026
12027 ccnt = 0;
12028 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
12029 qp = &phba->sli4_hba.hdwq[idx];
12030 io_xri_cmpl = list_empty(
12031 &qp->lpfc_abts_io_buf_list);
12032 if (!io_xri_cmpl)
12033 ccnt++;
12034 }
12035 if (ccnt)
12036 io_xri_cmpl = 0;
12037
12038 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12039 nvmet_xri_cmpl = list_empty(
12040 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
12041 }
12042 els_xri_cmpl =
12043 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
12044
12045 }
12046}
12047
12048
12049
12050
12051
12052
12053
12054
12055
12056
12057
/**
 * lpfc_sli4_hba_unset - Unset the SLI-4 HBA device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * Reverses lpfc_sli4_hba_setup(): blocks mailbox traffic, aborts
 * outstanding iocbs, waits for XRI exchange busy to drain, removes the
 * CPU-hotplug callback, disables interrupts, stops the worker thread,
 * tears down the SLI4 queues, and resets the PCI function.  Teardown
 * order here is deliberate; do not reorder.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	/* Only mark interrupts disabled if a port was ever brought up. */
	if (phba->pport)
		phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any new asynchronous mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}

	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* per-phba callback de-registration for hotplug event */
	if (phba->pport)
		lpfc_cpuhp_remove(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Disable FW logging to host memory */
	lpfc_ras_stop_fwlog(phba);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
	lpfc_sli4_queue_unset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Free RAS DMA memory */
	if (phba->ras_fwlog.ras_enabled)
		lpfc_sli4_ras_dma_free(phba);

	/* Stop the SLI4 device port */
	if (phba->pport)
		phba->pport->work_port_events = 0;
}
12135
12136
12137
12138
12139
12140
12141
12142
12143
12144
12145
12146
12147
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * Issues the GET_SLI4_PARAMETERS mailbox command (polled when interrupts
 * are not yet enabled) and caches the port capabilities in
 * phba->sli4_hba.pc_sli4_params, reconciling driver configuration
 * (NVME support, PBDE, suppress-response, embedded FCP CMD IU, expanded
 * WQ/CQ page sizes, MDS diagnostics, NSLER, vendor MIB) against what the
 * firmware reports.
 *
 * Return: 0 on success, the mailbox return code on mailbox failure, or
 * -ENODEV when neither FCP nor NVME can be enabled.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	bool exp_wqcq_pages = true;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings.  The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	/* Cache every reported capability field for later use. */
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
					   mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Check for Extended Pre-Registered SGL support */
	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);

	/* Check for firmware nvme support */
	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
	      bf_get(cfg_xib, mbx_sli4_parameters));

	if (rc) {
		/* Save this to indicate the Firmware supports NVME */
		sli4_params->nvme = 1;

		/* Firmware NVME support, check driver FC4 NVME support */
		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
					"6133 Disabling NVME support: "
					"FC4 type not supported: x%x\n",
					phba->cfg_enable_fc4_type);
			goto fcponly;
		}
	} else {
		/* No firmware NVME support, check driver FC4 NVME support */
		sli4_params->nvme = 0;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
					"6101 Disabling NVME support: Not "
					"supported by firmware (%d %d) x%x\n",
					bf_get(cfg_nvme, mbx_sli4_parameters),
					bf_get(cfg_xib, mbx_sli4_parameters),
					phba->cfg_enable_fc4_type);
fcponly:
			phba->nvme_support = 0;
			phba->nvmet_support = 0;
			phba->cfg_nvmet_mrq = 0;
			phba->cfg_nvme_seg_cnt = 0;

			/* If no FC4 type support, move to just SCSI support */
			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
				return -ENODEV;
			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		}
	}

	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
	 * accommodate large NVME IOs in a single buffer.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;

	/* PBDE requires if_type 6 and cfg_xib; otherwise disable it. */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
		phba->cfg_enable_pbde = 0;

	/*
	 * To support Suppress Response feature we must satisfy 3 conditions:
	 * lpfc_suppress_rsp module parameter must be set (default).
	 * In SLI4-Parameters Descriptor:
	 * Extended Inline Buffers (XIB) must be supported.
	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
	 * (double negative).
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, phba->nvme_support,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	/* LNCR A0 family does not support expanded WQ/CQ pages. */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
	     LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	/* Expanded (16K) WQ/CQ pages need 128-byte WQE support too. */
	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;
	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	/*
	 * Check if the SLI port supports NSLER (sequence-level error recovery)
	 */
	if (bf_get(cfg_nsler, mbx_sli4_parameters))
		phba->nsler = 1;
	else
		phba->nsler = 0;

	/* Save vendor MIB info for use during HBA setup */
	sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
	sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
	sli4_params->mib_size = mbx_sli4_parameters->mib_size;
	sli4_params->mi_value = LPFC_DFLT_MIB_VAL;

	/* Next we check for Vendor MIB support */
	if (sli4_params->mi_ver && phba->cfg_enable_mi)
		phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6461 MIB attr %d enable %d FDMI %d buf %d:%d\n",
			sli4_params->mi_ver, phba->cfg_enable_mi,
			sli4_params->mi_value, sli4_params->mib_bde_cnt,
			sli4_params->mib_size);
	return 0;
}
12351
12352
12353
12354
12355
12356
12357
12358
12359
12360
12361
12362
12363
12364
12365
12366
12367
12368
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * Attaches a device with SLI-3 interface spec to the PCI subsystem:
 * allocates the HBA, enables the PCI device, maps PCI memory, sets up
 * driver resources, creates the SCSI host, then enables interrupts and
 * brings the HBA up — retrying at progressively less capable interrupt
 * modes until an active-interrupt test passes.  Errors unwind through
 * the goto ladder in reverse setup order.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */

	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values of Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Save shost so later error paths can release the reference. */
	shost = lpfc_shost_from_vport(vport);
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
12519
12520
12521
12522
12523
12524
12525
12526
12527
12528
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * Detaches a device with SLI-3 interface spec from the PCI subsystem:
 * terminates vports, removes the SCSI host, brings the SLI layer down,
 * and frees every resource acquired in lpfc_pci_probe_one_s3(), in
 * reverse order of acquisition.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the port unloading before any teardown begins. */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Clean up all nodes, mailboxes and IOs. */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disable all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
12618
12619
12620
12621
12622
12623
12624
12625
12626
12627
12628
12629
12630
12631
12632
12633
12634
12635
12636
12637
12638
12639static int __maybe_unused
12640lpfc_pci_suspend_one_s3(struct device *dev_d)
12641{
12642 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12643 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12644
12645 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12646 "0473 PCI device Power Management suspend.\n");
12647
12648
12649 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12650 lpfc_offline(phba);
12651 kthread_stop(phba->worker_thread);
12652
12653
12654 lpfc_sli_disable_intr(phba);
12655
12656 return 0;
12657}
12658
12659
12660
12661
12662
12663
12664
12665
12666
12667
12668
12669
12670
12671
12672
12673
12674
12675
12676
12677
12678static int __maybe_unused
12679lpfc_pci_resume_one_s3(struct device *dev_d)
12680{
12681 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12682 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12683 uint32_t intr_mode;
12684 int error;
12685
12686 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12687 "0452 PCI device Power Management resume.\n");
12688
12689
12690 phba->worker_thread = kthread_run(lpfc_do_work, phba,
12691 "lpfc_worker_%d", phba->brd_no);
12692 if (IS_ERR(phba->worker_thread)) {
12693 error = PTR_ERR(phba->worker_thread);
12694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12695 "0434 PM resume failed to start worker "
12696 "thread: error=x%x.\n", error);
12697 return error;
12698 }
12699
12700
12701 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12702 if (intr_mode == LPFC_INTR_ERROR) {
12703 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12704 "0430 PM resume Failed to enable interrupt\n");
12705 return -EIO;
12706 } else
12707 phba->intr_mode = intr_mode;
12708
12709
12710 lpfc_sli_brdrestart(phba);
12711 lpfc_online(phba);
12712
12713
12714 lpfc_log_intr_mode(phba, phba->intr_mode);
12715
12716 return 0;
12717}
12718
12719
12720
12721
12722
12723
12724
12725
12726static void
12727lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12728{
12729 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12730 "2723 PCI channel I/O abort preparing for recovery\n");
12731
12732
12733
12734
12735
12736 lpfc_sli_abort_fcp_rings(phba);
12737}
12738
12739
12740
12741
12742
12743
12744
12745
12746
12747static void
12748lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12749{
12750 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12751 "2710 PCI channel disable preparing for reset\n");
12752
12753
12754 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
12755
12756
12757 lpfc_scsi_dev_block(phba);
12758
12759
12760 lpfc_sli_flush_io_rings(phba);
12761
12762
12763 lpfc_stop_hba_timers(phba);
12764
12765
12766 lpfc_sli_disable_intr(phba);
12767 pci_disable_device(phba->pcidev);
12768}
12769
12770
12771
12772
12773
12774
12775
12776
12777
12778static void
12779lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12780{
12781 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12782 "2711 PCI channel permanent disable for failure\n");
12783
12784 lpfc_scsi_dev_block(phba);
12785
12786
12787 lpfc_stop_hba_timers(phba);
12788
12789
12790 lpfc_sli_flush_io_rings(phba);
12791}
12792
12793
12794
12795
12796
12797
12798
12799
12800
12801
12802
12803
12804
12805
12806
12807
12808
12809
12810
12811static pci_ers_result_t
12812lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12813{
12814 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12815 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12816
12817 switch (state) {
12818 case pci_channel_io_normal:
12819
12820 lpfc_sli_prep_dev_for_recover(phba);
12821 return PCI_ERS_RESULT_CAN_RECOVER;
12822 case pci_channel_io_frozen:
12823
12824 lpfc_sli_prep_dev_for_reset(phba);
12825 return PCI_ERS_RESULT_NEED_RESET;
12826 case pci_channel_io_perm_failure:
12827
12828 lpfc_sli_prep_dev_for_perm_failure(phba);
12829 return PCI_ERS_RESULT_DISCONNECT;
12830 default:
12831
12832 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12833 "0472 Unknown PCI error state: x%x\n", state);
12834 lpfc_sli_prep_dev_for_reset(phba);
12835 return PCI_ERS_RESULT_NEED_RESET;
12836 }
12837}
12838
12839
12840
12841
12842
12843
12844
12845
12846
12847
12848
12849
12850
12851
12852
12853
12854
12855
12856
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * Called by the PCI subsystem after a bus reset (following a
 * PCI_ERS_RESULT_NEED_RESET verdict) to restart the card as if from a
 * cold boot: re-enables the PCI device, restores/saves PCI state,
 * re-enables the interrupt, then takes the HBA offline and restarts the
 * board without passing any I/O traffic; .resume brings it back online.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * pci_restore_state() clears the device's saved_state flag, so
	 * save the restored state again for any later suspend/reset.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Board is no longer active until setup completes. */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
12907
12908
12909
12910
12911
12912
12913
12914
12915
12916
12917
12918static void
12919lpfc_io_resume_s3(struct pci_dev *pdev)
12920{
12921 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12922 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12923
12924
12925 lpfc_online(phba);
12926}
12927
12928
12929
12930
12931
12932
12933
12934int
12935lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12936{
12937 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12938
12939 if (phba->sli_rev == LPFC_SLI_REV4) {
12940 if (max_xri <= 100)
12941 return 10;
12942 else if (max_xri <= 256)
12943 return 25;
12944 else if (max_xri <= 512)
12945 return 50;
12946 else if (max_xri <= 1024)
12947 return 100;
12948 else if (max_xri <= 1536)
12949 return 150;
12950 else if (max_xri <= 2048)
12951 return 200;
12952 else
12953 return 250;
12954 } else
12955 return 0;
12956}
12957
12958
12959
12960
12961
12962
12963
12964int
12965lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12966{
12967 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12968
12969 if (phba->nvmet_support)
12970 max_xri += LPFC_NVMET_BUF_POST;
12971 return max_xri;
12972}
12973
12974
12975static int
12976lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12977 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12978 const struct firmware *fw)
12979{
12980 int rc;
12981
12982
12983
12984
12985
12986
12987
12988 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
12989 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12990 magic_number != MAGIC_NUMBER_G6) ||
12991 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12992 magic_number != MAGIC_NUMBER_G7)) {
12993 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12994 "3030 This firmware version is not supported on"
12995 " this HBA model. Device:%x Magic:%x Type:%x "
12996 "ID:%x Size %d %zd\n",
12997 phba->pcidev->device, magic_number, ftype, fid,
12998 fsize, fw->size);
12999 rc = -EINVAL;
13000 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
13001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13002 "3021 Firmware downloads have been prohibited "
13003 "by a system configuration setting on "
13004 "Device:%x Magic:%x Type:%x ID:%x Size %d "
13005 "%zd\n",
13006 phba->pcidev->device, magic_number, ftype, fid,
13007 fsize, fw->size);
13008 rc = -EACCES;
13009 } else {
13010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13011 "3022 FW Download failed. Add Status x%x "
13012 "Device:%x Magic:%x Type:%x ID:%x Size %d "
13013 "%zd\n",
13014 offset, phba->pcidev->device, magic_number,
13015 ftype, fid, fsize, fw->size);
13016 rc = -EIO;
13017 }
13018 return rc;
13019}
13020
13021
13022
13023
13024
13025
13026
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure (passed as void *).
 *
 * Compares the image's revision against the running firmware; when they
 * differ, stages the image through a chain of DMA-coherent buffers one
 * SLI4 page at a time and flashes each chunk via lpfc_wr_object().
 * Always releases @fw before returning and logs the final status.
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		/* Allocate the chain of DMA buffers used to stage chunks. */
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		/* Fill the buffer chain from the image and write each batch;
		 * lpfc_wr_object() advances offset by the amount written.
		 */
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					/* Final partial page of the image. */
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
				    (fw->size - offset), &offset);
			if (rc) {
				rc = lpfc_log_write_firmware_error(phba, offset,
								   magic_number,
								   ftype,
								   fid,
								   fsize,
								   fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	/* Free the staging buffers whether or not the update succeeded. */
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	if (rc < 0)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3024 Firmware update success: size %d.\n", rc);
}
13125
13126
13127
13128
13129
13130
13131
13132
13133
13134int
13135lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
13136{
13137 uint8_t file_name[ELX_MODEL_NAME_SIZE];
13138 int ret;
13139 const struct firmware *fw;
13140
13141
13142 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
13143 LPFC_SLI_INTF_IF_TYPE_2)
13144 return -EPERM;
13145
13146 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
13147
13148 if (fw_upgrade == INT_FW_UPGRADE) {
13149 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
13150 file_name, &phba->pcidev->dev,
13151 GFP_KERNEL, (void *)phba,
13152 lpfc_write_firmware);
13153 } else if (fw_upgrade == RUN_FW_UPGRADE) {
13154 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
13155 if (!ret)
13156 lpfc_write_firmware(fw, (void *)phba);
13157 } else {
13158 ret = -EINVAL;
13159 }
13160
13161 return ret;
13162}
13163
13164
13165
13166
13167
13168
13169
13170
13171
13172
13173
13174
13175
13176
13177
13178
13179
13180
13181
/**
 * lpfc_pci_probe_one_s4 - PCI probe handler for an SLI-4 HBA.
 * @pdev: PCI device being attached.
 * @pid: entry of the PCI id table that matched @pdev.
 *
 * Allocates the lpfc_hba, enables the PCI device, maps register space,
 * sets up SLI-4 driver resources and interrupts, creates the SCSI host
 * and brings the SLI-4 port up.  Failures unwind in reverse order
 * through the goto chain at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	INIT_LIST_HEAD(&phba->poll_list);

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-2 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Requested interrupt mode before enabling interrupts below */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);

	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Without MSI-X, fall back to a single IRQ channel (and a single
	 * nvmet MRQ when NVME target mode is enabled).
	 */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport);

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* NVME initiator: bind the local port to the nvme_fc transport.
	 * A localport-create failure is logged but is not treated as a
	 * probe failure.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* Check for firmware upgrade if requested by configuration */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	/* Register with the CPU hotplug machinery for per-cpu polling */
	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
13357
13358
13359
13360
13361
13362
13363
13364
13365
13366
/**
 * lpfc_pci_remove_one_s4 - PCI remove handler for an SLI-4 HBA.
 * @pdev: PCI device being removed.
 *
 * Marks the physical port unloading, terminates all vports, removes the
 * SCSI host and NVME bindings, tears down driver resources and finally
 * releases PCI resources and frees the HBA.  Steps mirror
 * lpfc_pci_probe_one_s4() in reverse order.
 */
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Clean up the physical port, then destroy the nvmet target port
	 * and nvme local port bindings.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools if rebalancing was enabled */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	lpfc_debugfs_terminate(vport);

	/* Stop timers and unlink the vport from the port list */
	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Free I/O and iocb resources before unsetting driver resources,
	 * then bring the SLI-4 layer down.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}
13442
13443
13444
13445
13446
13447
13448
13449
13450
13451
13452
13453
13454
13455
13456
13457
13458
13459
13460
13461
13462
/**
 * lpfc_pci_suspend_one_s4 - PM suspend handler for an SLI-4 HBA.
 * @dev_d: device being suspended.
 *
 * Takes the port offline, stops the worker thread and tears down
 * interrupts and SLI-4 queues in preparation for power-down.
 *
 * Return: always 0.
 */
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device and stop the worker thread */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device and release the queues */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}
13483
13484
13485
13486
13487
13488
13489
13490
13491
13492
13493
13494
13495
13496
13497
13498
13499
13500
13501
13502
13503static int __maybe_unused
13504lpfc_pci_resume_one_s4(struct device *dev_d)
13505{
13506 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13507 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13508 uint32_t intr_mode;
13509 int error;
13510
13511 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13512 "0292 PCI device Power Management resume.\n");
13513
13514
13515 phba->worker_thread = kthread_run(lpfc_do_work, phba,
13516 "lpfc_worker_%d", phba->brd_no);
13517 if (IS_ERR(phba->worker_thread)) {
13518 error = PTR_ERR(phba->worker_thread);
13519 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13520 "0293 PM resume failed to start worker "
13521 "thread: error=x%x.\n", error);
13522 return error;
13523 }
13524
13525
13526 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13527 if (intr_mode == LPFC_INTR_ERROR) {
13528 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13529 "0294 PM resume Failed to enable interrupt\n");
13530 return -EIO;
13531 } else
13532 phba->intr_mode = intr_mode;
13533
13534
13535 lpfc_sli_brdrestart(phba);
13536 lpfc_online(phba);
13537
13538
13539 lpfc_log_intr_mode(phba, phba->intr_mode);
13540
13541 return 0;
13542}
13543
13544
13545
13546
13547
13548
13549
13550
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare for recovery from a PCI I/O abort.
 * @phba: pointer to lpfc hba data structure.
 *
 * Called for a non-fatal (pci_channel_io_normal) channel error: aborts
 * all outstanding FCP commands on the rings so the SCSI mid-layer can
 * retry them once the channel recovers.
 */
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2828 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA; abort all I/Os on the
	 * FCP rings and let the mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
13562
13563
13564
13565
13566
13567
13568
13569
13570
/**
 * lpfc_sli4_prep_dev_for_reset - Quiesce the SLI-4 device before a PCI reset.
 * @phba: pointer to lpfc hba data structure.
 *
 * Blocks management and SCSI I/O, flushes outstanding I/Os, stops the
 * HBA timers and disables interrupts/queues, then disables the PCI
 * device so the subsequent slot reset starts from a quiesced state.
 */
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* Stop all HBA timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt, release the queues and the pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
13594
13595
13596
13597
13598
13599
13600
13601
13602
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Handle permanent PCI channel failure.
 * @phba: pointer to lpfc hba data structure.
 *
 * The PCI channel is permanently disabled: block SCSI I/O, stop the
 * HBA timers and flush all outstanding I/Os.  The caller
 * (lpfc_io_error_detected_s4) then returns PCI_ERS_RESULT_DISCONNECT.
 */
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Stop all HBA timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}
13618
13619
13620
13621
13622
13623
13624
13625
13626
13627
13628
13629
13630
13631
13632
13633
13634
13635static pci_ers_result_t
13636lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13637{
13638 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13639 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13640
13641 switch (state) {
13642 case pci_channel_io_normal:
13643
13644 lpfc_sli4_prep_dev_for_recover(phba);
13645 return PCI_ERS_RESULT_CAN_RECOVER;
13646 case pci_channel_io_frozen:
13647
13648 lpfc_sli4_prep_dev_for_reset(phba);
13649 return PCI_ERS_RESULT_NEED_RESET;
13650 case pci_channel_io_perm_failure:
13651
13652 lpfc_sli4_prep_dev_for_perm_failure(phba);
13653 return PCI_ERS_RESULT_DISCONNECT;
13654 default:
13655
13656 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13657 "2825 Unknown PCI error state: x%x\n", state);
13658 lpfc_sli4_prep_dev_for_reset(phba);
13659 return PCI_ERS_RESULT_NEED_RESET;
13660 }
13661}
13662
13663
13664
13665
13666
13667
13668
13669
13670
13671
13672
13673
13674
13675
13676
13677
13678
13679
13680
/**
 * lpfc_io_slot_reset_s4 - Restore an SLI-4 HBA after a PCI slot reset.
 * @pdev: PCI device that was reset.
 *
 * Re-enables the PCI device, restores and re-saves its config space,
 * marks the SLI layer inactive and re-enables interrupts in the
 * previously active mode.
 *
 * Return: PCI_ERS_RESULT_RECOVERED on success, otherwise
 * PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * pci_restore_state() clears the device's saved_state flag, so
	 * save the restored state again for any later restore.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Mark the SLI layer as not active so it is brought up cleanly */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
13726
13727
13728
13729
13730
13731
13732
13733
13734
13735
13736
13737static void
13738lpfc_io_resume_s4(struct pci_dev *pdev)
13739{
13740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13742
13743
13744
13745
13746
13747
13748
13749 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13750
13751 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13752 lpfc_offline(phba);
13753 lpfc_sli_brdrestart(phba);
13754
13755 lpfc_online(phba);
13756 }
13757}
13758
13759
13760
13761
13762
13763
13764
13765
13766
13767
13768
13769
13770
13771
13772
13773
13774
13775
13776
13777static int
13778lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13779{
13780 int rc;
13781 struct lpfc_sli_intf intf;
13782
13783 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13784 return -ENODEV;
13785
13786 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13787 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13788 rc = lpfc_pci_probe_one_s4(pdev, pid);
13789 else
13790 rc = lpfc_pci_probe_one_s3(pdev, pid);
13791
13792 return rc;
13793}
13794
13795
13796
13797
13798
13799
13800
13801
13802
13803
13804
13805static void
13806lpfc_pci_remove_one(struct pci_dev *pdev)
13807{
13808 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13809 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13810
13811 switch (phba->pci_dev_grp) {
13812 case LPFC_PCI_DEV_LP:
13813 lpfc_pci_remove_one_s3(pdev);
13814 break;
13815 case LPFC_PCI_DEV_OC:
13816 lpfc_pci_remove_one_s4(pdev);
13817 break;
13818 default:
13819 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13820 "1424 Invalid PCI device group: 0x%x\n",
13821 phba->pci_dev_grp);
13822 break;
13823 }
13824 return;
13825}
13826
13827
13828
13829
13830
13831
13832
13833
13834
13835
13836
13837
13838
13839
13840static int __maybe_unused
13841lpfc_pci_suspend_one(struct device *dev)
13842{
13843 struct Scsi_Host *shost = dev_get_drvdata(dev);
13844 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13845 int rc = -ENODEV;
13846
13847 switch (phba->pci_dev_grp) {
13848 case LPFC_PCI_DEV_LP:
13849 rc = lpfc_pci_suspend_one_s3(dev);
13850 break;
13851 case LPFC_PCI_DEV_OC:
13852 rc = lpfc_pci_suspend_one_s4(dev);
13853 break;
13854 default:
13855 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13856 "1425 Invalid PCI device group: 0x%x\n",
13857 phba->pci_dev_grp);
13858 break;
13859 }
13860 return rc;
13861}
13862
13863
13864
13865
13866
13867
13868
13869
13870
13871
13872
13873
13874
13875
13876static int __maybe_unused
13877lpfc_pci_resume_one(struct device *dev)
13878{
13879 struct Scsi_Host *shost = dev_get_drvdata(dev);
13880 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13881 int rc = -ENODEV;
13882
13883 switch (phba->pci_dev_grp) {
13884 case LPFC_PCI_DEV_LP:
13885 rc = lpfc_pci_resume_one_s3(dev);
13886 break;
13887 case LPFC_PCI_DEV_OC:
13888 rc = lpfc_pci_resume_one_s4(dev);
13889 break;
13890 default:
13891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13892 "1426 Invalid PCI device group: 0x%x\n",
13893 phba->pci_dev_grp);
13894 break;
13895 }
13896 return rc;
13897}
13898
13899
13900
13901
13902
13903
13904
13905
13906
13907
13908
13909
13910
13911
13912
13913
13914static pci_ers_result_t
13915lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13916{
13917 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13918 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13919 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13920
13921 switch (phba->pci_dev_grp) {
13922 case LPFC_PCI_DEV_LP:
13923 rc = lpfc_io_error_detected_s3(pdev, state);
13924 break;
13925 case LPFC_PCI_DEV_OC:
13926 rc = lpfc_io_error_detected_s4(pdev, state);
13927 break;
13928 default:
13929 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13930 "1427 Invalid PCI device group: 0x%x\n",
13931 phba->pci_dev_grp);
13932 break;
13933 }
13934 return rc;
13935}
13936
13937
13938
13939
13940
13941
13942
13943
13944
13945
13946
13947
13948
13949
13950
13951static pci_ers_result_t
13952lpfc_io_slot_reset(struct pci_dev *pdev)
13953{
13954 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13955 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13956 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13957
13958 switch (phba->pci_dev_grp) {
13959 case LPFC_PCI_DEV_LP:
13960 rc = lpfc_io_slot_reset_s3(pdev);
13961 break;
13962 case LPFC_PCI_DEV_OC:
13963 rc = lpfc_io_slot_reset_s4(pdev);
13964 break;
13965 default:
13966 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13967 "1428 Invalid PCI device group: 0x%x\n",
13968 phba->pci_dev_grp);
13969 break;
13970 }
13971 return rc;
13972}
13973
13974
13975
13976
13977
13978
13979
13980
13981
13982
13983
13984static void
13985lpfc_io_resume(struct pci_dev *pdev)
13986{
13987 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13988 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13989
13990 switch (phba->pci_dev_grp) {
13991 case LPFC_PCI_DEV_LP:
13992 lpfc_io_resume_s3(pdev);
13993 break;
13994 case LPFC_PCI_DEV_OC:
13995 lpfc_io_resume_s4(pdev);
13996 break;
13997 default:
13998 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13999 "1429 Invalid PCI device group: 0x%x\n",
14000 phba->pci_dev_grp);
14001 break;
14002 }
14003 return;
14004}
14005
14006
14007
14008
14009
14010
14011
14012
14013
14014
14015
14016static void
14017lpfc_sli4_oas_verify(struct lpfc_hba *phba)
14018{
14019
14020 if (!phba->cfg_EnableXLane)
14021 return;
14022
14023 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
14024 phba->cfg_fof = 1;
14025 } else {
14026 phba->cfg_fof = 0;
14027 mempool_destroy(phba->device_data_mem_pool);
14028 phba->device_data_mem_pool = NULL;
14029 }
14030
14031 return;
14032}
14033
14034
14035
14036
14037
14038
14039
14040
14041void
14042lpfc_sli4_ras_init(struct lpfc_hba *phba)
14043{
14044 switch (phba->pcidev->device) {
14045 case PCI_DEVICE_ID_LANCER_G6_FC:
14046 case PCI_DEVICE_ID_LANCER_G7_FC:
14047 phba->ras_fwlog.ras_hwsupport = true;
14048 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
14049 phba->cfg_ras_fwlog_buffsize)
14050 phba->ras_fwlog.ras_enabled = true;
14051 else
14052 phba->ras_fwlog.ras_enabled = false;
14053 break;
14054 default:
14055 phba->ras_fwlog.ras_hwsupport = false;
14056 }
14057}
14058
14059
/* Export the PCI id table so device/driver matching tools can see it */
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

/* PCI error (AER/EEH) recovery callbacks */
static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

/* Power-management suspend/resume entry points */
static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
			 lpfc_pci_suspend_one,
			 lpfc_pci_resume_one);

/* PCI driver glue: probe/remove (remove doubles as shutdown), PM ops
 * and error handlers.
 */
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.driver.pm	= &lpfc_pci_pm_ops_one,
	.err_handler    = &lpfc_err_handler,
};

/* "lpfcmgmt" misc device; defines no file operations of its own */
static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
14091
14092
14093
14094
14095
14096
14097
14098
14099
14100
14101
14102
14103
/**
 * lpfc_init - lpfc module initialization routine.
 *
 * Registers the lpfcmgmt misc device, attaches the FC transport
 * templates for physical and virtual ports, initializes the WQE/nvmet
 * command templates, registers CPU hotplug callbacks and finally
 * registers the PCI driver.  Failures unwind in reverse order via the
 * labels at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	pr_info(LPFC_MODULE_DESC "\n");
	pr_info(LPFC_COPYRIGHT "\n");

	/* A misc_register failure is logged but does not abort init. */
	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	/* Default failure code for the transport-attach steps below. */
	error = -ENOMEM;
	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
	lpfc_wqe_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

	/* Register dynamic CPU hotplug state; a non-negative return is
	 * the allocated state id, kept in lpfc_cpuhp_state.
	 */
	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);

	return error;
}
14159
/**
 * lpfc_dmp_dbg - Dump the in-memory debug message ring to the kernel log.
 * @phba: pointer to lpfc hba data structure.
 *
 * Skips the dump entirely when verbose logging is enabled on the hba
 * or on any vport (messages were presumably already emitted inline).
 * Uses dbg_log_dmping as a cmpxchg guard so only one dump runs at a
 * time, then walks the dbg_log ring from the computed oldest entry and
 * prints each record with its seconds.microseconds timestamp.
 */
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
	unsigned int start_idx;
	unsigned int dbg_cnt;
	unsigned int temp_idx;
	int i;
	int j = 0;
	unsigned long rem_nsec;
	struct lpfc_vport **vports;

	/* Don't dump messages if we explicitly set log_verbose for the
	 * physical port or any vport.
	 */
	if (phba->cfg_log_verbose)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (vports[i]->cfg_log_verbose) {
				lpfc_destroy_vport_work_array(phba, vports);
				return;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Only one dumper at a time; bail if a dump is already running. */
	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;

	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
	if (!dbg_cnt)
		goto out;
	temp_idx = start_idx;
	if (dbg_cnt >= DBG_LOG_SZ) {
		/* Ring has wrapped: dump all DBG_LOG_SZ entries. */
		dbg_cnt = DBG_LOG_SZ;
		temp_idx -= 1;
	} else {
		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
		} else {
			/* Step back dbg_cnt entries (with wrap) to find
			 * the oldest message to print first.
			 */
			if (start_idx < dbg_cnt)
				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
			else
				start_idx -= dbg_cnt;
		}
	}
	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
		 start_idx, temp_idx, dbg_cnt);

	for (i = 0; i < dbg_cnt; i++) {
		if ((start_idx + i) < DBG_LOG_SZ)
			temp_idx = (start_idx + i) % DBG_LOG_SZ;
		else
			temp_idx = j++;
		/* do_div() leaves whole seconds in t_ns, returns nsec rem */
		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
	}
out:
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}
14227
/**
 * lpfc_dbg_print - Record a printf-style message in the hba debug ring.
 * @phba: pointer to lpfc hba data structure.
 * @fmt: printf-style format string.
 *
 * When a ring dump is in progress (dbg_log_dmping set) the message is
 * emitted directly via dev_info instead of being queued; otherwise it
 * is formatted into the next slot of the dbg_log ring and timestamped.
 */
__printf(2, 3)
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
	struct va_format vaf;


	va_start(args, fmt);
	if (unlikely(dbg_dmping)) {
		/* Dump in progress: print straight to the kernel log. */
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
		va_end(args);
		return;
	}
	/* Atomically claim the next ring slot. */
	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
		DBG_LOG_SZ;

	atomic_inc(&phba->dbg_log_cnt);

	vscnprintf(phba->dbg_log[idx].log,
		   sizeof(phba->dbg_log[idx].log), fmt, args);
	va_end(args);

	phba->dbg_log[idx].t_ns = local_clock();
}
14256
14257
14258
14259
14260
14261
14262
14263
/**
 * lpfc_exit - lpfc module removal routine.
 *
 * Unwinds lpfc_init(): deregisters the misc device, unregisters the
 * PCI driver, removes the CPU hotplug state, releases both FC
 * transport templates and destroys the HBA index idr.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}
14274
/* Module entry/exit points and metadata */
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
14281