#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;

static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
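
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine runs before the CONFIG_PORT mailbox command is issued. For
 * legacy adapters (lpfc_is_LC_HBA()) it writes the license key and saves the
 * WWNN/WWPN returned by READ_NVPARM. It then issues READ_REV to collect the
 * adapter revision information and dumps and parses the Vital Product Data
 * (VPD).
 *
 * Return codes
 *   0 - success.
 *   -ENOMEM - could not allocate memory for the mailbox command.
 *   -ERESTART - a mailbox command failed; reset the HBA and retry.
 *   -EINVAL - SLI-3 was requested but is not supported by the firmware.
 **/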
116int
117lpfc_config_port_prep(struct lpfc_hba *phba)
118{
119 lpfc_vpd_t *vp = &phba->vpd;
120 int i = 0, rc;
121 LPFC_MBOXQ_t *pmb;
122 MAILBOX_t *mb;
123 char *lpfc_vpd_data = NULL;
124 uint16_t offset = 0;
125 static char licensed[56] =
126 "key unlock for use with gnu public licensed code only\0";
127 static int init_key = 1;
128
129 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
130 if (!pmb) {
131 phba->link_state = LPFC_HBA_ERROR;
132 return -ENOMEM;
133 }
134
135 mb = &pmb->u.mb;
136 phba->link_state = LPFC_INIT_MBX_CMDS;
137
138 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
139 if (init_key) {
140 uint32_t *ptext = (uint32_t *) licensed;
141
142 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
143 *ptext = cpu_to_be32(*ptext);
144 init_key = 0;
145 }
146
147 lpfc_read_nv(phba, pmb);
148 memset((char*)mb->un.varRDnvp.rsvd3, 0,
149 sizeof (mb->un.varRDnvp.rsvd3));
150 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
151 sizeof (licensed));
152
153 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
154
155 if (rc != MBX_SUCCESS) {
156 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
157 "0324 Config Port initialization "
158 "error, mbxCmd x%x READ_NVPARM, "
159 "mbxStatus x%x\n",
160 mb->mbxCommand, mb->mbxStatus);
161 mempool_free(pmb, phba->mbox_mem_pool);
162 return -ERESTART;
163 }
164 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
165 sizeof(phba->wwnn));
166 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
167 sizeof(phba->wwpn));
168 }
169
170
171
172
173
174 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
175
176
177 lpfc_read_rev(phba, pmb);
178 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
179 if (rc != MBX_SUCCESS) {
180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
181 "0439 Adapter failed to init, mbxCmd x%x "
182 "READ_REV, mbxStatus x%x\n",
183 mb->mbxCommand, mb->mbxStatus);
184 mempool_free( pmb, phba->mbox_mem_pool);
185 return -ERESTART;
186 }
187
188
189
190
191
192
193 if (mb->un.varRdRev.rr == 0) {
194 vp->rev.rBit = 0;
195 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
196 "0440 Adapter failed to init, READ_REV has "
197 "missing revision information.\n");
198 mempool_free(pmb, phba->mbox_mem_pool);
199 return -ERESTART;
200 }
201
202 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
203 mempool_free(pmb, phba->mbox_mem_pool);
204 return -EINVAL;
205 }
206
207
208 vp->rev.rBit = 1;
209 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
210 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
211 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
212 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
213 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
214 vp->rev.biuRev = mb->un.varRdRev.biuRev;
215 vp->rev.smRev = mb->un.varRdRev.smRev;
216 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
217 vp->rev.endecRev = mb->un.varRdRev.endecRev;
218 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
219 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
220 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
221 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
222 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
223 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
224
225
226
227
228
229 if (vp->rev.feaLevelHigh < 9)
230 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
231
232 if (lpfc_is_LC_HBA(phba->pcidev->device))
233 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
234 sizeof (phba->RandomData));
235
236
237 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
238 if (!lpfc_vpd_data)
239 goto out_free_mbox;
240 do {
241 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
242 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
243
244 if (rc != MBX_SUCCESS) {
245 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
246 "0441 VPD not present on adapter, "
247 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
248 mb->mbxCommand, mb->mbxStatus);
249 mb->un.varDmp.word_cnt = 0;
250 }
251
252
253
254 if (mb->un.varDmp.word_cnt == 0)
255 break;
256
257 i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
258 if (offset + i > DMP_VPD_SIZE)
259 i = DMP_VPD_SIZE - offset;
260 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
261 lpfc_vpd_data + offset, i);
262 offset += i;
263 } while (offset < DMP_VPD_SIZE);
264
265 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
266
267 kfree(lpfc_vpd_data);
268out_free_mbox:
269 mempool_free(pmb, phba->mbox_mem_pool);
270 return 0;
271}
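
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the ASYNCEVT_ENABLE mailbox command: record whether
 * the adapter supports temperature sensor events and free the mailbox.
 **/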
283static void
284lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
285{
286 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
287 phba->temp_sensor_support = 1;
288 else
289 phba->temp_sensor_support = 0;
290 mempool_free(pmboxq, phba->mbox_mem_pool);
291 return;
292}
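
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the dump mailbox command that retrieves the wakeup
 * parameters. On success the Option ROM version word is decoded into
 * phba->OptionROMVersion; the mailbox is freed in all cases.
 **/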
304static void
305lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
306{
307 struct prog_id *prg;
308 uint32_t prog_id_word;
309 char dist = ' ';
310
311 char dist_char[] = "nabx";
312
313 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
314 mempool_free(pmboxq, phba->mbox_mem_pool);
315 return;
316 }
317
318 prg = (struct prog_id *) &prog_id_word;
319
320
321 prog_id_word = pmboxq->u.mb.un.varWords[7];
322
323
324 if (prg->dist < 4)
325 dist = dist_char[prg->dist];
326
327 if ((prg->dist == 3) && (prg->num == 0))
328 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
329 prg->ver, prg->rev, prg->lev);
330 else
331 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
332 prg->ver, prg->rev, prg->lev,
333 dist, prg->num);
334 mempool_free(pmboxq, phba->mbox_mem_pool);
335 return;
336}
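
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Reconcile the vport's cached node and port names with the names carried in
 * the service parameters, honoring soft WWNN/WWPN overrides and the
 * fabric-assigned WWPN (FA-WWPN) vendor key when present.
 **/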
347void
348lpfc_update_vport_wwn(struct lpfc_vport *vport)
349{
350 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
351 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
352
353
354 if (vport->phba->cfg_soft_wwnn)
355 u64_to_wwn(vport->phba->cfg_soft_wwnn,
356 vport->fc_sparam.nodeName.u.wwn);
357 if (vport->phba->cfg_soft_wwpn)
358 u64_to_wwn(vport->phba->cfg_soft_wwpn,
359 vport->fc_sparam.portName.u.wwn);
360
361
362
363
364
365 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
366 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
367 sizeof(struct lpfc_name));
368 else
369 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
370 sizeof(struct lpfc_name));
371
372
373
374
375
376 if (vport->fc_portname.u.wwn[0] != 0 &&
377 memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
378 sizeof(struct lpfc_name)))
379 vport->vport_flag |= FAWWPN_PARAM_CHG;
380
381 if (vport->fc_portname.u.wwn[0] == 0 ||
382 vport->phba->cfg_soft_wwpn ||
383 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
384 vport->vport_flag & FAWWPN_SET) {
385 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
386 sizeof(struct lpfc_name));
387 vport->vport_flag &= ~FAWWPN_SET;
388 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
389 vport->vport_flag |= FAWWPN_SET;
390 }
391 else
392 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
393 sizeof(struct lpfc_name));
394}
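
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine runs after the CONFIG_PORT mailbox command completes. It reads
 * the service parameters and adapter configuration, derives the host WWNs and
 * serial number, posts receive buffers, configures MSI-X when in use, enables
 * host controller interrupts, starts the ELS, heartbeat and error-attention
 * timers, and either brings the link up or issues DOWN_LINK if the link is
 * administratively disabled. It also enables async events and requests the
 * Option ROM version.
 *
 * Return codes
 *   0 - success.
 *   -ENOMEM - mailbox memory allocation failed.
 *   -EIO - a mailbox command failed.
 **/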
409int
410lpfc_config_port_post(struct lpfc_hba *phba)
411{
412 struct lpfc_vport *vport = phba->pport;
413 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
414 LPFC_MBOXQ_t *pmb;
415 MAILBOX_t *mb;
416 struct lpfc_dmabuf *mp;
417 struct lpfc_sli *psli = &phba->sli;
418 uint32_t status, timeout;
419 int i, j;
420 int rc;
421
422 spin_lock_irq(&phba->hbalock);
423
424
425
426
427 if (phba->over_temp_state == HBA_OVER_TEMP)
428 phba->over_temp_state = HBA_NORMAL_TEMP;
429 spin_unlock_irq(&phba->hbalock);
430
431 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
432 if (!pmb) {
433 phba->link_state = LPFC_HBA_ERROR;
434 return -ENOMEM;
435 }
436 mb = &pmb->u.mb;
437
438
439 rc = lpfc_read_sparam(phba, pmb, 0);
440 if (rc) {
441 mempool_free(pmb, phba->mbox_mem_pool);
442 return -ENOMEM;
443 }
444
445 pmb->vport = vport;
446 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
447 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
448 "0448 Adapter failed init, mbxCmd x%x "
449 "READ_SPARM mbxStatus x%x\n",
450 mb->mbxCommand, mb->mbxStatus);
451 phba->link_state = LPFC_HBA_ERROR;
452 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
453 mempool_free(pmb, phba->mbox_mem_pool);
454 lpfc_mbuf_free(phba, mp->virt, mp->phys);
455 kfree(mp);
456 return -EIO;
457 }
458
459 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
460
461 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
462 lpfc_mbuf_free(phba, mp->virt, mp->phys);
463 kfree(mp);
464 pmb->ctx_buf = NULL;
465 lpfc_update_vport_wwn(vport);
466
467
468 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
469 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
470 fc_host_max_npiv_vports(shost) = phba->max_vpi;
471
472
473
474 if (phba->SerialNumber[0] == 0) {
475 uint8_t *outptr;
476
477 outptr = &vport->fc_nodename.u.s.IEEE[0];
478 for (i = 0; i < 12; i++) {
479 status = *outptr++;
480 j = ((status & 0xf0) >> 4);
481 if (j <= 9)
482 phba->SerialNumber[i] =
483 (char)((uint8_t) 0x30 + (uint8_t) j);
484 else
485 phba->SerialNumber[i] =
486 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
487 i++;
488 j = (status & 0xf);
489 if (j <= 9)
490 phba->SerialNumber[i] =
491 (char)((uint8_t) 0x30 + (uint8_t) j);
492 else
493 phba->SerialNumber[i] =
494 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
495 }
496 }
497
498 lpfc_read_config(phba, pmb);
499 pmb->vport = vport;
500 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
502 "0453 Adapter failed to init, mbxCmd x%x "
503 "READ_CONFIG, mbxStatus x%x\n",
504 mb->mbxCommand, mb->mbxStatus);
505 phba->link_state = LPFC_HBA_ERROR;
506 mempool_free( pmb, phba->mbox_mem_pool);
507 return -EIO;
508 }
509
510
511 lpfc_sli_read_link_ste(phba);
512
513
514 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
515 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
516 "3359 HBA queue depth changed from %d to %d\n",
517 phba->cfg_hba_queue_depth,
518 mb->un.varRdConfig.max_xri);
519 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
520 }
521
522 phba->lmt = mb->un.varRdConfig.lmt;
523
524
525 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
526
527 phba->link_state = LPFC_LINK_DOWN;
528
529
530 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
531 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
532 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
533 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
534
535
536 if (phba->sli_rev != 3)
537 lpfc_post_rcv_buf(phba);
538
539
540
541
542 if (phba->intr_type == MSIX) {
543 rc = lpfc_config_msi(phba, pmb);
544 if (rc) {
545 mempool_free(pmb, phba->mbox_mem_pool);
546 return -EIO;
547 }
548 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
549 if (rc != MBX_SUCCESS) {
550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
551 "0352 Config MSI mailbox command "
552 "failed, mbxCmd x%x, mbxStatus x%x\n",
553 pmb->u.mb.mbxCommand,
554 pmb->u.mb.mbxStatus);
555 mempool_free(pmb, phba->mbox_mem_pool);
556 return -EIO;
557 }
558 }
559
560 spin_lock_irq(&phba->hbalock);
561
562 phba->hba_flag &= ~HBA_ERATT_HANDLED;
563
564
565 if (lpfc_readl(phba->HCregaddr, &status)) {
566 spin_unlock_irq(&phba->hbalock);
567 return -EIO;
568 }
569 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
570 if (psli->num_rings > 0)
571 status |= HC_R0INT_ENA;
572 if (psli->num_rings > 1)
573 status |= HC_R1INT_ENA;
574 if (psli->num_rings > 2)
575 status |= HC_R2INT_ENA;
576 if (psli->num_rings > 3)
577 status |= HC_R3INT_ENA;
578
579 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
580 (phba->cfg_poll & DISABLE_FCP_RING_INT))
581 status &= ~(HC_R0INT_ENA);
582
583 writel(status, phba->HCregaddr);
584 readl(phba->HCregaddr);
585 spin_unlock_irq(&phba->hbalock);
586
587
588 timeout = phba->fc_ratov * 2;
589 mod_timer(&vport->els_tmofunc,
590 jiffies + msecs_to_jiffies(1000 * timeout));
591
592 mod_timer(&phba->hb_tmofunc,
593 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
594 phba->hb_outstanding = 0;
595 phba->last_completion_time = jiffies;
596
597 mod_timer(&phba->eratt_poll,
598 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
599
600 if (phba->hba_flag & LINK_DISABLED) {
601 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
602 "2598 Adapter Link is disabled.\n");
603 lpfc_down_link(phba, pmb);
604 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
605 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
606 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
607 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
608 "2599 Adapter failed to issue DOWN_LINK"
609 " mbox command rc 0x%x\n", rc);
610
611 mempool_free(pmb, phba->mbox_mem_pool);
612 return -EIO;
613 }
614 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
615 mempool_free(pmb, phba->mbox_mem_pool);
616 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
617 if (rc)
618 return rc;
619 }
620
621 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
622 if (!pmb) {
623 phba->link_state = LPFC_HBA_ERROR;
624 return -ENOMEM;
625 }
626
627 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
628 pmb->mbox_cmpl = lpfc_config_async_cmpl;
629 pmb->vport = phba->pport;
630 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
631
632 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
633 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
634 "0456 Adapter failed to issue "
635 "ASYNCEVT_ENABLE mbox status x%x\n",
636 rc);
637 mempool_free(pmb, phba->mbox_mem_pool);
638 }
639
640
641 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
642 if (!pmb) {
643 phba->link_state = LPFC_HBA_ERROR;
644 return -ENOMEM;
645 }
646
647 lpfc_dump_wakeup_param(phba, pmb);
648 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
649 pmb->vport = phba->pport;
650 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
651
652 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
653 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
654 "0435 Adapter failed "
655 "to get Option ROM version status x%x\n", rc);
656 mempool_free(pmb, phba->mbox_mem_pool);
657 }
658
659 return 0;
660}
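
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Bring up the link using the topology already configured for the HBA; this
 * is a thin wrapper around lpfc_hba_init_link_fc_topology().
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/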
676static int
677lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
678{
679 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
680}
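
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Validate the configured link speed against the adapter's capabilities,
 * falling back to auto speed if it is not supported, then issue the
 * INIT_LINK mailbox command to bring up the link with the requested
 * topology.
 *
 * Return codes
 *   0 - success.
 *   -ENOMEM - mailbox memory allocation failed.
 *   -EIO - INIT_LINK mailbox command failed.
 **/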
697int
698lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
699 uint32_t flag)
700{
701 struct lpfc_vport *vport = phba->pport;
702 LPFC_MBOXQ_t *pmb;
703 MAILBOX_t *mb;
704 int rc;
705
706 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
707 if (!pmb) {
708 phba->link_state = LPFC_HBA_ERROR;
709 return -ENOMEM;
710 }
711 mb = &pmb->u.mb;
712 pmb->vport = vport;
713
714 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
715 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
716 !(phba->lmt & LMT_1Gb)) ||
717 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
718 !(phba->lmt & LMT_2Gb)) ||
719 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
720 !(phba->lmt & LMT_4Gb)) ||
721 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
722 !(phba->lmt & LMT_8Gb)) ||
723 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
724 !(phba->lmt & LMT_10Gb)) ||
725 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
726 !(phba->lmt & LMT_16Gb)) ||
727 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
728 !(phba->lmt & LMT_32Gb)) ||
729 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
730 !(phba->lmt & LMT_64Gb))) {
731
732 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
733 "1302 Invalid speed for this board:%d "
734 "Reset link speed to auto.\n",
735 phba->cfg_link_speed);
736 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
737 }
738 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
739 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
740 if (phba->sli_rev < LPFC_SLI_REV4)
741 lpfc_set_loopback_flag(phba);
742 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
743 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
744 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
745 "0498 Adapter failed to init, mbxCmd x%x "
746 "INIT_LINK, mbxStatus x%x\n",
747 mb->mbxCommand, mb->mbxStatus);
748 if (phba->sli_rev <= LPFC_SLI_REV3) {
749
750 writel(0, phba->HCregaddr);
751 readl(phba->HCregaddr);
752
753 writel(0xffffffff, phba->HAregaddr);
754 readl(phba->HAregaddr);
755 }
756 phba->link_state = LPFC_HBA_ERROR;
757 if (rc != MBX_BUSY || flag == MBX_POLL)
758 mempool_free(pmb, phba->mbox_mem_pool);
759 return -EIO;
760 }
761 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
762 if (flag == MBX_POLL)
763 mempool_free(pmb, phba->mbox_mem_pool);
764
765 return 0;
766}
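
/**
 * lpfc_hba_down_link - Bring down the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Issue the DOWN_LINK mailbox command to bring the Fibre Channel link down.
 *
 * Return codes
 *   0 - success.
 *   -ENOMEM - mailbox memory allocation failed.
 *   -EIO - DOWN_LINK mailbox command failed.
 **/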
781static int
782lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
783{
784 LPFC_MBOXQ_t *pmb;
785 int rc;
786
787 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
788 if (!pmb) {
789 phba->link_state = LPFC_HBA_ERROR;
790 return -ENOMEM;
791 }
792
793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
794 "0491 Adapter Link is disabled.\n");
795 lpfc_down_link(phba, pmb);
796 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
797 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
798 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
799 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
800 "2522 Adapter failed to issue DOWN_LINK"
801 " mbox command rc 0x%x\n", rc);
802
803 mempool_free(pmb, phba->mbox_mem_pool);
804 return -EIO;
805 }
806 if (flag == MBX_POLL)
807 mempool_free(pmb, phba->mbox_mem_pool);
808
809 return 0;
810}
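
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Disable host controller interrupts on SLI-3 adapters and clean up the
 * discovery resources, either for the physical port alone when the driver
 * is unloading or for every active vport otherwise.
 *
 * Return codes
 *   0 - success.
 **/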
823int
824lpfc_hba_down_prep(struct lpfc_hba *phba)
825{
826 struct lpfc_vport **vports;
827 int i;
828
829 if (phba->sli_rev <= LPFC_SLI_REV3) {
830
831 writel(0, phba->HCregaddr);
832 readl(phba->HCregaddr);
833 }
834
835 if (phba->pport->load_flag & FC_UNLOADING)
836 lpfc_cleanup_discovery_resources(phba->pport);
837 else {
838 vports = lpfc_create_vport_work_array(phba);
839 if (vports != NULL)
840 for (i = 0; i <= phba->max_vports &&
841 vports[i] != NULL; i++)
842 lpfc_cleanup_discovery_resources(vports[i]);
843 lpfc_destroy_vport_work_array(phba, vports);
844 }
845 return 0;
846}
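
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * Drain the SLI-4 slow-path queue event list, releasing the response iocbs
 * and receive buffers attached to the pending completion queue events.
 **/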
861static void
862lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
863{
864 struct lpfc_iocbq *rspiocbq;
865 struct hbq_dmabuf *dmabuf;
866 struct lpfc_cq_event *cq_event;
867
868 spin_lock_irq(&phba->hbalock);
869 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
870 spin_unlock_irq(&phba->hbalock);
871
872 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
873
874 spin_lock_irq(&phba->hbalock);
875 list_remove_head(&phba->sli4_hba.sp_queue_event,
876 cq_event, struct lpfc_cq_event, list);
877 spin_unlock_irq(&phba->hbalock);
878
879 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
880 case CQE_CODE_COMPL_WQE:
881 rspiocbq = container_of(cq_event, struct lpfc_iocbq,
882 cq_event);
883 lpfc_sli_release_iocbq(phba, rspiocbq);
884 break;
885 case CQE_CODE_RECEIVE:
886 case CQE_CODE_RECEIVE_V1:
887 dmabuf = container_of(cq_event, struct hbq_dmabuf,
888 cq_event);
889 lpfc_in_buf_free(phba, &dmabuf->dbuf);
890 }
891 }
892}
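
/**
 * lpfc_hba_free_post_buf - Free buffers posted to the firmware
 * @phba: pointer to lpfc hba data structure.
 *
 * Release the receive buffers previously posted to the adapter: all HBQ
 * buffers when HBQs are enabled, otherwise the buffers queued on the ELS
 * ring's postbufq.
 **/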
905static void
906lpfc_hba_free_post_buf(struct lpfc_hba *phba)
907{
908 struct lpfc_sli *psli = &phba->sli;
909 struct lpfc_sli_ring *pring;
910 struct lpfc_dmabuf *mp, *next_mp;
911 LIST_HEAD(buflist);
912 int count;
913
914 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
915 lpfc_sli_hbqbuf_free_all(phba);
916 else {
917
918 pring = &psli->sli3_ring[LPFC_ELS_RING];
919 spin_lock_irq(&phba->hbalock);
920 list_splice_init(&pring->postbufq, &buflist);
921 spin_unlock_irq(&phba->hbalock);
922
923 count = 0;
924 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
925 list_del(&mp->list);
926 count++;
927 lpfc_mbuf_free(phba, mp->virt, mp->phys);
928 kfree(mp);
929 }
930
931 spin_lock_irq(&phba->hbalock);
932 pring->postbufq_cnt -= count;
933 spin_unlock_irq(&phba->hbalock);
934 }
935}
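
/**
 * lpfc_hba_clean_txcmplq - Flush the outstanding command completion queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Remove every iocb still on the txcmplq of each SLI-3 ring or SLI-4 work
 * queue, abort the rings, and complete the removed iocbs with a
 * local-reject/SLI-aborted status.
 **/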
947static void
948lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
949{
950 struct lpfc_sli *psli = &phba->sli;
951 struct lpfc_queue *qp = NULL;
952 struct lpfc_sli_ring *pring;
953 LIST_HEAD(completions);
954 int i;
955 struct lpfc_iocbq *piocb, *next_iocb;
956
957 if (phba->sli_rev != LPFC_SLI_REV4) {
958 for (i = 0; i < psli->num_rings; i++) {
959 pring = &psli->sli3_ring[i];
960 spin_lock_irq(&phba->hbalock);
961
962
963
964
965 list_splice_init(&pring->txcmplq, &completions);
966 pring->txcmplq_cnt = 0;
967 spin_unlock_irq(&phba->hbalock);
968
969 lpfc_sli_abort_iocb_ring(phba, pring);
970 }
971
972 lpfc_sli_cancel_iocbs(phba, &completions,
973 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
974 return;
975 }
976 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
977 pring = qp->pring;
978 if (!pring)
979 continue;
980 spin_lock_irq(&pring->ring_lock);
981 list_for_each_entry_safe(piocb, next_iocb,
982 &pring->txcmplq, list)
983 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
984 list_splice_init(&pring->txcmplq, &completions);
985 pring->txcmplq_cnt = 0;
986 spin_unlock_irq(&pring->ring_lock);
987 lpfc_sli_abort_iocb_ring(phba, pring);
988 }
989
990 lpfc_sli_cancel_iocbs(phba, &completions,
991 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
992}
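
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-3 variant of the post-reset cleanup: free posted receive buffers and
 * flush the command completion queues.
 *
 * Return codes
 *   0 - success.
 **/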
1005static int
1006lpfc_hba_down_post_s3(struct lpfc_hba *phba)
1007{
1008 lpfc_hba_free_post_buf(phba);
1009 lpfc_hba_clean_txcmplq(phba);
1010 return 0;
1011}
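
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-4 variant of the post-reset cleanup: free HBQ buffers, flush the
 * command completion queues, move aborted ELS sgls and aborted IO buffers
 * back to their free lists, repost aborted NVMET contexts, and drain the
 * slow-path queue events.
 *
 * Return codes
 *   the number of IO buffers recovered from the abort lists.
 **/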
1024static int
1025lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1026{
1027 struct lpfc_io_buf *psb, *psb_next;
1028 struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
1029 struct lpfc_sli4_hdw_queue *qp;
1030 LIST_HEAD(aborts);
1031 LIST_HEAD(nvme_aborts);
1032 LIST_HEAD(nvmet_aborts);
1033 struct lpfc_sglq *sglq_entry = NULL;
1034 int cnt, idx;
1035
1036
1037 lpfc_sli_hbqbuf_free_all(phba);
1038 lpfc_hba_clean_txcmplq(phba);
1039
1040
1041
1042
1043
1044
1045
1046 spin_lock_irq(&phba->hbalock);
1047
1048
1049
1050
1051 spin_lock(&phba->sli4_hba.sgl_list_lock);
1052 list_for_each_entry(sglq_entry,
1053 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1054 sglq_entry->state = SGL_FREED;
1055
1056 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1057 &phba->sli4_hba.lpfc_els_sgl_list);
1058
1059
1060 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1061
1062
1063
1064
1065 cnt = 0;
1066 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1067 qp = &phba->sli4_hba.hdwq[idx];
1068
1069 spin_lock(&qp->abts_io_buf_list_lock);
1070 list_splice_init(&qp->lpfc_abts_io_buf_list,
1071 &aborts);
1072
1073 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
1074 psb->pCmd = NULL;
1075 psb->status = IOSTAT_SUCCESS;
1076 cnt++;
1077 }
1078 spin_lock(&qp->io_buf_list_put_lock);
1079 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1080 qp->put_io_bufs += qp->abts_scsi_io_bufs;
1081 qp->put_io_bufs += qp->abts_nvme_io_bufs;
1082 qp->abts_scsi_io_bufs = 0;
1083 qp->abts_nvme_io_bufs = 0;
1084 spin_unlock(&qp->io_buf_list_put_lock);
1085 spin_unlock(&qp->abts_io_buf_list_lock);
1086 }
1087 spin_unlock_irq(&phba->hbalock);
1088
1089 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1090 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1091 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1092 &nvmet_aborts);
1093 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1094 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1095 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
1096 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1097 }
1098 }
1099
1100 lpfc_sli4_free_sp_events(phba);
1101 return cnt;
1102}
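
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoke the SLI-3 or SLI-4 specific post-reset cleanup routine through the
 * per-adapter function pointer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/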
1115int
1116lpfc_hba_down_post(struct lpfc_hba *phba)
1117{
1118 return (*phba->lpfc_hba_down_post)(phba);
1119}
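
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * Heartbeat timer callback: post a WORKER_HB_TMO event (if one is not
 * already pending) and wake up the worker thread so the heartbeat work is
 * handled outside interrupt context.
 **/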
1133static void
1134lpfc_hb_timeout(struct timer_list *t)
1135{
1136 struct lpfc_hba *phba;
1137 uint32_t tmo_posted;
1138 unsigned long iflag;
1139
1140 phba = from_timer(phba, t, hb_tmofunc);
1141
1142
1143 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1144 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1145 if (!tmo_posted)
1146 phba->pport->work_port_events |= WORKER_HB_TMO;
1147 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1148
1149
1150 if (!tmo_posted)
1151 lpfc_worker_wake_up(phba);
1152 return;
1153}
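
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * RRQ timer callback: set HBA_RRQ_ACTIVE unless the driver is unloading and
 * wake up the worker thread to process the active RRQ list.
 **/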
1167static void
1168lpfc_rrq_timeout(struct timer_list *t)
1169{
1170 struct lpfc_hba *phba;
1171 unsigned long iflag;
1172
1173 phba = from_timer(phba, t, rrq_tmr);
1174 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1175 if (!(phba->pport->load_flag & FC_UNLOADING))
1176 phba->hba_flag |= HBA_RRQ_ACTIVE;
1177 else
1178 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1179 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1180
1181 if (!(phba->pport->load_flag & FC_UNLOADING))
1182 lpfc_worker_wake_up(phba);
1183}
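
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the heartbeat mailbox command: clear the
 * outstanding-heartbeat flag, free the mailbox, and rearm the heartbeat
 * timer if the port is still online.
 **/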
1201static void
1202lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1203{
1204 unsigned long drvr_flag;
1205
1206 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1207 phba->hb_outstanding = 0;
1208 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1209
1210
1211 mempool_free(pmboxq, phba->mbox_mem_pool);
1212 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1213 !(phba->link_state == LPFC_HBA_ERROR) &&
1214 !(phba->pport->load_flag & FC_UNLOADING))
1215 mod_timer(&phba->hb_tmofunc,
1216 jiffies +
1217 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1218 return;
1219}
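
/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the delayed work structure.
 *
 * Periodically sample per-CPU idle time and switch each hardware queue's
 * io_cq between LPFC_QUEUE_WORK (CPU less than 15% idle) and LPFC_IRQ_POLL
 * completion processing. The work requeues itself unless the driver is
 * unloading.
 **/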
1229static void
1230lpfc_idle_stat_delay_work(struct work_struct *work)
1231{
1232 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1233 struct lpfc_hba,
1234 idle_stat_delay_work);
1235 struct lpfc_queue *cq;
1236 struct lpfc_sli4_hdw_queue *hdwq;
1237 struct lpfc_idle_stat *idle_stat;
1238 u32 i, idle_percent;
1239 u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
1240
1241 if (phba->pport->load_flag & FC_UNLOADING)
1242 return;
1243
1244 if (phba->link_state == LPFC_HBA_ERROR ||
1245 phba->pport->fc_flag & FC_OFFLINE_MODE)
1246 goto requeue;
1247
1248 for_each_present_cpu(i) {
1249 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1250 cq = hdwq->io_cq;
1251
1252
1253 if (cq->chann != i)
1254 continue;
1255
1256 idle_stat = &phba->sli4_hba.idle_stat[i];
1257
1258
1259
1260
1261
1262
1263
1264 wall_idle = get_cpu_idle_time(i, &wall, 1);
1265 diff_idle = wall_idle - idle_stat->prev_idle;
1266 diff_wall = wall - idle_stat->prev_wall;
1267
1268 if (diff_wall <= diff_idle)
1269 busy_time = 0;
1270 else
1271 busy_time = diff_wall - diff_idle;
1272
1273 idle_percent = div64_u64(100 * busy_time, diff_wall);
1274 idle_percent = 100 - idle_percent;
1275
1276 if (idle_percent < 15)
1277 cq->poll_mode = LPFC_QUEUE_WORK;
1278 else
1279 cq->poll_mode = LPFC_IRQ_POLL;
1280
1281 idle_stat->prev_idle = wall_idle;
1282 idle_stat->prev_wall = wall;
1283 }
1284
1285requeue:
1286 schedule_delayed_work(&phba->idle_stat_delay_work,
1287 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
1288}
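
/**
 * lpfc_hb_eq_delay_work - Update EQ interrupt delay
 * @work: pointer to the delayed work structure.
 *
 * When automatic interrupt coalescing (cfg_auto_imax) is enabled, recompute
 * the EQ interrupt delay for each CPU from the interrupt counts accumulated
 * since the last pass and apply the new delay to the EQs owned by that CPU.
 * The work requeues itself on phba->wq.
 **/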
1290static void
1291lpfc_hb_eq_delay_work(struct work_struct *work)
1292{
1293 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1294 struct lpfc_hba, eq_delay_work);
1295 struct lpfc_eq_intr_info *eqi, *eqi_new;
1296 struct lpfc_queue *eq, *eq_next;
1297 unsigned char *ena_delay = NULL;
1298 uint32_t usdelay;
1299 int i;
1300
1301 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1302 return;
1303
1304 if (phba->link_state == LPFC_HBA_ERROR ||
1305 phba->pport->fc_flag & FC_OFFLINE_MODE)
1306 goto requeue;
1307
1308 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1309 GFP_KERNEL);
1310 if (!ena_delay)
1311 goto requeue;
1312
1313 for (i = 0; i < phba->cfg_irq_chann; i++) {
1314
1315 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1316 if (!eq)
1317 continue;
1318 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1319 eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1320 ena_delay[eq->last_cpu] = 1;
1321 }
1322 }
1323
1324 for_each_present_cpu(i) {
1325 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1326 if (ena_delay[i]) {
1327 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1328 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1329 usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1330 } else {
1331 usdelay = 0;
1332 }
1333
1334 eqi->icnt = 0;
1335
1336 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1337 if (unlikely(eq->last_cpu != i)) {
1338 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1339 eq->last_cpu);
1340 list_move_tail(&eq->cpu_list, &eqi_new->list);
1341 continue;
1342 }
1343 if (usdelay != eq->q_mode)
1344 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1345 usdelay);
1346 }
1347 }
1348
1349 kfree(ena_delay);
1350
1351requeue:
1352 queue_delayed_work(phba->wq, &phba->eq_delay_work,
1353 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1354}
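
/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For every hardware queue, adjust the private XRI pool count and the high
 * watermark, and snapshot pool statistics when LPFC_MXP_STAT is defined.
 **/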
1363static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1364{
1365 u32 i;
1366 u32 hwq_count;
1367
1368 hwq_count = phba->cfg_hdw_queue;
1369 for (i = 0; i < hwq_count; i++) {
1370
1371 lpfc_adjust_pvt_pool_count(phba, i);
1372
1373
1374 lpfc_adjust_high_watermark(phba, i);
1375
1376#ifdef LPFC_MXP_STAT
1377
1378 lpfc_snapshot_mxp(phba, i);
1379#endif
1380 }
1381}
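
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Worker-thread handler for the heartbeat timer: rebalance XRI pools, run
 * per-vport sequence timeout and FDMI checks, free stale ELS buffers, and,
 * when the heartbeat feature is enabled and the adapter has seen no
 * completions for a full interval, issue a heartbeat mailbox command. The
 * heartbeat timer is rearmed with either the normal interval or the shorter
 * timeout value depending on whether a heartbeat is outstanding.
 **/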
1399void
1400lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1401{
1402 struct lpfc_vport **vports;
1403 LPFC_MBOXQ_t *pmboxq;
1404 struct lpfc_dmabuf *buf_ptr;
1405 int retval, i;
1406 struct lpfc_sli *psli = &phba->sli;
1407 LIST_HEAD(completions);
1408
1409 if (phba->cfg_xri_rebalancing) {
1410
1411 lpfc_hb_mxp_handler(phba);
1412 }
1413
1414 vports = lpfc_create_vport_work_array(phba);
1415 if (vports != NULL)
1416 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1417 lpfc_rcv_seq_check_edtov(vports[i]);
1418 lpfc_fdmi_change_check(vports[i]);
1419 }
1420 lpfc_destroy_vport_work_array(phba, vports);
1421
1422 if ((phba->link_state == LPFC_HBA_ERROR) ||
1423 (phba->pport->load_flag & FC_UNLOADING) ||
1424 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1425 return;
1426
1427 spin_lock_irq(&phba->pport->work_port_lock);
1428
1429 if (time_after(phba->last_completion_time +
1430 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1431 jiffies)) {
1432 spin_unlock_irq(&phba->pport->work_port_lock);
1433 if (!phba->hb_outstanding)
1434 mod_timer(&phba->hb_tmofunc,
1435 jiffies +
1436 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1437 else
1438 mod_timer(&phba->hb_tmofunc,
1439 jiffies +
1440 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1441 return;
1442 }
1443 spin_unlock_irq(&phba->pport->work_port_lock);
1444
1445 if (phba->elsbuf_cnt &&
1446 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1447 spin_lock_irq(&phba->hbalock);
1448 list_splice_init(&phba->elsbuf, &completions);
1449 phba->elsbuf_cnt = 0;
1450 phba->elsbuf_prev_cnt = 0;
1451 spin_unlock_irq(&phba->hbalock);
1452
1453 while (!list_empty(&completions)) {
1454 list_remove_head(&completions, buf_ptr,
1455 struct lpfc_dmabuf, list);
1456 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1457 kfree(buf_ptr);
1458 }
1459 }
1460 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1461
1462
1463 if (phba->cfg_enable_hba_heartbeat) {
1464 if (!phba->hb_outstanding) {
1465 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1466 (list_empty(&psli->mboxq))) {
1467 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1468 GFP_KERNEL);
1469 if (!pmboxq) {
1470 mod_timer(&phba->hb_tmofunc,
1471 jiffies +
1472 msecs_to_jiffies(1000 *
1473 LPFC_HB_MBOX_INTERVAL));
1474 return;
1475 }
1476
1477 lpfc_heart_beat(phba, pmboxq);
1478 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1479 pmboxq->vport = phba->pport;
1480 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1481 MBX_NOWAIT);
1482
1483 if (retval != MBX_BUSY &&
1484 retval != MBX_SUCCESS) {
1485 mempool_free(pmboxq,
1486 phba->mbox_mem_pool);
1487 mod_timer(&phba->hb_tmofunc,
1488 jiffies +
1489 msecs_to_jiffies(1000 *
1490 LPFC_HB_MBOX_INTERVAL));
1491 return;
1492 }
1493 phba->skipped_hb = 0;
1494 phba->hb_outstanding = 1;
1495 } else if (time_before_eq(phba->last_completion_time,
1496 phba->skipped_hb)) {
1497 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1498 "2857 Last completion time not "
1499 " updated in %d ms\n",
1500 jiffies_to_msecs(jiffies
1501 - phba->last_completion_time));
1502 } else
1503 phba->skipped_hb = jiffies;
1504
1505 mod_timer(&phba->hb_tmofunc,
1506 jiffies +
1507 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1508 return;
1509 } else {
1510
1511
1512
1513
1514
1515 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1516 "0459 Adapter heartbeat still out"
1517 "standing:last compl time was %d ms.\n",
1518 jiffies_to_msecs(jiffies
1519 - phba->last_completion_time));
1520 mod_timer(&phba->hb_tmofunc,
1521 jiffies +
1522 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1523 }
1524 } else {
1525 mod_timer(&phba->hb_tmofunc,
1526 jiffies +
1527 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1528 }
1529}
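
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Bring an SLI-3 HBA offline in response to an error attention: stop SLI
 * activity, take the port offline, reset the board, and leave the link
 * state set to LPFC_HBA_ERROR.
 **/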
1538static void
1539lpfc_offline_eratt(struct lpfc_hba *phba)
1540{
1541 struct lpfc_sli *psli = &phba->sli;
1542
1543 spin_lock_irq(&phba->hbalock);
1544 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1545 spin_unlock_irq(&phba->hbalock);
1546 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1547
1548 lpfc_offline(phba);
1549 lpfc_reset_barrier(phba);
1550 spin_lock_irq(&phba->hbalock);
1551 lpfc_sli_brdreset(phba);
1552 spin_unlock_irq(&phba->hbalock);
1553 lpfc_hba_down_post(phba);
1554 lpfc_sli_brdready(phba, HS_MBRDY);
1555 lpfc_unblock_mgmt_io(phba);
1556 phba->link_state = LPFC_HBA_ERROR;
1557 return;
1558}
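
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Bring an SLI-4 HBA offline in response to an error attention: mark the
 * link state as LPFC_HBA_ERROR, flush the IO rings, take the port offline,
 * and run the post-reset cleanup.
 **/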
1567void
1568lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1569{
1570 spin_lock_irq(&phba->hbalock);
1571 phba->link_state = LPFC_HBA_ERROR;
1572 spin_unlock_irq(&phba->hbalock);
1573
1574 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1575 lpfc_sli_flush_io_rings(phba);
1576 lpfc_offline(phba);
1577 lpfc_hba_down_post(phba);
1578 lpfc_unblock_mgmt_io(phba);
1579}
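
/**
 * lpfc_handle_deferred_eratt - The SLI3 HBA deferred error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handle a deferred hardware error attention: abort outstanding FCP I/O,
 * take the port offline, and wait for the host status register to clear
 * HS_FFER1 (or for the driver to unload) before clearing DEFER_ERATT and
 * capturing the final error status.
 **/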
1590static void
1591lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1592{
1593 uint32_t old_host_status = phba->work_hs;
1594 struct lpfc_sli *psli = &phba->sli;
1595
1596
1597
1598
1599 if (pci_channel_offline(phba->pcidev)) {
1600 spin_lock_irq(&phba->hbalock);
1601 phba->hba_flag &= ~DEFER_ERATT;
1602 spin_unlock_irq(&phba->hbalock);
1603 return;
1604 }
1605
1606 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1607 "0479 Deferred Adapter Hardware Error "
1608 "Data: x%x x%x x%x\n",
1609 phba->work_hs, phba->work_status[0],
1610 phba->work_status[1]);
1611
1612 spin_lock_irq(&phba->hbalock);
1613 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1614 spin_unlock_irq(&phba->hbalock);
1615
1616
1617
1618
1619
1620
1621
1622 lpfc_sli_abort_fcp_rings(phba);
1623
1624
1625
1626
1627
1628 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1629 lpfc_offline(phba);
1630
1631
1632 while (phba->work_hs & HS_FFER1) {
1633 msleep(100);
1634 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1635 phba->work_hs = UNPLUG_ERR ;
1636 break;
1637 }
1638
1639 if (phba->pport->load_flag & FC_UNLOADING) {
1640 phba->work_hs = 0;
1641 break;
1642 }
1643 }
1644
1645
1646
1647
1648
1649
1650 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1651 phba->work_hs = old_host_status & ~HS_FFER1;
1652
1653 spin_lock_irq(&phba->hbalock);
1654 phba->hba_flag &= ~DEFER_ERATT;
1655 spin_unlock_irq(&phba->hbalock);
1656 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1657 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1658}
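
/**
 * lpfc_board_errevt_to_mgmt - Send board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * Post an FC_REG_BOARD_EVENT / LPFC_EVENT_PORTINTERR vendor-unique event on
 * the FC transport so management applications are notified of the board
 * error.
 **/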
1660static void
1661lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1662{
1663 struct lpfc_board_event_header board_event;
1664 struct Scsi_Host *shost;
1665
1666 board_event.event_type = FC_REG_BOARD_EVENT;
1667 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1668 shost = lpfc_shost_from_vport(phba->pport);
1669 fc_host_post_vendor_event(shost, fc_get_event_number(),
1670 sizeof(board_event),
1671 (char *) &board_event,
1672 LPFC_NL_VENDOR_ID);
1673}
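
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handle hardware error attentions on SLI-3 HBAs. Depending on the host
 * status bits this restarts the adapter (HS_FFER6/HS_FFER8), takes the port
 * offline on a critical over-temperature condition, or reports an
 * unrecoverable hardware error and posts a dump event to management
 * applications.
 **/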
1685static void
1686lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1687{
1688 struct lpfc_vport *vport = phba->pport;
1689 struct lpfc_sli *psli = &phba->sli;
1690 uint32_t event_data;
1691 unsigned long temperature;
1692 struct temp_event temp_event_data;
1693 struct Scsi_Host *shost;
1694
1695
1696
1697
1698 if (pci_channel_offline(phba->pcidev)) {
1699 spin_lock_irq(&phba->hbalock);
1700 phba->hba_flag &= ~DEFER_ERATT;
1701 spin_unlock_irq(&phba->hbalock);
1702 return;
1703 }
1704
1705
1706 if (!phba->cfg_enable_hba_reset)
1707 return;
1708
1709
1710 lpfc_board_errevt_to_mgmt(phba);
1711
1712 if (phba->hba_flag & DEFER_ERATT)
1713 lpfc_handle_deferred_eratt(phba);
1714
1715 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1716 if (phba->work_hs & HS_FFER6)
1717
1718 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1719 "1301 Re-establishing Link "
1720 "Data: x%x x%x x%x\n",
1721 phba->work_hs, phba->work_status[0],
1722 phba->work_status[1]);
1723 if (phba->work_hs & HS_FFER8)
1724
1725 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1726 "2861 Host Authentication device "
1727 "zeroization Data:x%x x%x x%x\n",
1728 phba->work_hs, phba->work_status[0],
1729 phba->work_status[1]);
1730
1731 spin_lock_irq(&phba->hbalock);
1732 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1733 spin_unlock_irq(&phba->hbalock);
1734
1735
1736
1737
1738
1739
1740
1741 lpfc_sli_abort_fcp_rings(phba);
1742
1743
1744
1745
1746
1747 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1748 lpfc_offline(phba);
1749 lpfc_sli_brdrestart(phba);
1750 if (lpfc_online(phba) == 0) {
1751 lpfc_unblock_mgmt_io(phba);
1752 return;
1753 }
1754 lpfc_unblock_mgmt_io(phba);
1755 } else if (phba->work_hs & HS_CRIT_TEMP) {
1756 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1757 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1758 temp_event_data.event_code = LPFC_CRIT_TEMP;
1759 temp_event_data.data = (uint32_t)temperature;
1760
1761 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1762 "0406 Adapter maximum temperature exceeded "
1763 "(%ld), taking this port offline "
1764 "Data: x%x x%x x%x\n",
1765 temperature, phba->work_hs,
1766 phba->work_status[0], phba->work_status[1]);
1767
1768 shost = lpfc_shost_from_vport(phba->pport);
1769 fc_host_post_vendor_event(shost, fc_get_event_number(),
1770 sizeof(temp_event_data),
1771 (char *) &temp_event_data,
1772 SCSI_NL_VID_TYPE_PCI
1773 | PCI_VENDOR_ID_EMULEX);
1774
1775 spin_lock_irq(&phba->hbalock);
1776 phba->over_temp_state = HBA_OVER_TEMP;
1777 spin_unlock_irq(&phba->hbalock);
1778 lpfc_offline_eratt(phba);
1779
1780 } else {
1781
1782
1783
1784
1785 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1786 "0457 Adapter Hardware Error "
1787 "Data: x%x x%x x%x\n",
1788 phba->work_hs,
1789 phba->work_status[0], phba->work_status[1]);
1790
1791 event_data = FC_REG_DUMP_EVENT;
1792 shost = lpfc_shost_from_vport(vport);
1793 fc_host_post_vendor_event(shost, fc_get_event_number(),
1794 sizeof(event_data), (char *) &event_data,
1795 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1796
1797 lpfc_offline_eratt(phba);
1798 }
1799 return;
1800}
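
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: when true, log that a port recovery reset is being attempted.
 *
 * Attempt to recover an SLI-4 port: take it offline, flush the IO rings,
 * restart the board, re-enable interrupts, and bring the port back online.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/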
1813static int
1814lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1815 bool en_rn_msg)
1816{
1817 int rc;
1818 uint32_t intr_mode;
1819
1820 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1821 LPFC_SLI_INTF_IF_TYPE_2) {
1822
1823
1824
1825
1826 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1827 if (rc)
1828 return rc;
1829 }
1830
1831
1832 if (en_rn_msg)
1833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1834 "2887 Reset Needed: Attempting Port "
1835 "Recovery...\n");
1836 lpfc_offline_prep(phba, mbx_action);
1837 lpfc_sli_flush_io_rings(phba);
1838 lpfc_offline(phba);
1839
1840 lpfc_sli4_disable_intr(phba);
1841 rc = lpfc_sli_brdrestart(phba);
1842 if (rc) {
1843 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1844 "6309 Failed to restart board\n");
1845 return rc;
1846 }
1847
1848 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1849 if (intr_mode == LPFC_INTR_ERROR) {
1850 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1851 "3175 Failed to enable interrupt\n");
1852 return -EIO;
1853 }
1854 phba->intr_mode = intr_mode;
1855 rc = lpfc_online(phba);
1856 if (rc == 0)
1857 lpfc_unblock_mgmt_io(phba);
1858
1859 return rc;
1860}
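
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handle hardware error attentions on SLI-4 HBAs. For if_type 0 ports,
 * check whether the unrecoverable error is actually recoverable and, if so,
 * wait for the port to report ready before attempting a function reset.
 * For if_type 2/6 ports, decode the port status and error registers, handle
 * over-temperature events, and attempt port recovery. When recovery fails,
 * a dump event is reported to management applications.
 **/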
1869static void
1870lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1871{
1872 struct lpfc_vport *vport = phba->pport;
1873 uint32_t event_data;
1874 struct Scsi_Host *shost;
1875 uint32_t if_type;
1876 struct lpfc_register portstat_reg = {0};
1877 uint32_t reg_err1, reg_err2;
1878 uint32_t uerrlo_reg, uemasklo_reg;
1879 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1880 bool en_rn_msg = true;
1881 struct temp_event temp_event_data;
1882 struct lpfc_register portsmphr_reg;
1883 int rc, i;
1884
1885
1886
1887
1888 if (pci_channel_offline(phba->pcidev)) {
1889 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1890 "3166 pci channel is offline\n");
1891 lpfc_sli4_offline_eratt(phba);
1892 return;
1893 }
1894
1895 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1896 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1897 switch (if_type) {
1898 case LPFC_SLI_INTF_IF_TYPE_0:
1899 pci_rd_rc1 = lpfc_readl(
1900 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1901 &uerrlo_reg);
1902 pci_rd_rc2 = lpfc_readl(
1903 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1904 &uemasklo_reg);
1905
1906 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1907 return;
1908 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1909 lpfc_sli4_offline_eratt(phba);
1910 return;
1911 }
1912 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1913 "7623 Checking UE recoverable");
1914
1915 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1916 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1917 &portsmphr_reg.word0))
1918 continue;
1919
1920 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1921 &portsmphr_reg);
1922 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1923 LPFC_PORT_SEM_UE_RECOVERABLE)
1924 break;
1925
1926 msleep(1000);
1927 }
1928
1929 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1930 "4827 smphr_port_status x%x : Waited %dSec",
1931 smphr_port_status, i);
1932
1933
1934 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1935 LPFC_PORT_SEM_UE_RECOVERABLE) {
1936 for (i = 0; i < 20; i++) {
1937 msleep(1000);
1938 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1939 &portsmphr_reg.word0) &&
1940 (LPFC_POST_STAGE_PORT_READY ==
1941 bf_get(lpfc_port_smphr_port_status,
1942 &portsmphr_reg))) {
1943 rc = lpfc_sli4_port_sta_fn_reset(phba,
1944 LPFC_MBX_NO_WAIT, en_rn_msg);
1945 if (rc == 0)
1946 return;
1947 lpfc_printf_log(phba, KERN_ERR,
1948 LOG_TRACE_EVENT,
1949 "4215 Failed to recover UE");
1950 break;
1951 }
1952 }
1953 }
1954 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1955 "7624 Firmware not ready: Failing UE recovery,"
1956 " waited %dSec", i);
1957 phba->link_state = LPFC_HBA_ERROR;
1958 break;
1959
1960 case LPFC_SLI_INTF_IF_TYPE_2:
1961 case LPFC_SLI_INTF_IF_TYPE_6:
1962 pci_rd_rc1 = lpfc_readl(
1963 phba->sli4_hba.u.if_type2.STATUSregaddr,
1964 &portstat_reg.word0);
1965
1966 if (pci_rd_rc1 == -EIO) {
1967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1968 "3151 PCI bus read access failure: x%x\n",
1969 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1970 lpfc_sli4_offline_eratt(phba);
1971 return;
1972 }
1973 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1974 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1975 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1977 "2889 Port Overtemperature event, "
1978 "taking port offline Data: x%x x%x\n",
1979 reg_err1, reg_err2);
1980
1981 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
1982 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1983 temp_event_data.event_code = LPFC_CRIT_TEMP;
1984 temp_event_data.data = 0xFFFFFFFF;
1985
1986 shost = lpfc_shost_from_vport(phba->pport);
1987 fc_host_post_vendor_event(shost, fc_get_event_number(),
1988 sizeof(temp_event_data),
1989 (char *)&temp_event_data,
1990 SCSI_NL_VID_TYPE_PCI
1991 | PCI_VENDOR_ID_EMULEX);
1992
1993 spin_lock_irq(&phba->hbalock);
1994 phba->over_temp_state = HBA_OVER_TEMP;
1995 spin_unlock_irq(&phba->hbalock);
1996 lpfc_sli4_offline_eratt(phba);
1997 return;
1998 }
1999 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2000 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2002 "3143 Port Down: Firmware Update "
2003 "Detected\n");
2004 en_rn_msg = false;
2005 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2006 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2007 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2008 "3144 Port Down: Debug Dump\n");
2009 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2010 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2011 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2012 "3145 Port Down: Provisioning\n");
2013
2014
2015 if (!phba->cfg_enable_hba_reset)
2016 return;
2017
2018
2019 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2020 en_rn_msg);
2021 if (rc == 0) {
2022
2023 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2024 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2025 return;
2026 else
2027 break;
2028 }
2029
2030 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2031 "3152 Unrecoverable error\n");
2032 phba->link_state = LPFC_HBA_ERROR;
2033 break;
2034 case LPFC_SLI_INTF_IF_TYPE_1:
2035 default:
2036 break;
2037 }
2038 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2039 "3123 Report dump event to upper layer\n");
2040
2041 lpfc_board_errevt_to_mgmt(phba);
2042
2043 event_data = FC_REG_DUMP_EVENT;
2044 shost = lpfc_shost_from_vport(vport);
2045 fc_host_post_vendor_event(shost, fc_get_event_number(),
2046 sizeof(event_data), (char *) &event_data,
2047 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2048}
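
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoke the SLI-3 or SLI-4 specific error attention handler through the
 * per-adapter function pointer.
 **/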
2061void
2062lpfc_handle_eratt(struct lpfc_hba *phba)
2063{
2064 (*phba->lpfc_handle_eratt)(phba);
2065}
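
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handle a link attention event from the worker thread: flush pending ELS
 * commands and issue a READ_TOPOLOGY mailbox command to retrieve the new
 * link state. If the mailbox cannot be issued, the link attention is
 * cleared, the link is marked down, and the HBA is put into error state.
 **/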
2074void
2075lpfc_handle_latt(struct lpfc_hba *phba)
2076{
2077 struct lpfc_vport *vport = phba->pport;
2078 struct lpfc_sli *psli = &phba->sli;
2079 LPFC_MBOXQ_t *pmb;
2080 volatile uint32_t control;
2081 struct lpfc_dmabuf *mp;
2082 int rc = 0;
2083
2084 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2085 if (!pmb) {
2086 rc = 1;
2087 goto lpfc_handle_latt_err_exit;
2088 }
2089
2090 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2091 if (!mp) {
2092 rc = 2;
2093 goto lpfc_handle_latt_free_pmb;
2094 }
2095
2096 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2097 if (!mp->virt) {
2098 rc = 3;
2099 goto lpfc_handle_latt_free_mp;
2100 }
2101
2102
2103 lpfc_els_flush_all_cmd(phba);
2104
2105 psli->slistat.link_event++;
2106 lpfc_read_topology(phba, pmb, mp);
2107 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2108 pmb->vport = vport;
2109
2110 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2111 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2112 if (rc == MBX_NOT_FINISHED) {
2113 rc = 4;
2114 goto lpfc_handle_latt_free_mbuf;
2115 }
2116
2117
2118 spin_lock_irq(&phba->hbalock);
2119 writel(HA_LATT, phba->HAregaddr);
2120 readl(phba->HAregaddr);
2121 spin_unlock_irq(&phba->hbalock);
2122
2123 return;
2124
2125lpfc_handle_latt_free_mbuf:
2126 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2127 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2128lpfc_handle_latt_free_mp:
2129 kfree(mp);
2130lpfc_handle_latt_free_pmb:
2131 mempool_free(pmb, phba->mbox_mem_pool);
2132lpfc_handle_latt_err_exit:
2133
2134 spin_lock_irq(&phba->hbalock);
2135 psli->sli_flag |= LPFC_PROCESS_LA;
2136 control = readl(phba->HCregaddr);
2137 control |= HC_LAINT_ENA;
2138 writel(control, phba->HCregaddr);
2139 readl(phba->HCregaddr);
2140
2141
2142 writel(HA_LATT, phba->HAregaddr);
2143 readl(phba->HAregaddr);
2144 spin_unlock_irq(&phba->hbalock);
2145 lpfc_linkdown(phba);
2146 phba->link_state = LPFC_HBA_ERROR;
2147
2148 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2149 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2150
2151 return;
2152}
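
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * Walk the VPD buffer and copy the serial number, model description, model
 * name, program type, and port fields into the hba data structure.
 *
 * Return codes
 *   0 - the VPD pointer passed in is NULL.
 *   1 - success.
 **/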
2168int
2169lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2170{
2171 uint8_t lenlo, lenhi;
2172 int Length;
2173 int i, j;
2174 int finished = 0;
2175 int index = 0;
2176
2177 if (!vpd)
2178 return 0;
2179
2180
2181 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2182 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2183 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2184 (uint32_t) vpd[3]);
2185 while (!finished && (index < (len - 4))) {
2186 switch (vpd[index]) {
2187 case 0x82:
2188 case 0x91:
2189 index += 1;
2190 lenlo = vpd[index];
2191 index += 1;
2192 lenhi = vpd[index];
2193 index += 1;
2194 i = ((((unsigned short)lenhi) << 8) + lenlo);
2195 index += i;
2196 break;
2197 case 0x90:
2198 index += 1;
2199 lenlo = vpd[index];
2200 index += 1;
2201 lenhi = vpd[index];
2202 index += 1;
2203 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2204 if (Length > len - index)
2205 Length = len - index;
2206 while (Length > 0) {
2207
2208 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2209 index += 2;
2210 i = vpd[index];
2211 index += 1;
2212 j = 0;
2213 Length -= (3+i);
2214 while(i--) {
2215 phba->SerialNumber[j++] = vpd[index++];
2216 if (j == 31)
2217 break;
2218 }
2219 phba->SerialNumber[j] = 0;
2220 continue;
2221 }
2222 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2223 phba->vpd_flag |= VPD_MODEL_DESC;
2224 index += 2;
2225 i = vpd[index];
2226 index += 1;
2227 j = 0;
2228 Length -= (3+i);
2229 while(i--) {
2230 phba->ModelDesc[j++] = vpd[index++];
2231 if (j == 255)
2232 break;
2233 }
2234 phba->ModelDesc[j] = 0;
2235 continue;
2236 }
2237 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2238 phba->vpd_flag |= VPD_MODEL_NAME;
2239 index += 2;
2240 i = vpd[index];
2241 index += 1;
2242 j = 0;
2243 Length -= (3+i);
2244 while(i--) {
2245 phba->ModelName[j++] = vpd[index++];
2246 if (j == 79)
2247 break;
2248 }
2249 phba->ModelName[j] = 0;
2250 continue;
2251 }
2252 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2253 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2254 index += 2;
2255 i = vpd[index];
2256 index += 1;
2257 j = 0;
2258 Length -= (3+i);
2259 while(i--) {
2260 phba->ProgramType[j++] = vpd[index++];
2261 if (j == 255)
2262 break;
2263 }
2264 phba->ProgramType[j] = 0;
2265 continue;
2266 }
2267 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2268 phba->vpd_flag |= VPD_PORT;
2269 index += 2;
2270 i = vpd[index];
2271 index += 1;
2272 j = 0;
2273 Length -= (3+i);
2274 while(i--) {
2275 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2276 (phba->sli4_hba.pport_name_sta ==
2277 LPFC_SLI4_PPNAME_GET)) {
2278 j++;
2279 index++;
2280 } else
2281 phba->Port[j++] = vpd[index++];
2282 if (j == 19)
2283 break;
2284 }
2285 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2286 (phba->sli4_hba.pport_name_sta ==
2287 LPFC_SLI4_PPNAME_NON))
2288 phba->Port[j] = 0;
2289 continue;
2290 }
2291 else {
2292 index += 2;
2293 i = vpd[index];
2294 index += 1;
2295 index += i;
2296 Length -= (3 + i);
2297 }
2298 }
2299 finished = 0;
2300 break;
2301 case 0x78:
2302 finished = 1;
2303 break;
2304 default:
2305 index ++;
2306 break;
2307 }
2308 }
2309
2310 return(1);
2311}
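
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the buffer to hold the derived model name.
 * @descp: pointer to the buffer to hold the derived description.
 *
 * Derive the adapter model name and description from the PCI device ID and
 * the adapter's link-speed capabilities. Nothing is done if both strings
 * are already populated.
 **/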
2325static void
2326lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2327{
2328 lpfc_vpd_t *vp;
2329 uint16_t dev_id = phba->pcidev->device;
2330 int max_speed;
2331 int GE = 0;
2332 int oneConnect = 0;
2333 struct {
2334 char *name;
2335 char *bus;
2336 char *function;
2337 } m = {"<Unknown>", "", ""};
2338
2339 if (mdp && mdp[0] != '\0'
2340 && descp && descp[0] != '\0')
2341 return;
2342
2343 if (phba->lmt & LMT_64Gb)
2344 max_speed = 64;
2345 else if (phba->lmt & LMT_32Gb)
2346 max_speed = 32;
2347 else if (phba->lmt & LMT_16Gb)
2348 max_speed = 16;
2349 else if (phba->lmt & LMT_10Gb)
2350 max_speed = 10;
2351 else if (phba->lmt & LMT_8Gb)
2352 max_speed = 8;
2353 else if (phba->lmt & LMT_4Gb)
2354 max_speed = 4;
2355 else if (phba->lmt & LMT_2Gb)
2356 max_speed = 2;
2357 else if (phba->lmt & LMT_1Gb)
2358 max_speed = 1;
2359 else
2360 max_speed = 0;
2361
2362 vp = &phba->vpd;
2363
2364 switch (dev_id) {
2365 case PCI_DEVICE_ID_FIREFLY:
2366 m = (typeof(m)){"LP6000", "PCI",
2367 "Obsolete, Unsupported Fibre Channel Adapter"};
2368 break;
2369 case PCI_DEVICE_ID_SUPERFLY:
2370 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2371 m = (typeof(m)){"LP7000", "PCI", ""};
2372 else
2373 m = (typeof(m)){"LP7000E", "PCI", ""};
2374 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2375 break;
2376 case PCI_DEVICE_ID_DRAGONFLY:
2377 m = (typeof(m)){"LP8000", "PCI",
2378 "Obsolete, Unsupported Fibre Channel Adapter"};
2379 break;
2380 case PCI_DEVICE_ID_CENTAUR:
2381 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2382 m = (typeof(m)){"LP9002", "PCI", ""};
2383 else
2384 m = (typeof(m)){"LP9000", "PCI", ""};
2385 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2386 break;
2387 case PCI_DEVICE_ID_RFLY:
2388 m = (typeof(m)){"LP952", "PCI",
2389 "Obsolete, Unsupported Fibre Channel Adapter"};
2390 break;
2391 case PCI_DEVICE_ID_PEGASUS:
2392 m = (typeof(m)){"LP9802", "PCI-X",
2393 "Obsolete, Unsupported Fibre Channel Adapter"};
2394 break;
2395 case PCI_DEVICE_ID_THOR:
2396 m = (typeof(m)){"LP10000", "PCI-X",
2397 "Obsolete, Unsupported Fibre Channel Adapter"};
2398 break;
2399 case PCI_DEVICE_ID_VIPER:
2400 m = (typeof(m)){"LPX1000", "PCI-X",
2401 "Obsolete, Unsupported Fibre Channel Adapter"};
2402 break;
2403 case PCI_DEVICE_ID_PFLY:
2404 m = (typeof(m)){"LP982", "PCI-X",
2405 "Obsolete, Unsupported Fibre Channel Adapter"};
2406 break;
2407 case PCI_DEVICE_ID_TFLY:
2408 m = (typeof(m)){"LP1050", "PCI-X",
2409 "Obsolete, Unsupported Fibre Channel Adapter"};
2410 break;
2411 case PCI_DEVICE_ID_HELIOS:
2412 m = (typeof(m)){"LP11000", "PCI-X2",
2413 "Obsolete, Unsupported Fibre Channel Adapter"};
2414 break;
2415 case PCI_DEVICE_ID_HELIOS_SCSP:
2416 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2417 "Obsolete, Unsupported Fibre Channel Adapter"};
2418 break;
2419 case PCI_DEVICE_ID_HELIOS_DCSP:
2420 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2421 "Obsolete, Unsupported Fibre Channel Adapter"};
2422 break;
2423 case PCI_DEVICE_ID_NEPTUNE:
2424 m = (typeof(m)){"LPe1000", "PCIe",
2425 "Obsolete, Unsupported Fibre Channel Adapter"};
2426 break;
2427 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2428 m = (typeof(m)){"LPe1000-SP", "PCIe",
2429 "Obsolete, Unsupported Fibre Channel Adapter"};
2430 break;
2431 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2432 m = (typeof(m)){"LPe1002-SP", "PCIe",
2433 "Obsolete, Unsupported Fibre Channel Adapter"};
2434 break;
2435 case PCI_DEVICE_ID_BMID:
2436 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2437 break;
2438 case PCI_DEVICE_ID_BSMB:
2439 m = (typeof(m)){"LP111", "PCI-X2",
2440 "Obsolete, Unsupported Fibre Channel Adapter"};
2441 break;
2442 case PCI_DEVICE_ID_ZEPHYR:
2443 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2444 break;
2445 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2446 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2447 break;
2448 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2449 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2450 GE = 1;
2451 break;
2452 case PCI_DEVICE_ID_ZMID:
2453 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2454 break;
2455 case PCI_DEVICE_ID_ZSMB:
2456 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2457 break;
2458 case PCI_DEVICE_ID_LP101:
2459 m = (typeof(m)){"LP101", "PCI-X",
2460 "Obsolete, Unsupported Fibre Channel Adapter"};
2461 break;
2462 case PCI_DEVICE_ID_LP10000S:
2463 m = (typeof(m)){"LP10000-S", "PCI",
2464 "Obsolete, Unsupported Fibre Channel Adapter"};
2465 break;
2466 case PCI_DEVICE_ID_LP11000S:
2467 m = (typeof(m)){"LP11000-S", "PCI-X2",
2468 "Obsolete, Unsupported Fibre Channel Adapter"};
2469 break;
2470 case PCI_DEVICE_ID_LPE11000S:
2471 m = (typeof(m)){"LPe11000-S", "PCIe",
2472 "Obsolete, Unsupported Fibre Channel Adapter"};
2473 break;
2474 case PCI_DEVICE_ID_SAT:
2475 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2476 break;
2477 case PCI_DEVICE_ID_SAT_MID:
2478 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2479 break;
2480 case PCI_DEVICE_ID_SAT_SMB:
2481 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2482 break;
2483 case PCI_DEVICE_ID_SAT_DCSP:
2484 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2485 break;
2486 case PCI_DEVICE_ID_SAT_SCSP:
2487 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2488 break;
2489 case PCI_DEVICE_ID_SAT_S:
2490 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2491 break;
2492 case PCI_DEVICE_ID_HORNET:
2493 m = (typeof(m)){"LP21000", "PCIe",
2494 "Obsolete, Unsupported FCoE Adapter"};
2495 GE = 1;
2496 break;
2497 case PCI_DEVICE_ID_PROTEUS_VF:
2498 m = (typeof(m)){"LPev12000", "PCIe IOV",
2499 "Obsolete, Unsupported Fibre Channel Adapter"};
2500 break;
2501 case PCI_DEVICE_ID_PROTEUS_PF:
2502 m = (typeof(m)){"LPev12000", "PCIe IOV",
2503 "Obsolete, Unsupported Fibre Channel Adapter"};
2504 break;
2505 case PCI_DEVICE_ID_PROTEUS_S:
2506 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2507 "Obsolete, Unsupported Fibre Channel Adapter"};
2508 break;
2509 case PCI_DEVICE_ID_TIGERSHARK:
2510 oneConnect = 1;
2511 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2512 break;
2513 case PCI_DEVICE_ID_TOMCAT:
2514 oneConnect = 1;
2515 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2516 break;
2517 case PCI_DEVICE_ID_FALCON:
2518 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2519 "EmulexSecure Fibre"};
2520 break;
2521 case PCI_DEVICE_ID_BALIUS:
2522 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2523 "Obsolete, Unsupported Fibre Channel Adapter"};
2524 break;
2525 case PCI_DEVICE_ID_LANCER_FC:
2526 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2527 break;
2528 case PCI_DEVICE_ID_LANCER_FC_VF:
2529 m = (typeof(m)){"LPe16000", "PCIe",
2530 "Obsolete, Unsupported Fibre Channel Adapter"};
2531 break;
2532 case PCI_DEVICE_ID_LANCER_FCOE:
2533 oneConnect = 1;
2534 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2535 break;
2536 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2537 oneConnect = 1;
2538 m = (typeof(m)){"OCe15100", "PCIe",
2539 "Obsolete, Unsupported FCoE"};
2540 break;
2541 case PCI_DEVICE_ID_LANCER_G6_FC:
2542 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2543 break;
2544 case PCI_DEVICE_ID_LANCER_G7_FC:
2545 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2546 break;
2547 case PCI_DEVICE_ID_SKYHAWK:
2548 case PCI_DEVICE_ID_SKYHAWK_VF:
2549 oneConnect = 1;
2550 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2551 break;
2552 default:
2553 m = (typeof(m)){"Unknown", "", ""};
2554 break;
2555 }
2556
2557 if (mdp && mdp[0] == '\0')
2558 snprintf(mdp, 79, "%s", m.name);
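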
2559
2560
2561
2562
2563 if (descp && descp[0] == '\0') {
2564 if (oneConnect)
2565 snprintf(descp, 255,
2566 "Emulex OneConnect %s, %s Initiator %s",
2567 m.name, m.function,
2568 phba->Port);
2569 else if (max_speed == 0)
2570 snprintf(descp, 255,
2571 "Emulex %s %s %s",
2572 m.name, m.bus, m.function);
2573 else
2574 snprintf(descp, 255,
2575 "Emulex %s %d%s %s %s",
2576 m.name, max_speed, (GE) ? "GE" : "Gb",
2577 m.bus, m.function);
2578 }
2579}
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
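/**
 * lpfc_post_buffer - Post IOCB buffers to the given ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to the ring the buffers are posted to.
 * @cnt: number of buffers to post.
 *
 * Allocates DMA buffers (two per IOCB where possible) and issues
 * QUE_RING_BUF64_CN commands to post them. Buffers that could not be
 * posted are accounted for in pring->missbufcnt.
 *
 * Return: number of buffers NOT posted; 0 when everything was posted.
 **/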
2593int
2594lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2595{
2596 IOCB_t *icmd;
2597 struct lpfc_iocbq *iocb;
2598 struct lpfc_dmabuf *mp1, *mp2;
2599
2600 cnt += pring->missbufcnt;
2601
2602
2603 while (cnt > 0) {
2604
2605 iocb = lpfc_sli_get_iocbq(phba);
2606 if (iocb == NULL) {
2607 pring->missbufcnt = cnt;
2608 return cnt;
2609 }
2610 icmd = &iocb->iocb;
2611
2612
2613
2614 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2615 if (mp1)
2616 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2617 if (!mp1 || !mp1->virt) {
2618 kfree(mp1);
2619 lpfc_sli_release_iocbq(phba, iocb);
2620 pring->missbufcnt = cnt;
2621 return cnt;
2622 }
2623
2624 INIT_LIST_HEAD(&mp1->list);
2625
2626 if (cnt > 1) {
2627 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2628 if (mp2)
2629 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2630 &mp2->phys);
2631 if (!mp2 || !mp2->virt) {
2632 kfree(mp2);
2633 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2634 kfree(mp1);
2635 lpfc_sli_release_iocbq(phba, iocb);
2636 pring->missbufcnt = cnt;
2637 return cnt;
2638 }
2639
2640 INIT_LIST_HEAD(&mp2->list);
2641 } else {
2642 mp2 = NULL;
2643 }
2644
2645 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2646 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2647 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2648 icmd->ulpBdeCount = 1;
2649 cnt--;
2650 if (mp2) {
2651 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2652 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2653 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2654 cnt--;
2655 icmd->ulpBdeCount = 2;
2656 }
2657
2658 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2659 icmd->ulpLe = 1;
2660
2661 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2662 IOCB_ERROR) {
2663 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2664 kfree(mp1);
2665 cnt++;
2666 if (mp2) {
2667 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2668 kfree(mp2);
2669 cnt++;
2670 }
2671 lpfc_sli_release_iocbq(phba, iocb);
2672 pring->missbufcnt = cnt;
2673 return cnt;
2674 }
2675 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2676 if (mp2)
2677 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2678 }
2679 pring->missbufcnt = 0;
2680 return 0;
2681}
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
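/**
 * lpfc_post_rcv_buf - Post the initial receive buffers to the ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts LPFC_BUF_RING0 receive buffers to the SLI-3 ELS ring.
 * Always returns 0.
 **/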
2694static int
2695lpfc_post_rcv_buf(struct lpfc_hba *phba)
2696{
2697 struct lpfc_sli *psli = &phba->sli;
2698
2699
2700 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2701
2702
2703 return 0;
2704}
2705
2706#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2707
2708
2709
2710
2711
2712
2713
2714
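/**
 * lpfc_sha_init - Load the SHA-1 initial hash state
 * @HashResultPointer: pointer to the five 32-bit hash words to initialize.
 *
 * Sets the standard SHA-1 initialization constants H0 through H4.
 **/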
2715static void
2716lpfc_sha_init(uint32_t * HashResultPointer)
2717{
2718 HashResultPointer[0] = 0x67452301;
2719 HashResultPointer[1] = 0xEFCDAB89;
2720 HashResultPointer[2] = 0x98BADCFE;
2721 HashResultPointer[3] = 0x10325476;
2722 HashResultPointer[4] = 0xC3D2E1F0;
2723}
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
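/**
 * lpfc_sha_iterate - Run one 80-round SHA-1 compression over a work block
 * @HashResultPointer: current hash state, updated in place.
 * @HashWorkingPointer: 80-word working array; words 16-79 are expanded
 * here from the first 16 words using the SHA-1 message schedule.
 **/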
2735static void
2736lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2737{
2738 int t;
2739 uint32_t TEMP;
2740 uint32_t A, B, C, D, E;
2741 t = 16;
2742 do {
2743 HashWorkingPointer[t] =
2744 S(1,
2745 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2746 8] ^
2747 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2748 } while (++t <= 79);
2749 t = 0;
2750 A = HashResultPointer[0];
2751 B = HashResultPointer[1];
2752 C = HashResultPointer[2];
2753 D = HashResultPointer[3];
2754 E = HashResultPointer[4];
2755
2756 do {
2757 if (t < 20) {
2758 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2759 } else if (t < 40) {
2760 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2761 } else if (t < 60) {
2762 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2763 } else {
2764 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2765 }
2766 TEMP += S(5, A) + E + HashWorkingPointer[t];
2767 E = D;
2768 D = C;
2769 C = S(30, B);
2770 B = A;
2771 A = TEMP;
2772 } while (++t <= 79);
2773
2774 HashResultPointer[0] += A;
2775 HashResultPointer[1] += B;
2776 HashResultPointer[2] += C;
2777 HashResultPointer[3] += D;
2778 HashResultPointer[4] += E;
2779
2780}
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792static void
2793lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2794{
2795 *HashWorking = (*RandomChallenge ^ *HashWorking);
2796}
2797
2798
2799
2800
2801
2802
2803
2804
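/**
 * lpfc_hba_init - Compute the adapter challenge-response hash
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: buffer that receives the resulting five-word hash.
 *
 * Builds an SHA-1 style working block from the HBA WWNN and the random
 * challenge data, then iterates the hash into @hbainit.
 **/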
2805void
2806lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2807{
2808 int t;
2809 uint32_t *HashWorking;
2810 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2811
2812 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2813 if (!HashWorking)
2814 return;
2815
2816 HashWorking[0] = HashWorking[78] = *pwwnn++;
2817 HashWorking[1] = HashWorking[79] = *pwwnn;
2818
2819 for (t = 0; t < 7; t++)
2820 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2821
2822 lpfc_sha_init(hbainit);
2823 lpfc_sha_iterate(hbainit, HashWorking);
2824 kfree(HashWorking);
2825}
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
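/**
 * lpfc_cleanup - Clean up and remove all discovered nodes of a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * Walks the vport node list, dropping references and driving each ndlp
 * through the DEVICE_RECOVERY/DEVICE_RM discovery state machine events,
 * then waits (up to roughly 30 seconds) for the node list to drain
 * before cleaning up the vport RRQs.
 **/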
2836void
2837lpfc_cleanup(struct lpfc_vport *vport)
2838{
2839 struct lpfc_hba *phba = vport->phba;
2840 struct lpfc_nodelist *ndlp, *next_ndlp;
2841 int i = 0;
2842
2843 if (phba->link_state > LPFC_LINK_DOWN)
2844 lpfc_port_link_failure(vport);
2845
2846 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2847 if (!NLP_CHK_NODE_ACT(ndlp)) {
2848 ndlp = lpfc_enable_node(vport, ndlp,
2849 NLP_STE_UNUSED_NODE);
2850 if (!ndlp)
2851 continue;
2852 spin_lock_irq(&phba->ndlp_lock);
2853 NLP_SET_FREE_REQ(ndlp);
2854 spin_unlock_irq(&phba->ndlp_lock);
2855
2856 lpfc_nlp_put(ndlp);
2857 continue;
2858 }
2859 spin_lock_irq(&phba->ndlp_lock);
2860 if (NLP_CHK_FREE_REQ(ndlp)) {
2861
2862 spin_unlock_irq(&phba->ndlp_lock);
2863 continue;
2864 } else
2865
2866 NLP_SET_FREE_REQ(ndlp);
2867 spin_unlock_irq(&phba->ndlp_lock);
2868
2869 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2870 ndlp->nlp_DID == Fabric_DID) {
2871
2872 lpfc_nlp_put(ndlp);
2873 continue;
2874 }
2875
2876
2877
2878
2879 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2880 lpfc_nlp_put(ndlp);
2881 continue;
2882 }
2883
2884 if (ndlp->nlp_type & NLP_FABRIC)
2885 lpfc_disc_state_machine(vport, ndlp, NULL,
2886 NLP_EVT_DEVICE_RECOVERY);
2887
2888 lpfc_disc_state_machine(vport, ndlp, NULL,
2889 NLP_EVT_DEVICE_RM);
2890 }
2891
2892
2893
2894
2895
2896 while (!list_empty(&vport->fc_nodes)) {
2897 if (i++ > 3000) {
2898 lpfc_printf_vlog(vport, KERN_ERR,
2899 LOG_TRACE_EVENT,
2900 "0233 Nodelist not empty\n");
2901 list_for_each_entry_safe(ndlp, next_ndlp,
2902 &vport->fc_nodes, nlp_listp) {
2903 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2904 LOG_TRACE_EVENT,
2905 "0282 did:x%x ndlp:x%px "
2906 "usgmap:x%x refcnt:%d\n",
2907 ndlp->nlp_DID, (void *)ndlp,
2908 ndlp->nlp_usg_map,
2909 kref_read(&ndlp->kref));
2910 }
2911 break;
2912 }
2913
2914
2915 msleep(10);
2916 }
2917 lpfc_cleanup_vports_rrqs(vport, NULL);
2918}
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928void
2929lpfc_stop_vport_timers(struct lpfc_vport *vport)
2930{
2931 del_timer_sync(&vport->els_tmofunc);
2932 del_timer_sync(&vport->delayed_disc_tmo);
2933 lpfc_can_disctmo(vport);
2934 return;
2935}
2936
2937
2938
2939
2940
2941
2942
2943
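/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop the FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Clears the FCF_REDISC_PEND flag and deletes the rediscovery wait timer.
 * Called with the hbalock held by the locked wrapper below.
 **/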
2944void
2945__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2946{
2947
2948 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2949
2950
2951 del_timer(&phba->fcf.redisc_wait);
2952}
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
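/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop the FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the hbalock, stops the FCF rediscovery wait timer if one is
 * pending and clears the FCF_DEAD_DISC and FCF_ACVL_DISC flags.
 **/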
2963void
2964lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2965{
2966 spin_lock_irq(&phba->hbalock);
2967 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2968
2969 spin_unlock_irq(&phba->hbalock);
2970 return;
2971 }
2972 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2973
2974 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2975 spin_unlock_irq(&phba->hbalock);
2976}
2977
2978
2979
2980
2981
2982
2983
2984
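/**
 * lpfc_stop_hba_timers - Stop all HBA-owned timers and delayed work
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the physical port's vport timers, cancels the EQ delay and
 * idle-stat delayed work, and deletes the mailbox, fabric-block, error
 * attention, heartbeat and device-group specific timers (FCP poll for
 * SLI-3, FCF rediscovery wait for SLI-4).
 **/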
2985void
2986lpfc_stop_hba_timers(struct lpfc_hba *phba)
2987{
2988 if (phba->pport)
2989 lpfc_stop_vport_timers(phba->pport);
2990 cancel_delayed_work_sync(&phba->eq_delay_work);
2991 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
2992 del_timer_sync(&phba->sli.mbox_tmo);
2993 del_timer_sync(&phba->fabric_block_timer);
2994 del_timer_sync(&phba->eratt_poll);
2995 del_timer_sync(&phba->hb_tmofunc);
2996 if (phba->sli_rev == LPFC_SLI_REV4) {
2997 del_timer_sync(&phba->rrq_tmr);
2998 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2999 }
3000 phba->hb_outstanding = 0;
3001
3002 switch (phba->pci_dev_grp) {
3003 case LPFC_PCI_DEV_LP:
3004
3005 del_timer_sync(&phba->fcp_poll_timer);
3006 break;
3007 case LPFC_PCI_DEV_OC:
3008
3009 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3010 break;
3011 default:
3012 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3013 "0297 Invalid device group (x%x)\n",
3014 phba->pci_dev_grp);
3015 break;
3016 }
3017 return;
3018}
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
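/**
 * lpfc_block_mgmt_io - Mark the HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_NO_WAIT to return immediately, otherwise wait
 * for any active mailbox command to complete.
 *
 * Sets LPFC_BLOCK_MGMT_IO so no new management I/O is accepted, then
 * optionally polls until the active mailbox command finishes or its
 * timeout expires.
 **/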
3031static void
3032lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3033{
3034 unsigned long iflag;
3035 uint8_t actcmd = MBX_HEARTBEAT;
3036 unsigned long timeout;
3037
3038 spin_lock_irqsave(&phba->hbalock, iflag);
3039 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3040 spin_unlock_irqrestore(&phba->hbalock, iflag);
3041 if (mbx_action == LPFC_MBX_NO_WAIT)
3042 return;
3043 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3044 spin_lock_irqsave(&phba->hbalock, iflag);
3045 if (phba->sli.mbox_active) {
3046 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3047
3048
3049
3050 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3051 phba->sli.mbox_active) * 1000) + jiffies;
3052 }
3053 spin_unlock_irqrestore(&phba->hbalock, iflag);
3054
3055
3056 while (phba->sli.mbox_active) {
3057
3058 msleep(2);
3059 if (time_after(jiffies, timeout)) {
3060 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3061 "2813 Mgmt IO is Blocked %x "
3062 "- mbox cmd %x still active\n",
3063 phba->sli.sli_flag, actcmd);
3064 break;
3065 }
3066 }
3067}
3068
3069
3070
3071
3072
3073
3074
3075
3076
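/**
 * lpfc_sli4_node_prep - Assign RPIs to all active nodes (SLI-4 only)
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks every vport's node list and allocates an RPI for each active
 * ndlp. Nodes for which no RPI can be allocated are marked inactive.
 **/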
3077void
3078lpfc_sli4_node_prep(struct lpfc_hba *phba)
3079{
3080 struct lpfc_nodelist *ndlp, *next_ndlp;
3081 struct lpfc_vport **vports;
3082 int i, rpi;
3083 unsigned long flags;
3084
3085 if (phba->sli_rev != LPFC_SLI_REV4)
3086 return;
3087
3088 vports = lpfc_create_vport_work_array(phba);
3089 if (vports == NULL)
3090 return;
3091
3092 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3093 if (vports[i]->load_flag & FC_UNLOADING)
3094 continue;
3095
3096 list_for_each_entry_safe(ndlp, next_ndlp,
3097 &vports[i]->fc_nodes,
3098 nlp_listp) {
3099 if (!NLP_CHK_NODE_ACT(ndlp))
3100 continue;
3101 rpi = lpfc_sli4_alloc_rpi(phba);
3102 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3103 spin_lock_irqsave(&phba->ndlp_lock, flags);
3104 NLP_CLR_NODE_ACT(ndlp);
3105 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3106 continue;
3107 }
3108 ndlp->nlp_rpi = rpi;
3109 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3110 LOG_NODE | LOG_DISCOVERY,
3111 "0009 Assign RPI x%x to ndlp x%px "
3112 "DID:x%06x flg:x%x map:x%x\n",
3113 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3114 ndlp->nlp_flag, ndlp->nlp_usg_map);
3115 }
3116 }
3117 lpfc_destroy_vport_work_array(phba, vports);
3118}
3119
3120
3121
3122
3123
3124
3125
3126
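/**
 * lpfc_create_expedite_pool - Create the expedite IO buffer pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Moves up to XRI_BATCH IO buffers from hardware queue 0's put list into
 * the expedite pool and marks them as expedite buffers.
 **/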
3127static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3128{
3129 struct lpfc_sli4_hdw_queue *qp;
3130 struct lpfc_io_buf *lpfc_ncmd;
3131 struct lpfc_io_buf *lpfc_ncmd_next;
3132 struct lpfc_epd_pool *epd_pool;
3133 unsigned long iflag;
3134
3135 epd_pool = &phba->epd_pool;
3136 qp = &phba->sli4_hba.hdwq[0];
3137
3138 spin_lock_init(&epd_pool->lock);
3139 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3140 spin_lock(&epd_pool->lock);
3141 INIT_LIST_HEAD(&epd_pool->list);
3142 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3143 &qp->lpfc_io_buf_list_put, list) {
3144 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3145 lpfc_ncmd->expedite = true;
3146 qp->put_io_bufs--;
3147 epd_pool->count++;
3148 if (epd_pool->count >= XRI_BATCH)
3149 break;
3150 }
3151 spin_unlock(&epd_pool->lock);
3152 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3153}
3154
3155
3156
3157
3158
3159
3160
3161
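/**
 * lpfc_destroy_expedite_pool - Destroy the expedite IO buffer pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns all buffers held in the expedite pool to hardware queue 0's
 * put list.
 **/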
3162static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3163{
3164 struct lpfc_sli4_hdw_queue *qp;
3165 struct lpfc_io_buf *lpfc_ncmd;
3166 struct lpfc_io_buf *lpfc_ncmd_next;
3167 struct lpfc_epd_pool *epd_pool;
3168 unsigned long iflag;
3169
3170 epd_pool = &phba->epd_pool;
3171 qp = &phba->sli4_hba.hdwq[0];
3172
3173 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3174 spin_lock(&epd_pool->lock);
3175 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3176 &epd_pool->list, list) {
3177 list_move_tail(&lpfc_ncmd->list,
3178 &qp->lpfc_io_buf_list_put);
3179 lpfc_ncmd->expedite = false;
3180 qp->put_io_bufs++;
3181 epd_pool->count--;
3182 }
3183 spin_unlock(&epd_pool->lock);
3184 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3185}
3186
3187
3188
3189
3190
3191
3192
3193
3194
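/**
 * lpfc_create_multixri_pools - Create per-HWQ public/private XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates a multixri pool for every hardware queue, seeds each public
 * (pbl) pool from the queue's put list and initializes the private (pvt)
 * pool watermarks. On allocation failure the pools created so far are
 * freed and XRI rebalancing is disabled.
 **/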
3195void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3196{
3197 u32 i, j;
3198 u32 hwq_count;
3199 u32 count_per_hwq;
3200 struct lpfc_io_buf *lpfc_ncmd;
3201 struct lpfc_io_buf *lpfc_ncmd_next;
3202 unsigned long iflag;
3203 struct lpfc_sli4_hdw_queue *qp;
3204 struct lpfc_multixri_pool *multixri_pool;
3205 struct lpfc_pbl_pool *pbl_pool;
3206 struct lpfc_pvt_pool *pvt_pool;
3207
3208 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3209 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3210 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3211 phba->sli4_hba.io_xri_cnt);
3212
3213 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3214 lpfc_create_expedite_pool(phba);
3215
3216 hwq_count = phba->cfg_hdw_queue;
3217 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3218
3219 for (i = 0; i < hwq_count; i++) {
3220 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3221
3222 if (!multixri_pool) {
3223 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3224 "1238 Failed to allocate memory for "
3225 "multixri_pool\n");
3226
3227 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3228 lpfc_destroy_expedite_pool(phba);
3229
3230 j = 0;
3231 while (j < i) {
3232 qp = &phba->sli4_hba.hdwq[j];
3233 kfree(qp->p_multixri_pool);
3234 j++;
3235 }
3236 phba->cfg_xri_rebalancing = 0;
3237 return;
3238 }
3239
3240 qp = &phba->sli4_hba.hdwq[i];
3241 qp->p_multixri_pool = multixri_pool;
3242
3243 multixri_pool->xri_limit = count_per_hwq;
3244 multixri_pool->rrb_next_hwqid = i;
3245
3246
3247 pbl_pool = &multixri_pool->pbl_pool;
3248 spin_lock_init(&pbl_pool->lock);
3249 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3250 spin_lock(&pbl_pool->lock);
3251 INIT_LIST_HEAD(&pbl_pool->list);
3252 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3253 &qp->lpfc_io_buf_list_put, list) {
3254 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3255 qp->put_io_bufs--;
3256 pbl_pool->count++;
3257 }
3258 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3259 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3260 pbl_pool->count, i);
3261 spin_unlock(&pbl_pool->lock);
3262 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3263
3264
3265 pvt_pool = &multixri_pool->pvt_pool;
3266 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3267 pvt_pool->low_watermark = XRI_BATCH;
3268 spin_lock_init(&pvt_pool->lock);
3269 spin_lock_irqsave(&pvt_pool->lock, iflag);
3270 INIT_LIST_HEAD(&pvt_pool->list);
3271 pvt_pool->count = 0;
3272 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3273 }
3274}
3275
3276
3277
3278
3279
3280
3281
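/**
 * lpfc_destroy_multixri_pools - Destroy the per-HWQ XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * Flushes the IO rings if the port is not unloading, then moves all
 * buffers from each public and private pool back to the owning hardware
 * queue's put list and frees the pool structures.
 **/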
3282static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3283{
3284 u32 i;
3285 u32 hwq_count;
3286 struct lpfc_io_buf *lpfc_ncmd;
3287 struct lpfc_io_buf *lpfc_ncmd_next;
3288 unsigned long iflag;
3289 struct lpfc_sli4_hdw_queue *qp;
3290 struct lpfc_multixri_pool *multixri_pool;
3291 struct lpfc_pbl_pool *pbl_pool;
3292 struct lpfc_pvt_pool *pvt_pool;
3293
3294 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3295 lpfc_destroy_expedite_pool(phba);
3296
3297 if (!(phba->pport->load_flag & FC_UNLOADING))
3298 lpfc_sli_flush_io_rings(phba);
3299
3300 hwq_count = phba->cfg_hdw_queue;
3301
3302 for (i = 0; i < hwq_count; i++) {
3303 qp = &phba->sli4_hba.hdwq[i];
3304 multixri_pool = qp->p_multixri_pool;
3305 if (!multixri_pool)
3306 continue;
3307
3308 qp->p_multixri_pool = NULL;
3309
3310 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3311
3312
3313 pbl_pool = &multixri_pool->pbl_pool;
3314 spin_lock(&pbl_pool->lock);
3315
3316 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3317 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3318 pbl_pool->count, i);
3319
3320 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3321 &pbl_pool->list, list) {
3322 list_move_tail(&lpfc_ncmd->list,
3323 &qp->lpfc_io_buf_list_put);
3324 qp->put_io_bufs++;
3325 pbl_pool->count--;
3326 }
3327
3328 INIT_LIST_HEAD(&pbl_pool->list);
3329 pbl_pool->count = 0;
3330
3331 spin_unlock(&pbl_pool->lock);
3332
3333
3334 pvt_pool = &multixri_pool->pvt_pool;
3335 spin_lock(&pvt_pool->lock);
3336
3337 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3338 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3339 pvt_pool->count, i);
3340
3341 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3342 &pvt_pool->list, list) {
3343 list_move_tail(&lpfc_ncmd->list,
3344 &qp->lpfc_io_buf_list_put);
3345 qp->put_io_bufs++;
3346 pvt_pool->count--;
3347 }
3348
3349 INIT_LIST_HEAD(&pvt_pool->list);
3350 pvt_pool->count = 0;
3351
3352 spin_unlock(&pvt_pool->lock);
3353 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3354
3355 kfree(multixri_pool);
3356 }
3357}
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
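/**
 * lpfc_online - Bring the adapter online and initialize the port
 * @phba: pointer to lpfc hba data structure.
 *
 * Blocks management I/O, performs the SLI-3 or SLI-4 HBA setup, restores
 * the NVMe localport where applicable, clears FC_OFFLINE_MODE on all
 * vports, recreates the multi-XRI pools and registers the CPU hotplug
 * callback.
 *
 * Return: 0 on success or if already online, 1 if the HBA setup failed.
 **/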
3371int
3372lpfc_online(struct lpfc_hba *phba)
3373{
3374 struct lpfc_vport *vport;
3375 struct lpfc_vport **vports;
3376 int i, error = 0;
3377 bool vpis_cleared = false;
3378
3379 if (!phba)
3380 return 0;
3381 vport = phba->pport;
3382
3383 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3384 return 0;
3385
3386 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3387 "0458 Bring Adapter online\n");
3388
3389 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3390
3391 if (phba->sli_rev == LPFC_SLI_REV4) {
3392 if (lpfc_sli4_hba_setup(phba)) {
3393 lpfc_unblock_mgmt_io(phba);
3394 return 1;
3395 }
3396 spin_lock_irq(&phba->hbalock);
3397 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3398 vpis_cleared = true;
3399 spin_unlock_irq(&phba->hbalock);
3400
3401
3402
3403
3404 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3405 !phba->nvmet_support) {
3406 error = lpfc_nvme_create_localport(phba->pport);
3407 if (error)
3408 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3409 "6132 NVME restore reg failed "
3410 "on nvmei error x%x\n", error);
3411 }
3412 } else {
3413 lpfc_sli_queue_init(phba);
3414 if (lpfc_sli_hba_setup(phba)) {
3415 lpfc_unblock_mgmt_io(phba);
3416 return 1;
3417 }
3418 }
3419
3420 vports = lpfc_create_vport_work_array(phba);
3421 if (vports != NULL) {
3422 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3423 struct Scsi_Host *shost;
3424 shost = lpfc_shost_from_vport(vports[i]);
3425 spin_lock_irq(shost->host_lock);
3426 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3427 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3428 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3429 if (phba->sli_rev == LPFC_SLI_REV4) {
3430 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3431 if ((vpis_cleared) &&
3432 (vports[i]->port_type !=
3433 LPFC_PHYSICAL_PORT))
3434 vports[i]->vpi = 0;
3435 }
3436 spin_unlock_irq(shost->host_lock);
3437 }
3438 }
3439 lpfc_destroy_vport_work_array(phba, vports);
3440
3441 if (phba->cfg_xri_rebalancing)
3442 lpfc_create_multixri_pools(phba);
3443
3444 lpfc_cpuhp_add(phba);
3445
3446 lpfc_unblock_mgmt_io(phba);
3447 return 0;
3448}
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461void
3462lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3463{
3464 unsigned long iflag;
3465
3466 spin_lock_irqsave(&phba->hbalock, iflag);
3467 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3468 spin_unlock_irqrestore(&phba->hbalock, iflag);
3469}
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
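/**
 * lpfc_offline_prep - Prepare the HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: action to take on an active mailbox command.
 *
 * Blocks management I/O, brings the link down, unregisters VPIs and RPIs
 * for the nodes on every vport, shuts down the mailbox subsystem and
 * flushes the driver workqueue.
 **/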
3480void
3481lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3482{
3483 struct lpfc_vport *vport = phba->pport;
3484 struct lpfc_nodelist *ndlp, *next_ndlp;
3485 struct lpfc_vport **vports;
3486 struct Scsi_Host *shost;
3487 int i;
3488
3489 if (vport->fc_flag & FC_OFFLINE_MODE)
3490 return;
3491
3492 lpfc_block_mgmt_io(phba, mbx_action);
3493
3494 lpfc_linkdown(phba);
3495
3496
3497 vports = lpfc_create_vport_work_array(phba);
3498 if (vports != NULL) {
3499 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3500 if (vports[i]->load_flag & FC_UNLOADING)
3501 continue;
3502 shost = lpfc_shost_from_vport(vports[i]);
3503 spin_lock_irq(shost->host_lock);
3504 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3505 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3506 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3507 spin_unlock_irq(shost->host_lock);
3508
3509 shost = lpfc_shost_from_vport(vports[i]);
3510 list_for_each_entry_safe(ndlp, next_ndlp,
3511 &vports[i]->fc_nodes,
3512 nlp_listp) {
3513 if ((!NLP_CHK_NODE_ACT(ndlp)) ||
3514 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3515
3516
3517
3518 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3519 continue;
3520 }
3521
3522 if (ndlp->nlp_type & NLP_FABRIC) {
3523 lpfc_disc_state_machine(vports[i], ndlp,
3524 NULL, NLP_EVT_DEVICE_RECOVERY);
3525 lpfc_disc_state_machine(vports[i], ndlp,
3526 NULL, NLP_EVT_DEVICE_RM);
3527 }
3528 spin_lock_irq(shost->host_lock);
3529 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3530 spin_unlock_irq(shost->host_lock);
3531
3532
3533
3534
3535
3536 if (phba->sli_rev == LPFC_SLI_REV4) {
3537 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3538 LOG_NODE | LOG_DISCOVERY,
3539 "0011 Free RPI x%x on "
3540 "ndlp:x%px did x%x "
3541 "usgmap:x%x\n",
3542 ndlp->nlp_rpi, ndlp,
3543 ndlp->nlp_DID,
3544 ndlp->nlp_usg_map);
3545 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3546 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3547 }
3548 lpfc_unreg_rpi(vports[i], ndlp);
3549 }
3550 }
3551 }
3552 lpfc_destroy_vport_work_array(phba, vports);
3553
3554 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3555
3556 if (phba->wq)
3557 flush_workqueue(phba->wq);
3558}
3559
3560
3561
3562
3563
3564
3565
3566
3567
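/**
 * lpfc_offline - Take the adapter port offline
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the port, destroys the NVMe target and local ports, stops all
 * vport timers, brings the SLI layer down, marks every vport
 * FC_OFFLINE_MODE, removes the CPU hotplug callback and tears down the
 * multi-XRI pools.
 **/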
3568void
3569lpfc_offline(struct lpfc_hba *phba)
3570{
3571 struct Scsi_Host *shost;
3572 struct lpfc_vport **vports;
3573 int i;
3574
3575 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3576 return;
3577
3578
3579 lpfc_stop_port(phba);
3580
3581
3582
3583
3584 lpfc_nvmet_destroy_targetport(phba);
3585 lpfc_nvme_destroy_localport(phba->pport);
3586
3587 vports = lpfc_create_vport_work_array(phba);
3588 if (vports != NULL)
3589 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3590 lpfc_stop_vport_timers(vports[i]);
3591 lpfc_destroy_vport_work_array(phba, vports);
3592 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3593 "0460 Bring Adapter offline\n");
3594
3595
3596 lpfc_sli_hba_down(phba);
3597 spin_lock_irq(&phba->hbalock);
3598 phba->work_ha = 0;
3599 spin_unlock_irq(&phba->hbalock);
3600 vports = lpfc_create_vport_work_array(phba);
3601 if (vports != NULL)
3602 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3603 shost = lpfc_shost_from_vport(vports[i]);
3604 spin_lock_irq(shost->host_lock);
3605 vports[i]->work_port_events = 0;
3606 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3607 spin_unlock_irq(shost->host_lock);
3608 }
3609 lpfc_destroy_vport_work_array(phba, vports);
3610 __lpfc_cpuhp_remove(phba);
3611
3612 if (phba->cfg_xri_rebalancing)
3613 lpfc_destroy_multixri_pools(phba);
3614}
3615
3616
3617
3618
3619
3620
3621
3622
3623
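/**
 * lpfc_scsi_free - Free all SCSI buffers on the driver's global lists
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases the DMA data and the lpfc_io_buf structures held on the
 * global SCSI buffer get and put lists. Does nothing unless FCP is
 * enabled.
 **/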
3624static void
3625lpfc_scsi_free(struct lpfc_hba *phba)
3626{
3627 struct lpfc_io_buf *sb, *sb_next;
3628
3629 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3630 return;
3631
3632 spin_lock_irq(&phba->hbalock);
3633
3634
3635
3636 spin_lock(&phba->scsi_buf_list_put_lock);
3637 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3638 list) {
3639 list_del(&sb->list);
3640 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3641 sb->dma_handle);
3642 kfree(sb);
3643 phba->total_scsi_bufs--;
3644 }
3645 spin_unlock(&phba->scsi_buf_list_put_lock);
3646
3647 spin_lock(&phba->scsi_buf_list_get_lock);
3648 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3649 list) {
3650 list_del(&sb->list);
3651 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3652 sb->dma_handle);
3653 kfree(sb);
3654 phba->total_scsi_bufs--;
3655 }
3656 spin_unlock(&phba->scsi_buf_list_get_lock);
3657 spin_unlock_irq(&phba->hbalock);
3658}
3659
3660
3661
3662
3663
3664
3665
3666
3667
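/**
 * lpfc_io_free - Free all IO buffers held by the hardware queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases the DMA data, the per-hdwq SGL and cmd/rsp buffers, and the
 * lpfc_io_buf structures on each hardware queue's get and put lists.
 **/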
3668void
3669lpfc_io_free(struct lpfc_hba *phba)
3670{
3671 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3672 struct lpfc_sli4_hdw_queue *qp;
3673 int idx;
3674
3675 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3676 qp = &phba->sli4_hba.hdwq[idx];
3677
3678 spin_lock(&qp->io_buf_list_put_lock);
3679 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3680 &qp->lpfc_io_buf_list_put,
3681 list) {
3682 list_del(&lpfc_ncmd->list);
3683 qp->put_io_bufs--;
3684 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3685 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3686 if (phba->cfg_xpsgl && !phba->nvmet_support)
3687 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3688 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3689 kfree(lpfc_ncmd);
3690 qp->total_io_bufs--;
3691 }
3692 spin_unlock(&qp->io_buf_list_put_lock);
3693
3694 spin_lock(&qp->io_buf_list_get_lock);
3695 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3696 &qp->lpfc_io_buf_list_get,
3697 list) {
3698 list_del(&lpfc_ncmd->list);
3699 qp->get_io_bufs--;
3700 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3701 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3702 if (phba->cfg_xpsgl && !phba->nvmet_support)
3703 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3704 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3705 kfree(lpfc_ncmd);
3706 qp->total_io_bufs--;
3707 }
3708 spin_unlock(&qp->io_buf_list_get_lock);
3709 }
3710}
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
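/**
 * lpfc_sli4_els_sgl_update - Resize the ELS xri-sgl list to the new count
 * @phba: pointer to lpfc hba data structure.
 *
 * Grows or shrinks the ELS SGL list to match the ELS IOCB count reported
 * for the port, then assigns a fresh XRI to every entry on the list.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 **/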
3724int
3725lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3726{
3727 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3728 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3729 LIST_HEAD(els_sgl_list);
3730 int rc;
3731
3732
3733
3734
3735 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3736
3737 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3738
3739 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3740 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3741 "3157 ELS xri-sgl count increased from "
3742 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3743 els_xri_cnt);
3744
3745 for (i = 0; i < xri_cnt; i++) {
3746 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3747 GFP_KERNEL);
3748 if (sglq_entry == NULL) {
3749 lpfc_printf_log(phba, KERN_ERR,
3750 LOG_TRACE_EVENT,
3751 "2562 Failure to allocate an "
3752 "ELS sgl entry:%d\n", i);
3753 rc = -ENOMEM;
3754 goto out_free_mem;
3755 }
3756 sglq_entry->buff_type = GEN_BUFF_TYPE;
3757 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3758 &sglq_entry->phys);
3759 if (sglq_entry->virt == NULL) {
3760 kfree(sglq_entry);
3761 lpfc_printf_log(phba, KERN_ERR,
3762 LOG_TRACE_EVENT,
3763 "2563 Failure to allocate an "
3764 "ELS mbuf:%d\n", i);
3765 rc = -ENOMEM;
3766 goto out_free_mem;
3767 }
3768 sglq_entry->sgl = sglq_entry->virt;
3769 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3770 sglq_entry->state = SGL_FREED;
3771 list_add_tail(&sglq_entry->list, &els_sgl_list);
3772 }
3773 spin_lock_irq(&phba->hbalock);
3774 spin_lock(&phba->sli4_hba.sgl_list_lock);
3775 list_splice_init(&els_sgl_list,
3776 &phba->sli4_hba.lpfc_els_sgl_list);
3777 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3778 spin_unlock_irq(&phba->hbalock);
3779 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3780
3781 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3782 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3783 "3158 ELS xri-sgl count decreased from "
3784 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3785 els_xri_cnt);
3786 spin_lock_irq(&phba->hbalock);
3787 spin_lock(&phba->sli4_hba.sgl_list_lock);
3788 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3789 &els_sgl_list);
3790
3791 for (i = 0; i < xri_cnt; i++) {
3792 list_remove_head(&els_sgl_list,
3793 sglq_entry, struct lpfc_sglq, list);
3794 if (sglq_entry) {
3795 __lpfc_mbuf_free(phba, sglq_entry->virt,
3796 sglq_entry->phys);
3797 kfree(sglq_entry);
3798 }
3799 }
3800 list_splice_init(&els_sgl_list,
3801 &phba->sli4_hba.lpfc_els_sgl_list);
3802 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3803 spin_unlock_irq(&phba->hbalock);
3804 } else
3805 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3806 "3163 ELS xri-sgl count unchanged: %d\n",
3807 els_xri_cnt);
3808 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3809
3810
3811 sglq_entry = NULL;
3812 sglq_entry_next = NULL;
3813 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3814 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3815 lxri = lpfc_sli4_next_xritag(phba);
3816 if (lxri == NO_XRI) {
3817 lpfc_printf_log(phba, KERN_ERR,
3818 LOG_TRACE_EVENT,
3819 "2400 Failed to allocate xri for "
3820 "ELS sgl\n");
3821 rc = -ENOMEM;
3822 goto out_free_mem;
3823 }
3824 sglq_entry->sli4_lxritag = lxri;
3825 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3826 }
3827 return 0;
3828
3829out_free_mem:
3830 lpfc_free_els_sgl_list(phba);
3831 return rc;
3832}
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
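/**
 * lpfc_sli4_nvmet_sgl_update - Resize the NVMET xri-sgl list to the new count
 * @phba: pointer to lpfc hba data structure.
 *
 * Grows or shrinks the NVMET SGL list to max_xri minus the ELS XRI
 * count, then assigns a fresh XRI to every entry on the list.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 **/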
3846int
3847lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3848{
3849 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3850 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3851 uint16_t nvmet_xri_cnt;
3852 LIST_HEAD(nvmet_sgl_list);
3853 int rc;
3854
3855
3856
3857
3858 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3859
3860
3861 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3862 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3863
3864 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3865 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3866 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3867 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3868
3869 for (i = 0; i < xri_cnt; i++) {
3870 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3871 GFP_KERNEL);
3872 if (sglq_entry == NULL) {
3873 lpfc_printf_log(phba, KERN_ERR,
3874 LOG_TRACE_EVENT,
3875 "6303 Failure to allocate an "
3876 "NVMET sgl entry:%d\n", i);
3877 rc = -ENOMEM;
3878 goto out_free_mem;
3879 }
3880 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3881 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3882 &sglq_entry->phys);
3883 if (sglq_entry->virt == NULL) {
3884 kfree(sglq_entry);
3885 lpfc_printf_log(phba, KERN_ERR,
3886 LOG_TRACE_EVENT,
3887 "6304 Failure to allocate an "
3888 "NVMET buf:%d\n", i);
3889 rc = -ENOMEM;
3890 goto out_free_mem;
3891 }
3892 sglq_entry->sgl = sglq_entry->virt;
3893 memset(sglq_entry->sgl, 0,
3894 phba->cfg_sg_dma_buf_size);
3895 sglq_entry->state = SGL_FREED;
3896 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3897 }
3898 spin_lock_irq(&phba->hbalock);
3899 spin_lock(&phba->sli4_hba.sgl_list_lock);
3900 list_splice_init(&nvmet_sgl_list,
3901 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3902 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3903 spin_unlock_irq(&phba->hbalock);
3904 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3905
3906 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3907 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3908 "6305 NVMET xri-sgl count decreased from "
3909 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3910 nvmet_xri_cnt);
3911 spin_lock_irq(&phba->hbalock);
3912 spin_lock(&phba->sli4_hba.sgl_list_lock);
3913 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3914 &nvmet_sgl_list);
3915
3916 for (i = 0; i < xri_cnt; i++) {
3917 list_remove_head(&nvmet_sgl_list,
3918 sglq_entry, struct lpfc_sglq, list);
3919 if (sglq_entry) {
3920 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3921 sglq_entry->phys);
3922 kfree(sglq_entry);
3923 }
3924 }
3925 list_splice_init(&nvmet_sgl_list,
3926 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3927 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3928 spin_unlock_irq(&phba->hbalock);
3929 } else
3930 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3931 "6306 NVMET xri-sgl count unchanged: %d\n",
3932 nvmet_xri_cnt);
3933 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3934
3935
3936 sglq_entry = NULL;
3937 sglq_entry_next = NULL;
3938 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3939 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3940 lxri = lpfc_sli4_next_xritag(phba);
3941 if (lxri == NO_XRI) {
3942 lpfc_printf_log(phba, KERN_ERR,
3943 LOG_TRACE_EVENT,
3944 "6307 Failed to allocate xri for "
3945 "NVMET sgl\n");
3946 rc = -ENOMEM;
3947 goto out_free_mem;
3948 }
3949 sglq_entry->sli4_lxritag = lxri;
3950 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3951 }
3952 return 0;
3953
3954out_free_mem:
3955 lpfc_free_nvmet_sgl_list(phba);
3956 return rc;
3957}
3958
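/**
 * lpfc_io_buf_flush - Collect all IO buffers from the hardware queues
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list head that receives the collected buffers.
 *
 * Empties every hardware queue's get and put lists and inserts the
 * buffers on @cbuf sorted by XRI so they can be reposted in order.
 *
 * Return: number of buffers collected.
 **/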
3959int
3960lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3961{
3962 LIST_HEAD(blist);
3963 struct lpfc_sli4_hdw_queue *qp;
3964 struct lpfc_io_buf *lpfc_cmd;
3965 struct lpfc_io_buf *iobufp, *prev_iobufp;
3966 int idx, cnt, xri, inserted;
3967
3968 cnt = 0;
3969 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3970 qp = &phba->sli4_hba.hdwq[idx];
3971 spin_lock_irq(&qp->io_buf_list_get_lock);
3972 spin_lock(&qp->io_buf_list_put_lock);
3973
3974
3975 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
3976 list_splice(&qp->lpfc_io_buf_list_put, &blist);
3977 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
3978 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
3979 cnt += qp->get_io_bufs + qp->put_io_bufs;
3980 qp->get_io_bufs = 0;
3981 qp->put_io_bufs = 0;
3982 qp->total_io_bufs = 0;
3983 spin_unlock(&qp->io_buf_list_put_lock);
3984 spin_unlock_irq(&qp->io_buf_list_get_lock);
3985 }
3986
3987
3988
3989
3990
3991
3992 for (idx = 0; idx < cnt; idx++) {
3993 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
3994 if (!lpfc_cmd)
3995 return cnt;
3996 if (idx == 0) {
3997 list_add_tail(&lpfc_cmd->list, cbuf);
3998 continue;
3999 }
4000 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4001 inserted = 0;
4002 prev_iobufp = NULL;
4003 list_for_each_entry(iobufp, cbuf, list) {
4004 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4005 if (prev_iobufp)
4006 list_add(&lpfc_cmd->list,
4007 &prev_iobufp->list);
4008 else
4009 list_add(&lpfc_cmd->list, cbuf);
4010 inserted = 1;
4011 break;
4012 }
4013 prev_iobufp = iobufp;
4014 }
4015 if (!inserted)
4016 list_add_tail(&lpfc_cmd->list, cbuf);
4017 }
4018 return cnt;
4019}
4020
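/**
 * lpfc_io_buf_replenish - Redistribute IO buffers across the hardware queues
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list of buffers to distribute.
 *
 * Round-robins the buffers on @cbuf back onto the hardware queues' put
 * lists, updating the per-queue counters as it goes.
 *
 * Return: number of buffers distributed.
 **/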
4021int
4022lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4023{
4024 struct lpfc_sli4_hdw_queue *qp;
4025 struct lpfc_io_buf *lpfc_cmd;
4026 int idx, cnt;
4027
4028 qp = phba->sli4_hba.hdwq;
4029 cnt = 0;
4030 while (!list_empty(cbuf)) {
4031 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4032 list_remove_head(cbuf, lpfc_cmd,
4033 struct lpfc_io_buf, list);
4034 if (!lpfc_cmd)
4035 return cnt;
4036 cnt++;
4037 qp = &phba->sli4_hba.hdwq[idx];
4038 lpfc_cmd->hdwq_no = idx;
4039 lpfc_cmd->hdwq = qp;
4040 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4041 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4042 spin_lock(&qp->io_buf_list_put_lock);
4043 list_add_tail(&lpfc_cmd->list,
4044 &qp->lpfc_io_buf_list_put);
4045 qp->put_io_bufs++;
4046 qp->total_io_bufs++;
4047 spin_unlock(&qp->io_buf_list_put_lock);
4048 }
4049 }
4050 return cnt;
4051}
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
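/**
 * lpfc_sli4_io_sgl_update - Resize the IO xri-sgl (buffer) list
 * @phba: pointer to lpfc hba data structure.
 *
 * Recomputes the maximum IO XRI count (max_xri minus ELS XRIs), releases
 * any surplus buffers, assigns a fresh XRI to every remaining buffer and
 * redistributes the buffers across the hardware queues.
 *
 * Return: 0 on success, -ENOMEM on XRI allocation failure.
 **/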
4065int
4066lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4067{
4068 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4069 uint16_t i, lxri, els_xri_cnt;
4070 uint16_t io_xri_cnt, io_xri_max;
4071 LIST_HEAD(io_sgl_list);
4072 int rc, cnt;
4073
4074
4075
4076
4077
4078
4079 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4080 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4081 phba->sli4_hba.io_xri_max = io_xri_max;
4082
4083 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4084 "6074 Current allocated XRI sgl count:%d, "
4085 "maximum XRI count:%d\n",
4086 phba->sli4_hba.io_xri_cnt,
4087 phba->sli4_hba.io_xri_max);
4088
4089 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4090
4091 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4092
4093 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4094 phba->sli4_hba.io_xri_max;
4095
4096 for (i = 0; i < io_xri_cnt; i++) {
4097 list_remove_head(&io_sgl_list, lpfc_ncmd,
4098 struct lpfc_io_buf, list);
4099 if (lpfc_ncmd) {
4100 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4101 lpfc_ncmd->data,
4102 lpfc_ncmd->dma_handle);
4103 kfree(lpfc_ncmd);
4104 }
4105 }
4106 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4107 }
4108
4109
4110 lpfc_ncmd = NULL;
4111 lpfc_ncmd_next = NULL;
4112 phba->sli4_hba.io_xri_cnt = cnt;
4113 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4114 &io_sgl_list, list) {
4115 lxri = lpfc_sli4_next_xritag(phba);
4116 if (lxri == NO_XRI) {
4117 lpfc_printf_log(phba, KERN_ERR,
4118 LOG_TRACE_EVENT,
4119 "6075 Failed to allocate xri for "
4120 "nvme buffer\n");
4121 rc = -ENOMEM;
4122 goto out_free_mem;
4123 }
4124 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4125 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4126 }
4127 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4128 return 0;
4129
4130out_free_mem:
4131 lpfc_io_free(phba);
4132 return rc;
4133}
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
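/**
 * lpfc_new_io_buf - Allocate and post a set of SLI-4 IO buffers
 * @phba: pointer to lpfc hba data structure.
 * @num_to_alloc: number of IO buffers requested.
 *
 * Allocates lpfc_io_buf structures and their DMA SGL memory, assigns an
 * XRI and IOTAG to each, and posts the resulting SGL list to the port.
 *
 * Return: number of buffers successfully posted.
 **/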
4149int
4150lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4151{
4152 struct lpfc_io_buf *lpfc_ncmd;
4153 struct lpfc_iocbq *pwqeq;
4154 uint16_t iotag, lxri = 0;
4155 int bcnt, num_posted;
4156 LIST_HEAD(prep_nblist);
4157 LIST_HEAD(post_nblist);
4158 LIST_HEAD(nvme_nblist);
4159
4160 phba->sli4_hba.io_xri_cnt = 0;
4161 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4162 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4163 if (!lpfc_ncmd)
4164 break;
4165
4166
4167
4168
4169
4170 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4171 GFP_KERNEL,
4172 &lpfc_ncmd->dma_handle);
4173 if (!lpfc_ncmd->data) {
4174 kfree(lpfc_ncmd);
4175 break;
4176 }
4177
4178 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4179 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4180 } else {
4181
4182
4183
4184
4185 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4186 (((unsigned long)(lpfc_ncmd->data) &
4187 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4188 lpfc_printf_log(phba, KERN_ERR,
4189 LOG_TRACE_EVENT,
4190 "3369 Memory alignment err: "
4191 "addr=%lx\n",
4192 (unsigned long)lpfc_ncmd->data);
4193 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4194 lpfc_ncmd->data,
4195 lpfc_ncmd->dma_handle);
4196 kfree(lpfc_ncmd);
4197 break;
4198 }
4199 }
4200
4201 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4202
4203 lxri = lpfc_sli4_next_xritag(phba);
4204 if (lxri == NO_XRI) {
4205 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4206 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4207 kfree(lpfc_ncmd);
4208 break;
4209 }
4210 pwqeq = &lpfc_ncmd->cur_iocbq;
4211
4212
4213 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4214 if (iotag == 0) {
4215 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4216 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4217 kfree(lpfc_ncmd);
4218 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4219 "6121 Failed to allocate IOTAG for"
4220 " XRI:0x%x\n", lxri);
4221 lpfc_sli4_free_xri(phba, lxri);
4222 break;
4223 }
4224 pwqeq->sli4_lxritag = lxri;
4225 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4226 pwqeq->context1 = lpfc_ncmd;
4227
4228
4229 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4230 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4231 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4232 spin_lock_init(&lpfc_ncmd->buf_lock);
4233
4234
4235 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4236 phba->sli4_hba.io_xri_cnt++;
4237 }
4238 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4239 "6114 Allocate %d out of %d requested new NVME "
4240 "buffers\n", bcnt, num_to_alloc);
4241
4242
4243 if (!list_empty(&post_nblist))
4244 num_posted = lpfc_sli4_post_io_sgl_list(
4245 phba, &post_nblist, bcnt);
4246 else
4247 num_posted = 0;
4248
4249 return num_posted;
4250}
4251
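/**
 * lpfc_get_wwpn - Read the adapter's WWPN from non-volatile parameters
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a READ_NV mailbox command by polling and returns the port
 * name, or (uint64_t)-1 on failure.
 **/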
4252static uint64_t
4253lpfc_get_wwpn(struct lpfc_hba *phba)
4254{
4255 uint64_t wwn;
4256 int rc;
4257 LPFC_MBOXQ_t *mboxq;
4258 MAILBOX_t *mb;
4259
4260 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4261 GFP_KERNEL);
4262 if (!mboxq)
4263 return (uint64_t)-1;
4264
4265
4266 lpfc_read_nv(phba, mboxq);
4267 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4268 if (rc != MBX_SUCCESS) {
4269 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4270 "6019 Mailbox failed , mbxCmd x%x "
4271 "READ_NV, mbxStatus x%x\n",
4272 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4273 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4274 mempool_free(mboxq, phba->mbox_mem_pool);
4275 return (uint64_t) -1;
4276 }
4277 mb = &mboxq->u.mb;
4278 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4279
4280 mempool_free(mboxq, phba->mbox_mem_pool);
4281 if (phba->sli_rev == LPFC_SLI_REV4)
4282 return be64_to_cpu(wwn);
4283 else
4284 return rol64(wwn, 32);
4285}
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
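/**
 * lpfc_create_port - Create an FC port (Scsi_Host plus lpfc vport)
 * @phba: pointer to lpfc hba data structure.
 * @instance: unique host instance number.
 * @dev: device the host is attached to (the PCI device for the physical
 * port, otherwise the parent vport's device).
 *
 * Selects the appropriate scsi_host_template, allocates and configures
 * the Scsi_Host and embedded lpfc_vport, registers the host with the
 * SCSI midlayer and links the vport onto the HBA port list.
 *
 * Return: pointer to the new vport, or NULL on failure.
 **/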
4303struct lpfc_vport *
4304lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4305{
4306 struct lpfc_vport *vport;
4307 struct Scsi_Host *shost = NULL;
4308 struct scsi_host_template *template;
4309 int error = 0;
4310 int i;
4311 uint64_t wwn;
4312 bool use_no_reset_hba = false;
4313 int rc;
4314
4315 if (lpfc_no_hba_reset_cnt) {
4316 if (phba->sli_rev < LPFC_SLI_REV4 &&
4317 dev == &phba->pcidev->dev) {
4318
4319 lpfc_sli_brdrestart(phba);
4320 rc = lpfc_sli_chipset_init(phba);
4321 if (rc)
4322 return NULL;
4323 }
4324 wwn = lpfc_get_wwpn(phba);
4325 }
4326
4327 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4328 if (wwn == lpfc_no_hba_reset[i]) {
4329 lpfc_printf_log(phba, KERN_ERR,
4330 LOG_TRACE_EVENT,
4331 "6020 Setting use_no_reset port=%llx\n",
4332 wwn);
4333 use_no_reset_hba = true;
4334 break;
4335 }
4336 }
4337
4338
4339 if (dev == &phba->pcidev->dev) {
4340 template = &phba->port_template;
4341
4342 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4343
4344 memcpy(template, &lpfc_template, sizeof(*template));
4345
4346 if (use_no_reset_hba) {
4347
4348 template->max_sectors = 0xffff;
4349 template->eh_host_reset_handler = NULL;
4350 }
4351
4352
4353 memcpy(&phba->vport_template, &lpfc_template,
4354 sizeof(*template));
4355 phba->vport_template.max_sectors = 0xffff;
4356 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4357 phba->vport_template.eh_bus_reset_handler = NULL;
4358 phba->vport_template.eh_host_reset_handler = NULL;
4359 phba->vport_template.vendor_id = 0;
4360
4361
4362 if (phba->sli_rev == LPFC_SLI_REV4) {
4363 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4364 phba->vport_template.sg_tablesize =
4365 phba->cfg_scsi_seg_cnt;
4366 } else {
4367 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4368 phba->vport_template.sg_tablesize =
4369 phba->cfg_sg_seg_cnt;
4370 }
4371
4372 } else {
4373
4374 memcpy(template, &lpfc_template_nvme,
4375 sizeof(*template));
4376 }
4377 } else {
4378 template = &phba->vport_template;
4379 }
4380
4381 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4382 if (!shost)
4383 goto out;
4384
4385 vport = (struct lpfc_vport *) shost->hostdata;
4386 vport->phba = phba;
4387 vport->load_flag |= FC_LOADING;
4388 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4389 vport->fc_rscn_flush = 0;
4390 lpfc_get_vport_cfgparam(vport);
4391
4392
4393 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4394
4395 shost->unique_id = instance;
4396 shost->max_id = LPFC_MAX_TARGET;
4397 shost->max_lun = vport->cfg_max_luns;
4398 shost->this_id = -1;
4399 shost->max_cmd_len = 16;
4400
4401 if (phba->sli_rev == LPFC_SLI_REV4) {
4402 if (!phba->cfg_fcp_mq_threshold ||
4403 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4404 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4405
4406 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4407 phba->cfg_fcp_mq_threshold);
4408
4409 shost->dma_boundary =
4410 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4411
4412 if (phba->cfg_xpsgl && !phba->nvmet_support)
4413 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4414 else
4415 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4416 } else
4417
4418
4419
4420 shost->nr_hw_queues = 1;
4421
4422
4423
4424
4425
4426
4427 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4428 if (dev != &phba->pcidev->dev) {
4429 shost->transportt = lpfc_vport_transport_template;
4430 vport->port_type = LPFC_NPIV_PORT;
4431 } else {
4432 shost->transportt = lpfc_transport_template;
4433 vport->port_type = LPFC_PHYSICAL_PORT;
4434 }
4435
4436 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4437 "9081 CreatePort TMPLATE type %x TBLsize %d "
4438 "SEGcnt %d/%d\n",
4439 vport->port_type, shost->sg_tablesize,
4440 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4441
4442
4443 INIT_LIST_HEAD(&vport->fc_nodes);
4444 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4445 spin_lock_init(&vport->work_port_lock);
4446
4447 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4448
4449 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4450
4451 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4452
4453 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4454 lpfc_setup_bg(phba, shost);
4455
4456 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4457 if (error)
4458 goto out_put_shost;
4459
4460 spin_lock_irq(&phba->port_list_lock);
4461 list_add_tail(&vport->listentry, &phba->port_list);
4462 spin_unlock_irq(&phba->port_list_lock);
4463 return vport;
4464
4465out_put_shost:
4466 scsi_host_put(shost);
4467out:
4468 return NULL;
4469}
4470
4471
4472
4473
4474
4475
4476
4477
4478void
4479destroy_port(struct lpfc_vport *vport)
4480{
4481 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4482 struct lpfc_hba *phba = vport->phba;
4483
4484 lpfc_debugfs_terminate(vport);
4485 fc_remove_host(shost);
4486 scsi_remove_host(shost);
4487
4488 spin_lock_irq(&phba->port_list_lock);
4489 list_del_init(&vport->listentry);
4490 spin_unlock_irq(&phba->port_list_lock);
4491
4492 lpfc_cleanup(vport);
4493 return;
4494}
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506int
4507lpfc_get_instance(void)
4508{
4509 int ret;
4510
4511 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4512 return ret < 0 ? -1 : ret;
4513}
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
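/**
 * lpfc_scan_finished - SCSI layer callback to check whether scanning is done
 * @shost: pointer to the Scsi_Host being scanned.
 * @time: jiffies elapsed since the scan was started.
 *
 * Reports the scan as finished once discovery completes, after 30
 * seconds of scanning, or after 15 seconds if the link is still down.
 *
 * Return: 0 to keep waiting, 1 when scanning may stop.
 **/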
4530int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4531{
4532 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4533 struct lpfc_hba *phba = vport->phba;
4534 int stat = 0;
4535
4536 spin_lock_irq(shost->host_lock);
4537
4538 if (vport->load_flag & FC_UNLOADING) {
4539 stat = 1;
4540 goto finished;
4541 }
4542 if (time >= msecs_to_jiffies(30 * 1000)) {
4543 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4544 "0461 Scanning longer than 30 "
4545 "seconds. Continuing initialization\n");
4546 stat = 1;
4547 goto finished;
4548 }
4549 if (time >= msecs_to_jiffies(15 * 1000) &&
4550 phba->link_state <= LPFC_LINK_DOWN) {
4551 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4552 "0465 Link down longer than 15 "
4553 "seconds. Continuing initialization\n");
4554 stat = 1;
4555 goto finished;
4556 }
4557
4558 if (vport->port_state != LPFC_VPORT_READY)
4559 goto finished;
4560 if (vport->num_disc_nodes || vport->fc_prli_sent)
4561 goto finished;
4562 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4563 goto finished;
4564 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4565 goto finished;
4566
4567 stat = 1;
4568
4569finished:
4570 spin_unlock_irq(shost->host_lock);
4571 return stat;
4572}
4573
4574static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4575{
4576 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4577 struct lpfc_hba *phba = vport->phba;
4578
4579 fc_host_supported_speeds(shost) = 0;
4580
4581
4582
4583
4584 if (phba->hba_flag & HBA_FCOE_MODE)
4585 return;
4586
4587 if (phba->lmt & LMT_128Gb)
4588 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4589 if (phba->lmt & LMT_64Gb)
4590 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4591 if (phba->lmt & LMT_32Gb)
4592 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4593 if (phba->lmt & LMT_16Gb)
4594 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4595 if (phba->lmt & LMT_10Gb)
4596 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4597 if (phba->lmt & LMT_8Gb)
4598 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4599 if (phba->lmt & LMT_4Gb)
4600 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4601 if (phba->lmt & LMT_2Gb)
4602 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4603 if (phba->lmt & LMT_1Gb)
4604 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4605}
4606
4607
4608
4609
4610
4611
4612
4613
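/**
 * lpfc_host_attrib_init - Initialize the FC transport attributes of a host
 * @shost: pointer to the Scsi_Host.
 *
 * Populates the fc_host node/port names, supported classes, FC4 types,
 * supported speeds, maximum frame size and NPIV limits, then clears the
 * FC_LOADING flag on the vport.
 **/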
4614void lpfc_host_attrib_init(struct Scsi_Host *shost)
4615{
4616 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4617 struct lpfc_hba *phba = vport->phba;
4618
4619
4620
4621
4622 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4623 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4624 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4625
4626 memset(fc_host_supported_fc4s(shost), 0,
4627 sizeof(fc_host_supported_fc4s(shost)));
4628 fc_host_supported_fc4s(shost)[2] = 1;
4629 fc_host_supported_fc4s(shost)[7] = 1;
4630
4631 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4632 sizeof fc_host_symbolic_name(shost));
4633
4634 lpfc_host_supported_speeds_set(shost);
4635
4636 fc_host_maxframe_size(shost) =
4637 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4638 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4639
4640 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4641
4642
4643 memset(fc_host_active_fc4s(shost), 0,
4644 sizeof(fc_host_active_fc4s(shost)));
4645 fc_host_active_fc4s(shost)[2] = 1;
4646 fc_host_active_fc4s(shost)[7] = 1;
4647
4648 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4649 spin_lock_irq(shost->host_lock);
4650 vport->load_flag &= ~FC_LOADING;
4651 spin_unlock_irq(shost->host_lock);
4652}
4653
4654
4655
4656
4657
4658
4659
4660
4661
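/**
 * lpfc_stop_port_s3 - Stop an SLI-3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Disables interrupts by clearing the host control register, clears all
 * pending host attention bits, then stops the HBA timers and pending
 * port work events.
 */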
4662static void
4663lpfc_stop_port_s3(struct lpfc_hba *phba)
4664{
4665
4666 writel(0, phba->HCregaddr);
4667 readl(phba->HCregaddr);
4668
4669 writel(0xffffffff, phba->HAregaddr);
4670 readl(phba->HAregaddr);
4671
4672
4673 lpfc_stop_hba_timers(phba);
4674 phba->pport->work_port_events = 0;
4675}
4676
4677
4678
4679
4680
4681
4682
4683
4684
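/**
 * lpfc_stop_port_s4 - Stop an SLI-4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the HBA timers, clears pending port work events and marks the
 * SLI-4 device interrupts as disabled.
 */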
4685static void
4686lpfc_stop_port_s4(struct lpfc_hba *phba)
4687{
4688
4689 lpfc_stop_hba_timers(phba);
4690 if (phba->pport)
4691 phba->pport->work_port_events = 0;
4692 phba->sli4_hba.intr_enable = 0;
4693}
4694
4695
4696
4697
4698
4699
4700
4701
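/**
 * lpfc_stop_port - Wrapper for stopping a device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Invokes the SLI-3 or SLI-4 specific stop routine through the
 * phba->lpfc_stop_port jump table entry, then flushes the driver
 * workqueue if one was allocated.
 */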
4702void
4703lpfc_stop_port(struct lpfc_hba *phba)
4704{
4705 phba->lpfc_stop_port(phba);
4706
4707 if (phba->wq)
4708 flush_workqueue(phba->wq);
4709}
4710
4711
4712
4713
4714
4715
4716
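/**
 * lpfc_fcf_redisc_wait_start_timer - Start the FCF rediscover wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Arms the FCF rediscover quiescent-period timer and marks FCF
 * rediscovery as pending (FCF_REDISC_PEND), clearing the available and
 * scan-done flags so no new FCF is used until rediscovery completes.
 */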
4717void
4718lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4719{
4720 unsigned long fcf_redisc_wait_tmo =
4721 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4722
4723 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4724 spin_lock_irq(&phba->hbalock);
4725
4726 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4727
4728 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4729 spin_unlock_irq(&phba->hbalock);
4730}
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
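/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: timer list entry embedded in the lpfc hba data structure.
 *
 * Timer handler invoked when the FCF rediscover quiescent period has
 * expired. Converts the pending-rediscovery state into an FCF_REDISC_EVT
 * event and wakes up the worker thread to perform the rediscovery.
 */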
4742static void
4743lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4744{
4745 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4746
4747
4748 spin_lock_irq(&phba->hbalock);
4749 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4750 spin_unlock_irq(&phba->hbalock);
4751 return;
4752 }
4753
4754 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4755
4756 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4757 spin_unlock_irq(&phba->hbalock);
4758 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4759 "2776 FCF rediscover quiescent timer expired\n");
4760
4761 lpfc_worker_wake_up(phba);
4762}
4763
4764
4765
4766
4767
4768
4769
4770
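/**
 * lpfc_sli4_parse_latt_fault - Parse the SLI-4 link attention fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * Validates the link fault code carried in the async link event and logs
 * an error for any value the driver does not recognize.
 */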
4771static void
4772lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4773 struct lpfc_acqe_link *acqe_link)
4774{
4775 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4776 case LPFC_ASYNC_LINK_FAULT_NONE:
4777 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4778 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4779 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4780 break;
4781 default:
4782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4783 "0398 Unknown link fault code: x%x\n",
4784 bf_get(lpfc_acqe_link_fault, acqe_link));
4785 break;
4786 }
4787}
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
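/**
 * lpfc_sli4_parse_latt_type - Parse the SLI-4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * Translates the link status from the async link event into the driver's
 * link attention type.
 *
 * Return: LPFC_ATT_LINK_DOWN, LPFC_ATT_LINK_UP, or LPFC_ATT_RESERVED for
 * anything the driver does not handle.
 */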
4799static uint8_t
4800lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4801 struct lpfc_acqe_link *acqe_link)
4802{
4803 uint8_t att_type;
4804
4805 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4806 case LPFC_ASYNC_LINK_STATUS_DOWN:
4807 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4808 att_type = LPFC_ATT_LINK_DOWN;
4809 break;
4810 case LPFC_ASYNC_LINK_STATUS_UP:
4811
4812 att_type = LPFC_ATT_RESERVED;
4813 break;
4814 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4815 att_type = LPFC_ATT_LINK_UP;
4816 break;
4817 default:
4818 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4819 "0399 Invalid link attention type: x%x\n",
4820 bf_get(lpfc_acqe_link_status, acqe_link));
4821 att_type = LPFC_ATT_RESERVED;
4822 break;
4823 }
4824 return att_type;
4825}
4826
4827
4828
4829
4830
4831
4832
4833
4834
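/**
 * lpfc_sli_port_speed_get - Get the current link speed in Mbps
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns 0 when the link is down. For SLI-3 ports the speed is derived
 * from phba->fc_linkspeed; for SLI-4 ports the logical link speed is
 * preferred, falling back to the physical link speed.
 */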
4835uint32_t
4836lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4837{
4838 uint32_t link_speed;
4839
4840 if (!lpfc_is_link_up(phba))
4841 return 0;
4842
4843 if (phba->sli_rev <= LPFC_SLI_REV3) {
4844 switch (phba->fc_linkspeed) {
4845 case LPFC_LINK_SPEED_1GHZ:
4846 link_speed = 1000;
4847 break;
4848 case LPFC_LINK_SPEED_2GHZ:
4849 link_speed = 2000;
4850 break;
4851 case LPFC_LINK_SPEED_4GHZ:
4852 link_speed = 4000;
4853 break;
4854 case LPFC_LINK_SPEED_8GHZ:
4855 link_speed = 8000;
4856 break;
4857 case LPFC_LINK_SPEED_10GHZ:
4858 link_speed = 10000;
4859 break;
4860 case LPFC_LINK_SPEED_16GHZ:
4861 link_speed = 16000;
4862 break;
4863 default:
4864 link_speed = 0;
4865 }
4866 } else {
4867 if (phba->sli4_hba.link_state.logical_speed)
4868 link_speed =
4869 phba->sli4_hba.link_state.logical_speed;
4870 else
4871 link_speed = phba->sli4_hba.link_state.speed;
4872 }
4873 return link_speed;
4874}
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
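/**
 * lpfc_sli4_port_speed_parse - Parse an async event link speed code
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event trailer code (LINK or FC).
 * @speed_code: speed code carried in the asynchronous event.
 *
 * Converts the speed code of a LINK or FC async event into a port speed
 * in Mbps; unknown codes are reported as 0.
 */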
4887static uint32_t
4888lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4889 uint8_t speed_code)
4890{
4891 uint32_t port_speed;
4892
4893 switch (evt_code) {
4894 case LPFC_TRAILER_CODE_LINK:
4895 switch (speed_code) {
4896 case LPFC_ASYNC_LINK_SPEED_ZERO:
4897 port_speed = 0;
4898 break;
4899 case LPFC_ASYNC_LINK_SPEED_10MBPS:
4900 port_speed = 10;
4901 break;
4902 case LPFC_ASYNC_LINK_SPEED_100MBPS:
4903 port_speed = 100;
4904 break;
4905 case LPFC_ASYNC_LINK_SPEED_1GBPS:
4906 port_speed = 1000;
4907 break;
4908 case LPFC_ASYNC_LINK_SPEED_10GBPS:
4909 port_speed = 10000;
4910 break;
4911 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4912 port_speed = 20000;
4913 break;
4914 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4915 port_speed = 25000;
4916 break;
4917 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4918 port_speed = 40000;
4919 break;
4920 case LPFC_ASYNC_LINK_SPEED_100GBPS:
4921 port_speed = 100000;
4922 break;
4923 default:
4924 port_speed = 0;
4925 }
4926 break;
4927 case LPFC_TRAILER_CODE_FC:
4928 switch (speed_code) {
4929 case LPFC_FC_LA_SPEED_UNKNOWN:
4930 port_speed = 0;
4931 break;
4932 case LPFC_FC_LA_SPEED_1G:
4933 port_speed = 1000;
4934 break;
4935 case LPFC_FC_LA_SPEED_2G:
4936 port_speed = 2000;
4937 break;
4938 case LPFC_FC_LA_SPEED_4G:
4939 port_speed = 4000;
4940 break;
4941 case LPFC_FC_LA_SPEED_8G:
4942 port_speed = 8000;
4943 break;
4944 case LPFC_FC_LA_SPEED_10G:
4945 port_speed = 10000;
4946 break;
4947 case LPFC_FC_LA_SPEED_16G:
4948 port_speed = 16000;
4949 break;
4950 case LPFC_FC_LA_SPEED_32G:
4951 port_speed = 32000;
4952 break;
4953 case LPFC_FC_LA_SPEED_64G:
4954 port_speed = 64000;
4955 break;
4956 case LPFC_FC_LA_SPEED_128G:
4957 port_speed = 128000;
4958 break;
4959 default:
4960 port_speed = 0;
4961 }
4962 break;
4963 default:
4964 port_speed = 0;
4965 }
4966 return port_speed;
4967}
4968
4969
4970
4971
4972
4973
4974
4975
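/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * Records the new link state and issues a READ_TOPOLOGY mailbox command.
 * For FC ports the mailbox is sent to the firmware; for FCoE ports a
 * link attention is synthesized and the READ_TOPOLOGY completion handler
 * is invoked directly.
 */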
4976static void
4977lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4978 struct lpfc_acqe_link *acqe_link)
4979{
4980 struct lpfc_dmabuf *mp;
4981 LPFC_MBOXQ_t *pmb;
4982 MAILBOX_t *mb;
4983 struct lpfc_mbx_read_top *la;
4984 uint8_t att_type;
4985 int rc;
4986
4987 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
4988 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
4989 return;
4990 phba->fcoe_eventtag = acqe_link->event_tag;
4991 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4992 if (!pmb) {
4993 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4994 "0395 The mboxq allocation failed\n");
4995 return;
4996 }
4997 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4998 if (!mp) {
4999 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5000 "0396 The lpfc_dmabuf allocation failed\n");
5001 goto out_free_pmb;
5002 }
5003 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5004 if (!mp->virt) {
5005 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5006 "0397 The mbuf allocation failed\n");
5007 goto out_free_dmabuf;
5008 }
5009
5010
5011 lpfc_els_flush_all_cmd(phba);
5012
5013
5014 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5015
5016
5017 phba->sli.slistat.link_event++;
5018
5019
5020 lpfc_read_topology(phba, pmb, mp);
5021 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5022 pmb->vport = phba->pport;
5023
5024
5025 phba->sli4_hba.link_state.speed =
5026 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5027 bf_get(lpfc_acqe_link_speed, acqe_link));
5028 phba->sli4_hba.link_state.duplex =
5029 bf_get(lpfc_acqe_link_duplex, acqe_link);
5030 phba->sli4_hba.link_state.status =
5031 bf_get(lpfc_acqe_link_status, acqe_link);
5032 phba->sli4_hba.link_state.type =
5033 bf_get(lpfc_acqe_link_type, acqe_link);
5034 phba->sli4_hba.link_state.number =
5035 bf_get(lpfc_acqe_link_number, acqe_link);
5036 phba->sli4_hba.link_state.fault =
5037 bf_get(lpfc_acqe_link_fault, acqe_link);
5038 phba->sli4_hba.link_state.logical_speed =
5039 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5040
5041 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5042 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5043 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5044 "Logical speed:%dMbps Fault:%d\n",
5045 phba->sli4_hba.link_state.speed,
5046			phba->sli4_hba.link_state.duplex,
5047 phba->sli4_hba.link_state.status,
5048 phba->sli4_hba.link_state.type,
5049 phba->sli4_hba.link_state.number,
5050 phba->sli4_hba.link_state.logical_speed,
5051 phba->sli4_hba.link_state.fault);
5052
5053
5054
5055
5056 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5057 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5058 if (rc == MBX_NOT_FINISHED)
5059 goto out_free_dmabuf;
5060 return;
5061 }
5062
5063
5064
5065
5066
5067
5068 mb = &pmb->u.mb;
5069 mb->mbxStatus = MBX_SUCCESS;
5070
5071
5072 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5073
5074
5075 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5076 la->eventTag = acqe_link->event_tag;
5077 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5078 bf_set(lpfc_mbx_read_top_link_spd, la,
5079 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5080
5081
5082 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5083 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5084 bf_set(lpfc_mbx_read_top_il, la, 0);
5085 bf_set(lpfc_mbx_read_top_pb, la, 0);
5086 bf_set(lpfc_mbx_read_top_fa, la, 0);
5087 bf_set(lpfc_mbx_read_top_mm, la, 0);
5088
5089
5090 lpfc_mbx_cmpl_read_topology(phba, pmb);
5091
5092 return;
5093
5094out_free_dmabuf:
5095 kfree(mp);
5096out_free_pmb:
5097 mempool_free(pmb, phba->mbox_mem_pool);
5098}
5099
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
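/**
 * lpfc_async_link_speed_to_read_top - Map an async FC event speed code
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: speed code carried in the asynchronous FC event.
 *
 * Translates the async event speed code into the LPFC_LINK_SPEED_xGHZ
 * encoding used by the READ_TOPOLOGY path; unknown codes map to 0.
 */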
5111static uint8_t
5112lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5113{
5114 uint8_t port_speed;
5115
5116 switch (speed_code) {
5117 case LPFC_FC_LA_SPEED_1G:
5118 port_speed = LPFC_LINK_SPEED_1GHZ;
5119 break;
5120 case LPFC_FC_LA_SPEED_2G:
5121 port_speed = LPFC_LINK_SPEED_2GHZ;
5122 break;
5123 case LPFC_FC_LA_SPEED_4G:
5124 port_speed = LPFC_LINK_SPEED_4GHZ;
5125 break;
5126 case LPFC_FC_LA_SPEED_8G:
5127 port_speed = LPFC_LINK_SPEED_8GHZ;
5128 break;
5129 case LPFC_FC_LA_SPEED_16G:
5130 port_speed = LPFC_LINK_SPEED_16GHZ;
5131 break;
5132 case LPFC_FC_LA_SPEED_32G:
5133 port_speed = LPFC_LINK_SPEED_32GHZ;
5134 break;
5135 case LPFC_FC_LA_SPEED_64G:
5136 port_speed = LPFC_LINK_SPEED_64GHZ;
5137 break;
5138 case LPFC_FC_LA_SPEED_128G:
5139 port_speed = LPFC_LINK_SPEED_128GHZ;
5140 break;
5141 case LPFC_FC_LA_SPEED_256G:
5142 port_speed = LPFC_LINK_SPEED_256GHZ;
5143 break;
5144 default:
5145 port_speed = 0;
5146 break;
5147 }
5148
5149 return port_speed;
5150}
5151
5152#define trunk_link_status(__idx)\
5153 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5154 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5155 "Link up" : "Link down") : "NA"
5156
5157#define trunk_port_fault(__idx)\
5158 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5159 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
5160
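/**
 * lpfc_update_trunk_link_status - Update link status for a trunked port
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async FC completion queue entry.
 *
 * Records the aggregate trunk speed and the per-port link state and fault
 * information reported in an FC trunking event, then logs the result.
 */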
5161static void
5162lpfc_update_trunk_link_status(struct lpfc_hba *phba,
5163 struct lpfc_acqe_fc_la *acqe_fc)
5164{
5165 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
5166 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
5167
5168 phba->sli4_hba.link_state.speed =
5169 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5170 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5171
5172 phba->sli4_hba.link_state.logical_speed =
5173 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5174
5175 phba->fc_linkspeed =
5176 lpfc_async_link_speed_to_read_top(
5177 phba,
5178 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5179
5180 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
5181 phba->trunk_link.link0.state =
5182 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
5183 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5184 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
5185 }
5186 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
5187 phba->trunk_link.link1.state =
5188 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
5189 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5190 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
5191 }
5192 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
5193 phba->trunk_link.link2.state =
5194 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
5195 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5196 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
5197 }
5198 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
5199 phba->trunk_link.link3.state =
5200 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
5201 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5202 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
5203 }
5204
5205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5206 "2910 Async FC Trunking Event - Speed:%d\n"
5207 "\tLogical speed:%d "
5208 "port0: %s port1: %s port2: %s port3: %s\n",
5209 phba->sli4_hba.link_state.speed,
5210 phba->sli4_hba.link_state.logical_speed,
5211 trunk_link_status(0), trunk_link_status(1),
5212 trunk_link_status(2), trunk_link_status(3));
5213
5214 if (port_fault)
5215 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5216				"3202 trunk error:0x%x (%s) seen on port0:%s "
5222				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
5223 "UNDEFINED. update driver." : trunk_errmsg[err],
5224 trunk_port_fault(0), trunk_port_fault(1),
5225 trunk_port_fault(2), trunk_port_fault(3));
5226}
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
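/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async FC completion queue entry.
 *
 * Records the new FC link state and issues a READ_TOPOLOGY mailbox
 * command. Trunking events are handed to the trunk status handler; for
 * link-down, MDS and unexpected-WWPN attentions the READ_TOPOLOGY
 * completion handler is invoked directly with a synthesized attention.
 */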
5238static void
5239lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5240{
5241 struct lpfc_dmabuf *mp;
5242 LPFC_MBOXQ_t *pmb;
5243 MAILBOX_t *mb;
5244 struct lpfc_mbx_read_top *la;
5245 int rc;
5246
5247 if (bf_get(lpfc_trailer_type, acqe_fc) !=
5248 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
5249 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5250 "2895 Non FC link Event detected.(%d)\n",
5251 bf_get(lpfc_trailer_type, acqe_fc));
5252 return;
5253 }
5254
5255 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5256 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5257 lpfc_update_trunk_link_status(phba, acqe_fc);
5258 return;
5259 }
5260
5261
5262 phba->sli4_hba.link_state.speed =
5263 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5264 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5265 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5266 phba->sli4_hba.link_state.topology =
5267 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5268 phba->sli4_hba.link_state.status =
5269 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5270 phba->sli4_hba.link_state.type =
5271 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5272 phba->sli4_hba.link_state.number =
5273 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5274 phba->sli4_hba.link_state.fault =
5275 bf_get(lpfc_acqe_link_fault, acqe_fc);
5276
5277 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5278 LPFC_FC_LA_TYPE_LINK_DOWN)
5279 phba->sli4_hba.link_state.logical_speed = 0;
5280 else if (!phba->sli4_hba.conf_trunk)
5281 phba->sli4_hba.link_state.logical_speed =
5282 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5283
5284 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5285 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5286 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5287 "%dMbps Fault:%d\n",
5288 phba->sli4_hba.link_state.speed,
5289 phba->sli4_hba.link_state.topology,
5290 phba->sli4_hba.link_state.status,
5291 phba->sli4_hba.link_state.type,
5292 phba->sli4_hba.link_state.number,
5293 phba->sli4_hba.link_state.logical_speed,
5294 phba->sli4_hba.link_state.fault);
5295 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5296 if (!pmb) {
5297 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5298 "2897 The mboxq allocation failed\n");
5299 return;
5300 }
5301 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5302 if (!mp) {
5303 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5304 "2898 The lpfc_dmabuf allocation failed\n");
5305 goto out_free_pmb;
5306 }
5307 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5308 if (!mp->virt) {
5309 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5310 "2899 The mbuf allocation failed\n");
5311 goto out_free_dmabuf;
5312 }
5313
5314
5315 lpfc_els_flush_all_cmd(phba);
5316
5317
5318 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5319
5320
5321 phba->sli.slistat.link_event++;
5322
5323
5324 lpfc_read_topology(phba, pmb, mp);
5325 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5326 pmb->vport = phba->pport;
5327
5328 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
5329 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5330
5331 switch (phba->sli4_hba.link_state.status) {
5332 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5333 phba->link_flag |= LS_MDS_LINK_DOWN;
5334 break;
5335 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5336 phba->link_flag |= LS_MDS_LOOPBACK;
5337 break;
5338 default:
5339 break;
5340 }
5341
5342
5343 mb = &pmb->u.mb;
5344 mb->mbxStatus = MBX_SUCCESS;
5345
5346
5347 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
5348
5349
5350 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5351 la->eventTag = acqe_fc->event_tag;
5352
5353 if (phba->sli4_hba.link_state.status ==
5354 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5355 bf_set(lpfc_mbx_read_top_att_type, la,
5356 LPFC_FC_LA_TYPE_UNEXP_WWPN);
5357 } else {
5358 bf_set(lpfc_mbx_read_top_att_type, la,
5359 LPFC_FC_LA_TYPE_LINK_DOWN);
5360 }
5361
5362 lpfc_mbx_cmpl_read_topology(phba, pmb);
5363
5364 return;
5365 }
5366
5367 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5368 if (rc == MBX_NOT_FINISHED)
5369 goto out_free_dmabuf;
5370 return;
5371
5372out_free_dmabuf:
5373 kfree(mp);
5374out_free_pmb:
5375 mempool_free(pmb, phba->mbox_mem_pool);
5376}
5377
5378
5379
5380
5381
5382
5383
5384
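/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * Handles port-level SLI events: temperature threshold crossings,
 * misconfigured optics, remote D_Port test initiation, misconfigured
 * FA-WWN and EEPROM failures, logging and/or posting vendor events as
 * appropriate.
 */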
5385static void
5386lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
5387{
5388 char port_name;
5389 char message[128];
5390 uint8_t status;
5391 uint8_t evt_type;
5392 uint8_t operational = 0;
5393 struct temp_event temp_event_data;
5394 struct lpfc_acqe_misconfigured_event *misconfigured;
5395 struct Scsi_Host *shost;
5396 struct lpfc_vport **vports;
5397 int rc, i;
5398
5399 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
5400
5401 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5402 "2901 Async SLI event - Type:%d, Event Data: x%08x "
5403 "x%08x x%08x x%08x\n", evt_type,
5404 acqe_sli->event_data1, acqe_sli->event_data2,
5405 acqe_sli->reserved, acqe_sli->trailer);
5406
5407 port_name = phba->Port[0];
5408 if (port_name == 0x00)
5409 port_name = '?';
5410
5411 switch (evt_type) {
5412 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
5413 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5414 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
5415 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5416
5417 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5418 "3190 Over Temperature:%d Celsius- Port Name %c\n",
5419 acqe_sli->event_data1, port_name);
5420
5421 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
5422 shost = lpfc_shost_from_vport(phba->pport);
5423 fc_host_post_vendor_event(shost, fc_get_event_number(),
5424 sizeof(temp_event_data),
5425 (char *)&temp_event_data,
5426 SCSI_NL_VID_TYPE_PCI
5427 | PCI_VENDOR_ID_EMULEX);
5428 break;
5429 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
5430 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5431 temp_event_data.event_code = LPFC_NORMAL_TEMP;
5432 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5433
5434 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5435 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
5436 acqe_sli->event_data1, port_name);
5437
5438 shost = lpfc_shost_from_vport(phba->pport);
5439 fc_host_post_vendor_event(shost, fc_get_event_number(),
5440 sizeof(temp_event_data),
5441 (char *)&temp_event_data,
5442 SCSI_NL_VID_TYPE_PCI
5443 | PCI_VENDOR_ID_EMULEX);
5444 break;
5445 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
5446 misconfigured = (struct lpfc_acqe_misconfigured_event *)
5447 &acqe_sli->event_data1;
5448
5449
5450 switch (phba->sli4_hba.lnk_info.lnk_no) {
5451 case LPFC_LINK_NUMBER_0:
5452 status = bf_get(lpfc_sli_misconfigured_port0_state,
5453 &misconfigured->theEvent);
5454 operational = bf_get(lpfc_sli_misconfigured_port0_op,
5455 &misconfigured->theEvent);
5456 break;
5457 case LPFC_LINK_NUMBER_1:
5458 status = bf_get(lpfc_sli_misconfigured_port1_state,
5459 &misconfigured->theEvent);
5460 operational = bf_get(lpfc_sli_misconfigured_port1_op,
5461 &misconfigured->theEvent);
5462 break;
5463 case LPFC_LINK_NUMBER_2:
5464 status = bf_get(lpfc_sli_misconfigured_port2_state,
5465 &misconfigured->theEvent);
5466 operational = bf_get(lpfc_sli_misconfigured_port2_op,
5467 &misconfigured->theEvent);
5468 break;
5469 case LPFC_LINK_NUMBER_3:
5470 status = bf_get(lpfc_sli_misconfigured_port3_state,
5471 &misconfigured->theEvent);
5472 operational = bf_get(lpfc_sli_misconfigured_port3_op,
5473 &misconfigured->theEvent);
5474 break;
5475 default:
5476 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5477 "3296 "
5478 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
5479 "event: Invalid link %d",
5480 phba->sli4_hba.lnk_info.lnk_no);
5481 return;
5482 }
5483
5484
5485 if (phba->sli4_hba.lnk_info.optic_state == status)
5486 return;
5487
5488 switch (status) {
5489 case LPFC_SLI_EVENT_STATUS_VALID:
5490 sprintf(message, "Physical Link is functional");
5491 break;
5492 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
5493 sprintf(message, "Optics faulted/incorrectly "
5494 "installed/not installed - Reseat optics, "
5495 "if issue not resolved, replace.");
5496 break;
5497 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
5498 sprintf(message,
5499 "Optics of two types installed - Remove one "
5500 "optic or install matching pair of optics.");
5501 break;
5502 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
5503 sprintf(message, "Incompatible optics - Replace with "
5504 "compatible optics for card to function.");
5505 break;
5506 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
5507 sprintf(message, "Unqualified optics - Replace with "
5508 "Avago optics for Warranty and Technical "
5509 "Support - Link is%s operational",
5510 (operational) ? " not" : "");
5511 break;
5512 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
5513 sprintf(message, "Uncertified optics - Replace with "
5514 "Avago-certified optics to enable link "
5515 "operation - Link is%s operational",
5516 (operational) ? " not" : "");
5517 break;
5518 default:
5519
5520 sprintf(message, "Unknown event status x%02x", status);
5521 break;
5522 }
5523
5524
5525 rc = lpfc_sli4_read_config(phba);
5526 if (rc) {
5527 phba->lmt = 0;
5528 lpfc_printf_log(phba, KERN_ERR,
5529 LOG_TRACE_EVENT,
5530 "3194 Unable to retrieve supported "
5531 "speeds, rc = 0x%x\n", rc);
5532 }
5533 vports = lpfc_create_vport_work_array(phba);
5534 if (vports != NULL) {
5535 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5536 i++) {
5537 shost = lpfc_shost_from_vport(vports[i]);
5538 lpfc_host_supported_speeds_set(shost);
5539 }
5540 }
5541 lpfc_destroy_vport_work_array(phba, vports);
5542
5543 phba->sli4_hba.lnk_info.optic_state = status;
5544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5545 "3176 Port Name %c %s\n", port_name, message);
5546 break;
5547 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
5548 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5549 "3192 Remote DPort Test Initiated - "
5550 "Event Data1:x%08x Event Data2: x%08x\n",
5551 acqe_sli->event_data1, acqe_sli->event_data2);
5552 break;
5553 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
5554
5555
5556
5557
5558
5559 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
5560 "2699 Misconfigured FA-WWN - Attached device does "
5561 "not support FA-WWN\n");
5562 break;
5563 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
5564
5565 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5566 "2518 EEPROM failure - "
5567 "Event Data1: x%08x Event Data2: x%08x\n",
5568 acqe_sli->event_data1, acqe_sli->event_data2);
5569 break;
5570 default:
5571 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5572 "3193 Unrecognized SLI event, type: 0x%x",
5573 evt_type);
5574 break;
5575 }
5576}
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
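/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to lpfc virtual port data structure.
 *
 * Handles a received Clear Virtual Link for one vport: ensures a fabric
 * node exists, brings the port link down, cleans up pending mailbox
 * commands and marks the vport with FC_VPORT_CVL_RCVD.
 *
 * Return: pointer to the fabric node, or NULL if the CVL does not apply.
 */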
5588static struct lpfc_nodelist *
5589lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5590{
5591 struct lpfc_nodelist *ndlp;
5592 struct Scsi_Host *shost;
5593 struct lpfc_hba *phba;
5594
5595 if (!vport)
5596 return NULL;
5597 phba = vport->phba;
5598 if (!phba)
5599 return NULL;
5600 ndlp = lpfc_findnode_did(vport, Fabric_DID);
5601 if (!ndlp) {
5602
5603 ndlp = lpfc_nlp_init(vport, Fabric_DID);
5604 if (!ndlp)
5605			return NULL;
5606
5607 ndlp->nlp_type |= NLP_FABRIC;
5608
5609 lpfc_enqueue_node(vport, ndlp);
5610 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5611
5612 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5613 if (!ndlp)
5614			return NULL;
5615 }
5616 if ((phba->pport->port_state < LPFC_FLOGI) &&
5617 (phba->pport->port_state != LPFC_VPORT_FAILED))
5618 return NULL;
5619
5620 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5621 && (vport->port_state != LPFC_VPORT_FAILED))
5622 return NULL;
5623 shost = lpfc_shost_from_vport(vport);
5624 if (!shost)
5625 return NULL;
5626 lpfc_linkdown_port(vport);
5627 lpfc_cleanup_pending_mbox(vport);
5628 spin_lock_irq(shost->host_lock);
5629 vport->fc_flag |= FC_VPORT_CVL_RCVD;
5630 spin_unlock_irq(shost->host_lock);
5631
5632 return ndlp;
5633}
5634
5635
5636
5637
5638
5639
5640
5641
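/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the vport work array and performs the clear-virtual-link handling
 * on every active vport.
 */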
5642static void
5643lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5644{
5645 struct lpfc_vport **vports;
5646 int i;
5647
5648 vports = lpfc_create_vport_work_array(phba);
5649 if (vports)
5650 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5651 lpfc_sli4_perform_vport_cvl(vports[i]);
5652 lpfc_destroy_vport_work_array(phba, vports);
5653}
5654
5655
5656
5657
5658
5659
5660
5661
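/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * Dispatches FIP events: new or modified FCF records trigger an FCF table
 * scan, FCF dead and clear-virtual-link events start the FCF fast
 * failover (rediscovery) process, and table-full conditions are logged.
 */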
5662static void
5663lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
5664 struct lpfc_acqe_fip *acqe_fip)
5665{
5666 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
5667 int rc;
5668 struct lpfc_vport *vport;
5669 struct lpfc_nodelist *ndlp;
5670 struct Scsi_Host *shost;
5671 int active_vlink_present;
5672 struct lpfc_vport **vports;
5673 int i;
5674
5675 phba->fc_eventTag = acqe_fip->event_tag;
5676 phba->fcoe_eventtag = acqe_fip->event_tag;
5677 switch (event_type) {
5678 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
5679 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
5680 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
5681 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5682 "2546 New FCF event, evt_tag:x%x, "
5683 "index:x%x\n",
5684 acqe_fip->event_tag,
5685 acqe_fip->index);
5686 else
5687 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
5688 LOG_DISCOVERY,
5689 "2788 FCF param modified event, "
5690 "evt_tag:x%x, index:x%x\n",
5691 acqe_fip->event_tag,
5692 acqe_fip->index);
5693 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5694
5695
5696
5697
5698
5699 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5700 LOG_DISCOVERY,
5701 "2779 Read FCF (x%x) for updating "
5702 "roundrobin FCF failover bmask\n",
5703 acqe_fip->index);
5704 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
5705 }
5706
5707
5708 spin_lock_irq(&phba->hbalock);
5709 if (phba->hba_flag & FCF_TS_INPROG) {
5710 spin_unlock_irq(&phba->hbalock);
5711 break;
5712 }
5713
5714 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
5715 spin_unlock_irq(&phba->hbalock);
5716 break;
5717 }
5718
5719
5720 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
5721 spin_unlock_irq(&phba->hbalock);
5722 break;
5723 }
5724 spin_unlock_irq(&phba->hbalock);
5725
5726
5727 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5728 "2770 Start FCF table scan per async FCF "
5729 "event, evt_tag:x%x, index:x%x\n",
5730 acqe_fip->event_tag, acqe_fip->index);
5731 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5732 LPFC_FCOE_FCF_GET_FIRST);
5733 if (rc)
5734 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5735 "2547 Issue FCF scan read FCF mailbox "
5736 "command failed (x%x)\n", rc);
5737 break;
5738
5739 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
5740 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5741 "2548 FCF Table full count 0x%x tag 0x%x\n",
5742 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5743 acqe_fip->event_tag);
5744 break;
5745
5746 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
5747 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5748 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5749 "2549 FCF (x%x) disconnected from network, "
5750 "tag:x%x\n", acqe_fip->index,
5751 acqe_fip->event_tag);
5752
5753
5754
5755
5756 spin_lock_irq(&phba->hbalock);
5757 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5758 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
5759 spin_unlock_irq(&phba->hbalock);
5760
5761 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
5762 break;
5763 }
5764 spin_unlock_irq(&phba->hbalock);
5765
5766
5767 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
5768 break;
5769
5770
5771
5772
5773
5774
5775
5776 spin_lock_irq(&phba->hbalock);
5777
5778 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5779 spin_unlock_irq(&phba->hbalock);
5780
5781 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5782 "2771 Start FCF fast failover process due to "
5783 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5784 "\n", acqe_fip->event_tag, acqe_fip->index);
5785 rc = lpfc_sli4_redisc_fcf_table(phba);
5786 if (rc) {
5787 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5788 LOG_TRACE_EVENT,
5789 "2772 Issue FCF rediscover mailbox "
5790 "command failed, fail through to FCF "
5791 "dead event\n");
5792 spin_lock_irq(&phba->hbalock);
5793 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5794 spin_unlock_irq(&phba->hbalock);
5795
5796
5797
5798
5799 lpfc_sli4_fcf_dead_failthrough(phba);
5800 } else {
5801
5802 lpfc_sli4_clear_fcf_rr_bmask(phba);
5803
5804
5805
5806
5807 lpfc_sli4_perform_all_vport_cvl(phba);
5808 }
5809 break;
5810 case LPFC_FIP_EVENT_TYPE_CVL:
5811 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5812 lpfc_printf_log(phba, KERN_ERR,
5813 LOG_TRACE_EVENT,
5814 "2718 Clear Virtual Link Received for VPI 0x%x"
5815 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
5816
5817 vport = lpfc_find_vport_by_vpid(phba,
5818 acqe_fip->index);
5819 ndlp = lpfc_sli4_perform_vport_cvl(vport);
5820 if (!ndlp)
5821 break;
5822 active_vlink_present = 0;
5823
5824 vports = lpfc_create_vport_work_array(phba);
5825 if (vports) {
5826 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5827 i++) {
5828 if ((!(vports[i]->fc_flag &
5829 FC_VPORT_CVL_RCVD)) &&
5830 (vports[i]->port_state > LPFC_FDISC)) {
5831 active_vlink_present = 1;
5832 break;
5833 }
5834 }
5835 lpfc_destroy_vport_work_array(phba, vports);
5836 }
5837
5838
5839
5840
5841
5842
5843 if (!(vport->load_flag & FC_UNLOADING) &&
5844 active_vlink_present) {
5845
5846
5847
5848
5849 mod_timer(&ndlp->nlp_delayfunc,
5850 jiffies + msecs_to_jiffies(1000));
5851 shost = lpfc_shost_from_vport(vport);
5852 spin_lock_irq(shost->host_lock);
5853 ndlp->nlp_flag |= NLP_DELAY_TMO;
5854 spin_unlock_irq(shost->host_lock);
5855 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5856 vport->port_state = LPFC_FDISC;
5857 } else {
5858
5859
5860
5861
5862
5863
5864
5865 spin_lock_irq(&phba->hbalock);
5866 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5867 spin_unlock_irq(&phba->hbalock);
5868 break;
5869 }
5870
5871 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
5872 spin_unlock_irq(&phba->hbalock);
5873 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5874 LOG_DISCOVERY,
5875 "2773 Start FCF failover per CVL, "
5876 "evt_tag:x%x\n", acqe_fip->event_tag);
5877 rc = lpfc_sli4_redisc_fcf_table(phba);
5878 if (rc) {
5879 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5880 LOG_TRACE_EVENT,
5881 "2774 Issue FCF rediscover "
5882 "mailbox command failed, "
5883 "through to CVL event\n");
5884 spin_lock_irq(&phba->hbalock);
5885 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
5886 spin_unlock_irq(&phba->hbalock);
5887
5888
5889
5890
5891 lpfc_retry_pport_discovery(phba);
5892 } else
5893
5894
5895
5896
5897 lpfc_sli4_clear_fcf_rr_bmask(phba);
5898 }
5899 break;
5900 default:
5901 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5902 "0288 Unknown FCoE event type 0x%x event tag "
5903 "0x%x\n", event_type, acqe_fip->event_tag);
5904 break;
5905 }
5906}
5907
5908
5909
5910
5911
5912
5913
5914
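/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous DCBX event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
 *
 * DCBX events are not yet handled; the event tag is recorded and an
 * error message is logged.
 */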
5915static void
5916lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5917 struct lpfc_acqe_dcbx *acqe_dcbx)
5918{
5919 phba->fc_eventTag = acqe_dcbx->event_tag;
5920 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5921 "0290 The SLI4 DCBX asynchronous event is not "
5922 "handled yet\n");
5923}
5924
5925
5926
5927
5928
5929
5930
5931
5932
5933
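/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group 5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * Updates the logical link speed reported by a GRP5 event and logs the
 * previous and new values.
 */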
5934static void
5935lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5936 struct lpfc_acqe_grp5 *acqe_grp5)
5937{
5938 uint16_t prev_ll_spd;
5939
5940 phba->fc_eventTag = acqe_grp5->event_tag;
5941 phba->fcoe_eventtag = acqe_grp5->event_tag;
5942 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5943 phba->sli4_hba.link_state.logical_speed =
5944 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5945 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5946 "2789 GRP5 Async Event: Updating logical link speed "
5947 "from %dMbps to %dMbps\n", prev_ll_spd,
5948 phba->sli4_hba.link_state.logical_speed);
5949}
5950
5951
5952
5953
5954
5955
5956
5957
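/**
 * lpfc_sli4_async_event_proc - Process the asynchronous event queue
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked by the worker thread to drain the slow-path async event work
 * queue and dispatch each event to its type-specific handler (link,
 * FCoE/FIP, DCBX, GRP5, FC or SLI).
 */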
5958void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5959{
5960 struct lpfc_cq_event *cq_event;
5961
5962
5963 spin_lock_irq(&phba->hbalock);
5964 phba->hba_flag &= ~ASYNC_EVENT;
5965 spin_unlock_irq(&phba->hbalock);
5966
5967 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5968
5969 spin_lock_irq(&phba->hbalock);
5970 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5971 cq_event, struct lpfc_cq_event, list);
5972 spin_unlock_irq(&phba->hbalock);
5973
5974 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5975 case LPFC_TRAILER_CODE_LINK:
5976 lpfc_sli4_async_link_evt(phba,
5977 &cq_event->cqe.acqe_link);
5978 break;
5979 case LPFC_TRAILER_CODE_FCOE:
5980 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5981 break;
5982 case LPFC_TRAILER_CODE_DCBX:
5983 lpfc_sli4_async_dcbx_evt(phba,
5984 &cq_event->cqe.acqe_dcbx);
5985 break;
5986 case LPFC_TRAILER_CODE_GRP5:
5987 lpfc_sli4_async_grp5_evt(phba,
5988 &cq_event->cqe.acqe_grp5);
5989 break;
5990 case LPFC_TRAILER_CODE_FC:
5991 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5992 break;
5993 case LPFC_TRAILER_CODE_SLI:
5994 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5995 break;
5996 default:
5997 lpfc_printf_log(phba, KERN_ERR,
5998 LOG_TRACE_EVENT,
5999 "1804 Invalid asynchronous event code: "
6000 "x%x\n", bf_get(lpfc_trailer_code,
6001 &cq_event->cqe.mcqe_cmpl));
6002 break;
6003 }
6004
6005 lpfc_sli4_cq_event_release(phba, cq_event);
6006 }
6007}
6008
6009
6010
6011
6012
6013
6014
6015
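/**
 * lpfc_sli4_fcf_redisc_event_proc - Process the FCF rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked by the worker thread once the FCF rediscover quiescent period
 * has expired: clears the rediscovery event flag, arms fast failover and
 * kicks off a new FCF table scan.
 */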
6016void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
6017{
6018 int rc;
6019
6020 spin_lock_irq(&phba->hbalock);
6021
6022 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
6023
6024 phba->fcf.failover_rec.flag = 0;
6025
6026 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
6027 spin_unlock_irq(&phba->hbalock);
6028
6029
6030 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6031 "2777 Start post-quiescent FCF table scan\n");
6032 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
6033 if (rc)
6034 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6035 "2747 Issue FCF scan read FCF mailbox "
6036 "command failed 0x%x\n", rc);
6037}
6038
6039
6040
6041
6042
6043
6044
6045
6046
6047
6048
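/**
 * lpfc_api_table_setup - Set up the per-device API jump tables
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: PCI device group (LP for SLI-3, OC for SLI-4).
 *
 * Records the device group, selects SLI rev 4 for OC devices and sets up
 * the init, SCSI, SLI and mailbox API jump tables.
 *
 * Return: 0 on success, -ENODEV if any table setup fails.
 */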
6049int
6050lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6051{
6052 int rc;
6053
6054
6055 phba->pci_dev_grp = dev_grp;
6056
6057
6058 if (dev_grp == LPFC_PCI_DEV_OC)
6059 phba->sli_rev = LPFC_SLI_REV4;
6060
6061
6062 rc = lpfc_init_api_table_setup(phba, dev_grp);
6063 if (rc)
6064 return -ENODEV;
6065
6066 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
6067 if (rc)
6068 return -ENODEV;
6069
6070 rc = lpfc_sli_api_table_setup(phba, dev_grp);
6071 if (rc)
6072 return -ENODEV;
6073
6074 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
6075 if (rc)
6076 return -ENODEV;
6077
6078 return 0;
6079}
6080
6081
6082
6083
6084
6085
6086
6087
6088
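/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode (0 = INTx, 1 = MSI, 2 = MSI-X).
 */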
6089static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
6090{
6091 switch (intr_mode) {
6092 case 0:
6093 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6094 "0470 Enable INTx interrupt mode.\n");
6095 break;
6096 case 1:
6097 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6098 "0481 Enabled MSI interrupt mode.\n");
6099 break;
6100 case 2:
6101 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6102 "0480 Enabled MSI-X interrupt mode.\n");
6103 break;
6104 default:
6105 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6106 "0482 Illegal interrupt mode.\n");
6107 break;
6108 }
6109 return;
6110}
6111
6112
6113
6114
6115
6116
6117
6118
6119
6120
6121
6122
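/**
 * lpfc_enable_pci_dev - Enable the PCI device for this HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Enables the PCI device's memory resources, requests its memory regions,
 * enables bus mastering and MWI, saves the config state, and flags PCIe
 * devices as needing a function-level reset during error recovery.
 *
 * Return: 0 on success, -ENODEV on failure.
 */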
6123static int
6124lpfc_enable_pci_dev(struct lpfc_hba *phba)
6125{
6126 struct pci_dev *pdev;
6127
6128
6129 if (!phba->pcidev)
6130 goto out_error;
6131 else
6132 pdev = phba->pcidev;
6133
6134 if (pci_enable_device_mem(pdev))
6135 goto out_error;
6136
6137 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
6138 goto out_disable_device;
6139
6140 pci_set_master(pdev);
6141 pci_try_set_mwi(pdev);
6142 pci_save_state(pdev);
6143
6144
6145 if (pci_is_pcie(pdev))
6146 pdev->needs_freset = 1;
6147
6148 return 0;
6149
6150out_disable_device:
6151 pci_disable_device(pdev);
6152out_error:
6153 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6154 "1401 Failed to enable pci device\n");
6155 return -ENODEV;
6156}
6157
6158
6159
6160
6161
6162
6163
6164
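/**
 * lpfc_disable_pci_dev - Disable the PCI device for this HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases the PCI memory regions and disables the PCI device.
 */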
6165static void
6166lpfc_disable_pci_dev(struct lpfc_hba *phba)
6167{
6168 struct pci_dev *pdev;
6169
6170
6171 if (!phba->pcidev)
6172 return;
6173 else
6174 pdev = phba->pcidev;
6175
6176 pci_release_mem_regions(pdev);
6177 pci_disable_device(pdev);
6178
6179 return;
6180}
6181
6182
6183
6184
6185
6186
6187
6188
6189
6190
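/**
 * lpfc_reset_hba - Reset the HBA and bring it back online
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the port offline (waiting for mailbox completion only when the
 * SLI layer is active), restarts the board, brings it back online and
 * unblocks management I/O. If HBA resets are administratively disabled,
 * the port is simply marked in error.
 */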
6191void
6192lpfc_reset_hba(struct lpfc_hba *phba)
6193{
6194
6195 if (!phba->cfg_enable_hba_reset) {
6196 phba->link_state = LPFC_HBA_ERROR;
6197 return;
6198 }
6199 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
6200 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6201 else
6202 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
6203 lpfc_offline(phba);
6204 lpfc_sli_brdrestart(phba);
6205 lpfc_online(phba);
6206 lpfc_unblock_mgmt_io(phba);
6207}
6208
6209
6210
6211
6212
6213
6214
6215
6216
6217
6218
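/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of supported virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * Reads PCI_SRIOV_TOTAL_VF from the device's SR-IOV extended capability.
 *
 * Return: the number of virtual functions, or 0 if SR-IOV is unsupported.
 */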
6219uint16_t
6220lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6221{
6222 struct pci_dev *pdev = phba->pcidev;
6223 uint16_t nr_virtfn;
6224 int pos;
6225
6226 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6227 if (pos == 0)
6228 return 0;
6229
6230 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6231 return nr_virtfn;
6232}
6233
6234
6235
6236
6237
6238
6239
6240
6241
6242
6243
6244
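/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable SR-IOV virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions requested.
 *
 * Validates the request against the device's SR-IOV capability and calls
 * pci_enable_sriov() to enable the requested virtual functions.
 *
 * Return: 0 on success, -EINVAL if too many VFs are requested, otherwise
 * the error returned by pci_enable_sriov().
 */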
6245int
6246lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6247{
6248 struct pci_dev *pdev = phba->pcidev;
6249 uint16_t max_nr_vfn;
6250 int rc;
6251
6252 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6253 if (nr_vfn > max_nr_vfn) {
6254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6255 "3057 Requested vfs (%d) greater than "
6256 "supported vfs (%d)", nr_vfn, max_nr_vfn);
6257 return -EINVAL;
6258 }
6259
6260 rc = pci_enable_sriov(pdev, nr_vfn);
6261 if (rc) {
6262 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6263 "2806 Failed to enable sriov on this device "
6264 "with vfn number nr_vf:%d, rc:%d\n",
6265 nr_vfn, rc);
6266 } else
6267 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6268 "2807 Successful enable sriov on this device "
6269 "with vfn number nr_vf:%d\n", nr_vfn);
6270 return rc;
6271}
6272
6273
6274
6275
6276
6277
6278
6279
6280
6281
6282
6283
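/**
 * lpfc_setup_driver_resource_phase1 - Set up SLI-rev independent resources
 * @phba: pointer to lpfc hba data structure.
 *
 * Initializes the locks, list heads, wait queues and timers that are
 * common to SLI-3 and SLI-4 HBAs, and logs the protocols (SCSI, NVME,
 * NVMET) this port is configured to support.
 */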
6284static int
6285lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
6286{
6287 struct lpfc_sli *psli = &phba->sli;
6288
6289
6290
6291
6292 atomic_set(&phba->fast_event_count, 0);
6293 atomic_set(&phba->dbg_log_idx, 0);
6294 atomic_set(&phba->dbg_log_cnt, 0);
6295 atomic_set(&phba->dbg_log_dmping, 0);
6296 spin_lock_init(&phba->hbalock);
6297
6298
6299 spin_lock_init(&phba->ndlp_lock);
6300
6301
6302 spin_lock_init(&phba->port_list_lock);
6303 INIT_LIST_HEAD(&phba->port_list);
6304
6305 INIT_LIST_HEAD(&phba->work_list);
6306 init_waitqueue_head(&phba->wait_4_mlo_m_q);
6307
6308
6309 init_waitqueue_head(&phba->work_waitq);
6310
6311 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6312 "1403 Protocols supported %s %s %s\n",
6313 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
6314 "SCSI" : " "),
6315 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
6316 "NVME" : " "),
6317 (phba->nvmet_support ? "NVMET" : " "));
6318
6319
6320 spin_lock_init(&phba->scsi_buf_list_get_lock);
6321 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
6322 spin_lock_init(&phba->scsi_buf_list_put_lock);
6323 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
6324
6325
6326 INIT_LIST_HEAD(&phba->fabric_iocb_list);
6327
6328
6329 INIT_LIST_HEAD(&phba->elsbuf);
6330
6331
6332 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
6333
6334
6335 spin_lock_init(&phba->devicelock);
6336 INIT_LIST_HEAD(&phba->luns);
6337
6338
6339 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
6340
6341 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
6342
6343 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
6344
6345 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
6346
6347 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
6348
6349 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
6350 lpfc_idle_stat_delay_work);
6351
6352 return 0;
6353}
6354
6355
6356
6357
6358
6359
6360
6361
6362
6363
6364
6365
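/**
 * lpfc_sli_driver_resource_setup - Set up SLI-3 specific driver resources
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates and initializes the resources an SLI-3 HBA needs before the
 * port is brought up: ring structures, scatter-gather sizing (with or
 * without BlockGuard), memory and DMA pools, and optional SR-IOV virtual
 * functions.
 *
 * Return: 0 on success, -ENOMEM or -ENODEV on failure.
 */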
6366static int
6367lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
6368{
6369 int rc, entry_sz;
6370
6371
6372
6373
6374
6375
6376 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
6377
6378
6379 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
6380 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
6381
6382
6383 lpfc_get_cfgparam(phba);
6384
6385
6386 rc = lpfc_setup_driver_resource_phase1(phba);
6387 if (rc)
6388 return -ENODEV;
6389
6390 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
6391 phba->menlo_flag |= HBA_MENLO_SUPPORT;
6392
6393 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
6394 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
6395 }
6396
6397 if (!phba->sli.sli3_ring)
6398 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
6399 sizeof(struct lpfc_sli_ring),
6400 GFP_KERNEL);
6401 if (!phba->sli.sli3_ring)
6402 return -ENOMEM;
6403
6404
6405
6406
6407
6408
6409 if (phba->sli_rev == LPFC_SLI_REV4)
6410 entry_sz = sizeof(struct sli4_sge);
6411 else
6412 entry_sz = sizeof(struct ulp_bde64);
6413
6414
6415 if (phba->cfg_enable_bg) {
6416
6417
6418
6419
6420
6421
6422
6423
6424
6425 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6426 sizeof(struct fcp_rsp) +
6427 (LPFC_MAX_SG_SEG_CNT * entry_sz);
6428
6429 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
6430 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
6431
6432
6433 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
6434 } else {
6435
6436
6437
6438
6439
6440 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6441 sizeof(struct fcp_rsp) +
6442 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
6443
6444
6445 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
6446 }
6447
6448 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6449 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
6450 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6451 phba->cfg_total_seg_cnt);
6452
6453 phba->max_vpi = LPFC_MAX_VPI;
6454
6455 phba->max_vports = 0;
6456
6457
6458
6459
6460 lpfc_sli_setup(phba);
6461 lpfc_sli_queue_init(phba);
6462
6463
6464 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
6465 return -ENOMEM;
6466
6467 phba->lpfc_sg_dma_buf_pool =
6468 dma_pool_create("lpfc_sg_dma_buf_pool",
6469 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
6470 BPL_ALIGN_SZ, 0);
6471
6472 if (!phba->lpfc_sg_dma_buf_pool)
6473 goto fail_free_mem;
6474
6475 phba->lpfc_cmd_rsp_buf_pool =
6476 dma_pool_create("lpfc_cmd_rsp_buf_pool",
6477 &phba->pcidev->dev,
6478 sizeof(struct fcp_cmnd) +
6479 sizeof(struct fcp_rsp),
6480 BPL_ALIGN_SZ, 0);
6481
6482 if (!phba->lpfc_cmd_rsp_buf_pool)
6483 goto fail_free_dma_buf_pool;
6484
6485
6486
6487
6488
6489 if (phba->cfg_sriov_nr_virtfn > 0) {
6490 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6491 phba->cfg_sriov_nr_virtfn);
6492 if (rc) {
6493 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6494 "2808 Requested number of SR-IOV "
6495 "virtual functions (%d) is not "
6496 "supported\n",
6497 phba->cfg_sriov_nr_virtfn);
6498 phba->cfg_sriov_nr_virtfn = 0;
6499 }
6500 }
6501
6502 return 0;
6503
6504fail_free_dma_buf_pool:
6505 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6506 phba->lpfc_sg_dma_buf_pool = NULL;
6507fail_free_mem:
6508 lpfc_mem_free(phba);
6509 return -ENOMEM;
6510}
6511
6512
6513
6514
6515
6516
6517
6518
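/**
 * lpfc_sli_driver_resource_unset - Unset SLI-3 specific driver resources
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases the driver memory pools set up for an SLI-3 HBA.
 */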
6519static void
6520lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
6521{
6522
6523 lpfc_mem_free_all(phba);
6524
6525 return;
6526}
6527
6528
6529
6530
6531
6532
6533
6534
6535
6536
6537
6538
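/**
 * lpfc_sli4_driver_resource_setup - Set up SLI-4 specific driver resources
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates and initializes the resources an SLI-4 HBA needs before the
 * port is brought up: workqueue and timers, bootstrap mailbox, endian
 * order, port configuration and SLI-4 parameters, NVMe/NVMET support
 * detection, scatter-gather sizing, DMA pools, SGL and RPI structures,
 * the FCF round-robin bitmap, per-EQ handles, CPU maps and per-CPU
 * statistics, and optional SR-IOV virtual functions.
 *
 * Return: 0 on success, a negative errno on failure; partially allocated
 * resources are released through the error path.
 */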
6539static int
6540lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6541{
6542 LPFC_MBOXQ_t *mboxq;
6543 MAILBOX_t *mb;
6544 int rc, i, max_buf_size;
6545 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
6546 struct lpfc_mqe *mqe;
6547 int longs;
6548 int extra;
6549 uint64_t wwn;
6550 u32 if_type;
6551 u32 if_fam;
6552
6553 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6554 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
6555 phba->sli4_hba.curr_disp_cpu = 0;
6556
6557
6558 lpfc_get_cfgparam(phba);
6559
6560
6561 rc = lpfc_setup_driver_resource_phase1(phba);
6562 if (rc)
6563 return -ENODEV;
6564
6565
6566 rc = lpfc_sli4_post_status_check(phba);
6567 if (rc)
6568 return -ENODEV;
6569
6570
6571
6572
6573 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6574
6575
6576
6577
6578
6579 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
6580
6581
6582 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
6583
6584
6585
6586
6587
6588 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6589 sizeof(struct lpfc_mbox_ext_buf_ctx));
6590 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6591
6592 phba->max_vpi = LPFC_MAX_VPI;
6593
6594
6595 phba->max_vports = 0;
6596
6597
6598 phba->valid_vlan = 0;
6599 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6600 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6601 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6602
6603
6604
6605
6606
6607
6608
6609
6610 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6611 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6612 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6613
6614
6615
6616
6617
6618 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6619 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
6620
6621 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6622
6623 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
6624 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6625 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6626 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6627 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
6628 }
6629
6630
6631 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6632 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6633
6634
6635
6636
6637
6638
6639 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6640
6641 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6642
6643 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6644
6645 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
6646
6647 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6648
6649 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6650
6651
6652 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6653 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6654 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6655 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6656
6657
6658
6659
6660 INIT_LIST_HEAD(&phba->sli.mboxq);
6661 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6662
6663
6664 phba->sli4_hba.lnk_info.optic_state = 0xff;
6665
6666
6667 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6668 if (rc)
6669 return -ENOMEM;
6670
6671
6672 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
6673 LPFC_SLI_INTF_IF_TYPE_2) {
6674 rc = lpfc_pci_function_reset(phba);
6675 if (unlikely(rc)) {
6676 rc = -ENODEV;
6677 goto out_free_mem;
6678 }
6679 phba->temp_sensor_support = 1;
6680 }
6681
6682
6683 rc = lpfc_create_bootstrap_mbox(phba);
6684 if (unlikely(rc))
6685 goto out_free_mem;
6686
6687
6688 rc = lpfc_setup_endian_order(phba);
6689 if (unlikely(rc))
6690 goto out_free_bsmbx;
6691
6692
6693 rc = lpfc_sli4_read_config(phba);
6694 if (unlikely(rc))
6695 goto out_free_bsmbx;
6696 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6697 if (unlikely(rc))
6698 goto out_free_bsmbx;
6699
6700
6701 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6702 LPFC_SLI_INTF_IF_TYPE_0) {
6703 rc = lpfc_pci_function_reset(phba);
6704 if (unlikely(rc))
6705 goto out_free_bsmbx;
6706 }
6707
6708 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6709 GFP_KERNEL);
6710 if (!mboxq) {
6711 rc = -ENOMEM;
6712 goto out_free_bsmbx;
6713 }
6714
6715
6716 phba->nvmet_support = 0;
6717 if (lpfc_enable_nvmet_cnt) {
6718
6719
6720 lpfc_read_nv(phba, mboxq);
6721 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6722 if (rc != MBX_SUCCESS) {
6723 lpfc_printf_log(phba, KERN_ERR,
6724 LOG_TRACE_EVENT,
6725 "6016 Mailbox failed , mbxCmd x%x "
6726 "READ_NV, mbxStatus x%x\n",
6727 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6728 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6729 mempool_free(mboxq, phba->mbox_mem_pool);
6730 rc = -EIO;
6731 goto out_free_bsmbx;
6732 }
6733 mb = &mboxq->u.mb;
6734 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6735 sizeof(uint64_t));
6736 wwn = cpu_to_be64(wwn);
6737 phba->sli4_hba.wwnn.u.name = wwn;
6738 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6739 sizeof(uint64_t));
6740
6741 wwn = cpu_to_be64(wwn);
6742 phba->sli4_hba.wwpn.u.name = wwn;
6743
6744
6745 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6746 if (wwn == lpfc_enable_nvmet[i]) {
6747#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
6748 if (lpfc_nvmet_mem_alloc(phba))
6749 break;
6750
6751 phba->nvmet_support = 1;
6752
6753 lpfc_printf_log(phba, KERN_ERR,
6754 LOG_TRACE_EVENT,
6755 "6017 NVME Target %016llx\n",
6756 wwn);
6757#else
6758 lpfc_printf_log(phba, KERN_ERR,
6759 LOG_TRACE_EVENT,
6760 "6021 Can't enable NVME Target."
6761 " NVME_TARGET_FC infrastructure"
6762 " is not in kernel\n");
6763#endif
6764
6765 phba->cfg_xri_rebalancing = 0;
6766 if (phba->irq_chann_mode == NHT_MODE) {
6767 phba->cfg_irq_chann =
6768 phba->sli4_hba.num_present_cpu;
6769 phba->cfg_hdw_queue =
6770 phba->sli4_hba.num_present_cpu;
6771 phba->irq_chann_mode = NORMAL_MODE;
6772 }
6773 break;
6774 }
6775 }
6776 }
6777
6778 lpfc_nvme_mod_param_dep(phba);
6779
6780
6781 lpfc_supported_pages(mboxq);
6782 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6783 if (!rc) {
6784 mqe = &mboxq->u.mqe;
6785 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6786 LPFC_MAX_SUPPORTED_PAGES);
6787 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6788 switch (pn_page[i]) {
6789 case LPFC_SLI4_PARAMETERS:
6790 phba->sli4_hba.pc_sli4_params.supported = 1;
6791 break;
6792 default:
6793 break;
6794 }
6795 }
6796
6797 if (phba->sli4_hba.pc_sli4_params.supported)
6798 rc = lpfc_pc_sli4_params_get(phba, mboxq);
6799 if (rc) {
6800 mempool_free(mboxq, phba->mbox_mem_pool);
6801 rc = -EIO;
6802 goto out_free_bsmbx;
6803 }
6804 }
6805
6806
6807
6808
6809
6810
6811 rc = lpfc_get_sli4_parameters(phba, mboxq);
6812 if (rc) {
6813 if_type = bf_get(lpfc_sli_intf_if_type,
6814 &phba->sli4_hba.sli_intf);
6815 if_fam = bf_get(lpfc_sli_intf_sli_family,
6816 &phba->sli4_hba.sli_intf);
6817 if (phba->sli4_hba.extents_in_use &&
6818 phba->sli4_hba.rpi_hdrs_in_use) {
6819 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6820 "2999 Unsupported SLI4 Parameters "
6821 "Extents and RPI headers enabled.\n");
6822 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6823 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
6824 mempool_free(mboxq, phba->mbox_mem_pool);
6825 rc = -EIO;
6826 goto out_free_bsmbx;
6827 }
6828 }
6829 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6830 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6831 mempool_free(mboxq, phba->mbox_mem_pool);
6832 rc = -EIO;
6833 goto out_free_bsmbx;
6834 }
6835 }
6836
6837
6838
6839
6840
6841 extra = 2;
6842 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6843 extra++;
6844
6845
6846
6847
6848
6849
6850 max_buf_size = (2 * SLI4_PAGE_SIZE);
6851
6852
6853
6854
6855
6856 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6857
6858
6859
6860
6861
6862
6863
6864
6865
6866
6867
6868 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6869 sizeof(struct fcp_rsp) + max_buf_size;
6870
6871
6872 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6873
6874
6875
6876
6877
6878 if (phba->cfg_enable_bg &&
6879 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6880 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6881 else
6882 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6883
6884 } else {
6885
6886
6887
6888
6889
6890 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6891 sizeof(struct fcp_rsp) +
6892 ((phba->cfg_sg_seg_cnt + extra) *
6893 sizeof(struct sli4_sge));
6894
6895
6896 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6897 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6898
6899
6900
6901
6902
6903 }
6904
6905 if (phba->cfg_xpsgl && !phba->nvmet_support)
6906 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
6907 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
6908 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6909 else
6910 phba->cfg_sg_dma_buf_size =
6911 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6912
6913 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
6914 sizeof(struct sli4_sge);
6915
6916
6917 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6918 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6919 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6920 "6300 Reducing NVME sg segment "
6921 "cnt to %d\n",
6922 LPFC_MAX_NVME_SEG_CNT);
6923 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6924 } else
6925 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6926 }
6927
6928 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6929 "9087 sg_seg_cnt:%d dmabuf_size:%d "
6930 "total:%d scsi:%d nvme:%d\n",
6931 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6932 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6933 phba->cfg_nvme_seg_cnt);
6934
6935 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
6936 i = phba->cfg_sg_dma_buf_size;
6937 else
6938 i = SLI4_PAGE_SIZE;
6939
6940 phba->lpfc_sg_dma_buf_pool =
6941 dma_pool_create("lpfc_sg_dma_buf_pool",
6942 &phba->pcidev->dev,
6943 phba->cfg_sg_dma_buf_size,
6944 i, 0);
6945 if (!phba->lpfc_sg_dma_buf_pool)
6946 goto out_free_bsmbx;
6947
6948 phba->lpfc_cmd_rsp_buf_pool =
6949 dma_pool_create("lpfc_cmd_rsp_buf_pool",
6950 &phba->pcidev->dev,
6951 sizeof(struct fcp_cmnd) +
6952 sizeof(struct fcp_rsp),
6953 i, 0);
6954 if (!phba->lpfc_cmd_rsp_buf_pool)
6955 goto out_free_sg_dma_buf;
6956
6957 mempool_free(mboxq, phba->mbox_mem_pool);
6958
6959
6960 lpfc_sli4_oas_verify(phba);
6961
6962
6963 lpfc_sli4_ras_init(phba);
6964
6965
6966 rc = lpfc_sli4_queue_verify(phba);
6967 if (rc)
6968 goto out_free_cmd_rsp_buf;
6969
6970
6971 rc = lpfc_sli4_cq_event_pool_create(phba);
6972 if (rc)
6973 goto out_free_cmd_rsp_buf;
6974
6975
6976 lpfc_init_sgl_list(phba);
6977
6978
6979 rc = lpfc_init_active_sgl_array(phba);
6980 if (rc) {
6981 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6982 "1430 Failed to initialize sgl list.\n");
6983 goto out_destroy_cq_event_pool;
6984 }
6985 rc = lpfc_sli4_init_rpi_hdrs(phba);
6986 if (rc) {
6987 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6988 "1432 Failed to initialize rpi headers.\n");
6989 goto out_free_active_sgl;
6990 }
6991
6992
6993 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6994 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
6995 GFP_KERNEL);
6996 if (!phba->fcf.fcf_rr_bmask) {
6997 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6998 "2759 Failed allocate memory for FCF round "
6999 "robin failover bmask\n");
7000 rc = -ENOMEM;
7001 goto out_remove_rpi_hdrs;
7002 }
7003
7004 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
7005 sizeof(struct lpfc_hba_eq_hdl),
7006 GFP_KERNEL);
7007 if (!phba->sli4_hba.hba_eq_hdl) {
7008 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7009 "2572 Failed allocate memory for "
7010 "fast-path per-EQ handle array\n");
7011 rc = -ENOMEM;
7012 goto out_free_fcf_rr_bmask;
7013 }
7014
7015 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
7016 sizeof(struct lpfc_vector_map_info),
7017 GFP_KERNEL);
7018 if (!phba->sli4_hba.cpu_map) {
7019 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7020 "3327 Failed allocate memory for msi-x "
7021 "interrupt vector mapping\n");
7022 rc = -ENOMEM;
7023 goto out_free_hba_eq_hdl;
7024 }
7025
7026 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
7027 if (!phba->sli4_hba.eq_info) {
7028 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7029 "3321 Failed allocation for per_cpu stats\n");
7030 rc = -ENOMEM;
7031 goto out_free_hba_cpu_map;
7032 }
7033
7034 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
7035 sizeof(*phba->sli4_hba.idle_stat),
7036 GFP_KERNEL);
7037 if (!phba->sli4_hba.idle_stat) {
7038 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7039 "3390 Failed allocation for idle_stat\n");
7040 rc = -ENOMEM;
7041 goto out_free_hba_eq_info;
7042 }
7043
7044#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7045 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
7046 if (!phba->sli4_hba.c_stat) {
7047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7048 "3332 Failed allocating per cpu hdwq stats\n");
7049 rc = -ENOMEM;
7050 goto out_free_hba_idle_stat;
7051 }
7052#endif
7053
7054
7055
7056
7057
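	/*
	 * Enable SR-IOV virtual functions if configured and supported by
	 * the hardware; otherwise clear the requested VF count.
	 */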
7058 if (phba->cfg_sriov_nr_virtfn > 0) {
7059 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7060 phba->cfg_sriov_nr_virtfn);
7061 if (rc) {
7062 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7063 "3020 Requested number of SR-IOV "
7064 "virtual functions (%d) is not "
7065 "supported\n",
7066 phba->cfg_sriov_nr_virtfn);
7067 phba->cfg_sriov_nr_virtfn = 0;
7068 }
7069 }
7070
7071 return 0;
7072
7073#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7074out_free_hba_idle_stat:
7075 kfree(phba->sli4_hba.idle_stat);
7076#endif
7077out_free_hba_eq_info:
7078 free_percpu(phba->sli4_hba.eq_info);
7079out_free_hba_cpu_map:
7080 kfree(phba->sli4_hba.cpu_map);
7081out_free_hba_eq_hdl:
7082 kfree(phba->sli4_hba.hba_eq_hdl);
7083out_free_fcf_rr_bmask:
7084 kfree(phba->fcf.fcf_rr_bmask);
7085out_remove_rpi_hdrs:
7086 lpfc_sli4_remove_rpi_hdrs(phba);
7087out_free_active_sgl:
7088 lpfc_free_active_sgl(phba);
7089out_destroy_cq_event_pool:
7090 lpfc_sli4_cq_event_pool_destroy(phba);
7091out_free_cmd_rsp_buf:
7092 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
7093 phba->lpfc_cmd_rsp_buf_pool = NULL;
7094out_free_sg_dma_buf:
7095 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7096 phba->lpfc_sg_dma_buf_pool = NULL;
7097out_free_bsmbx:
7098 lpfc_destroy_bootstrap_mbox(phba);
7099out_free_mem:
7100 lpfc_mem_free(phba);
7101 return rc;
7102}
7103
7104
7105
7106
7107
7108
7109
7110
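/**
 * lpfc_sli4_driver_resource_unset - Unset SLI4-specific driver internal resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases the internal resources set up for an SLI-4 HBA:
 * per-CPU statistics, the CPU/vector map, the per-EQ handle array, RPI
 * headers and RPIs, the FCF round-robin bitmask, the active SGL array and
 * SGL lists, the CQ event pool, resource identifiers, the bootstrap
 * mailbox, driver memory pools and the FCF connection record list.
 **/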
7111static void
7112lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
7113{
7114 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7115
7116 free_percpu(phba->sli4_hba.eq_info);
7117#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7118 free_percpu(phba->sli4_hba.c_stat);
7119#endif
7120 kfree(phba->sli4_hba.idle_stat);
7121
7122
7123 kfree(phba->sli4_hba.cpu_map);
7124 phba->sli4_hba.num_possible_cpu = 0;
7125 phba->sli4_hba.num_present_cpu = 0;
7126 phba->sli4_hba.curr_disp_cpu = 0;
7127 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
7128
7129
7130 kfree(phba->sli4_hba.hba_eq_hdl);
7131
7132
7133 lpfc_sli4_remove_rpi_hdrs(phba);
7134 lpfc_sli4_remove_rpis(phba);
7135
7136
7137 kfree(phba->fcf.fcf_rr_bmask);
7138
7139
7140 lpfc_free_active_sgl(phba);
7141 lpfc_free_els_sgl_list(phba);
7142 lpfc_free_nvmet_sgl_list(phba);
7143
7144
7145 lpfc_sli4_cq_event_release_all(phba);
7146 lpfc_sli4_cq_event_pool_destroy(phba);
7147
7148
7149 lpfc_sli4_dealloc_resource_identifiers(phba);
7150
7151
7152 lpfc_destroy_bootstrap_mbox(phba);
7153
7154
7155 lpfc_mem_free_all(phba);
7156
7157
7158 list_for_each_entry_safe(conn_entry, next_conn_entry,
7159 &phba->fcf_conn_rec_list, list) {
7160 list_del_init(&conn_entry->list);
7161 kfree(conn_entry);
7162 }
7163
7164 return;
7165}
7166
7167
7168
7169
7170
7171
7172
7173
7174
7175
7176
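/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba according to the PCI device group (SLI-3 vs SLI-4 devices).
 *
 * Returns: 0 on success, -ENODEV when the device group is invalid.
 **/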
7177int
7178lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7179{
7180 phba->lpfc_hba_init_link = lpfc_hba_init_link;
7181 phba->lpfc_hba_down_link = lpfc_hba_down_link;
7182 phba->lpfc_selective_reset = lpfc_selective_reset;
7183 switch (dev_grp) {
7184 case LPFC_PCI_DEV_LP:
7185 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7186 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
7187 phba->lpfc_stop_port = lpfc_stop_port_s3;
7188 break;
7189 case LPFC_PCI_DEV_OC:
7190 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7191 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
7192 phba->lpfc_stop_port = lpfc_stop_port_s4;
7193 break;
7194 default:
7195 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7196 "1431 Invalid HBA PCI-device group: 0x%x\n",
7197 dev_grp);
7198 return -ENODEV;
7200 }
7201 return 0;
7202}
7203
7204
7205
7206
7207
7208
7209
7210
7211
7212
7213
7214
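/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine starts the kernel worker thread that services the driver's
 * deferred work for this HBA instance.
 *
 * Returns: 0 on success, or the PTR_ERR code from kthread_run() on failure.
 **/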
7215static int
7216lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7217{
7218 int error;
7219
7220
7221 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7222 "lpfc_worker_%d", phba->brd_no);
7223 if (IS_ERR(phba->worker_thread)) {
7224 error = PTR_ERR(phba->worker_thread);
7225 return error;
7226 }
7227
7228 return 0;
7229}
7230
7231
7232
7233
7234
7235
7236
7237
7238
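/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine flushes and destroys the HBA workqueue, if one was created,
 * and stops the worker thread started during phase2 setup.
 **/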
7239static void
7240lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7241{
7242 if (phba->wq) {
7243 flush_workqueue(phba->wq);
7244 destroy_workqueue(phba->wq);
7245 phba->wq = NULL;
7246 }
7247
7248
7249 if (phba->worker_thread)
7250 kthread_stop(phba->worker_thread);
7251}
7252
7253
7254
7255
7256
7257
7258
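/**
 * lpfc_free_iocb_list - Free iocb list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the driver's iocb list under the hbalock, removes and
 * frees every iocbq entry, and keeps the total_iocbq_bufs count in step.
 **/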
7259void
7260lpfc_free_iocb_list(struct lpfc_hba *phba)
7261{
7262 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7263
7264 spin_lock_irq(&phba->hbalock);
7265 list_for_each_entry_safe(iocbq_entry, iocbq_next,
7266 &phba->lpfc_iocb_list, list) {
7267 list_del(&iocbq_entry->list);
7268 kfree(iocbq_entry);
7269 phba->total_iocbq_bufs--;
7270 }
7271 spin_unlock_irq(&phba->hbalock);
7272
7273 return;
7274}
7275
7276
7277
7278
7279
7280
7281
7282
7283
7284
7285
7286
7287
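/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbq entries.
 *
 * This routine allocates @iocb_count iocbq entries, assigns each an iotag
 * and adds them to the driver's iocb list.
 *
 * Returns: 0 on success, -ENOMEM if allocation or iotag assignment fails
 * (any entries already allocated are freed).
 **/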
7288int
7289lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7290{
7291 struct lpfc_iocbq *iocbq_entry = NULL;
7292 uint16_t iotag;
7293 int i;
7294
7295
7296 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7297 for (i = 0; i < iocb_count; i++) {
7298 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7299 if (iocbq_entry == NULL) {
7300 printk(KERN_ERR "%s: only allocated %d iocbs of "
7301 "expected %d count. Unloading driver.\n",
7302 __func__, i, iocb_count);
7303 goto out_free_iocbq;
7304 }
7305
7306 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7307 if (iotag == 0) {
7308 kfree(iocbq_entry);
7309 printk(KERN_ERR "%s: failed to allocate IOTAG. "
7310 "Unloading driver.\n", __func__);
7311 goto out_free_iocbq;
7312 }
7313 iocbq_entry->sli4_lxritag = NO_XRI;
7314 iocbq_entry->sli4_xritag = NO_XRI;
7315
7316 spin_lock_irq(&phba->hbalock);
7317 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7318 phba->total_iocbq_bufs++;
7319 spin_unlock_irq(&phba->hbalock);
7320 }
7321
7322 return 0;
7323
7324out_free_iocbq:
7325 lpfc_free_iocb_list(phba);
7326
7327 return -ENOMEM;
7328}
7329
7330
7331
7332
7333
7334
7335
7336
7337void
7338lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7339{
7340 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7341
7342 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7343 list_del(&sglq_entry->list);
7344 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7345 kfree(sglq_entry);
7346 }
7347}
7348
7349
7350
7351
7352
7353
7354
7355static void
7356lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7357{
7358 LIST_HEAD(sglq_list);
7359
7360
7361 spin_lock_irq(&phba->hbalock);
7362 spin_lock(&phba->sli4_hba.sgl_list_lock);
7363 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7364 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7365 spin_unlock_irq(&phba->hbalock);
7366
7367
7368 lpfc_free_sgl_list(phba, &sglq_list);
7369}
7370
7371
7372
7373
7374
7375
7376
7377static void
7378lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7379{
7380 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7381 LIST_HEAD(sglq_list);
7382
7383
7384 spin_lock_irq(&phba->hbalock);
7385 spin_lock(&phba->sli4_hba.sgl_list_lock);
7386 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7387 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7388 spin_unlock_irq(&phba->hbalock);
7389
7390
7391 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7392 list_del(&sglq_entry->list);
7393 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7394 kfree(sglq_entry);
7395 }
7396
7397
7398
7399
7400
7401 phba->sli4_hba.nvmet_xri_cnt = 0;
7402}
7403
7404
7405
7406
7407
7408
7409
7410
7411static int
7412lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7413{
7414 int size;
7415 size = sizeof(struct lpfc_sglq *);
7416 size *= phba->sli4_hba.max_cfg_param.max_xri;
7417
7418 phba->sli4_hba.lpfc_sglq_active_list =
7419 kzalloc(size, GFP_KERNEL);
7420 if (!phba->sli4_hba.lpfc_sglq_active_list)
7421 return -ENOMEM;
7422 return 0;
7423}
7424
7425
7426
7427
7428
7429
7430
7431
7432
7433static void
7434lpfc_free_active_sgl(struct lpfc_hba *phba)
7435{
7436 kfree(phba->sli4_hba.lpfc_sglq_active_list);
7437}
7438
7439
7440
7441
7442
7443
7444
7445
7446
7447static void
7448lpfc_init_sgl_list(struct lpfc_hba *phba)
7449{
7450
7451 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7452 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7453 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7454 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7455
7456
7457 phba->sli4_hba.els_xri_cnt = 0;
7458
7459
7460 phba->sli4_hba.io_xri_cnt = 0;
7461}
7462
7463
7464
7465
7466
7467
7468
7469
7470
7471
7472
7473
7474
7475
7476
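/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the rpi header list and posts the first rpi
 * header region when the port requires driver-supplied rpi headers and
 * resource extents are not in use.
 *
 * Returns: 0 on success, -EIO if extents are in use, -ENODEV if the rpi
 * header could not be created.
 **/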
7477int
7478lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7479{
7480 int rc = 0;
7481 struct lpfc_rpi_hdr *rpi_hdr;
7482
7483 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
7484 if (!phba->sli4_hba.rpi_hdrs_in_use)
7485 return rc;
7486 if (phba->sli4_hba.extents_in_use)
7487 return -EIO;
7488
7489 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7490 if (!rpi_hdr) {
7491 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7492 "0391 Error during rpi post operation\n");
7493 lpfc_sli4_remove_rpis(phba);
7494 rc = -ENODEV;
7495 }
7496
7497 return rc;
7498}
7499
7500
7501
7502
7503
7504
7505
7506
7507
7508
7509
7510
7511
7512
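/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine allocates a DMA-coherent, template-aligned rpi header
 * region covering the next LPFC_RPI_HDR_COUNT rpis and links it onto the
 * driver's rpi header list.
 *
 * Returns: pointer to the new rpi header on success, or NULL if rpi
 * headers are not in use, extents are in use, the rpi limit has been
 * reached, or an allocation fails.
 **/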
7513struct lpfc_rpi_hdr *
7514lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7515{
7516 uint16_t rpi_limit, curr_rpi_range;
7517 struct lpfc_dmabuf *dmabuf;
7518 struct lpfc_rpi_hdr *rpi_hdr;
7519
7520
7521
7522
7523
7524
7525 if (!phba->sli4_hba.rpi_hdrs_in_use)
7526 return NULL;
7527 if (phba->sli4_hba.extents_in_use)
7528 return NULL;
7529
7530
7531 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
7532
7533 spin_lock_irq(&phba->hbalock);
7534
7535
7536
7537
7538
7539 curr_rpi_range = phba->sli4_hba.next_rpi;
7540 spin_unlock_irq(&phba->hbalock);
7541
7542
7543 if (curr_rpi_range == rpi_limit)
7544 return NULL;
7545
7546
7547
7548
7549
7550 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7551 if (!dmabuf)
7552 return NULL;
7553
7554 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7555 LPFC_HDR_TEMPLATE_SIZE,
7556 &dmabuf->phys, GFP_KERNEL);
7557 if (!dmabuf->virt) {
7558 rpi_hdr = NULL;
7559 goto err_free_dmabuf;
7560 }
7561
7562 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
7563 rpi_hdr = NULL;
7564 goto err_free_coherent;
7565 }
7566
7567
7568 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
7569 if (!rpi_hdr)
7570 goto err_free_coherent;
7571
7572 rpi_hdr->dmabuf = dmabuf;
7573 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7574 rpi_hdr->page_count = 1;
7575 spin_lock_irq(&phba->hbalock);
7576
7577
7578 rpi_hdr->start_rpi = curr_rpi_range;
7579 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
7580 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
7581
7582 spin_unlock_irq(&phba->hbalock);
7583 return rpi_hdr;
7584
7585 err_free_coherent:
7586 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
7587 dmabuf->virt, dmabuf->phys);
7588 err_free_dmabuf:
7589 kfree(dmabuf);
7590 return NULL;
7591}
7592
7593
7594
7595
7596
7597
7598
7599
7600
7601
7602void
7603lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7604{
7605 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7606
7607 if (!phba->sli4_hba.rpi_hdrs_in_use)
7608 goto exit;
7609
7610 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7611 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7612 list_del(&rpi_hdr->list);
7613 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7614 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7615 kfree(rpi_hdr->dmabuf);
7616 kfree(rpi_hdr);
7617 }
7618 exit:
7619
7620 phba->sli4_hba.next_rpi = 0;
7621}
7622
7623
7624
7625
7626
7627
7628
7629
7630
7631
7632
7633
7634
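/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device instance
 * @pdev: pointer to the PCI device structure.
 *
 * This routine allocates the lpfc_hba structure, records the PCI device,
 * assigns a board number from the hba index IDR, and seeds the error
 * attention polling interval and the CT event waiter list.
 *
 * Returns: pointer to the hba structure on success, NULL on failure.
 **/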
7635static struct lpfc_hba *
7636lpfc_hba_alloc(struct pci_dev *pdev)
7637{
7638 struct lpfc_hba *phba;
7639
7640
7641 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7642 if (!phba) {
7643 dev_err(&pdev->dev, "failed to allocate hba struct\n");
7644 return NULL;
7645 }
7646
7647
7648 phba->pcidev = pdev;
7649
7650
7651 phba->brd_no = lpfc_get_instance();
7652 if (phba->brd_no < 0) {
7653 kfree(phba);
7654 return NULL;
7655 }
7656 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7657
7658 spin_lock_init(&phba->ct_ev_lock);
7659 INIT_LIST_HEAD(&phba->ct_ev_waiters);
7660
7661 return phba;
7662}
7663
7664
7665
7666
7667
7668
7669
7670
7671static void
7672lpfc_hba_free(struct lpfc_hba *phba)
7673{
7674 if (phba->sli_rev == LPFC_SLI_REV4)
7675 kfree(phba->sli4_hba.hdwq);
7676
7677
7678 idr_remove(&lpfc_hba_index, phba->brd_no);
7679
7680
7681 kfree(phba->sli.sli3_ring);
7682 phba->sli.sli3_ring = NULL;
7683
7684 kfree(phba);
7685 return;
7686}
7687
7688
7689
7690
7691
7692
7693
7694
7695
7696
7697
7698
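/**
 * lpfc_create_shost - Create the SCSI host for the physical port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets the default FC timeout values, creates the physical
 * port (and its Scsi_Host), hooks the host into the PCI drvdata, and
 * initializes the FDMI masks according to the SmartSAN/FDMI configuration.
 *
 * Returns: 0 on success, -ENODEV if the port could not be created.
 **/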
7699static int
7700lpfc_create_shost(struct lpfc_hba *phba)
7701{
7702 struct lpfc_vport *vport;
7703 struct Scsi_Host *shost;
7704
7705
7706 phba->fc_edtov = FF_DEF_EDTOV;
7707 phba->fc_ratov = FF_DEF_RATOV;
7708 phba->fc_altov = FF_DEF_ALTOV;
7709 phba->fc_arbtov = FF_DEF_ARBTOV;
7710
7711 atomic_set(&phba->sdev_cnt, 0);
7712 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
7713 if (!vport)
7714 return -ENODEV;
7715
7716 shost = lpfc_shost_from_vport(vport);
7717 phba->pport = vport;
7718
7719 if (phba->nvmet_support) {
7720
7721 phba->targetport = NULL;
7722 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7723 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
7724 "6076 NVME Target Found\n");
7725 }
7726
7727 lpfc_debugfs_initialize(vport);
7728
7729 pci_set_drvdata(phba->pcidev, shost);
7730
7731
7732
7733
7734
7735 vport->load_flag |= FC_ALLOW_FDMI;
7736 if (phba->cfg_enable_SmartSAN ||
7737 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
7738
7739
7740 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
7741 if (phba->cfg_enable_SmartSAN)
7742 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
7743 else
7744 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7745 }
7746 return 0;
7747}
7748
7749
7750
7751
7752
7753
7754
7755
7756static void
7757lpfc_destroy_shost(struct lpfc_hba *phba)
7758{
7759 struct lpfc_vport *vport = phba->pport;
7760
7761
7762 destroy_port(vport);
7763
7764 return;
7765}
7766
7767
7768
7769
7770
7771
7772
7773
7774
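/**
 * lpfc_setup_bg - Register BlockGuard (T10 DIF/DIX) support with the SCSI layer
 * @phba: pointer to lpfc hba data structure.
 * @shost: pointer to the SCSI host the protection capabilities apply to.
 *
 * This routine masks the configured protection and guard types down to the
 * combinations the driver supports and, when a valid combination remains,
 * registers them with the SCSI midlayer; otherwise the bad parameters are
 * logged and BlockGuard is not registered.
 **/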
7775static void
7776lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7777{
7778 uint32_t old_mask;
7779 uint32_t old_guard;
7780
7781 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7782 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7783 "1478 Registering BlockGuard with the "
7784 "SCSI layer\n");
7785
7786 old_mask = phba->cfg_prot_mask;
7787 old_guard = phba->cfg_prot_guard;
7788
7789
7790 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7791 SHOST_DIX_TYPE0_PROTECTION |
7792 SHOST_DIX_TYPE1_PROTECTION);
7793 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7794 SHOST_DIX_GUARD_CRC);
7795
7796
7797 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7798 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7799
7800 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7801 if ((old_mask != phba->cfg_prot_mask) ||
7802 (old_guard != phba->cfg_prot_guard))
7803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7804 "1475 Registering BlockGuard with the "
7805 "SCSI layer: mask %d guard %d\n",
7806 phba->cfg_prot_mask,
7807 phba->cfg_prot_guard);
7808
7809 scsi_host_set_prot(shost, phba->cfg_prot_mask);
7810 scsi_host_set_guard(shost, phba->cfg_prot_guard);
7811 } else
7812 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7813 "1479 Not Registering BlockGuard with the SCSI "
7814 "layer, Bad protection parameters: %d %d\n",
7815 old_mask, old_guard);
7816 }
7817}
7818
7819
7820
7821
7822
7823
7824
7825
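/**
 * lpfc_post_init_setup - Perform post-initialization setup
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine fills in the HBA model name and description, adjusts the
 * SCSI host queue depth, starts the poll timer when FCP ring interrupts
 * are disabled, and posts the adapter arrival vendor event.
 **/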
7826static void
7827lpfc_post_init_setup(struct lpfc_hba *phba)
7828{
7829 struct Scsi_Host *shost;
7830 struct lpfc_adapter_event_header adapter_event;
7831
7832
7833 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7834
7835
7836
7837
7838
7839 shost = pci_get_drvdata(phba->pcidev);
7840 shost->can_queue = phba->cfg_hba_queue_depth - 10;
7841
7842 lpfc_host_attrib_init(shost);
7843
7844 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7845 spin_lock_irq(shost->host_lock);
7846 lpfc_poll_start_timer(phba);
7847 spin_unlock_irq(shost->host_lock);
7848 }
7849
7850 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7851 "0428 Perform SCSI scan\n");
7852
7853 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7854 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7855 fc_host_post_vendor_event(shost, fc_get_event_number(),
7856 sizeof(adapter_event),
7857 (char *) &adapter_event,
7858 LPFC_NL_VENDOR_ID);
7859 return;
7860}
7861
7862
7863
7864
7865
7866
7867
7868
7869
7870
7871
7872
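/**
 * lpfc_sli_pci_mem_setup - Set up SLI3 PCI memory space
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets the DMA mask (64-bit, falling back to 32-bit), maps the
 * SLIM (BAR0) and control register (BAR2) regions, allocates the SLIM2 and
 * HBQ DMA areas, and initializes the host register addresses for an SLI-3
 * device.
 *
 * Returns: 0 on success, a negative errno value on failure.
 **/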
7873static int
7874lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7875{
7876 struct pci_dev *pdev = phba->pcidev;
7877 unsigned long bar0map_len, bar2map_len;
7878 int i, hbq_count;
7879 void *ptr;
7880 int error;
7881
7882 if (!pdev)
7883 return -ENODEV;
7884
7885
7886 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7887 if (error)
7888 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7889 if (error)
7890 return error;
7891 error = -ENODEV;
7892
7893
7894
7895
7896 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7897 bar0map_len = pci_resource_len(pdev, 0);
7898
7899 phba->pci_bar2_map = pci_resource_start(pdev, 2);
7900 bar2map_len = pci_resource_len(pdev, 2);
7901
7902
7903 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7904 if (!phba->slim_memmap_p) {
7905 dev_printk(KERN_ERR, &pdev->dev,
7906 "ioremap failed for SLIM memory.\n");
7907 goto out;
7908 }
7909
7910
7911 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7912 if (!phba->ctrl_regs_memmap_p) {
7913 dev_printk(KERN_ERR, &pdev->dev,
7914 "ioremap failed for HBA control registers.\n");
7915 goto out_iounmap_slim;
7916 }
7917
7918
7919 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7920 &phba->slim2p.phys, GFP_KERNEL);
7921 if (!phba->slim2p.virt)
7922 goto out_iounmap;
7923
7924 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7925 phba->mbox_ext = (phba->slim2p.virt +
7926 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
7927 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7928 phba->IOCBs = (phba->slim2p.virt +
7929 offsetof(struct lpfc_sli2_slim, IOCBs));
7930
7931 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7932 lpfc_sli_hbq_size(),
7933 &phba->hbqslimp.phys,
7934 GFP_KERNEL);
7935 if (!phba->hbqslimp.virt)
7936 goto out_free_slim;
7937
7938 hbq_count = lpfc_sli_hbq_count();
7939 ptr = phba->hbqslimp.virt;
7940 for (i = 0; i < hbq_count; ++i) {
7941 phba->hbqs[i].hbq_virt = ptr;
7942 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7943 ptr += (lpfc_hbq_defs[i]->entry_count *
7944 sizeof(struct lpfc_hbq_entry));
7945 }
7946 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7947 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7948
7949 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7950
7951 phba->MBslimaddr = phba->slim_memmap_p;
7952 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7953 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7954 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7955 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7956
7957 return 0;
7958
7959out_free_slim:
7960 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7961 phba->slim2p.virt, phba->slim2p.phys);
7962out_iounmap:
7963 iounmap(phba->ctrl_regs_memmap_p);
7964out_iounmap_slim:
7965 iounmap(phba->slim_memmap_p);
7966out:
7967 return error;
7968}
7969
7970
7971
7972
7973
7974
7975
7976
7977static void
7978lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7979{
7980 struct pci_dev *pdev;
7981
7982
7983 if (!phba->pcidev)
7984 return;
7985 else
7986 pdev = phba->pcidev;
7987
7988
7989 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7990 phba->hbqslimp.virt, phba->hbqslimp.phys);
7991 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7992 phba->slim2p.virt, phba->slim2p.phys);
7993
7994
7995 iounmap(phba->ctrl_regs_memmap_p);
7996 iounmap(phba->slim_memmap_p);
7997
7998 return;
7999}
8000
8001
8002
8003
8004
8005
8006
8007
8008
8009
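/**
 * lpfc_sli4_post_status_check - Check the SLI4 device POST status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine polls the port semaphore register until the port reports
 * POST ready, a port error is seen, or the retry count is exhausted, and
 * then checks the interface-type specific error registers for
 * unrecoverable errors.
 *
 * Returns: 0 when the port is ready, -ENODEV otherwise.
 **/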
8010int
8011lpfc_sli4_post_status_check(struct lpfc_hba *phba)
8012{
8013 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
8014 struct lpfc_register reg_data;
8015 int i, port_error = 0;
8016 uint32_t if_type;
8017
8018 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
8019	memset(&reg_data, 0, sizeof(reg_data));
8020 if (!phba->sli4_hba.PSMPHRregaddr)
8021 return -ENODEV;
8022
8023
8024 for (i = 0; i < 3000; i++) {
8025 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
8026 &portsmphr_reg.word0) ||
8027 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
8028
8029 port_error = -ENODEV;
8030 break;
8031 }
8032 if (LPFC_POST_STAGE_PORT_READY ==
8033 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
8034 break;
8035 msleep(10);
8036 }
8037
8038
8039
8040
8041
8042 if (port_error) {
8043 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8044 "1408 Port Failed POST - portsmphr=0x%x, "
8045 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
8046 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
8047 portsmphr_reg.word0,
8048 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
8049 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
8050 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
8051 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
8052 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
8053 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
8054 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
8055 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
8056 } else {
8057 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8058 "2534 Device Info: SLIFamily=0x%x, "
8059 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
8060 "SLIHint_2=0x%x, FT=0x%x\n",
8061 bf_get(lpfc_sli_intf_sli_family,
8062 &phba->sli4_hba.sli_intf),
8063 bf_get(lpfc_sli_intf_slirev,
8064 &phba->sli4_hba.sli_intf),
8065 bf_get(lpfc_sli_intf_if_type,
8066 &phba->sli4_hba.sli_intf),
8067 bf_get(lpfc_sli_intf_sli_hint1,
8068 &phba->sli4_hba.sli_intf),
8069 bf_get(lpfc_sli_intf_sli_hint2,
8070 &phba->sli4_hba.sli_intf),
8071 bf_get(lpfc_sli_intf_func_type,
8072 &phba->sli4_hba.sli_intf));
8073
8074
8075
8076
8077
8078 if_type = bf_get(lpfc_sli_intf_if_type,
8079 &phba->sli4_hba.sli_intf);
8080 switch (if_type) {
8081 case LPFC_SLI_INTF_IF_TYPE_0:
8082 phba->sli4_hba.ue_mask_lo =
8083 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
8084 phba->sli4_hba.ue_mask_hi =
8085 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
8086 uerrlo_reg.word0 =
8087 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
8088 uerrhi_reg.word0 =
8089 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
8090 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
8091 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
8092 lpfc_printf_log(phba, KERN_ERR,
8093 LOG_TRACE_EVENT,
8094 "1422 Unrecoverable Error "
8095 "Detected during POST "
8096 "uerr_lo_reg=0x%x, "
8097 "uerr_hi_reg=0x%x, "
8098 "ue_mask_lo_reg=0x%x, "
8099 "ue_mask_hi_reg=0x%x\n",
8100 uerrlo_reg.word0,
8101 uerrhi_reg.word0,
8102 phba->sli4_hba.ue_mask_lo,
8103 phba->sli4_hba.ue_mask_hi);
8104 port_error = -ENODEV;
8105 }
8106 break;
8107 case LPFC_SLI_INTF_IF_TYPE_2:
8108 case LPFC_SLI_INTF_IF_TYPE_6:
8109
8110 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8111				       &reg_data.word0) ||
8112			    (bf_get(lpfc_sliport_status_err, &reg_data) &&
8113			     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
8114 phba->work_status[0] =
8115 readl(phba->sli4_hba.u.if_type2.
8116 ERR1regaddr);
8117 phba->work_status[1] =
8118 readl(phba->sli4_hba.u.if_type2.
8119 ERR2regaddr);
8120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8121 "2888 Unrecoverable port error "
8122 "following POST: port status reg "
8123 "0x%x, port_smphr reg 0x%x, "
8124 "error 1=0x%x, error 2=0x%x\n",
8125 reg_data.word0,
8126 portsmphr_reg.word0,
8127 phba->work_status[0],
8128 phba->work_status[1]);
8129 port_error = -ENODEV;
8130 }
8131 break;
8132 case LPFC_SLI_INTF_IF_TYPE_1:
8133 default:
8134 break;
8135 }
8136 }
8137 return port_error;
8138}
8139
8140
8141
8142
8143
8144
8145
8146
8147
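/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @if_type: sli interface type of the device.
 *
 * This routine assigns the BAR0 (configuration space) register addresses
 * appropriate to the given SLI4 interface type; unsupported types are
 * logged as fatal.
 **/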
8148static void
8149lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8150{
8151 switch (if_type) {
8152 case LPFC_SLI_INTF_IF_TYPE_0:
8153 phba->sli4_hba.u.if_type0.UERRLOregaddr =
8154 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
8155 phba->sli4_hba.u.if_type0.UERRHIregaddr =
8156 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
8157 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8158 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8159 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8160 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8161 phba->sli4_hba.SLIINTFregaddr =
8162 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8163 break;
8164 case LPFC_SLI_INTF_IF_TYPE_2:
8165 phba->sli4_hba.u.if_type2.EQDregaddr =
8166 phba->sli4_hba.conf_regs_memmap_p +
8167 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8168 phba->sli4_hba.u.if_type2.ERR1regaddr =
8169 phba->sli4_hba.conf_regs_memmap_p +
8170 LPFC_CTL_PORT_ER1_OFFSET;
8171 phba->sli4_hba.u.if_type2.ERR2regaddr =
8172 phba->sli4_hba.conf_regs_memmap_p +
8173 LPFC_CTL_PORT_ER2_OFFSET;
8174 phba->sli4_hba.u.if_type2.CTRLregaddr =
8175 phba->sli4_hba.conf_regs_memmap_p +
8176 LPFC_CTL_PORT_CTL_OFFSET;
8177 phba->sli4_hba.u.if_type2.STATUSregaddr =
8178 phba->sli4_hba.conf_regs_memmap_p +
8179 LPFC_CTL_PORT_STA_OFFSET;
8180 phba->sli4_hba.SLIINTFregaddr =
8181 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8182 phba->sli4_hba.PSMPHRregaddr =
8183 phba->sli4_hba.conf_regs_memmap_p +
8184 LPFC_CTL_PORT_SEM_OFFSET;
8185 phba->sli4_hba.RQDBregaddr =
8186 phba->sli4_hba.conf_regs_memmap_p +
8187 LPFC_ULP0_RQ_DOORBELL;
8188 phba->sli4_hba.WQDBregaddr =
8189 phba->sli4_hba.conf_regs_memmap_p +
8190 LPFC_ULP0_WQ_DOORBELL;
8191 phba->sli4_hba.CQDBregaddr =
8192 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8193 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8194 phba->sli4_hba.MQDBregaddr =
8195 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8196 phba->sli4_hba.BMBXregaddr =
8197 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8198 break;
8199 case LPFC_SLI_INTF_IF_TYPE_6:
8200 phba->sli4_hba.u.if_type2.EQDregaddr =
8201 phba->sli4_hba.conf_regs_memmap_p +
8202 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8203 phba->sli4_hba.u.if_type2.ERR1regaddr =
8204 phba->sli4_hba.conf_regs_memmap_p +
8205 LPFC_CTL_PORT_ER1_OFFSET;
8206 phba->sli4_hba.u.if_type2.ERR2regaddr =
8207 phba->sli4_hba.conf_regs_memmap_p +
8208 LPFC_CTL_PORT_ER2_OFFSET;
8209 phba->sli4_hba.u.if_type2.CTRLregaddr =
8210 phba->sli4_hba.conf_regs_memmap_p +
8211 LPFC_CTL_PORT_CTL_OFFSET;
8212 phba->sli4_hba.u.if_type2.STATUSregaddr =
8213 phba->sli4_hba.conf_regs_memmap_p +
8214 LPFC_CTL_PORT_STA_OFFSET;
8215 phba->sli4_hba.PSMPHRregaddr =
8216 phba->sli4_hba.conf_regs_memmap_p +
8217 LPFC_CTL_PORT_SEM_OFFSET;
8218 phba->sli4_hba.BMBXregaddr =
8219 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8220 break;
8221 case LPFC_SLI_INTF_IF_TYPE_1:
8222 default:
8223 dev_printk(KERN_ERR, &phba->pcidev->dev,
8224 "FATAL - unsupported SLI4 interface type - %d\n",
8225 if_type);
8226 break;
8227 }
8228}
8229
8230
8231
8232
8233
8234
8235
8236
8237static void
8238lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8239{
8240 switch (if_type) {
8241 case LPFC_SLI_INTF_IF_TYPE_0:
8242 phba->sli4_hba.PSMPHRregaddr =
8243 phba->sli4_hba.ctrl_regs_memmap_p +
8244 LPFC_SLIPORT_IF0_SMPHR;
8245 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8246 LPFC_HST_ISR0;
8247 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8248 LPFC_HST_IMR0;
8249 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8250 LPFC_HST_ISCR0;
8251 break;
8252 case LPFC_SLI_INTF_IF_TYPE_6:
8253 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8254 LPFC_IF6_RQ_DOORBELL;
8255 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8256 LPFC_IF6_WQ_DOORBELL;
8257 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8258 LPFC_IF6_CQ_DOORBELL;
8259 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8260 LPFC_IF6_EQ_DOORBELL;
8261 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8262 LPFC_IF6_MQ_DOORBELL;
8263 break;
8264 case LPFC_SLI_INTF_IF_TYPE_2:
8265 case LPFC_SLI_INTF_IF_TYPE_1:
8266 default:
8267 dev_err(&phba->pcidev->dev,
8268 "FATAL - unsupported SLI4 interface type - %d\n",
8269 if_type);
8270 break;
8271 }
8272}
8273
8274
8275
8276
8277
8278
8279
8280
8281
8282
8283
8284static int
8285lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8286{
8287 if (vf > LPFC_VIR_FUNC_MAX)
8288 return -ENODEV;
8289
8290 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8291 vf * LPFC_VFR_PAGE_SIZE +
8292 LPFC_ULP0_RQ_DOORBELL);
8293 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8294 vf * LPFC_VFR_PAGE_SIZE +
8295 LPFC_ULP0_WQ_DOORBELL);
8296 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8297 vf * LPFC_VFR_PAGE_SIZE +
8298 LPFC_EQCQ_DOORBELL);
8299 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8300 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8301 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8302 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8303 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8304 return 0;
8305}
8306
8307
8308
8309
8310
8311
8312
8313
8314
8315
8316
8317
8318
8319
8320
8321
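/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine allocates a DMA-coherent region for the bootstrap mailbox,
 * aligns it to the LPFC_ALIGN_16_BYTE boundary, and encodes the aligned
 * physical address into the high/low dma_address words used to post the
 * mailbox to the port.
 *
 * Returns: 0 on success, -ENOMEM on allocation failure.
 **/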
8322static int
8323lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8324{
8325 uint32_t bmbx_size;
8326 struct lpfc_dmabuf *dmabuf;
8327 struct dma_address *dma_address;
8328 uint32_t pa_addr;
8329 uint64_t phys_addr;
8330
8331 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8332 if (!dmabuf)
8333 return -ENOMEM;
8334
8335
8336
8337
8338
8339 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8340 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8341 &dmabuf->phys, GFP_KERNEL);
8342 if (!dmabuf->virt) {
8343 kfree(dmabuf);
8344 return -ENOMEM;
8345 }
8346
8347
8348
8349
8350
8351
8352
8353
8354 phba->sli4_hba.bmbx.dmabuf = dmabuf;
8355 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8356
8357 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8358 LPFC_ALIGN_16_BYTE);
8359 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8360 LPFC_ALIGN_16_BYTE);
8361
8362
8363
8364
8365
8366
8367
8368
8369
8370 dma_address = &phba->sli4_hba.bmbx.dma_address;
8371 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8372 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8373 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8374 LPFC_BMBX_BIT1_ADDR_HI);
8375
8376 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8377 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8378 LPFC_BMBX_BIT1_ADDR_LO);
8379 return 0;
8380}
8381
8382
8383
8384
8385
8386
8387
8388
8389
8390
8391
8392
8393static void
8394lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8395{
8396 dma_free_coherent(&phba->pcidev->dev,
8397 phba->sli4_hba.bmbx.bmbx_size,
8398 phba->sli4_hba.bmbx.dmabuf->virt,
8399 phba->sli4_hba.bmbx.dmabuf->phys);
8400
8401 kfree(phba->sli4_hba.bmbx.dmabuf);
8402 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8403}
8404
8405static const char * const lpfc_topo_to_str[] = {
8406 "Loop then P2P",
8407 "Loopback",
8408 "P2P Only",
8409 "Unsupported",
8410 "Loop Only",
8411 "Unsupported",
8412 "P2P then Loop",
8413};
8414
8415#define LINK_FLAGS_DEF 0x0
8416#define LINK_FLAGS_P2P 0x1
8417#define LINK_FLAGS_LOOP 0x2
8418
8419
8420
8421
8422
8423
8424
8425
8426
8427
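/**
 * lpfc_map_topology - Map the topology read back from the port
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to the READ_CONFIG mailbox response.
 *
 * This routine translates the persistent topology bits (ptv/tf/pt)
 * reported by the firmware into the driver's cfg_topology setting, falling
 * back to the driver parameter when persistent topology is not supported
 * or the reported values are invalid.
 **/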
8428static void
8429lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
8430{
8431 u8 ptv, tf, pt;
8432
8433 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
8434 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
8435 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
8436
8437 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8438			"2027 Read Config Data: ptv:0x%x, tf:0x%x, pt:0x%x",
8439 ptv, tf, pt);
8440 if (!ptv) {
8441 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8442				"2019 FW does not support persistent topology. "
8443 "Using driver parameter defined value [%s]",
8444 lpfc_topo_to_str[phba->cfg_topology]);
8445 return;
8446 }
8447
8448 phba->hba_flag |= HBA_PERSISTENT_TOPO;
8449 switch (phba->pcidev->device) {
8450 case PCI_DEVICE_ID_LANCER_G7_FC:
8451 case PCI_DEVICE_ID_LANCER_G6_FC:
8452 if (!tf) {
8453 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
8454 ? FLAGS_TOPOLOGY_MODE_LOOP
8455 : FLAGS_TOPOLOGY_MODE_PT_PT);
8456 } else {
8457 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
8458 }
8459 break;
8460 default:
8461 if (tf) {
8462
8463 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
8464 FLAGS_TOPOLOGY_MODE_LOOP_PT);
8465 } else {
8466 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
8467 ? FLAGS_TOPOLOGY_MODE_PT_PT
8468 : FLAGS_TOPOLOGY_MODE_LOOP);
8469 }
8470 break;
8471 }
8472 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
8473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8474 "2020 Using persistent topology value [%s]",
8475 lpfc_topo_to_str[phba->cfg_topology]);
8476 } else {
8477 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8478				"2021 Invalid topology values from FW. "
8479 "Using driver parameter defined value [%s]",
8480 lpfc_topo_to_str[phba->cfg_topology]);
8481 }
8482}
8483
8484
8485
8486
8487
8488
8489
8490
8491
8492
8493
8494
8495
8496
8497
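/**
 * lpfc_sli4_read_config - Get the SLI4 port configuration parameters
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a READ_CONFIG mailbox command to retrieve the port's
 * resource limits (XRI/VPI/VFI/RPI/FCFI counts, queue counts, link type),
 * trims the configured IRQ and hardware queue counts to the firmware
 * limits, records any forced link speed, and reads the function
 * configuration to obtain the PF/VF numbers.
 *
 * Returns: 0 on success, -ENOMEM or -EIO on failure.
 **/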
8498int
8499lpfc_sli4_read_config(struct lpfc_hba *phba)
8500{
8501 LPFC_MBOXQ_t *pmb;
8502 struct lpfc_mbx_read_config *rd_config;
8503 union lpfc_sli4_cfg_shdr *shdr;
8504 uint32_t shdr_status, shdr_add_status;
8505 struct lpfc_mbx_get_func_cfg *get_func_cfg;
8506 struct lpfc_rsrc_desc_fcfcoe *desc;
8507 char *pdesc_0;
8508 uint16_t forced_link_speed;
8509 uint32_t if_type, qmin;
8510 int length, i, rc = 0, rc2;
8511
8512 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8513 if (!pmb) {
8514 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8515 "2011 Unable to allocate memory for issuing "
8516 "SLI_CONFIG_SPECIAL mailbox command\n");
8517 return -ENOMEM;
8518 }
8519
8520 lpfc_read_config(phba, pmb);
8521
8522 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8523 if (rc != MBX_SUCCESS) {
8524 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8525				"2012 Mailbox failed, mbxCmd x%x "
8526 "READ_CONFIG, mbxStatus x%x\n",
8527 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8528 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8529 rc = -EIO;
8530 } else {
8531 rd_config = &pmb->u.mqe.un.rd_config;
8532 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8533 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8534 phba->sli4_hba.lnk_info.lnk_tp =
8535 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8536 phba->sli4_hba.lnk_info.lnk_no =
8537 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8538 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8539 "3081 lnk_type:%d, lnk_numb:%d\n",
8540 phba->sli4_hba.lnk_info.lnk_tp,
8541 phba->sli4_hba.lnk_info.lnk_no);
8542 } else
8543 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8544 "3082 Mailbox (x%x) returned ldv:x0\n",
8545 bf_get(lpfc_mqe_command, &pmb->u.mqe));
8546 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8547 phba->bbcredit_support = 1;
8548 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8549 }
8550
8551 phba->sli4_hba.conf_trunk =
8552 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
8553 phba->sli4_hba.extents_in_use =
8554 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8555 phba->sli4_hba.max_cfg_param.max_xri =
8556 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8557
8558 if (is_kdump_kernel() &&
8559 phba->sli4_hba.max_cfg_param.max_xri > 512)
8560 phba->sli4_hba.max_cfg_param.max_xri = 512;
8561 phba->sli4_hba.max_cfg_param.xri_base =
8562 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8563 phba->sli4_hba.max_cfg_param.max_vpi =
8564 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
8565
8566 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8567 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
8568 phba->sli4_hba.max_cfg_param.vpi_base =
8569 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8570 phba->sli4_hba.max_cfg_param.max_rpi =
8571 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8572 phba->sli4_hba.max_cfg_param.rpi_base =
8573 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8574 phba->sli4_hba.max_cfg_param.max_vfi =
8575 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8576 phba->sli4_hba.max_cfg_param.vfi_base =
8577 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8578 phba->sli4_hba.max_cfg_param.max_fcfi =
8579 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
8580 phba->sli4_hba.max_cfg_param.max_eq =
8581 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8582 phba->sli4_hba.max_cfg_param.max_rq =
8583 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8584 phba->sli4_hba.max_cfg_param.max_wq =
8585 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8586 phba->sli4_hba.max_cfg_param.max_cq =
8587 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8588 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8589 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8590 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8591 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
8592 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8593 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
8594 phba->max_vports = phba->max_vpi;
8595 lpfc_map_topology(phba, rd_config);
8596 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8597 "2003 cfg params Extents? %d "
8598 "XRI(B:%d M:%d), "
8599 "VPI(B:%d M:%d) "
8600 "VFI(B:%d M:%d) "
8601 "RPI(B:%d M:%d) "
8602 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
8603 phba->sli4_hba.extents_in_use,
8604 phba->sli4_hba.max_cfg_param.xri_base,
8605 phba->sli4_hba.max_cfg_param.max_xri,
8606 phba->sli4_hba.max_cfg_param.vpi_base,
8607 phba->sli4_hba.max_cfg_param.max_vpi,
8608 phba->sli4_hba.max_cfg_param.vfi_base,
8609 phba->sli4_hba.max_cfg_param.max_vfi,
8610 phba->sli4_hba.max_cfg_param.rpi_base,
8611 phba->sli4_hba.max_cfg_param.max_rpi,
8612 phba->sli4_hba.max_cfg_param.max_fcfi,
8613 phba->sli4_hba.max_cfg_param.max_eq,
8614 phba->sli4_hba.max_cfg_param.max_cq,
8615 phba->sli4_hba.max_cfg_param.max_wq,
8616 phba->sli4_hba.max_cfg_param.max_rq,
8617 phba->lmt);
8618
8619
8620
8621
8622
8623 qmin = phba->sli4_hba.max_cfg_param.max_wq;
8624 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8625 qmin = phba->sli4_hba.max_cfg_param.max_cq;
8626 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8627 qmin = phba->sli4_hba.max_cfg_param.max_eq;
8628
8629
8630
8631
8632
8633
8634 qmin -= 4;
8635
8636
8637 if ((phba->cfg_irq_chann > qmin) ||
8638 (phba->cfg_hdw_queue > qmin)) {
8639 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8640 "2005 Reducing Queues - "
8641 "FW resource limitation: "
8642 "WQ %d CQ %d EQ %d: min %d: "
8643 "IRQ %d HDWQ %d\n",
8644 phba->sli4_hba.max_cfg_param.max_wq,
8645 phba->sli4_hba.max_cfg_param.max_cq,
8646 phba->sli4_hba.max_cfg_param.max_eq,
8647 qmin, phba->cfg_irq_chann,
8648 phba->cfg_hdw_queue);
8649
8650 if (phba->cfg_irq_chann > qmin)
8651 phba->cfg_irq_chann = qmin;
8652 if (phba->cfg_hdw_queue > qmin)
8653 phba->cfg_hdw_queue = qmin;
8654 }
8655 }
8656
8657 if (rc)
8658 goto read_cfg_out;
8659
8660
8661 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8662 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8663 forced_link_speed =
8664 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8665 if (forced_link_speed) {
8666 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8667
8668 switch (forced_link_speed) {
8669 case LINK_SPEED_1G:
8670 phba->cfg_link_speed =
8671 LPFC_USER_LINK_SPEED_1G;
8672 break;
8673 case LINK_SPEED_2G:
8674 phba->cfg_link_speed =
8675 LPFC_USER_LINK_SPEED_2G;
8676 break;
8677 case LINK_SPEED_4G:
8678 phba->cfg_link_speed =
8679 LPFC_USER_LINK_SPEED_4G;
8680 break;
8681 case LINK_SPEED_8G:
8682 phba->cfg_link_speed =
8683 LPFC_USER_LINK_SPEED_8G;
8684 break;
8685 case LINK_SPEED_10G:
8686 phba->cfg_link_speed =
8687 LPFC_USER_LINK_SPEED_10G;
8688 break;
8689 case LINK_SPEED_16G:
8690 phba->cfg_link_speed =
8691 LPFC_USER_LINK_SPEED_16G;
8692 break;
8693 case LINK_SPEED_32G:
8694 phba->cfg_link_speed =
8695 LPFC_USER_LINK_SPEED_32G;
8696 break;
8697 case LINK_SPEED_64G:
8698 phba->cfg_link_speed =
8699 LPFC_USER_LINK_SPEED_64G;
8700 break;
8701 case 0xffff:
8702 phba->cfg_link_speed =
8703 LPFC_USER_LINK_SPEED_AUTO;
8704 break;
8705 default:
8706 lpfc_printf_log(phba, KERN_ERR,
8707 LOG_TRACE_EVENT,
8708 "0047 Unrecognized link "
8709 "speed : %d\n",
8710 forced_link_speed);
8711 phba->cfg_link_speed =
8712 LPFC_USER_LINK_SPEED_AUTO;
8713 }
8714 }
8715 }
8716
8717
8718 length = phba->sli4_hba.max_cfg_param.max_xri -
8719 lpfc_sli4_get_els_iocb_cnt(phba);
8720 if (phba->cfg_hba_queue_depth > length) {
8721 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8722 "3361 HBA queue depth changed from %d to %d\n",
8723 phba->cfg_hba_queue_depth, length);
8724 phba->cfg_hba_queue_depth = length;
8725 }
8726
8727 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
8728 LPFC_SLI_INTF_IF_TYPE_2)
8729 goto read_cfg_out;
8730
8731
8732 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8733 sizeof(struct lpfc_sli4_cfg_mhdr));
8734 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8735 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8736 length, LPFC_SLI4_MBX_EMBED);
8737
8738 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8739 shdr = (union lpfc_sli4_cfg_shdr *)
8740 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8741 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8742 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8743 if (rc2 || shdr_status || shdr_add_status) {
8744 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8745				"3026 Mailbox failed, mbxCmd x%x "
8746 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8747 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8748 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8749 goto read_cfg_out;
8750 }
8751
8752
8753 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
8754
8755 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8756 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8757 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8758 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8759 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8760 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8761 goto read_cfg_out;
8762
8763 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8764 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
8765 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8766 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
8767 phba->sli4_hba.iov.pf_number =
8768 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8769 phba->sli4_hba.iov.vf_number =
8770 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8771 break;
8772 }
8773 }
8774
8775 if (i < LPFC_RSRC_DESC_MAX_NUM)
8776 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8777 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8778 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8779 phba->sli4_hba.iov.vf_number);
8780 else
8781 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8782 "3028 GET_FUNCTION_CONFIG: failed to find "
8783 "Resource Descriptor:x%x\n",
8784 LPFC_RSRC_DESC_TYPE_FCFCOE);
8785
8786read_cfg_out:
8787 mempool_free(pmb, phba->mbox_mem_pool);
8788 return rc;
8789}
8790
8791
8792
8793
8794
8795
8796
8797
8798
8799
8800
8801
8802
8803
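/**
 * lpfc_setup_endian_order - Write host endian order to an SLI4 if_type 0 port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues the SLI_CONFIG_SPECIAL mailbox containing the host
 * endian words so an if_type 0 port learns the host's endianness. Other
 * interface types do not require it and are skipped.
 *
 * Returns: 0 on success, -ENOMEM or -EIO on failure.
 **/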
8804static int
8805lpfc_setup_endian_order(struct lpfc_hba *phba)
8806{
8807 LPFC_MBOXQ_t *mboxq;
8808 uint32_t if_type, rc = 0;
8809 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8810 HOST_ENDIAN_HIGH_WORD1};
8811
8812 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8813 switch (if_type) {
8814 case LPFC_SLI_INTF_IF_TYPE_0:
8815 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8816 GFP_KERNEL);
8817 if (!mboxq) {
8818 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8819 "0492 Unable to allocate memory for "
8820 "issuing SLI_CONFIG_SPECIAL mailbox "
8821 "command\n");
8822 return -ENOMEM;
8823 }
8824
8825
8826
8827
8828
8829 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8830 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8831 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8832 if (rc != MBX_SUCCESS) {
8833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8834 "0493 SLI_CONFIG_SPECIAL mailbox "
8835 "failed with status x%x\n",
8836 rc);
8837 rc = -EIO;
8838 }
8839 mempool_free(mboxq, phba->mbox_mem_pool);
8840 break;
8841 case LPFC_SLI_INTF_IF_TYPE_6:
8842 case LPFC_SLI_INTF_IF_TYPE_2:
8843 case LPFC_SLI_INTF_IF_TYPE_1:
8844 default:
8845 break;
8846 }
8847 return rc;
8848}
8849
8850
8851
8852
8853
8854
8855
8856
8857
8858
8859
8860
8861
8862
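/**
 * lpfc_sli4_queue_verify - Verify and update queue sizes and counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine clamps the NVMET MRQ count to the hardware queue count and
 * the supported maximum, and records the default EQ/CQ entry sizes and
 * counts used when the queues are created.
 *
 * Returns: always 0.
 **/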
8863static int
8864lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8865{
8866
8867
8868
8869
8870
8871 if (phba->nvmet_support) {
8872 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
8873 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
8874 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8875 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8876 }
8877
8878 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8879 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8880 phba->cfg_hdw_queue, phba->cfg_irq_chann,
8881 phba->cfg_nvmet_mrq);
8882
8883
8884 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8885 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8886
8887
8888 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8889 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8890 return 0;
8891}
8892
8893static int
8894lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8895{
8896 struct lpfc_queue *qdesc;
8897 u32 wqesize;
8898 int cpu;
8899
8900 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8901
8902 if (phba->enab_exp_wqcq_pages)
8903
8904 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8905 phba->sli4_hba.cq_esize,
8906 LPFC_CQE_EXP_COUNT, cpu);
8907
8908 else
8909 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8910 phba->sli4_hba.cq_esize,
8911 phba->sli4_hba.cq_ecount, cpu);
8912 if (!qdesc) {
8913 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8914 "0499 Failed allocate fast-path IO CQ (%d)\n",
8915 idx);
8916 return 1;
8917 }
8918 qdesc->qe_valid = 1;
8919 qdesc->hdwq = idx;
8920 qdesc->chann = cpu;
8921 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
8922
8923
8924 if (phba->enab_exp_wqcq_pages) {
8925
8926 wqesize = (phba->fcp_embed_io) ?
8927 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8928 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8929 wqesize,
8930 LPFC_WQE_EXP_COUNT, cpu);
8931 } else
8932 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8933 phba->sli4_hba.wq_esize,
8934 phba->sli4_hba.wq_ecount, cpu);
8935
8936 if (!qdesc) {
8937 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8938 "0503 Failed allocate fast-path IO WQ (%d)\n",
8939 idx);
8940 return 1;
8941 }
8942 qdesc->hdwq = idx;
8943 qdesc->chann = cpu;
8944 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
8945 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8946 return 0;
8947}
8948
8949
8950
8951
8952
8953
8954
8955
8956
8957
8958
8959
8960
8961
8962
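/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine allocates the hardware queue array and every SLI4 queue the
 * driver uses: one EQ per first-IRQ CPU (shared by the remaining CPUs), an
 * IO CQ/WQ pair per hardware queue, the mailbox and ELS CQ/WQ pairs, the
 * NVME LS queues, the slow-path receive queues and, for NVMET, the MRQ
 * CQ/RQ sets.
 *
 * Returns: 0 on success, -ENOMEM on failure (all queues created so far are
 * destroyed before returning).
 **/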
8963int
8964lpfc_sli4_queue_create(struct lpfc_hba *phba)
8965{
8966 struct lpfc_queue *qdesc;
8967 int idx, cpu, eqcpu;
8968 struct lpfc_sli4_hdw_queue *qp;
8969 struct lpfc_vector_map_info *cpup;
8970 struct lpfc_vector_map_info *eqcpup;
8971 struct lpfc_eq_intr_info *eqi;
8972
8973
8974
8975
8976
8977 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8978 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8979 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8980 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8981 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8982 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
8983 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8984 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8985 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8986 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8987
8988 if (!phba->sli4_hba.hdwq) {
8989 phba->sli4_hba.hdwq = kcalloc(
8990 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
8991 GFP_KERNEL);
8992 if (!phba->sli4_hba.hdwq) {
8993 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8994 "6427 Failed allocate memory for "
8995 "fast-path Hardware Queue array\n");
8996 goto out_error;
8997 }
8998
8999 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9000 qp = &phba->sli4_hba.hdwq[idx];
9001 spin_lock_init(&qp->io_buf_list_get_lock);
9002 spin_lock_init(&qp->io_buf_list_put_lock);
9003 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
9004 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
9005 qp->get_io_bufs = 0;
9006 qp->put_io_bufs = 0;
9007 qp->total_io_bufs = 0;
9008 spin_lock_init(&qp->abts_io_buf_list_lock);
9009 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
9010 qp->abts_scsi_io_bufs = 0;
9011 qp->abts_nvme_io_bufs = 0;
9012 INIT_LIST_HEAD(&qp->sgl_list);
9013 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
9014 spin_lock_init(&qp->hdwq_lock);
9015 }
9016 }
9017
9018 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9019 if (phba->nvmet_support) {
9020 phba->sli4_hba.nvmet_cqset = kcalloc(
9021 phba->cfg_nvmet_mrq,
9022 sizeof(struct lpfc_queue *),
9023 GFP_KERNEL);
9024 if (!phba->sli4_hba.nvmet_cqset) {
9025 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9026 "3121 Fail allocate memory for "
9027 "fast-path CQ set array\n");
9028 goto out_error;
9029 }
9030 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
9031 phba->cfg_nvmet_mrq,
9032 sizeof(struct lpfc_queue *),
9033 GFP_KERNEL);
9034 if (!phba->sli4_hba.nvmet_mrq_hdr) {
9035 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9036 "3122 Fail allocate memory for "
9037 "fast-path RQ set hdr array\n");
9038 goto out_error;
9039 }
9040 phba->sli4_hba.nvmet_mrq_data = kcalloc(
9041 phba->cfg_nvmet_mrq,
9042 sizeof(struct lpfc_queue *),
9043 GFP_KERNEL);
9044 if (!phba->sli4_hba.nvmet_mrq_data) {
9045 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9046 "3124 Fail allocate memory for "
9047 "fast-path RQ set data array\n");
9048 goto out_error;
9049 }
9050 }
9051 }
9052
9053 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9054
9055
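	/* Allocate an EQ on each CPU that is marked as a first-IRQ CPU */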
9056 for_each_present_cpu(cpu) {
9057
9058
9059
9060
9061 cpup = &phba->sli4_hba.cpu_map[cpu];
9062 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9063 continue;
9064
9065
9066 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9067
9068
9069 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9070 phba->sli4_hba.eq_esize,
9071 phba->sli4_hba.eq_ecount, cpu);
9072 if (!qdesc) {
9073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9074 "0497 Failed allocate EQ (%d)\n",
9075 cpup->hdwq);
9076 goto out_error;
9077 }
9078 qdesc->qe_valid = 1;
9079 qdesc->hdwq = cpup->hdwq;
9080 qdesc->chann = cpu;
9081 qdesc->last_cpu = qdesc->chann;
9082
9083
9084 qp->hba_eq = qdesc;
9085
9086 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
9087 list_add(&qdesc->cpu_list, &eqi->list);
9088 }
9089
9090
9091
9092
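	/*
	 * Point every remaining hardware queue at the EQ owned by the
	 * first-IRQ CPU it is associated with.
	 */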
9093 for_each_present_cpu(cpu) {
9094 cpup = &phba->sli4_hba.cpu_map[cpu];
9095
9096
9097 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
9098 continue;
9099
9100
9101 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9102 if (qp->hba_eq)
9103 continue;
9104
9105
9106 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
9107 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
9108 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
9109 }
9110
9111
9112 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9113 if (lpfc_alloc_io_wq_cq(phba, idx))
9114 goto out_error;
9115 }
9116
9117 if (phba->nvmet_support) {
9118 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9119 cpu = lpfc_find_cpu_handle(phba, idx,
9120 LPFC_FIND_BY_HDWQ);
9121 qdesc = lpfc_sli4_queue_alloc(phba,
9122 LPFC_DEFAULT_PAGE_SIZE,
9123 phba->sli4_hba.cq_esize,
9124 phba->sli4_hba.cq_ecount,
9125 cpu);
9126 if (!qdesc) {
9127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9128 "3142 Failed allocate NVME "
9129 "CQ Set (%d)\n", idx);
9130 goto out_error;
9131 }
9132 qdesc->qe_valid = 1;
9133 qdesc->hdwq = idx;
9134 qdesc->chann = cpu;
9135 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
9136 }
9137 }
9138
9139
9140
9141
9142
9143 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
9144
9145 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9146 phba->sli4_hba.cq_esize,
9147 phba->sli4_hba.cq_ecount, cpu);
9148 if (!qdesc) {
9149 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9150 "0500 Failed allocate slow-path mailbox CQ\n");
9151 goto out_error;
9152 }
9153 qdesc->qe_valid = 1;
9154 phba->sli4_hba.mbx_cq = qdesc;
9155
9156
9157 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9158 phba->sli4_hba.cq_esize,
9159 phba->sli4_hba.cq_ecount, cpu);
9160 if (!qdesc) {
9161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9162 "0501 Failed allocate slow-path ELS CQ\n");
9163 goto out_error;
9164 }
9165 qdesc->qe_valid = 1;
9166 qdesc->chann = cpu;
9167 phba->sli4_hba.els_cq = qdesc;
9168
9169
9170
9171
9172
9173
9174
9175
9176 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9177 phba->sli4_hba.mq_esize,
9178 phba->sli4_hba.mq_ecount, cpu);
9179 if (!qdesc) {
9180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9181 "0505 Failed allocate slow-path MQ\n");
9182 goto out_error;
9183 }
9184 qdesc->chann = cpu;
9185 phba->sli4_hba.mbx_wq = qdesc;
9186
9187
9188
9189
9190
9191
9192 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9193 phba->sli4_hba.wq_esize,
9194 phba->sli4_hba.wq_ecount, cpu);
9195 if (!qdesc) {
9196 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9197 "0504 Failed allocate slow-path ELS WQ\n");
9198 goto out_error;
9199 }
9200 qdesc->chann = cpu;
9201 phba->sli4_hba.els_wq = qdesc;
9202 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9203
9204 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9205
9206 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9207 phba->sli4_hba.cq_esize,
9208 phba->sli4_hba.cq_ecount, cpu);
9209 if (!qdesc) {
9210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9211 "6079 Failed allocate NVME LS CQ\n");
9212 goto out_error;
9213 }
9214 qdesc->chann = cpu;
9215 qdesc->qe_valid = 1;
9216 phba->sli4_hba.nvmels_cq = qdesc;
9217
9218
9219 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9220 phba->sli4_hba.wq_esize,
9221 phba->sli4_hba.wq_ecount, cpu);
9222 if (!qdesc) {
9223 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9224 "6080 Failed allocate NVME LS WQ\n");
9225 goto out_error;
9226 }
9227 qdesc->chann = cpu;
9228 phba->sli4_hba.nvmels_wq = qdesc;
9229 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9230 }
9231
9232
9233
9234
9235
9236
9237 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9238 phba->sli4_hba.rq_esize,
9239 phba->sli4_hba.rq_ecount, cpu);
9240 if (!qdesc) {
9241 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9242 "0506 Failed allocate receive HRQ\n");
9243 goto out_error;
9244 }
9245 phba->sli4_hba.hdr_rq = qdesc;
9246
9247
9248 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9249 phba->sli4_hba.rq_esize,
9250 phba->sli4_hba.rq_ecount, cpu);
9251 if (!qdesc) {
9252 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9253 "0507 Failed allocate receive DRQ\n");
9254 goto out_error;
9255 }
9256 phba->sli4_hba.dat_rq = qdesc;
9257
9258 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9259 phba->nvmet_support) {
9260 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9261 cpu = lpfc_find_cpu_handle(phba, idx,
9262 LPFC_FIND_BY_HDWQ);
9263
9264 qdesc = lpfc_sli4_queue_alloc(phba,
9265 LPFC_DEFAULT_PAGE_SIZE,
9266 phba->sli4_hba.rq_esize,
9267 LPFC_NVMET_RQE_DEF_COUNT,
9268 cpu);
9269 if (!qdesc) {
9270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9271 "3146 Failed allocate "
9272 "receive HRQ\n");
9273 goto out_error;
9274 }
9275 qdesc->hdwq = idx;
9276 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9277
9278
9279 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9280 GFP_KERNEL,
9281 cpu_to_node(cpu));
9282 if (qdesc->rqbp == NULL) {
9283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9284 "6131 Failed allocate "
9285 "Header RQBP\n");
9286 goto out_error;
9287 }
9288
9289
9290 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9291
9292
9293 qdesc = lpfc_sli4_queue_alloc(phba,
9294 LPFC_DEFAULT_PAGE_SIZE,
9295 phba->sli4_hba.rq_esize,
9296 LPFC_NVMET_RQE_DEF_COUNT,
9297 cpu);
9298 if (!qdesc) {
9299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9300 "3156 Failed allocate "
9301 "receive DRQ\n");
9302 goto out_error;
9303 }
9304 qdesc->hdwq = idx;
9305 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9306 }
9307 }
9308
9309
9310 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9311 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9312 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9313 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9314 }
9315 }
9316
9317
9318 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9319 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9320 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9321 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9322 }
9323 }
9324
9325 return 0;
9326
9327out_error:
9328 lpfc_sli4_queue_destroy(phba);
9329 return -ENOMEM;
9330}
9331
9332static inline void
9333__lpfc_sli4_release_queue(struct lpfc_queue **qp)
9334{
9335 if (*qp != NULL) {
9336 lpfc_sli4_queue_free(*qp);
9337 *qp = NULL;
9338 }
9339}
9340
9341static inline void
9342lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9343{
9344 int idx;
9345
9346 if (*qs == NULL)
9347 return;
9348
9349 for (idx = 0; idx < max; idx++)
9350 __lpfc_sli4_release_queue(&(*qs)[idx]);
9351
9352 kfree(*qs);
9353 *qs = NULL;
9354}
9355
9356static inline void
9357lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9358{
9359 struct lpfc_sli4_hdw_queue *hdwq;
9360 struct lpfc_queue *eq;
9361 uint32_t idx;
9362
9363 hdwq = phba->sli4_hba.hdwq;
9364
9365
9366 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9367
9368 lpfc_sli4_queue_free(hdwq[idx].io_cq);
9369 lpfc_sli4_queue_free(hdwq[idx].io_wq);
9370 hdwq[idx].hba_eq = NULL;
9371 hdwq[idx].io_cq = NULL;
9372 hdwq[idx].io_wq = NULL;
9373 if (phba->cfg_xpsgl && !phba->nvmet_support)
9374 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9375 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
9376 }
9377
9378 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9379
9380 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9381 lpfc_sli4_queue_free(eq);
9382 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9383 }
9384}
9385
9386
9387
9388
9389
9390
9391
9392
9393
9394
9395
9396
9397
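/**
 * lpfc_sli4_queue_destroy - Release all previously allocated SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Waits for any concurrent queue free operation to complete, then releases
 * the hardware queues, NVMET queues, slow-path WQs/CQs/RQs and the CQ
 * lookup table.
 **/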
9398void
9399lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9400{
9401
9402
9403
9404
9405
9406 spin_lock_irq(&phba->hbalock);
9407 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9408 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9409 spin_unlock_irq(&phba->hbalock);
9410 msleep(20);
9411 spin_lock_irq(&phba->hbalock);
9412 }
9413 spin_unlock_irq(&phba->hbalock);
9414
9415 lpfc_sli4_cleanup_poll_list(phba);
9416
9417
9418 if (phba->sli4_hba.hdwq)
9419 lpfc_sli4_release_hdwq(phba);
9420
9421 if (phba->nvmet_support) {
9422 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9423 phba->cfg_nvmet_mrq);
9424
9425 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9426 phba->cfg_nvmet_mrq);
9427 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9428 phba->cfg_nvmet_mrq);
9429 }
9430
9431
9432 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
9433
9434
9435 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9436
9437
9438 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9439
9440
9441 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9442 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9443
9444
9445 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9446
9447
9448 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9449
9450
9451 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9452
9453
9454 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9455
9456
9457 spin_lock_irq(&phba->hbalock);
9458 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9459 spin_unlock_irq(&phba->hbalock);
9460}
9461
9462int
9463lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9464{
9465 struct lpfc_rqb *rqbp;
9466 struct lpfc_dmabuf *h_buf;
9467 struct rqb_dmabuf *rqb_buffer;
9468
9469 rqbp = rq->rqbp;
9470 while (!list_empty(&rqbp->rqb_buffer_list)) {
9471 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9472 struct lpfc_dmabuf, list);
9473
9474 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9475 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9476 rqbp->buffer_count--;
9477 }
9478 return 1;
9479}
9480
9481static int
9482lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9483 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9484 int qidx, uint32_t qtype)
9485{
9486 struct lpfc_sli_ring *pring;
9487 int rc;
9488
9489 if (!eq || !cq || !wq) {
9490 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9491 "6085 Fast-path %s (%d) not allocated\n",
9492 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9493 return -ENOMEM;
9494 }
9495
9496
9497 rc = lpfc_cq_create(phba, cq, eq,
9498 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9499 if (rc) {
9500 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9501 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9502 qidx, (uint32_t)rc);
9503 return rc;
9504 }
9505
9506 if (qtype != LPFC_MBOX) {
9507
9508 if (cq_map)
9509 *cq_map = cq->queue_id;
9510
9511 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9512 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9513 qidx, cq->queue_id, qidx, eq->queue_id);
9514
9515
9516 rc = lpfc_wq_create(phba, wq, cq, qtype);
9517 if (rc) {
9518 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9519 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9520 qidx, (uint32_t)rc);
9521
9522 return rc;
9523 }
9524
9525
9526 pring = wq->pring;
9527 pring->sli.sli4.wqp = (void *)wq;
9528 cq->pring = pring;
9529
9530 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9531 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9532 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9533 } else {
9534 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9535 if (rc) {
9536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9537 "0539 Failed setup of slow-path MQ: "
9538 "rc = 0x%x\n", rc);
9539
9540 return rc;
9541 }
9542
9543 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9544 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9545 phba->sli4_hba.mbx_wq->queue_id,
9546 phba->sli4_hba.mbx_cq->queue_id);
9547 }
9548
9549 return 0;
9550}
9551
9552
9553
9554
9555
9556
9557
9558
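/**
 * lpfc_setup_cq_lookup - Build the fast-path CQ lookup table
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the child CQ list of every EQ and records each fast-path IO CQ in
 * the cq_lookup table, indexed by queue id.
 **/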
9559static void
9560lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9561{
9562 struct lpfc_queue *eq, *childq;
9563 int qidx;
9564
9565 memset(phba->sli4_hba.cq_lookup, 0,
9566 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9567
9568 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9569
9570 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9571 if (!eq)
9572 continue;
9573
9574 list_for_each_entry(childq, &eq->child_list, list) {
9575 if (childq->queue_id > phba->sli4_hba.cq_max)
9576 continue;
9577 if (childq->subtype == LPFC_IO)
9578 phba->sli4_hba.cq_lookup[childq->queue_id] =
9579 childq;
9580 }
9581 }
9582}
9583
9584
9585
9586
9587
9588
9589
9590
9591
9592
9593
9594
9595
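/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues with the HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Queries the firmware configuration, then creates the EQs, the fast-path
 * IO WQ/CQ pairs, the mailbox, ELS, NVME LS and receive queues that were
 * previously allocated (plus the NVMET CQ set and MRQs when NVMET is
 * enabled), and finally builds the CQ lookup table.
 *
 * Return 0 on success, -ENOMEM or -ENXIO on failure.
 **/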
9596int
9597lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9598{
9599 uint32_t shdr_status, shdr_add_status;
9600 union lpfc_sli4_cfg_shdr *shdr;
9601 struct lpfc_vector_map_info *cpup;
9602 struct lpfc_sli4_hdw_queue *qp;
9603 LPFC_MBOXQ_t *mboxq;
9604 int qidx, cpu;
9605 uint32_t length, usdelay;
9606 int rc = -ENOMEM;
9607
9608
9609 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9610 if (!mboxq) {
9611 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9612 "3249 Unable to allocate memory for "
9613 "QUERY_FW_CFG mailbox command\n");
9614 return -ENOMEM;
9615 }
9616 length = (sizeof(struct lpfc_mbx_query_fw_config) -
9617 sizeof(struct lpfc_sli4_cfg_mhdr));
9618 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9619 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9620 length, LPFC_SLI4_MBX_EMBED);
9621
9622 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9623
9624 shdr = (union lpfc_sli4_cfg_shdr *)
9625 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9626 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9627 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9628 if (shdr_status || shdr_add_status || rc) {
9629 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9630 "3250 QUERY_FW_CFG mailbox failed with status "
9631 "x%x add_status x%x, mbx status x%x\n",
9632 shdr_status, shdr_add_status, rc);
9633 if (rc != MBX_TIMEOUT)
9634 mempool_free(mboxq, phba->mbox_mem_pool);
9635 rc = -ENXIO;
9636 goto out_error;
9637 }
9638
9639 phba->sli4_hba.fw_func_mode =
9640 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9641 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9642 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9643 phba->sli4_hba.physical_port =
9644 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9645 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9646 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9647 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9648 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9649
9650 if (rc != MBX_TIMEOUT)
9651 mempool_free(mboxq, phba->mbox_mem_pool);
9652
9653
9654
9655
9656 qp = phba->sli4_hba.hdwq;
9657
9658
9659 if (!qp) {
9660 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9661 "3147 Fast-path EQs not allocated\n");
9662 rc = -ENOMEM;
9663 goto out_error;
9664 }
9665
9666
9667 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9668
9669 for_each_present_cpu(cpu) {
9670 cpup = &phba->sli4_hba.cpu_map[cpu];
9671
9672
9673
9674
9675 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9676 continue;
9677 if (qidx != cpup->eq)
9678 continue;
9679
9680
9681 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9682 phba->cfg_fcp_imax);
9683 if (rc) {
9684 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9685 "0523 Failed setup of fast-path"
9686 " EQ (%d), rc = 0x%x\n",
9687 cpup->eq, (uint32_t)rc);
9688 goto out_destroy;
9689 }
9690
9691
9692 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9693 qp[cpup->hdwq].hba_eq;
9694
9695 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9696 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9697 cpup->eq,
9698 qp[cpup->hdwq].hba_eq->queue_id);
9699 }
9700 }
9701
9702
9703 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9704 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9705 cpup = &phba->sli4_hba.cpu_map[cpu];
9706
9707
9708 rc = lpfc_create_wq_cq(phba,
9709 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9710 qp[qidx].io_cq,
9711 qp[qidx].io_wq,
9712 &phba->sli4_hba.hdwq[qidx].io_cq_map,
9713 qidx,
9714 LPFC_IO);
9715 if (rc) {
9716 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9717 "0535 Failed to setup fastpath "
9718 "IO WQ/CQ (%d), rc = 0x%x\n",
9719 qidx, (uint32_t)rc);
9720 goto out_destroy;
9721 }
9722 }
9723
9724
9725
9726
9727
9728
9729
9730 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
9731 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9732 "0528 %s not allocated\n",
9733 phba->sli4_hba.mbx_cq ?
9734 "Mailbox WQ" : "Mailbox CQ");
9735 rc = -ENOMEM;
9736 goto out_destroy;
9737 }
9738
9739 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9740 phba->sli4_hba.mbx_cq,
9741 phba->sli4_hba.mbx_wq,
9742 NULL, 0, LPFC_MBOX);
9743 if (rc) {
9744 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9745 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9746 (uint32_t)rc);
9747 goto out_destroy;
9748 }
9749 if (phba->nvmet_support) {
9750 if (!phba->sli4_hba.nvmet_cqset) {
9751 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9752 "3165 Fast-path NVME CQ Set "
9753 "array not allocated\n");
9754 rc = -ENOMEM;
9755 goto out_destroy;
9756 }
9757 if (phba->cfg_nvmet_mrq > 1) {
9758 rc = lpfc_cq_create_set(phba,
9759 phba->sli4_hba.nvmet_cqset,
9760 qp,
9761 LPFC_WCQ, LPFC_NVMET);
9762 if (rc) {
9763 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9764 "3164 Failed setup of NVME CQ "
9765 "Set, rc = 0x%x\n",
9766 (uint32_t)rc);
9767 goto out_destroy;
9768 }
9769 } else {
9770
9771 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
9772 qp[0].hba_eq,
9773 LPFC_WCQ, LPFC_NVMET);
9774 if (rc) {
9775 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9776 "6089 Failed setup NVMET CQ: "
9777 "rc = 0x%x\n", (uint32_t)rc);
9778 goto out_destroy;
9779 }
9780 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9781
9782 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9783 "6090 NVMET CQ setup: cq-id=%d, "
9784 "parent eq-id=%d\n",
9785 phba->sli4_hba.nvmet_cqset[0]->queue_id,
9786 qp[0].hba_eq->queue_id);
9787 }
9788 }
9789
9790
9791 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9792 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9793 "0530 ELS %s not allocated\n",
9794 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9795 rc = -ENOMEM;
9796 goto out_destroy;
9797 }
9798 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9799 phba->sli4_hba.els_cq,
9800 phba->sli4_hba.els_wq,
9801 NULL, 0, LPFC_ELS);
9802 if (rc) {
9803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9804 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9805 (uint32_t)rc);
9806 goto out_destroy;
9807 }
9808 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9809 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9810 phba->sli4_hba.els_wq->queue_id,
9811 phba->sli4_hba.els_cq->queue_id);
9812
9813 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9814
9815 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9816 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9817 "6091 LS %s not allocated\n",
9818 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9819 rc = -ENOMEM;
9820 goto out_destroy;
9821 }
9822 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9823 phba->sli4_hba.nvmels_cq,
9824 phba->sli4_hba.nvmels_wq,
9825 NULL, 0, LPFC_NVME_LS);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0526 Failed setup of NVME LS WQ/CQ: "
					"rc = 0x%x\n", (uint32_t)rc);
			goto out_destroy;
		}
9832
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"6096 NVME LS WQ setup: wq-id=%d, "
				"parent cq-id=%d\n",
				phba->sli4_hba.nvmels_wq->queue_id,
				phba->sli4_hba.nvmels_cq->queue_id);
9838 }
9839
9840
9841
9842
9843 if (phba->nvmet_support) {
9844 if ((!phba->sli4_hba.nvmet_cqset) ||
9845 (!phba->sli4_hba.nvmet_mrq_hdr) ||
9846 (!phba->sli4_hba.nvmet_mrq_data)) {
9847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9848 "6130 MRQ CQ Queues not "
9849 "allocated\n");
9850 rc = -ENOMEM;
9851 goto out_destroy;
9852 }
9853 if (phba->cfg_nvmet_mrq > 1) {
9854 rc = lpfc_mrq_create(phba,
9855 phba->sli4_hba.nvmet_mrq_hdr,
9856 phba->sli4_hba.nvmet_mrq_data,
9857 phba->sli4_hba.nvmet_cqset,
9858 LPFC_NVMET);
9859 if (rc) {
9860 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9861 "6098 Failed setup of NVMET "
9862 "MRQ: rc = 0x%x\n",
9863 (uint32_t)rc);
9864 goto out_destroy;
9865 }
9866
9867 } else {
9868 rc = lpfc_rq_create(phba,
9869 phba->sli4_hba.nvmet_mrq_hdr[0],
9870 phba->sli4_hba.nvmet_mrq_data[0],
9871 phba->sli4_hba.nvmet_cqset[0],
9872 LPFC_NVMET);
9873 if (rc) {
9874 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9875 "6057 Failed setup of NVMET "
9876 "Receive Queue: rc = 0x%x\n",
9877 (uint32_t)rc);
9878 goto out_destroy;
9879 }
9880
9881 lpfc_printf_log(
9882 phba, KERN_INFO, LOG_INIT,
9883 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9884 "dat-rq-id=%d parent cq-id=%d\n",
9885 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9886 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9887 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9888
9889 }
9890 }
9891
9892 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9893 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9894 "0540 Receive Queue not allocated\n");
9895 rc = -ENOMEM;
9896 goto out_destroy;
9897 }
9898
9899 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9900 phba->sli4_hba.els_cq, LPFC_USOL);
9901 if (rc) {
9902 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9903 "0541 Failed setup of Receive Queue: "
9904 "rc = 0x%x\n", (uint32_t)rc);
9905 goto out_destroy;
9906 }
9907
9908 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9909 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9910 "parent cq-id=%d\n",
9911 phba->sli4_hba.hdr_rq->queue_id,
9912 phba->sli4_hba.dat_rq->queue_id,
9913 phba->sli4_hba.els_cq->queue_id);
9914
9915 if (phba->cfg_fcp_imax)
9916 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9917 else
9918 usdelay = 0;
9919
9920 for (qidx = 0; qidx < phba->cfg_irq_chann;
9921 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9922 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9923 usdelay);
9924
9925 if (phba->sli4_hba.cq_max) {
9926 kfree(phba->sli4_hba.cq_lookup);
9927 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9928 sizeof(struct lpfc_queue *), GFP_KERNEL);
9929 if (!phba->sli4_hba.cq_lookup) {
9930 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9931 "0549 Failed setup of CQ Lookup table: "
9932 "size 0x%x\n", phba->sli4_hba.cq_max);
9933 rc = -ENOMEM;
9934 goto out_destroy;
9935 }
9936 lpfc_setup_cq_lookup(phba);
9937 }
9938 return 0;
9939
9940out_destroy:
9941 lpfc_sli4_queue_unset(phba);
9942out_error:
9943 return rc;
9944}
9945
9946
9947
9948
9949
9950
9951
9952
9953
9954
9955
9956
9957
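/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues from the HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Tears down (destroys in firmware) the mailbox, NVME LS, ELS, receive,
 * NVMET, fast-path IO and event queues, and releases the CQ lookup table.
 **/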
9958void
9959lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9960{
9961 struct lpfc_sli4_hdw_queue *qp;
9962 struct lpfc_queue *eq;
9963 int qidx;
9964
9965
9966 if (phba->sli4_hba.mbx_wq)
9967 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9968
9969
9970 if (phba->sli4_hba.nvmels_wq)
9971 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9972
9973
9974 if (phba->sli4_hba.els_wq)
9975 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9976
9977
9978 if (phba->sli4_hba.hdr_rq)
9979 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9980 phba->sli4_hba.dat_rq);
9981
9982
9983 if (phba->sli4_hba.mbx_cq)
9984 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9985
9986
9987 if (phba->sli4_hba.els_cq)
9988 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9989
9990
9991 if (phba->sli4_hba.nvmels_cq)
9992 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9993
9994 if (phba->nvmet_support) {
9995
9996 if (phba->sli4_hba.nvmet_mrq_hdr) {
9997 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9998 lpfc_rq_destroy(
9999 phba,
10000 phba->sli4_hba.nvmet_mrq_hdr[qidx],
10001 phba->sli4_hba.nvmet_mrq_data[qidx]);
10002 }
10003
10004
10005 if (phba->sli4_hba.nvmet_cqset) {
10006 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
10007 lpfc_cq_destroy(
10008 phba, phba->sli4_hba.nvmet_cqset[qidx]);
10009 }
10010 }
10011
10012
10013 if (phba->sli4_hba.hdwq) {
10014
10015 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
10016
10017 qp = &phba->sli4_hba.hdwq[qidx];
10018 lpfc_wq_destroy(phba, qp->io_wq);
10019 lpfc_cq_destroy(phba, qp->io_cq);
10020 }
10021
10022 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10023
10024 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10025 lpfc_eq_destroy(phba, eq);
10026 }
10027 }
10028
10029 kfree(phba->sli4_hba.cq_lookup);
10030 phba->sli4_hba.cq_lookup = NULL;
10031 phba->sli4_hba.cq_max = 0;
10032}
10033
10034
10035
10036
10037
10038
10039
10040
10041
10042
10043
10044
10045
10046
10047
10048
10049
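/**
 * lpfc_sli4_cq_event_pool_create - Create the completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Pre-allocates a pool of lpfc_cq_event structures (4 * cq_ecount entries)
 * used for slow-path completion queue events.
 *
 * Return 0 on success, -ENOMEM if allocation fails (partial allocations are
 * freed).
 **/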
10050static int
10051lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
10052{
10053 struct lpfc_cq_event *cq_event;
10054 int i;
10055
10056 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
10057 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
10058 if (!cq_event)
10059 goto out_pool_create_fail;
10060 list_add_tail(&cq_event->list,
10061 &phba->sli4_hba.sp_cqe_event_pool);
10062 }
10063 return 0;
10064
10065out_pool_create_fail:
10066 lpfc_sli4_cq_event_pool_destroy(phba);
10067 return -ENOMEM;
10068}
10069
10070
10071
10072
10073
10074
10075
10076
10077
10078
10079
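/**
 * lpfc_sli4_cq_event_pool_destroy - Free the completion-queue event pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Removes and frees every lpfc_cq_event entry left on the slow-path CQ
 * event free pool.
 **/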
10080static void
10081lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
10082{
10083 struct lpfc_cq_event *cq_event, *next_cq_event;
10084
10085 list_for_each_entry_safe(cq_event, next_cq_event,
10086 &phba->sli4_hba.sp_cqe_event_pool, list) {
10087 list_del(&cq_event->list);
10088 kfree(cq_event);
10089 }
10090}
10091
10092
10093
10094
10095
10096
10097
10098
10099
10100
10101
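/**
 * __lpfc_sli4_cq_event_alloc - Allocate a CQ event from the free pool (locked)
 * @phba: pointer to lpfc hba data structure.
 *
 * Caller must hold the hbalock.  Returns a pool entry or NULL if the pool
 * is empty.
 **/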
10102struct lpfc_cq_event *
10103__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10104{
10105 struct lpfc_cq_event *cq_event = NULL;
10106
10107 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
10108 struct lpfc_cq_event, list);
10109 return cq_event;
10110}
10111
10112
10113
10114
10115
10116
10117
10118
10119
10120
10121
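/**
 * lpfc_sli4_cq_event_alloc - Allocate a CQ event from the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the hbalock and calls __lpfc_sli4_cq_event_alloc().
 **/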
10122struct lpfc_cq_event *
10123lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10124{
10125 struct lpfc_cq_event *cq_event;
10126 unsigned long iflags;
10127
10128 spin_lock_irqsave(&phba->hbalock, iflags);
10129 cq_event = __lpfc_sli4_cq_event_alloc(phba);
10130 spin_unlock_irqrestore(&phba->hbalock, iflags);
10131 return cq_event;
10132}
10133
10134
10135
10136
10137
10138
10139
10140
10141
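/**
 * __lpfc_sli4_cq_event_release - Return a CQ event to the free pool (locked)
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * Caller must hold the hbalock.
 **/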
10142void
10143__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10144 struct lpfc_cq_event *cq_event)
10145{
10146 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10147}
10148
10149
10150
10151
10152
10153
10154
10155
10156
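/**
 * lpfc_sli4_cq_event_release - Return a CQ event to the free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * Takes the hbalock and calls __lpfc_sli4_cq_event_release().
 **/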
10157void
10158lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10159 struct lpfc_cq_event *cq_event)
10160{
10161 unsigned long iflags;
10162 spin_lock_irqsave(&phba->hbalock, iflags);
10163 __lpfc_sli4_cq_event_release(phba, cq_event);
10164 spin_unlock_irqrestore(&phba->hbalock, iflags);
10165}
10166
10167
10168
10169
10170
10171
10172
10173
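/**
 * lpfc_sli4_cq_event_release_all - Release all pending CQ events
 * @phba: pointer to lpfc hba data structure.
 *
 * Moves every event still queued on the aborted-XRI and asynchronous event
 * work queues onto a local list and returns each of them to the free pool.
 **/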
10174static void
10175lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10176{
10177 LIST_HEAD(cqelist);
10178 struct lpfc_cq_event *cqe;
10179 unsigned long iflags;
10180
10181
10182 spin_lock_irqsave(&phba->hbalock, iflags);
10183
10184 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10185 &cqelist);
10186
10187 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10188 &cqelist);
10189
10190 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10191 &cqelist);
10192 spin_unlock_irqrestore(&phba->hbalock, iflags);
10193
10194 while (!list_empty(&cqelist)) {
10195 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
10196 lpfc_sli4_cq_event_release(phba, cqe);
10197 }
10198}
10199
10200
10201
10202
10203
10204
10205
10206
10207
10208
10209
10210
10211
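/**
 * lpfc_pci_function_reset - Reset the PCI function of the SLI4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * For if_type 0 the reset is requested through the FUNCTION_RESET mailbox
 * command; for if_type 2/6 the port is reset through the SLIPORT control
 * register after waiting for the port-ready bit.
 *
 * Return 0 on success, -ENODEV or -ENXIO on failure.
 **/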
10212int
10213lpfc_pci_function_reset(struct lpfc_hba *phba)
10214{
10215 LPFC_MBOXQ_t *mboxq;
10216 uint32_t rc = 0, if_type;
10217 uint32_t shdr_status, shdr_add_status;
10218 uint32_t rdy_chk;
10219 uint32_t port_reset = 0;
10220 union lpfc_sli4_cfg_shdr *shdr;
10221 struct lpfc_register reg_data;
10222 uint16_t devid;
10223
10224 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10225 switch (if_type) {
10226 case LPFC_SLI_INTF_IF_TYPE_0:
10227 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10228 GFP_KERNEL);
10229 if (!mboxq) {
10230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10231 "0494 Unable to allocate memory for "
10232 "issuing SLI_FUNCTION_RESET mailbox "
10233 "command\n");
10234 return -ENOMEM;
10235 }
10236
10237
10238 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10239 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10240 LPFC_SLI4_MBX_EMBED);
10241 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10242 shdr = (union lpfc_sli4_cfg_shdr *)
10243 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10244 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10245 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10246 &shdr->response);
10247 if (rc != MBX_TIMEOUT)
10248 mempool_free(mboxq, phba->mbox_mem_pool);
10249 if (shdr_status || shdr_add_status || rc) {
10250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10251 "0495 SLI_FUNCTION_RESET mailbox "
10252 "failed with status x%x add_status x%x,"
10253 " mbx status x%x\n",
10254 shdr_status, shdr_add_status, rc);
10255 rc = -ENXIO;
10256 }
10257 break;
10258 case LPFC_SLI_INTF_IF_TYPE_2:
10259 case LPFC_SLI_INTF_IF_TYPE_6:
10260wait:
10261
10262
10263
10264
10265
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				       STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}
10276
		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}
10291
		if (!port_reset) {

			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);

			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			rc = -ENODEV;
			goto out;
		}
10314 break;
10315
10316 case LPFC_SLI_INTF_IF_TYPE_1:
10317 default:
10318 break;
10319 }
10320
10321out:
10322
10323 if (rc) {
10324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10325 "3317 HBA not functional: IP Reset Failed "
10326 "try: echo fw_reset > board_mode\n");
10327 rc = -ENODEV;
10328 }
10329
10330 return rc;
10331}
10332
10333
10334
10335
10336
10337
10338
10339
10340
10341
10342
10343
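/**
 * lpfc_sli4_pci_mem_setup - Set up SLI4 PCI memory space
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets the DMA mask, validates the SLI_INTF register and maps the PCI BARs
 * for the configuration, control and doorbell (and, for if_type 6, DPP)
 * register regions, then selects the EQ/CQ doorbell access routines for
 * the interface type.
 *
 * Return 0 on success, negative errno on failure.
 **/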
10344static int
10345lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10346{
10347 struct pci_dev *pdev = phba->pcidev;
10348 unsigned long bar0map_len, bar1map_len, bar2map_len;
10349 int error;
10350 uint32_t if_type;
10351
10352 if (!pdev)
10353 return -ENODEV;
10354
10355
10356 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10357 if (error)
10358 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10359 if (error)
10360 return error;
10361
10362
10363
10364
10365
10366 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10367 &phba->sli4_hba.sli_intf.word0)) {
10368 return -ENODEV;
10369 }
10370
10371
10372 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10373 LPFC_SLI_INTF_VALID) {
10374 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10375 "2894 SLI_INTF reg contents invalid "
10376 "sli_intf reg 0x%x\n",
10377 phba->sli4_hba.sli_intf.word0);
10378 return -ENODEV;
10379 }
10380
10381 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10382
10383
10384
10385
10386
10387
10388 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10389 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10390 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10391
10392
10393
10394
10395
10396 phba->sli4_hba.conf_regs_memmap_p =
10397 ioremap(phba->pci_bar0_map, bar0map_len);
10398 if (!phba->sli4_hba.conf_regs_memmap_p) {
10399 dev_printk(KERN_ERR, &pdev->dev,
10400 "ioremap failed for SLI4 PCI config "
10401 "registers.\n");
10402 return -ENODEV;
10403 }
10404 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10405
10406 lpfc_sli4_bar0_register_memmap(phba, if_type);
10407 } else {
10408 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10409 bar0map_len = pci_resource_len(pdev, 1);
10410 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10411 dev_printk(KERN_ERR, &pdev->dev,
10412 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10413 return -ENODEV;
10414 }
10415 phba->sli4_hba.conf_regs_memmap_p =
10416 ioremap(phba->pci_bar0_map, bar0map_len);
10417 if (!phba->sli4_hba.conf_regs_memmap_p) {
10418 dev_printk(KERN_ERR, &pdev->dev,
10419 "ioremap failed for SLI4 PCI config "
10420 "registers.\n");
10421 return -ENODEV;
10422 }
10423 lpfc_sli4_bar0_register_memmap(phba, if_type);
10424 }
10425
10426 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10427 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10428
10429
10430
10431
10432 phba->pci_bar1_map = pci_resource_start(pdev,
10433 PCI_64BIT_BAR2);
10434 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10435 phba->sli4_hba.ctrl_regs_memmap_p =
10436 ioremap(phba->pci_bar1_map,
10437 bar1map_len);
10438 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10439 dev_err(&pdev->dev,
10440 "ioremap failed for SLI4 HBA "
10441 "control registers.\n");
10442 error = -ENOMEM;
10443 goto out_iounmap_conf;
10444 }
10445 phba->pci_bar2_memmap_p =
10446 phba->sli4_hba.ctrl_regs_memmap_p;
10447 lpfc_sli4_bar1_register_memmap(phba, if_type);
10448 } else {
10449 error = -ENOMEM;
10450 goto out_iounmap_conf;
10451 }
10452 }
10453
10454 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10455 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10456
10457
10458
10459
10460 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10461 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10462 phba->sli4_hba.drbl_regs_memmap_p =
10463 ioremap(phba->pci_bar1_map, bar1map_len);
10464 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10465 dev_err(&pdev->dev,
10466 "ioremap failed for SLI4 HBA doorbell registers.\n");
10467 error = -ENOMEM;
10468 goto out_iounmap_conf;
10469 }
10470 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10471 lpfc_sli4_bar1_register_memmap(phba, if_type);
10472 }
10473
10474 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10475 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10476
10477
10478
10479
10480 phba->pci_bar2_map = pci_resource_start(pdev,
10481 PCI_64BIT_BAR4);
10482 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10483 phba->sli4_hba.drbl_regs_memmap_p =
10484 ioremap(phba->pci_bar2_map,
10485 bar2map_len);
10486 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10487 dev_err(&pdev->dev,
10488 "ioremap failed for SLI4 HBA"
10489 " doorbell registers.\n");
10490 error = -ENOMEM;
10491 goto out_iounmap_ctrl;
10492 }
10493 phba->pci_bar4_memmap_p =
10494 phba->sli4_hba.drbl_regs_memmap_p;
10495 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10496 if (error)
10497 goto out_iounmap_all;
10498 } else {
10499 error = -ENOMEM;
10500 goto out_iounmap_all;
10501 }
10502 }
10503
10504 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10505 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10506
10507
10508
10509
10510 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10511 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10512 phba->sli4_hba.dpp_regs_memmap_p =
10513 ioremap(phba->pci_bar2_map, bar2map_len);
10514 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10515 dev_err(&pdev->dev,
10516 "ioremap failed for SLI4 HBA dpp registers.\n");
10517 error = -ENOMEM;
10518 goto out_iounmap_ctrl;
10519 }
10520 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10521 }
10522
10523
10524 switch (if_type) {
10525 case LPFC_SLI_INTF_IF_TYPE_0:
10526 case LPFC_SLI_INTF_IF_TYPE_2:
10527 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10528 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10529 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10530 break;
10531 case LPFC_SLI_INTF_IF_TYPE_6:
10532 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10533 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10534 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10535 break;
10536 default:
10537 break;
10538 }
10539
10540 return 0;
10541
10542out_iounmap_all:
10543 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10544out_iounmap_ctrl:
10545 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10546out_iounmap_conf:
10547 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10548
10549 return error;
10550}
10551
10552
10553
10554
10555
10556
10557
10558
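/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 PCI memory space
 * @phba: pointer to lpfc hba data structure.
 *
 * Unmaps the kernel virtual mappings created by lpfc_sli4_pci_mem_setup()
 * according to the SLI interface type.
 **/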
10559static void
10560lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10561{
10562 uint32_t if_type;
10563 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10564
10565 switch (if_type) {
10566 case LPFC_SLI_INTF_IF_TYPE_0:
10567 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10568 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10569 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10570 break;
10571 case LPFC_SLI_INTF_IF_TYPE_2:
10572 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10573 break;
10574 case LPFC_SLI_INTF_IF_TYPE_6:
10575 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10576 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10577 if (phba->sli4_hba.dpp_regs_memmap_p)
10578 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
10579 break;
10580 case LPFC_SLI_INTF_IF_TYPE_1:
10581 default:
10582 dev_printk(KERN_ERR, &phba->pcidev->dev,
10583 "FATAL - unsupported SLI4 interface type - %d\n",
10584 if_type);
10585 break;
10586 }
10587}
10588
10589
10590
10591
10592
10593
10594
10595
10596
10597
10598
10599
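/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates the two SLI-3 MSI-X vectors, requests the slow-path and
 * fast-path interrupt handlers and issues the CONFIG_MSI mailbox command.
 *
 * Return 0 on success, error code otherwise (all resources are released on
 * failure).
 **/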
10600static int
10601lpfc_sli_enable_msix(struct lpfc_hba *phba)
10602{
10603 int rc;
10604 LPFC_MBOXQ_t *pmb;
10605
10606
10607 rc = pci_alloc_irq_vectors(phba->pcidev,
10608 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10609 if (rc < 0) {
10610 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10611 "0420 PCI enable MSI-X failed (%d)\n", rc);
10612 goto vec_fail_out;
10613 }
10614
10615
10616
10617
10618
10619
10620 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
10621 &lpfc_sli_sp_intr_handler, 0,
10622 LPFC_SP_DRIVER_HANDLER_NAME, phba);
10623 if (rc) {
10624 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10625 "0421 MSI-X slow-path request_irq failed "
10626 "(%d)\n", rc);
10627 goto msi_fail_out;
10628 }
10629
10630
10631 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
10632 &lpfc_sli_fp_intr_handler, 0,
10633 LPFC_FP_DRIVER_HANDLER_NAME, phba);
10634
10635 if (rc) {
10636 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10637 "0429 MSI-X fast-path request_irq failed "
10638 "(%d)\n", rc);
10639 goto irq_fail_out;
10640 }
10641
10642
10643
10644
10645 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10646
10647 if (!pmb) {
10648 rc = -ENOMEM;
10649 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10650 "0474 Unable to allocate memory for issuing "
10651 "MBOX_CONFIG_MSI command\n");
10652 goto mem_fail_out;
10653 }
10654 rc = lpfc_config_msi(phba, pmb);
10655 if (rc)
10656 goto mbx_fail_out;
10657 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10658 if (rc != MBX_SUCCESS) {
10659 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10660 "0351 Config MSI mailbox command failed, "
10661 "mbxCmd x%x, mbxStatus x%x\n",
10662 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10663 goto mbx_fail_out;
10664 }
10665
10666
10667 mempool_free(pmb, phba->mbox_mem_pool);
10668 return rc;
10669
10670mbx_fail_out:
10671
10672 mempool_free(pmb, phba->mbox_mem_pool);
10673
10674mem_fail_out:
10675
10676 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10677
10678irq_fail_out:
10679
10680 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10681
10682msi_fail_out:
10683
10684 pci_free_irq_vectors(phba->pcidev);
10685
10686vec_fail_out:
10687 return rc;
10688}
10689
10690
10691
10692
10693
10694
10695
10696
10697
10698
10699
10700
10701
10702
10703
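/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Enables MSI and requests a single interrupt line; MSI is disabled again
 * if the request fails.
 *
 * Return 0 on success, error code otherwise.
 **/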
10704static int
10705lpfc_sli_enable_msi(struct lpfc_hba *phba)
10706{
10707 int rc;
10708
10709 rc = pci_enable_msi(phba->pcidev);
10710 if (!rc)
10711 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10712 "0462 PCI enable MSI mode success.\n");
10713 else {
10714 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10715 "0471 PCI enable MSI mode failed (%d)\n", rc);
10716 return rc;
10717 }
10718
10719 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10720 0, LPFC_DRIVER_NAME, phba);
10721 if (rc) {
10722 pci_disable_msi(phba->pcidev);
10723 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10724 "0478 MSI request_irq failed (%d)\n", rc);
10725 }
10726 return rc;
10727}
10728
10729
10730
10731
10732
10733
10734
10735
10736
10737
10738
10739
10740
10741
10742
10743
10744
10745
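/**
 * lpfc_sli_enable_intr - Enable device interrupt for SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode (0 INTx, 1 MSI, 2 MSI-X).
 *
 * Tries MSI-X first (when configured), then MSI, and finally falls back to
 * INTx.  Returns the interrupt mode actually enabled, or LPFC_INTR_ERROR.
 **/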
10746static uint32_t
10747lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10748{
10749 uint32_t intr_mode = LPFC_INTR_ERROR;
10750 int retval;
10751
10752 if (cfg_mode == 2) {
10753
10754 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10755 if (!retval) {
10756
10757 retval = lpfc_sli_enable_msix(phba);
10758 if (!retval) {
10759
10760 phba->intr_type = MSIX;
10761 intr_mode = 2;
10762 }
10763 }
10764 }
10765
10766
10767 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10768 retval = lpfc_sli_enable_msi(phba);
10769 if (!retval) {
10770
10771 phba->intr_type = MSI;
10772 intr_mode = 1;
10773 }
10774 }
10775
10776
10777 if (phba->intr_type == NONE) {
10778 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10779 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10780 if (!retval) {
10781
10782 phba->intr_type = INTx;
10783 intr_mode = 0;
10784 }
10785 }
10786 return intr_mode;
10787}
10788
10789
10790
10791
10792
10793
10794
10795
10796
10797
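/**
 * lpfc_sli_disable_intr - Disable device interrupt for SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the requested IRQ line(s), releases the IRQ vectors and resets the
 * driver interrupt state.
 **/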
10798static void
10799lpfc_sli_disable_intr(struct lpfc_hba *phba)
10800{
10801 int nr_irqs, i;
10802
10803 if (phba->intr_type == MSIX)
10804 nr_irqs = LPFC_MSIX_VECTORS;
10805 else
10806 nr_irqs = 1;
10807
10808 for (i = 0; i < nr_irqs; i++)
10809 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10810 pci_free_irq_vectors(phba->pcidev);
10811
10812
10813 phba->intr_type = NONE;
10814 phba->sli.slistat.sli_intr = 0;
10815}
10816
10817
10818
10819
10820
10821
10822
10823
10824
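/**
 * lpfc_find_cpu_handle - Find the CPU that matches the given queue index
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ or hardware queue index to look up.
 * @match: LPFC_FIND_BY_EQ or LPFC_FIND_BY_HDWQ.
 *
 * Returns the first present CPU mapped to the EQ (first-IRQ CPU only) or
 * hardware queue, or 0 if no match is found.
 **/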
10825static uint16_t
10826lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10827{
10828 struct lpfc_vector_map_info *cpup;
10829 int cpu;
10830
10831
10832 for_each_present_cpu(cpu) {
10833 cpup = &phba->sli4_hba.cpu_map[cpu];
10834
10835
10836
10837
10838
10839 if ((match == LPFC_FIND_BY_EQ) &&
10840 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10841 (cpup->eq == id))
10842 return cpu;
10843
10844
10845 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10846 return cpu;
10847 }
10848 return 0;
10849}
10850
10851#ifdef CONFIG_X86
10852
10853
10854
10855
10856
10857
10858
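/**
 * lpfc_find_hyper - Determine whether a CPU shares its physical core
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU being examined.
 * @phys_id: physical package id of @cpu.
 * @core_id: core id of @cpu.
 *
 * Returns 1 if another present CPU has the same phys/core ids (i.e. @cpu is
 * a hyper-thread sibling), otherwise 0.
 **/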
10859static int
10860lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10861 uint16_t phys_id, uint16_t core_id)
10862{
10863 struct lpfc_vector_map_info *cpup;
10864 int idx;
10865
10866 for_each_present_cpu(idx) {
10867 cpup = &phba->sli4_hba.cpu_map[idx];
10868
10869 if ((cpup->phys_id == phys_id) &&
10870 (cpup->core_id == core_id) &&
10871 (cpu != idx))
10872 return 1;
10873 }
10874 return 0;
10875}
10876#endif
10877
10878
10879
10880
10881
10882
10883
10884
10885
10886
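/**
 * lpfc_assign_eq_map_info - Record the EQ assignment for a CPU
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: EQ index being assigned.
 * @flag: mapping flag (e.g. LPFC_CPU_FIRST_IRQ).
 * @cpu: CPU receiving the assignment.
 *
 * Stores @eqidx and @flag in the CPU's vector map entry and logs the
 * assignment.
 **/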
10887static inline void
10888lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10889 unsigned int cpu)
10890{
10891 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10892 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10893
10894 cpup->eq = eqidx;
10895 cpup->flag |= flag;
10896
10897 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10898 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
10899 cpu, eqhdl->irq, cpup->eq, cpup->flag);
10900}
10901
10902
10903
10904
10905
10906
10907
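/**
 * lpfc_cpu_map_array_init - Initialize the per-CPU vector map and EQ info
 * @phba: pointer to lpfc hba data structure.
 *
 * Marks every possible CPU's map entry as empty and resets its EQ interrupt
 * accounting.
 **/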
10908static void
10909lpfc_cpu_map_array_init(struct lpfc_hba *phba)
10910{
10911 struct lpfc_vector_map_info *cpup;
10912 struct lpfc_eq_intr_info *eqi;
10913 int cpu;
10914
10915 for_each_possible_cpu(cpu) {
10916 cpup = &phba->sli4_hba.cpu_map[cpu];
10917 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10918 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10919 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10920 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10921 cpup->flag = 0;
10922 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
10923 INIT_LIST_HEAD(&eqi->list);
10924 eqi->icnt = 0;
10925 }
10926}
10927
10928
10929
10930
10931
10932
10933
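/**
 * lpfc_hba_eq_hdl_array_init - Initialize the HBA event queue handles
 * @phba: pointer to lpfc hba data structure.
 *
 * Marks every EQ handle's IRQ as unassigned and back-points it to @phba.
 **/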
10934static void
10935lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
10936{
10937 struct lpfc_hba_eq_hdl *eqhdl;
10938 int i;
10939
10940 for (i = 0; i < phba->cfg_irq_chann; i++) {
10941 eqhdl = lpfc_get_eq_hdl(i);
10942 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
10943 eqhdl->phba = phba;
10944 }
10945}
10946
10947
10948
10949
10950
10951
10952
10953
10954
10955
10956
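/**
 * lpfc_cpu_affinity_check - Re-balance EQ and hardware queue CPU affinity
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of MSI-X vectors allocated.
 *
 * Records physical/core ids for every present CPU, assigns an EQ to any CPU
 * that did not receive one from IRQ affinity (preferring a peer CPU on the
 * same physical socket), and then distributes the hardware queues across
 * CPUs, giving first-IRQ CPUs the low hdwq indexes.
 **/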
10957static void
10958lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10959{
10960 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10961 int max_phys_id, min_phys_id;
10962 int max_core_id, min_core_id;
10963 struct lpfc_vector_map_info *cpup;
10964 struct lpfc_vector_map_info *new_cpup;
10965#ifdef CONFIG_X86
10966 struct cpuinfo_x86 *cpuinfo;
10967#endif
10968#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
10969 struct lpfc_hdwq_stat *c_stat;
10970#endif
10971
10972 max_phys_id = 0;
10973 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10974 max_core_id = 0;
10975 min_core_id = LPFC_VECTOR_MAP_EMPTY;
10976
10977
10978 for_each_present_cpu(cpu) {
10979 cpup = &phba->sli4_hba.cpu_map[cpu];
10980#ifdef CONFIG_X86
10981 cpuinfo = &cpu_data(cpu);
10982 cpup->phys_id = cpuinfo->phys_proc_id;
10983 cpup->core_id = cpuinfo->cpu_core_id;
10984 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10985 cpup->flag |= LPFC_CPU_MAP_HYPER;
10986#else
10987
10988 cpup->phys_id = 0;
10989 cpup->core_id = cpu;
10990#endif
10991
10992 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10993 "3328 CPU %d physid %d coreid %d flag x%x\n",
10994 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10995
10996 if (cpup->phys_id > max_phys_id)
10997 max_phys_id = cpup->phys_id;
10998 if (cpup->phys_id < min_phys_id)
10999 min_phys_id = cpup->phys_id;
11000
11001 if (cpup->core_id > max_core_id)
11002 max_core_id = cpup->core_id;
11003 if (cpup->core_id < min_core_id)
11004 min_core_id = cpup->core_id;
11005 }
11006
11007
11008
11009
11010
11011
11012 first_cpu = cpumask_first(cpu_present_mask);
11013 start_cpu = first_cpu;
11014
11015 for_each_present_cpu(cpu) {
11016 cpup = &phba->sli4_hba.cpu_map[cpu];
11017
11018
11019 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11020
11021 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11022
11023
11024
11025
11026
11027
11028 new_cpu = start_cpu;
11029 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11030 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11031 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11032 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
11033 (new_cpup->phys_id == cpup->phys_id))
11034 goto found_same;
11035 new_cpu = cpumask_next(
11036 new_cpu, cpu_present_mask);
11037 if (new_cpu == nr_cpumask_bits)
11038 new_cpu = first_cpu;
11039 }
11040
11041 continue;
11042found_same:
11043
11044 cpup->eq = new_cpup->eq;
11045
11046
11047
11048
11049
11050 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11051 if (start_cpu == nr_cpumask_bits)
11052 start_cpu = first_cpu;
11053
11054 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11055 "3337 Set Affinity: CPU %d "
11056 "eq %d from peer cpu %d same "
11057 "phys_id (%d)\n",
11058 cpu, cpup->eq, new_cpu,
11059 cpup->phys_id);
11060 }
11061 }
11062
11063
11064 start_cpu = first_cpu;
11065
11066 for_each_present_cpu(cpu) {
11067 cpup = &phba->sli4_hba.cpu_map[cpu];
11068
11069
11070 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11071
11072 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11073
11074
11075
11076
11077
11078
11079 new_cpu = start_cpu;
11080 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11081 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11082 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11083 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
11084 goto found_any;
11085 new_cpu = cpumask_next(
11086 new_cpu, cpu_present_mask);
11087 if (new_cpu == nr_cpumask_bits)
11088 new_cpu = first_cpu;
11089 }
11090
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3339 Set Affinity: CPU %d "
					"eq %d UNASSIGNED\n",
					cpu, cpup->eq);
11095 continue;
11096found_any:
11097
11098 cpup->eq = new_cpup->eq;
11099
11100
11101
11102
11103
11104 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11105 if (start_cpu == nr_cpumask_bits)
11106 start_cpu = first_cpu;
11107
11108 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11109 "3338 Set Affinity: CPU %d "
11110 "eq %d from peer cpu %d (%d/%d)\n",
11111 cpu, cpup->eq, new_cpu,
11112 new_cpup->phys_id, new_cpup->core_id);
11113 }
11114 }
11115
11116
11117
11118
11119 idx = 0;
11120 for_each_present_cpu(cpu) {
11121 cpup = &phba->sli4_hba.cpu_map[cpu];
11122
11123
11124 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11125 continue;
11126
11127
11128 cpup->hdwq = idx;
11129 idx++;
11130 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11131 "3333 Set Affinity: CPU %d (phys %d core %d): "
11132 "hdwq %d eq %d flg x%x\n",
11133 cpu, cpup->phys_id, cpup->core_id,
11134 cpup->hdwq, cpup->eq, cpup->flag);
11135 }
11136
11137
11138
11139
11140
11141
11142
11143
11144 next_idx = idx;
11145 start_cpu = 0;
11146 idx = 0;
11147 for_each_present_cpu(cpu) {
11148 cpup = &phba->sli4_hba.cpu_map[cpu];
11149
11150
11151 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
11152 continue;
11153
11154
11155
11156
11157
11158 if (next_idx < phba->cfg_hdw_queue) {
11159 cpup->hdwq = next_idx;
11160 next_idx++;
11161 continue;
11162 }
11163
11164
11165
11166
11167
11168
11169 new_cpu = start_cpu;
11170 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11171 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11172 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11173 new_cpup->phys_id == cpup->phys_id &&
11174 new_cpup->core_id == cpup->core_id) {
11175 goto found_hdwq;
11176 }
11177 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11178 if (new_cpu == nr_cpumask_bits)
11179 new_cpu = first_cpu;
11180 }
11181
11182
11183
11184
11185 new_cpu = start_cpu;
11186 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11187 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11188 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11189 new_cpup->phys_id == cpup->phys_id)
11190 goto found_hdwq;
11191
11192 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11193 if (new_cpu == nr_cpumask_bits)
11194 new_cpu = first_cpu;
11195 }
11196
11197
11198 cpup->hdwq = idx % phba->cfg_hdw_queue;
11199 idx++;
11200 goto logit;
11201 found_hdwq:
11202
11203 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11204 if (start_cpu == nr_cpumask_bits)
11205 start_cpu = first_cpu;
11206 cpup->hdwq = new_cpup->hdwq;
11207 logit:
11208 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11209 "3335 Set Affinity: CPU %d (phys %d core %d): "
11210 "hdwq %d eq %d flg x%x\n",
11211 cpu, cpup->phys_id, cpup->core_id,
11212 cpup->hdwq, cpup->eq, cpup->flag);
11213 }
11214
11215
11216
11217
11218
11219 idx = 0;
11220 for_each_possible_cpu(cpu) {
11221 cpup = &phba->sli4_hba.cpu_map[cpu];
11222#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11223 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11224 c_stat->hdwq_no = cpup->hdwq;
11225#endif
11226 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11227 continue;
11228
11229 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
11230#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11231 c_stat->hdwq_no = cpup->hdwq;
11232#endif
11233 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11234 "3340 Set Affinity: not present "
11235 "CPU %d hdwq %d\n",
11236 cpu, cpup->hdwq);
11237 }
11238
11239
11240
11241
11242 return;
11243}
11244
11245
11246
11247
11248
11249
11250
11251
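/**
 * lpfc_cpuhp_get_eq - Collect EQs whose only online CPU is going offline
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU going offline.
 * @eqlist: list to receive the affected event queues.
 *
 * An EQ is added to @eqlist only when @cpu is in its IRQ affinity mask and
 * no other online CPU can service that vector.
 *
 * Return 0 on success, -ENOMEM if a temporary cpumask cannot be allocated.
 **/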
11252static int
11253lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11254 struct list_head *eqlist)
11255{
11256 const struct cpumask *maskp;
11257 struct lpfc_queue *eq;
11258 struct cpumask *tmp;
11259 u16 idx;
11260
11261 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11262 if (!tmp)
11263 return -ENOMEM;
11264
11265 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11266 maskp = pci_irq_get_affinity(phba->pcidev, idx);
11267 if (!maskp)
11268 continue;
11269
11270
11271
11272
11273
11274 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
11275 continue;
11276
11277
11278
11279
11280
11281
11282 cpumask_and(tmp, maskp, cpu_online_mask);
11283 if (cpumask_weight(tmp) > 1)
11284 continue;
11285
11286
11287
11288
11289
11290
11291 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11292 list_add(&eq->_poll_list, eqlist);
11293 }
11294 kfree(tmp);
11295 return 0;
11296}
11297
11298static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11299{
11300 if (phba->sli_rev != LPFC_SLI_REV4)
11301 return;
11302
11303 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11304 &phba->cpuhp);
11305
11306
11307
11308
11309 synchronize_rcu();
11310 del_timer_sync(&phba->cpuhp_poll_timer);
11311}
11312
11313static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11314{
11315 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11316 return;
11317
11318 __lpfc_cpuhp_remove(phba);
11319}
11320
11321static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11322{
11323 if (phba->sli_rev != LPFC_SLI_REV4)
11324 return;
11325
11326 rcu_read_lock();
11327
11328 if (!list_empty(&phba->poll_list))
11329 mod_timer(&phba->cpuhp_poll_timer,
11330 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
11331
11332 rcu_read_unlock();
11333
11334 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11335 &phba->cpuhp);
11336}
11337
11338static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11339{
11340 if (phba->pport->load_flag & FC_UNLOADING) {
11341 *retval = -EAGAIN;
11342 return true;
11343 }
11344
11345 if (phba->sli_rev != LPFC_SLI_REV4) {
11346 *retval = 0;
11347 return true;
11348 }
11349
11350
11351 return false;
11352}
11353
11354
11355
11356
11357
11358
11359
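/**
 * lpfc_irq_set_aff - Pin an EQ interrupt to a single CPU
 * @eqhdl: EQ handle whose IRQ is being pinned.
 * @cpu: CPU to receive the interrupt.
 *
 * Sets the handle's affinity mask to @cpu only, excludes the vector from
 * irqbalance and applies the affinity hint.
 **/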
11360static inline void
11361lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
11362{
11363 cpumask_clear(&eqhdl->aff_mask);
11364 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
11365 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11366 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11367}
11368
11369
11370
11371
11372
11373
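/**
 * lpfc_irq_clear_aff - Clear the driver-managed affinity of an EQ interrupt
 * @eqhdl: EQ handle whose IRQ affinity is being cleared.
 **/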
11374static inline void
11375lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
11376{
11377 cpumask_clear(&eqhdl->aff_mask);
11378 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11379}
11380
11381
11382
11383
11384
11385
11386
11387
11388
11389
11390
11391
11392
11393
11394
11395
11396
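/**
 * lpfc_irq_rebalance - Rebalance managed IRQ affinity on CPU hotplug
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU going offline or coming online.
 * @offline: true when @cpu is going offline.
 *
 * Only used when irq_chann_mode is not NORMAL_MODE.  On offline, interrupts
 * pinned to @cpu are retargeted to the next online CPU in the affinity mask
 * (or unpinned when none remains); on online, the CPU's first-IRQ EQ is
 * pinned back to it.
 **/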
11397static void
11398lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
11399{
11400 struct lpfc_vector_map_info *cpup;
11401 struct cpumask *aff_mask;
11402 unsigned int cpu_select, cpu_next, idx;
11403 const struct cpumask *orig_mask;
11404
11405 if (phba->irq_chann_mode == NORMAL_MODE)
11406 return;
11407
11408 orig_mask = &phba->sli4_hba.irq_aff_mask;
11409
11410 if (!cpumask_test_cpu(cpu, orig_mask))
11411 return;
11412
11413 cpup = &phba->sli4_hba.cpu_map[cpu];
11414
11415 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11416 return;
11417
11418 if (offline) {
11419
11420 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
11421 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
11422
11423
11424 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
11425
11426
11427
11428 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11429 aff_mask = lpfc_get_aff_mask(idx);
11430
11431
11432 if (cpumask_test_cpu(cpu, aff_mask))
11433 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
11434 cpu_select);
11435 }
11436 } else {
11437
11438 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
11439 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
11440 }
11441 } else {
11442
11443 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
11444 }
11445}
11446
11447static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11448{
11449 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11450 struct lpfc_queue *eq, *next;
11451 LIST_HEAD(eqlist);
11452 int retval;
11453
11454 if (!phba) {
11455 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11456 return 0;
11457 }
11458
11459 if (__lpfc_cpuhp_checks(phba, &retval))
11460 return retval;
11461
11462 lpfc_irq_rebalance(phba, cpu, true);
11463
11464 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11465 if (retval)
11466 return retval;
11467
11468
11469 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11470 list_del_init(&eq->_poll_list);
11471 lpfc_sli4_start_polling(eq);
11472 }
11473
11474 return 0;
11475}
11476
11477static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11478{
11479 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11480 struct lpfc_queue *eq, *next;
11481 unsigned int n;
11482 int retval;
11483
11484 if (!phba) {
11485 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11486 return 0;
11487 }
11488
11489 if (__lpfc_cpuhp_checks(phba, &retval))
11490 return retval;
11491
11492 lpfc_irq_rebalance(phba, cpu, false);
11493
11494 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11495 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11496 if (n == cpu)
11497 lpfc_sli4_stop_polling(eq);
11498 }
11499
11500 return 0;
11501}
11502
11503
11504
11505
11506
11507
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517
11518
11519
11520
11521
11522
11523
11524
11525
11526
11527
11528
11529
11530
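/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode for SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates up to cfg_irq_chann MSI-X vectors (honoring any restricted
 * affinity mask), requests an interrupt handler per vector and records the
 * first-IRQ CPU for each EQ.  cfg_irq_chann is reduced if fewer vectors
 * were granted.
 *
 * Return 0 on success, error code otherwise.
 **/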
11531static int
11532lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11533{
11534 int vectors, rc, index;
11535 char *name;
11536 const struct cpumask *aff_mask = NULL;
11537 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
11538 struct lpfc_vector_map_info *cpup;
11539 struct lpfc_hba_eq_hdl *eqhdl;
11540 const struct cpumask *maskp;
11541 unsigned int flags = PCI_IRQ_MSIX;
11542
11543
11544 vectors = phba->cfg_irq_chann;
11545
11546 if (phba->irq_chann_mode != NORMAL_MODE)
11547 aff_mask = &phba->sli4_hba.irq_aff_mask;
11548
11549 if (aff_mask) {
11550 cpu_cnt = cpumask_weight(aff_mask);
11551 vectors = min(phba->cfg_irq_chann, cpu_cnt);
11552
11553
11554
11555
11556 cpu = cpumask_first(aff_mask);
11557 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11558 } else {
11559 flags |= PCI_IRQ_AFFINITY;
11560 }
11561
11562 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
11563 if (rc < 0) {
11564 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11565 "0484 PCI enable MSI-X failed (%d)\n", rc);
11566 goto vec_fail_out;
11567 }
11568 vectors = rc;
11569
11570
11571 for (index = 0; index < vectors; index++) {
11572 eqhdl = lpfc_get_eq_hdl(index);
11573 name = eqhdl->handler_name;
11574 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11575 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
11576 LPFC_DRIVER_HANDLER_NAME"%d", index);
11577
11578 eqhdl->idx = index;
11579 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11580 &lpfc_sli4_hba_intr_handler, 0,
11581 name, eqhdl);
11582 if (rc) {
11583 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11584 "0486 MSI-X fast-path (%d) "
11585 "request_irq failed (%d)\n", index, rc);
11586 goto cfg_fail_out;
11587 }
11588
11589 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
11590
11591 if (aff_mask) {
11592
11593 if (cpu_select < nr_cpu_ids)
11594 lpfc_irq_set_aff(eqhdl, cpu_select);
11595
11596
11597 lpfc_assign_eq_map_info(phba, index,
11598 LPFC_CPU_FIRST_IRQ,
11599 cpu);
11600
11601
11602 cpu = cpumask_next(cpu, aff_mask);
11603
11604
11605 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11606 } else if (vectors == 1) {
11607 cpu = cpumask_first(cpu_present_mask);
11608 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11609 cpu);
11610 } else {
11611 maskp = pci_irq_get_affinity(phba->pcidev, index);
11612
11613
11614 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
11615 cpup = &phba->sli4_hba.cpu_map[cpu];
11616
11617
11618
11619
11620
11621
11622
11623
11624
11625
11626
11627
11628
11629 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
11630 continue;
11631 lpfc_assign_eq_map_info(phba, index,
11632 LPFC_CPU_FIRST_IRQ,
11633 cpu);
11634 break;
11635 }
11636 }
11637 }
11638
11639 if (vectors != phba->cfg_irq_chann) {
11640 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11641 "3238 Reducing IO channels to match number of "
11642 "MSI-X vectors, requested %d got %d\n",
11643 phba->cfg_irq_chann, vectors);
11644 if (phba->cfg_irq_chann > vectors)
11645 phba->cfg_irq_chann = vectors;
11646 }
11647
11648 return rc;
11649
11650cfg_fail_out:
11651
11652 for (--index; index >= 0; index--) {
11653 eqhdl = lpfc_get_eq_hdl(index);
11654 lpfc_irq_clear_aff(eqhdl);
11655 irq_set_affinity_hint(eqhdl->irq, NULL);
11656 free_irq(eqhdl->irq, eqhdl);
11657 }
11658
11659
11660 pci_free_irq_vectors(phba->pcidev);
11661
11662vec_fail_out:
11663 return rc;
11664}
11665
11666
11667
11668
11669
11670
11671
11672
11673
11674
11675
11676
11677
11678
11679
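/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates a single MSI vector, requests the interrupt handler and points
 * every EQ handle at that one vector.
 *
 * Return 0 on success, error code otherwise.
 **/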
11680static int
11681lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11682{
11683 int rc, index;
11684 unsigned int cpu;
11685 struct lpfc_hba_eq_hdl *eqhdl;
11686
11687 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11688 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11689 if (rc > 0)
11690 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11691 "0487 PCI enable MSI mode success.\n");
11692 else {
11693 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11694 "0488 PCI enable MSI mode failed (%d)\n", rc);
11695 return rc ? rc : -1;
11696 }
11697
11698 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11699 0, LPFC_DRIVER_NAME, phba);
11700 if (rc) {
11701 pci_free_irq_vectors(phba->pcidev);
11702 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11703 "0490 MSI request_irq failed (%d)\n", rc);
11704 return rc;
11705 }
11706
11707 eqhdl = lpfc_get_eq_hdl(0);
11708 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11709
11710 cpu = cpumask_first(cpu_present_mask);
11711 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11712
11713 for (index = 0; index < phba->cfg_irq_chann; index++) {
11714 eqhdl = lpfc_get_eq_hdl(index);
11715 eqhdl->idx = index;
11716 }
11717
11718 return 0;
11719}
11720
11721
11722
11723
11724
11725
11726
11727
11728
11729
11730
11731
11732
11733
11734
11735
11736
11737
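/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (2=MSI-X, 1=MSI, 0=INTx).
 *
 * This routine is invoked to enable device interrupt and associate the
 * driver's interrupt handler(s) with the interrupt vector(s) on a device
 * with SLI-4 interface spec. Starting from the configured mode, the driver
 * falls back in the order MSI-X -> MSI -> INTx until one succeeds.
 *
 * Return codes
 *	interrupt mode (2, 1, 0) - successful
 *	LPFC_INTR_ERROR - error
 **/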
11738static uint32_t
11739lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11740{
11741 uint32_t intr_mode = LPFC_INTR_ERROR;
11742 int retval, idx;
11743
11744 if (cfg_mode == 2) {
11745
11746 retval = 0;
11747 if (!retval) {
11748
11749 retval = lpfc_sli4_enable_msix(phba);
11750 if (!retval) {
11751
11752 phba->intr_type = MSIX;
11753 intr_mode = 2;
11754 }
11755 }
11756 }
11757
	/* Fallback to MSI if MSI-X initialization failed */
11759 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11760 retval = lpfc_sli4_enable_msi(phba);
11761 if (!retval) {
11762
11763 phba->intr_type = MSI;
11764 intr_mode = 1;
11765 }
11766 }
11767
	/* Fallback to INTx mode if both MSI-X and MSI initialization failed */
11769 if (phba->intr_type == NONE) {
11770 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11771 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11772 if (!retval) {
11773 struct lpfc_hba_eq_hdl *eqhdl;
11774 unsigned int cpu;
11775
11776
11777 phba->intr_type = INTx;
11778 intr_mode = 0;
11779
11780 eqhdl = lpfc_get_eq_hdl(0);
11781 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11782
11783 cpu = cpumask_first(cpu_present_mask);
11784 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11785 cpu);
11786 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11787 eqhdl = lpfc_get_eq_hdl(idx);
11788 eqhdl->idx = idx;
11789 }
11790 }
11791 }
11792 return intr_mode;
11793}
11794
11795
11796
11797
11798
11799
11800
11801
11802
11803
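/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from the interrupt vector(s) on a device
 * with SLI-4 interface spec, then release the allocated IRQ vectors.
 **/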
11804static void
11805lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11806{
11807
11808 if (phba->intr_type == MSIX) {
11809 int index;
11810 struct lpfc_hba_eq_hdl *eqhdl;
11811
11812
11813 for (index = 0; index < phba->cfg_irq_chann; index++) {
11814 eqhdl = lpfc_get_eq_hdl(index);
11815 lpfc_irq_clear_aff(eqhdl);
11816 irq_set_affinity_hint(eqhdl->irq, NULL);
11817 free_irq(eqhdl->irq, eqhdl);
11818 }
11819 } else {
11820 free_irq(phba->pcidev->irq, phba);
11821 }
11822
11823 pci_free_irq_vectors(phba->pcidev);
11824
11825
11826 phba->intr_type = NONE;
11827 phba->sli.slistat.sli_intr = 0;
11828}
11829
11830
11831
11832
11833
11834
11835
11836
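/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps on
 * a device with SLI-3 interface spec.
 **/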
11837static void
11838lpfc_unset_hba(struct lpfc_hba *phba)
11839{
11840 struct lpfc_vport *vport = phba->pport;
11841 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11842
11843 spin_lock_irq(shost->host_lock);
11844 vport->load_flag |= FC_UNLOADING;
11845 spin_unlock_irq(shost->host_lock);
11846
11847 kfree(phba->vpi_bmask);
11848 kfree(phba->vpi_ids);
11849
11850 lpfc_stop_hba_timers(phba);
11851
11852 phba->pport->work_port_events = 0;
11853
11854 lpfc_sli_hba_down(phba);
11855
11856 lpfc_sli_brdrestart(phba);
11857
11858 lpfc_sli_disable_intr(phba);
11859
11860 return;
11861}
11862
11863
11864
11865
11866
11867
11868
11869
11870
11871
11872
11873
11874
11875
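/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion of
 * the device's XRI exchange busy. It polls the abort lists for outstanding
 * IO, ELS and NVMET exchanges, sleeping between checks and logging a
 * message once the wait exceeds LPFC_XRI_EXCH_BUSY_WAIT_TMO.
 **/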
11876static void
11877lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11878{
11879 struct lpfc_sli4_hdw_queue *qp;
11880 int idx, ccnt;
11881 int wait_time = 0;
11882 int io_xri_cmpl = 1;
11883 int nvmet_xri_cmpl = 1;
11884 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11885
	/* Driver just aborted IOs during the hba_unset process.  Pause
	 * here to give the HBA time to complete the IO and get entries
	 * into the abts lists.
	 */
11890 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11891
	/* Wait for NVME pending IO to flush back to transport. */
11893 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11894 lpfc_nvme_wait_for_io_drain(phba);
11895
11896 ccnt = 0;
11897 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11898 qp = &phba->sli4_hba.hdwq[idx];
11899 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11900 if (!io_xri_cmpl)
11901 ccnt++;
11902 }
11903 if (ccnt)
11904 io_xri_cmpl = 0;
11905
11906 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11907 nvmet_xri_cmpl =
11908 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11909 }
11910
11911 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
11912 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
11913 if (!nvmet_xri_cmpl)
11914 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11915 "6424 NVMET XRI exchange busy "
11916 "wait time: %d seconds.\n",
11917 wait_time/1000);
11918 if (!io_xri_cmpl)
11919 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11920 "6100 IO XRI exchange busy "
11921 "wait time: %d seconds.\n",
11922 wait_time/1000);
11923 if (!els_xri_cmpl)
11924 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11925 "2878 ELS XRI exchange busy "
11926 "wait time: %d seconds.\n",
11927 wait_time/1000);
11928 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
11929 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
11930 } else {
11931 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
11932 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
11933 }
11934
11935 ccnt = 0;
11936 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11937 qp = &phba->sli4_hba.hdwq[idx];
11938 io_xri_cmpl = list_empty(
11939 &qp->lpfc_abts_io_buf_list);
11940 if (!io_xri_cmpl)
11941 ccnt++;
11942 }
11943 if (ccnt)
11944 io_xri_cmpl = 0;
11945
11946 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11947 nvmet_xri_cmpl = list_empty(
11948 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11949 }
11950 els_xri_cmpl =
11951 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11952
11953 }
11954}
11955
11956
11957
11958
11959
11960
11961
11962
11963
11964
11965
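/**
 * lpfc_sli4_hba_unset - Unset the SLI4 hba
 * @phba: Pointer to HBA context object.
 *
 * This routine tears down an SLI4 HBA: it blocks and completes any active
 * mailbox command, aborts outstanding iocbs, waits for XRI exchange busy to
 * complete, disables interrupts, stops the worker thread, releases the
 * queues and issues a PCI function reset.
 **/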
11966static void
11967lpfc_sli4_hba_unset(struct lpfc_hba *phba)
11968{
11969 int wait_cnt = 0;
11970 LPFC_MBOXQ_t *mboxq;
11971 struct pci_dev *pdev = phba->pcidev;
11972
11973 lpfc_stop_hba_timers(phba);
11974 if (phba->pport)
11975 phba->sli4_hba.intr_enable = 0;
11976
	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from being posted */
11983 spin_lock_irq(&phba->hbalock);
11984 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11985 spin_unlock_irq(&phba->hbalock);
11986
11987 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11988 msleep(10);
11989 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
11990 break;
11991 }
11992
11993 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11994 spin_lock_irq(&phba->hbalock);
11995 mboxq = phba->sli.mbox_active;
11996 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
11997 __lpfc_mbox_cmpl_put(phba, mboxq);
11998 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11999 phba->sli.mbox_active = NULL;
12000 spin_unlock_irq(&phba->hbalock);
12001 }
12002
12003
12004 lpfc_sli_hba_iocb_abort(phba);
12005
12006
12007 lpfc_sli4_xri_exchange_busy_wait(phba);
12008
12009
12010 if (phba->pport)
12011 lpfc_cpuhp_remove(phba);
12012
12013
12014 lpfc_sli4_disable_intr(phba);
12015
12016
12017 if (phba->cfg_sriov_nr_virtfn)
12018 pci_disable_sriov(pdev);
12019
12020
12021 kthread_stop(phba->worker_thread);
12022
12023
12024 lpfc_ras_stop_fwlog(phba);
12025
	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
12029 lpfc_sli4_queue_unset(phba);
12030 lpfc_sli4_queue_destroy(phba);
12031
12032
12033 lpfc_pci_function_reset(phba);
12034
12035
12036 if (phba->ras_fwlog.ras_enabled)
12037 lpfc_sli4_ras_dma_free(phba);
12038
12039
12040 if (phba->pport)
12041 phba->pport->work_port_events = 0;
12042}
12043
12044
12045
12046
12047
12048
12049
12050
12051
12052
12053
12054
12055
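/**
 * lpfc_pc_sli4_params_get - Get the SLI4 parameters port capabilities
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function reads the port's SLI4 capabilities with the SLI4_PARAMS
 * mailbox command. It may be called from any context that can block-wait
 * for the completion; the expected caller is the probe routine.
 *
 * Return 0 on success, 1 on mailbox failure.
 **/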
12056int
12057lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12058{
12059 int rc;
12060 struct lpfc_mqe *mqe;
12061 struct lpfc_pc_sli4_params *sli4_params;
12062 uint32_t mbox_tmo;
12063
12064 rc = 0;
12065 mqe = &mboxq->u.mqe;
12066
12067
12068 lpfc_pc_sli4_params(mboxq);
12069 if (!phba->sli4_hba.intr_enable)
12070 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12071 else {
12072 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12073 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12074 }
12075
12076 if (unlikely(rc))
12077 return 1;
12078
12079 sli4_params = &phba->sli4_hba.pc_sli4_params;
12080 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
12081 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
12082 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
12083 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
12084 &mqe->un.sli4_params);
12085 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
12086 &mqe->un.sli4_params);
12087 sli4_params->proto_types = mqe->un.sli4_params.word3;
12088 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
12089 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
12090 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
12091 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
12092 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
12093 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
12094 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
12095 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
12096 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
12097 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
12098 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
12099 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
12100 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
12101 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
12102 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
12103 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
12104 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
12105 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
12106 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
12107 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
12108
12109
12110 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12111 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12112
12113 return rc;
12114}
12115
12116
12117
12118
12119
12120
12121
12122
12123
12124
12125
12126
12127
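/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function reads the port's SLI4 configuration parameters with the
 * GET_SLI4_PARAMETERS mailbox command and caches them in
 * phba->sli4_hba.pc_sli4_params, adjusting NVME, PBDE and suppress-response
 * support accordingly.
 *
 * Return 0 on success, nonzero on failure.
 **/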
12128int
12129lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12130{
12131 int rc;
12132 struct lpfc_mqe *mqe = &mboxq->u.mqe;
12133 struct lpfc_pc_sli4_params *sli4_params;
12134 uint32_t mbox_tmo;
12135 int length;
12136 bool exp_wqcq_pages = true;
12137 struct lpfc_sli4_parameters *mbx_sli4_parameters;
12138
	/*
	 * By default, the driver assumes the SLI4 port requires RPI headers
	 * to be posted in host memory.  This is overridden below by the
	 * cfg_hdrr bit returned in the GET_SLI4_PARAMETERS response.
	 */
12144 phba->sli4_hba.rpi_hdrs_in_use = 1;
12145
	/* Read the port's SLI4 Config Parameters */
12147 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
12148 sizeof(struct lpfc_sli4_cfg_mhdr));
12149 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
12150 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
12151 length, LPFC_SLI4_MBX_EMBED);
12152 if (!phba->sli4_hba.intr_enable)
12153 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12154 else {
12155 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12156 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12157 }
12158 if (unlikely(rc))
12159 return rc;
12160 sli4_params = &phba->sli4_hba.pc_sli4_params;
12161 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
12162 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
12163 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
12164 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
12165 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
12166 mbx_sli4_parameters);
12167 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
12168 mbx_sli4_parameters);
12169 if (bf_get(cfg_phwq, mbx_sli4_parameters))
12170 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
12171 else
12172 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
12173 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
12174 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
12175 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
12176 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
12177 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
12178 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
12179 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
12180 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
12181 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
12182 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
12183 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
12184 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
12185 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
12186 mbx_sli4_parameters);
12187 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
12188 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
12189 mbx_sli4_parameters);
12190 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
12191 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
12192
12193
12194 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
12195
12196
12197 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
12198 bf_get(cfg_xib, mbx_sli4_parameters));
12199
12200 if (rc) {
12201
12202 sli4_params->nvme = 1;
12203
12204
12205 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
12206 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12207 "6133 Disabling NVME support: "
12208 "FC4 type not supported: x%x\n",
12209 phba->cfg_enable_fc4_type);
12210 goto fcponly;
12211 }
12212 } else {
12213
12214 sli4_params->nvme = 0;
12215 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12216 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
12217 "6101 Disabling NVME support: Not "
12218 "supported by firmware (%d %d) x%x\n",
12219 bf_get(cfg_nvme, mbx_sli4_parameters),
12220 bf_get(cfg_xib, mbx_sli4_parameters),
12221 phba->cfg_enable_fc4_type);
12222fcponly:
12223 phba->nvme_support = 0;
12224 phba->nvmet_support = 0;
12225 phba->cfg_nvmet_mrq = 0;
12226 phba->cfg_nvme_seg_cnt = 0;
12227
12228
12229 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
12230 return -ENODEV;
12231 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12232 }
12233 }
12234
12235
12236
12237
12238 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12239 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
12240
12241
12242 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
12243 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
12244 phba->cfg_enable_pbde = 0;
12245
12246
12247
12248
12249
12250
12251
12252
12253
12254 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
12255 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
12256 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
12257 else
12258 phba->cfg_suppress_rsp = 0;
12259
12260 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
12261 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
12262
12263
12264 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12265 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12266
12267
12268
12269
12270
12271
12272 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
12273 phba->fcp_embed_io = 1;
12274 else
12275 phba->fcp_embed_io = 0;
12276
12277 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12278 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
12279 bf_get(cfg_xib, mbx_sli4_parameters),
12280 phba->cfg_enable_pbde,
12281 phba->fcp_embed_io, phba->nvme_support,
12282 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
12283
12284 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
12285 LPFC_SLI_INTF_IF_TYPE_2) &&
12286 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
12287 LPFC_SLI_INTF_FAMILY_LNCR_A0))
12288 exp_wqcq_pages = false;
12289
12290 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
12291 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
12292 exp_wqcq_pages &&
12293 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
12294 phba->enab_exp_wqcq_pages = 1;
12295 else
12296 phba->enab_exp_wqcq_pages = 0;
12297
12298
12299
12300 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
12301 phba->mds_diags_support = 1;
12302 else
12303 phba->mds_diags_support = 0;
12304
12305
12306
12307
12308 if (bf_get(cfg_nsler, mbx_sli4_parameters))
12309 phba->nsler = 1;
12310 else
12311 phba->nsler = 0;
12312
12313 return 0;
12314}
12315
12316
12317
12318
12319
12320
12321
12322
12323
12324
12325
12326
12327
12328
12329
12330
12331
12332
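/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg dev to PCI subsystem (SLI-3)
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine attaches a device with SLI-3 interface spec to the PCI
 * subsystem. It is invoked by the driver core once the PCI device match
 * succeeds and performs all HBA initialization: PCI enablement, resource
 * setup, SCSI host creation and interrupt configuration.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/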
12333static int
12334lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
12335{
12336 struct lpfc_hba *phba;
12337 struct lpfc_vport *vport = NULL;
12338 struct Scsi_Host *shost = NULL;
12339 int error;
12340 uint32_t cfg_mode, intr_mode;
12341
12342
12343 phba = lpfc_hba_alloc(pdev);
12344 if (!phba)
12345 return -ENOMEM;
12346
12347
12348 error = lpfc_enable_pci_dev(phba);
12349 if (error)
12350 goto out_free_phba;
12351
12352
12353 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
12354 if (error)
12355 goto out_disable_pci_dev;
12356
12357
12358 error = lpfc_sli_pci_mem_setup(phba);
12359 if (error) {
12360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12361 "1402 Failed to set up pci memory space.\n");
12362 goto out_disable_pci_dev;
12363 }
12364
12365
12366 error = lpfc_sli_driver_resource_setup(phba);
12367 if (error) {
12368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12369 "1404 Failed to set up driver resource.\n");
12370 goto out_unset_pci_mem_s3;
12371 }
12372
12373
12374
12375 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
12376 if (error) {
12377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12378 "1405 Failed to initialize iocb list.\n");
12379 goto out_unset_driver_resource_s3;
12380 }
12381
12382
12383 error = lpfc_setup_driver_resource_phase2(phba);
12384 if (error) {
12385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12386 "1406 Failed to set up driver resource.\n");
12387 goto out_free_iocb_list;
12388 }
12389
12390
12391 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
12392
12393
12394 error = lpfc_create_shost(phba);
12395 if (error) {
12396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12397 "1407 Failed to create scsi host.\n");
12398 goto out_unset_driver_resource;
12399 }
12400
12401
12402 vport = phba->pport;
12403 error = lpfc_alloc_sysfs_attr(vport);
12404 if (error) {
12405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12406 "1476 Failed to allocate sysfs attr\n");
12407 goto out_destroy_shost;
12408 }
12409
12410 shost = lpfc_shost_from_vport(vport);
12411
12412 cfg_mode = phba->cfg_use_msi;
12413 while (true) {
12414
12415 lpfc_stop_port(phba);
12416
12417 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
12418 if (intr_mode == LPFC_INTR_ERROR) {
12419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12420 "0431 Failed to enable interrupt.\n");
12421 error = -ENODEV;
12422 goto out_free_sysfs_attr;
12423 }
12424
12425 if (lpfc_sli_hba_setup(phba)) {
12426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12427 "1477 Failed to set up hba\n");
12428 error = -ENODEV;
12429 goto out_remove_device;
12430 }
12431
12432
12433 msleep(50);
12434
12435 if (intr_mode == 0 ||
12436 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
12437
12438 phba->intr_mode = intr_mode;
12439 lpfc_log_intr_mode(phba, intr_mode);
12440 break;
12441 } else {
12442 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12443 "0447 Configure interrupt mode (%d) "
12444 "failed active interrupt test.\n",
12445 intr_mode);
12446
12447 lpfc_sli_disable_intr(phba);
12448
12449 cfg_mode = --intr_mode;
12450 }
12451 }
12452
12453
12454 lpfc_post_init_setup(phba);
12455
12456
12457 lpfc_create_static_vport(phba);
12458
12459 return 0;
12460
12461out_remove_device:
12462 lpfc_unset_hba(phba);
12463out_free_sysfs_attr:
12464 lpfc_free_sysfs_attr(vport);
12465out_destroy_shost:
12466 lpfc_destroy_shost(phba);
12467out_unset_driver_resource:
12468 lpfc_unset_driver_resource_phase2(phba);
12469out_free_iocb_list:
12470 lpfc_free_iocb_list(phba);
12471out_unset_driver_resource_s3:
12472 lpfc_sli_driver_resource_unset(phba);
12473out_unset_pci_mem_s3:
12474 lpfc_sli_pci_mem_unset(phba);
12475out_disable_pci_dev:
12476 lpfc_disable_pci_dev(phba);
12477 if (shost)
12478 scsi_host_put(shost);
12479out_free_phba:
12480 lpfc_hba_free(phba);
12481 return error;
12482}
12483
12484
12485
12486
12487
12488
12489
12490
12491
12492
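/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg dev from PCI subsystem (SLI-3)
 * @pdev: pointer to PCI device
 *
 * This routine detaches a device with SLI-3 interface spec from the PCI
 * subsystem, performing all the necessary cleanup (vport teardown, SCSI
 * host removal, interrupt and memory release) for the HBA to be removed
 * properly.
 **/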
12493static void
12494lpfc_pci_remove_one_s3(struct pci_dev *pdev)
12495{
12496 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12497 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12498 struct lpfc_vport **vports;
12499 struct lpfc_hba *phba = vport->phba;
12500 int i;
12501
12502 spin_lock_irq(&phba->hbalock);
12503 vport->load_flag |= FC_UNLOADING;
12504 spin_unlock_irq(&phba->hbalock);
12505
12506 lpfc_free_sysfs_attr(vport);
12507
12508
12509 vports = lpfc_create_vport_work_array(phba);
12510 if (vports != NULL)
12511 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12512 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12513 continue;
12514 fc_vport_terminate(vports[i]->fc_vport);
12515 }
12516 lpfc_destroy_vport_work_array(phba, vports);
12517
12518
12519 fc_remove_host(shost);
12520 scsi_remove_host(shost);
12521
12522 lpfc_cleanup(vport);
12523
12524
12525
12526
12527
12528
12529
12530
12531 lpfc_sli_hba_down(phba);
12532
12533 kthread_stop(phba->worker_thread);
12534
12535 lpfc_sli_brdrestart(phba);
12536
12537 kfree(phba->vpi_bmask);
12538 kfree(phba->vpi_ids);
12539
12540 lpfc_stop_hba_timers(phba);
12541 spin_lock_irq(&phba->port_list_lock);
12542 list_del_init(&vport->listentry);
12543 spin_unlock_irq(&phba->port_list_lock);
12544
12545 lpfc_debugfs_terminate(vport);
12546
12547
12548 if (phba->cfg_sriov_nr_virtfn)
12549 pci_disable_sriov(pdev);
12550
12551
12552 lpfc_sli_disable_intr(phba);
12553
12554 scsi_host_put(shost);
12555
12556
12557
12558
12559
12560 lpfc_scsi_free(phba);
12561 lpfc_free_iocb_list(phba);
12562
12563 lpfc_mem_free_all(phba);
12564
12565 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
12566 phba->hbqslimp.virt, phba->hbqslimp.phys);
12567
12568
12569 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
12570 phba->slim2p.virt, phba->slim2p.phys);
12571
12572
12573 iounmap(phba->ctrl_regs_memmap_p);
12574 iounmap(phba->slim_memmap_p);
12575
12576 lpfc_hba_free(phba);
12577
12578 pci_release_mem_regions(pdev);
12579 pci_disable_device(pdev);
12580}
12581
12582
12583
12584
12585
12586
12587
12588
12589
12590
12591
12592
12593
12594
12595
12596
12597
12598
12599
12600
12601
12602
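/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine supports system Power Management on a device with SLI-3
 * interface spec: it brings the HBA offline, stops the worker thread,
 * disables interrupts and puts the PCI device into PCI_D3hot.
 *
 * Return code
 *	0 - driver suspended the device
 **/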
12603static int
12604lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
12605{
12606 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12607 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12608
12609 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12610 "0473 PCI device Power Management suspend.\n");
12611
12612
12613 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12614 lpfc_offline(phba);
12615 kthread_stop(phba->worker_thread);
12616
12617
12618 lpfc_sli_disable_intr(phba);
12619
12620
12621 pci_save_state(pdev);
12622 pci_set_power_state(pdev, PCI_D3hot);
12623
12624 return 0;
12625}
12626
12627
12628
12629
12630
12631
12632
12633
12634
12635
12636
12637
12638
12639
12640
12641
12642
12643
12644
12645
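/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine supports system Power Management resume on a device with
 * SLI-3 interface spec: it restores the PCI power state, restarts the
 * worker thread, re-enables the interrupt vector and brings the HBA back
 * online.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/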
12646static int
12647lpfc_pci_resume_one_s3(struct pci_dev *pdev)
12648{
12649 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12650 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12651 uint32_t intr_mode;
12652 int error;
12653
12654 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12655 "0452 PCI device Power Management resume.\n");
12656
12657
12658 pci_set_power_state(pdev, PCI_D0);
12659 pci_restore_state(pdev);
12660
12661
12662
12663
12664
12665 pci_save_state(pdev);
12666
12667 if (pdev->is_busmaster)
12668 pci_set_master(pdev);
12669
12670
12671 phba->worker_thread = kthread_run(lpfc_do_work, phba,
12672 "lpfc_worker_%d", phba->brd_no);
12673 if (IS_ERR(phba->worker_thread)) {
12674 error = PTR_ERR(phba->worker_thread);
12675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12676 "0434 PM resume failed to start worker "
12677 "thread: error=x%x.\n", error);
12678 return error;
12679 }
12680
12681
12682 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12683 if (intr_mode == LPFC_INTR_ERROR) {
12684 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12685 "0430 PM resume Failed to enable interrupt\n");
12686 return -EIO;
12687 } else
12688 phba->intr_mode = intr_mode;
12689
12690
12691 lpfc_sli_brdrestart(phba);
12692 lpfc_online(phba);
12693
12694
12695 lpfc_log_intr_mode(phba, phba->intr_mode);
12696
12697 return 0;
12698}
12699
12700
12701
12702
12703
12704
12705
12706
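/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI-3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the SLI-3 device for PCI slot recovery by aborting
 * all outstanding I/Os on the FCP rings.
 **/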
12707static void
12708lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12709{
12710 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12711 "2723 PCI channel I/O abort preparing for recovery\n");
12712
12713
12714
12715
12716
12717 lpfc_sli_abort_fcp_rings(phba);
12718}
12719
12720
12721
12722
12723
12724
12725
12726
12727
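/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI-3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the SLI-3 device for PCI slot reset: it blocks
 * management I/O, flushes the I/O rings, stops the HBA timers and disables
 * the device interrupt and PCI device.
 **/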
12728static void
12729lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12730{
12731 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12732 "2710 PCI channel disable preparing for reset\n");
12733
12734
12735 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
12736
12737
12738 lpfc_scsi_dev_block(phba);
12739
12740
12741 lpfc_sli_flush_io_rings(phba);
12742
12743
12744 lpfc_stop_hba_timers(phba);
12745
12746
12747 lpfc_sli_disable_intr(phba);
12748 pci_disable_device(phba->pcidev);
12749}
12750
12751
12752
12753
12754
12755
12756
12757
12758
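/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the SLI-3 device for permanently disabling the PCI
 * slot: it blocks SCSI transport traffic, stops the HBA timers and flushes
 * the pending I/O rings.
 **/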
12759static void
12760lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12761{
12762 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12763 "2711 PCI channel permanent disable for failure\n");
12764
12765 lpfc_scsi_dev_block(phba);
12766
12767
12768 lpfc_stop_hba_timers(phba);
12769
12770
12771 lpfc_sli_flush_io_rings(phba);
12772}
12773
12774
12775
12776
12777
12778
12779
12780
12781
12782
12783
12784
12785
12786
12787
12788
12789
12790
12791
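/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * Called by the PCI subsystem after a PCI bus error affecting this SLI-3
 * device has been detected. The routine quiesces the device as appropriate
 * for the reported channel state.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered without reset
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/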
12792static pci_ers_result_t
12793lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12794{
12795 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12796 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12797
12798 switch (state) {
12799 case pci_channel_io_normal:
12800
12801 lpfc_sli_prep_dev_for_recover(phba);
12802 return PCI_ERS_RESULT_CAN_RECOVER;
12803 case pci_channel_io_frozen:
12804
12805 lpfc_sli_prep_dev_for_reset(phba);
12806 return PCI_ERS_RESULT_NEED_RESET;
12807 case pci_channel_io_perm_failure:
12808
12809 lpfc_sli_prep_dev_for_perm_failure(phba);
12810 return PCI_ERS_RESULT_DISCONNECT;
12811 default:
12812
12813 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12814 "0472 Unknown PCI error state: x%x\n", state);
12815 lpfc_sli_prep_dev_for_reset(phba);
12816 return PCI_ERS_RESULT_NEED_RESET;
12817 }
12818}
12819
12820
12821
12822
12823
12824
12825
12826
12827
12828
12829
12830
12831
12832
12833
12834
12835
12836
12837
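/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch
 * @pdev: pointer to PCI device.
 *
 * Called by the PCI subsystem after the PCI bus has been reset. The routine
 * re-enables the PCI device, restores its saved state, re-enables the device
 * interrupt and restarts the HBA.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/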
12838static pci_ers_result_t
12839lpfc_io_slot_reset_s3(struct pci_dev *pdev)
12840{
12841 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12842 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12843 struct lpfc_sli *psli = &phba->sli;
12844 uint32_t intr_mode;
12845
12846 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
12847 if (pci_enable_device_mem(pdev)) {
12848 printk(KERN_ERR "lpfc: Cannot re-enable "
12849 "PCI device after reset.\n");
12850 return PCI_ERS_RESULT_DISCONNECT;
12851 }
12852
12853 pci_restore_state(pdev);
12854
12855
12856
12857
12858
12859 pci_save_state(pdev);
12860
12861 if (pdev->is_busmaster)
12862 pci_set_master(pdev);
12863
12864 spin_lock_irq(&phba->hbalock);
12865 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
12866 spin_unlock_irq(&phba->hbalock);
12867
12868
12869 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12870 if (intr_mode == LPFC_INTR_ERROR) {
12871 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12872 "0427 Cannot re-enable interrupt after "
12873 "slot reset.\n");
12874 return PCI_ERS_RESULT_DISCONNECT;
12875 } else
12876 phba->intr_mode = intr_mode;
12877
12878
12879 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12880 lpfc_offline(phba);
12881 lpfc_sli_brdrestart(phba);
12882
12883
12884 lpfc_log_intr_mode(phba, phba->intr_mode);
12885
12886 return PCI_ERS_RESULT_RECOVERED;
12887}
12888
12889
12890
12891
12892
12893
12894
12895
12896
12897
12898
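/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device
 * @pdev: pointer to PCI device
 *
 * Called when the kernel error recovery determines that normal PCI
 * operation can resume; it simply brings the HBA back online.
 **/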
12899static void
12900lpfc_io_resume_s3(struct pci_dev *pdev)
12901{
12902 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12903 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12904
12905
12906 lpfc_online(phba);
12907}
12908
12909
12910
12911
12912
12913
12914
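/**
 * lpfc_sli4_get_els_iocb_cnt - get the els iocb count
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns the number of ELS IOCBs to reserve, scaled by the
 * maximum number of exchanges (XRIs) the SLI4 port is configured for.
 **/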
12915int
12916lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12917{
12918 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12919
12920 if (phba->sli_rev == LPFC_SLI_REV4) {
12921 if (max_xri <= 100)
12922 return 10;
12923 else if (max_xri <= 256)
12924 return 25;
12925 else if (max_xri <= 512)
12926 return 50;
12927 else if (max_xri <= 1024)
12928 return 100;
12929 else if (max_xri <= 1536)
12930 return 150;
12931 else if (max_xri <= 2048)
12932 return 200;
12933 else
12934 return 250;
12935 } else
12936 return 0;
12937}
12938
12939
12940
12941
12942
12943
12944
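/**
 * lpfc_sli4_get_iocb_cnt - Calculate the total number of IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns the ELS IOCB count plus the NVMET buffer post count
 * when NVME target mode is enabled.
 **/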
12945int
12946lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12947{
12948 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12949
12950 if (phba->nvmet_support)
12951 max_xri += LPFC_NVMET_BUF_POST;
12952 return max_xri;
12953}
12954
12955
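/* Log a firmware download failure, mapping the add status reported by the
 * port to a descriptive message and return code.
 */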
12956static int
12957lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12958 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12959 const struct firmware *fw)
12960{
12961 int rc;
12962
12963
12964
12965
12966
12967
12968
12969 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
12970 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12971 magic_number != MAGIC_NUMBER_G6) ||
12972 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12973 magic_number != MAGIC_NUMBER_G7)) {
12974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12975 "3030 This firmware version is not supported on"
12976 " this HBA model. Device:%x Magic:%x Type:%x "
12977 "ID:%x Size %d %zd\n",
12978 phba->pcidev->device, magic_number, ftype, fid,
12979 fsize, fw->size);
12980 rc = -EINVAL;
12981 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
12982 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12983 "3021 Firmware downloads have been prohibited "
12984 "by a system configuration setting on "
12985 "Device:%x Magic:%x Type:%x ID:%x Size %d "
12986 "%zd\n",
12987 phba->pcidev->device, magic_number, ftype, fid,
12988 fsize, fw->size);
12989 rc = -EACCES;
12990 } else {
12991 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12992 "3022 FW Download failed. Add Status x%x "
12993 "Device:%x Magic:%x Type:%x ID:%x Size %d "
12994 "%zd\n",
12995 offset, phba->pcidev->device, magic_number,
12996 ftype, fid, fsize, fw->size);
12997 rc = -EIO;
12998 }
12999 return rc;
13000}
13001
13002
13003
13004
13005
13006
13007
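/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure (passed as void *).
 *
 * This routine compares the running firmware revision against the image
 * revision and, when they differ, copies the image into DMA buffers one
 * SLI4_PAGE_SIZE chunk at a time and writes it to the port with
 * lpfc_wr_object().
 **/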
13008static void
13009lpfc_write_firmware(const struct firmware *fw, void *context)
13010{
13011 struct lpfc_hba *phba = (struct lpfc_hba *)context;
13012 char fwrev[FW_REV_STR_SIZE];
13013 struct lpfc_grp_hdr *image;
13014 struct list_head dma_buffer_list;
13015 int i, rc = 0;
13016 struct lpfc_dmabuf *dmabuf, *next;
13017 uint32_t offset = 0, temp_offset = 0;
13018 uint32_t magic_number, ftype, fid, fsize;
13019
13020
13021 if (!fw) {
13022 rc = -ENXIO;
13023 goto out;
13024 }
13025 image = (struct lpfc_grp_hdr *)fw->data;
13026
13027 magic_number = be32_to_cpu(image->magic_number);
13028 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
13029 fid = bf_get_be32(lpfc_grp_hdr_id, image);
13030 fsize = be32_to_cpu(image->size);
13031
13032 INIT_LIST_HEAD(&dma_buffer_list);
13033 lpfc_decode_firmware_rev(phba, fwrev, 1);
13034 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
13035 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13036 "3023 Updating Firmware, Current Version:%s "
13037 "New Version:%s\n",
13038 fwrev, image->revision);
13039 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
13040 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
13041 GFP_KERNEL);
13042 if (!dmabuf) {
13043 rc = -ENOMEM;
13044 goto release_out;
13045 }
13046 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
13047 SLI4_PAGE_SIZE,
13048 &dmabuf->phys,
13049 GFP_KERNEL);
13050 if (!dmabuf->virt) {
13051 kfree(dmabuf);
13052 rc = -ENOMEM;
13053 goto release_out;
13054 }
13055 list_add_tail(&dmabuf->list, &dma_buffer_list);
13056 }
13057 while (offset < fw->size) {
13058 temp_offset = offset;
13059 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
13060 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
13061 memcpy(dmabuf->virt,
13062 fw->data + temp_offset,
13063 fw->size - temp_offset);
13064 temp_offset = fw->size;
13065 break;
13066 }
13067 memcpy(dmabuf->virt, fw->data + temp_offset,
13068 SLI4_PAGE_SIZE);
13069 temp_offset += SLI4_PAGE_SIZE;
13070 }
13071 rc = lpfc_wr_object(phba, &dma_buffer_list,
13072 (fw->size - offset), &offset);
13073 if (rc) {
13074 rc = lpfc_log_write_firmware_error(phba, offset,
13075 magic_number,
13076 ftype,
13077 fid,
13078 fsize,
13079 fw);
13080 goto release_out;
13081 }
13082 }
13083 rc = offset;
13084 } else
13085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13086 "3029 Skipped Firmware update, Current "
13087 "Version:%s New Version:%s\n",
13088 fwrev, image->revision);
13089
13090release_out:
13091 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
13092 list_del(&dmabuf->list);
13093 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
13094 dmabuf->virt, dmabuf->phys);
13095 kfree(dmabuf);
13096 }
13097 release_firmware(fw);
13098out:
13099 if (rc < 0)
13100 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13101 "3062 Firmware update error, status %d.\n", rc);
13102 else
13103 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13104 "3024 Firmware update success: size %d.\n", rc);
13105}
13106
13107
13108
13109
13110
13111
13112
13113
13114
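/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware update path to use (INT_FW_UPGRADE or
 *              RUN_FW_UPGRADE).
 *
 * This routine requests the "<ModelName>.grp" firmware file, either
 * asynchronously at probe time or synchronously on user request, and hands
 * it to lpfc_write_firmware(). Only supported on if_type 2 and later ports.
 *
 * Return code
 *	0 - success
 *	negative value - failure
 **/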
13115int
13116lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
13117{
13118 uint8_t file_name[ELX_MODEL_NAME_SIZE];
13119 int ret;
13120 const struct firmware *fw;
13121
13122
13123 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
13124 LPFC_SLI_INTF_IF_TYPE_2)
13125 return -EPERM;
13126
13127 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
13128
13129 if (fw_upgrade == INT_FW_UPGRADE) {
13130 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
13131 file_name, &phba->pcidev->dev,
13132 GFP_KERNEL, (void *)phba,
13133 lpfc_write_firmware);
13134 } else if (fw_upgrade == RUN_FW_UPGRADE) {
13135 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
13136 if (!ret)
13137 lpfc_write_firmware(fw, (void *)phba);
13138 } else {
13139 ret = -EINVAL;
13140 }
13141
13142 return ret;
13143}
13144
13145
13146
13147
13148
13149
13150
13151
13152
13153
13154
13155
13156
13157
13158
13159
13160
13161
13162
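/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg dev to PCI subsystem (SLI-4)
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine attaches a device with SLI-4 interface spec to the PCI
 * subsystem. It is invoked by the driver core once the PCI device match
 * succeeds and performs all HBA initialization: PCI enablement, resource
 * setup, interrupt configuration, SCSI host creation, NVME localport
 * registration and optional firmware update.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/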
13163static int
13164lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
13165{
13166 struct lpfc_hba *phba;
13167 struct lpfc_vport *vport = NULL;
13168 struct Scsi_Host *shost = NULL;
13169 int error;
13170 uint32_t cfg_mode, intr_mode;
13171
13172
13173 phba = lpfc_hba_alloc(pdev);
13174 if (!phba)
13175 return -ENOMEM;
13176
13177
13178 error = lpfc_enable_pci_dev(phba);
13179 if (error)
13180 goto out_free_phba;
13181
13182
13183 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
13184 if (error)
13185 goto out_disable_pci_dev;
13186
13187
13188 error = lpfc_sli4_pci_mem_setup(phba);
13189 if (error) {
13190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13191 "1410 Failed to set up pci memory space.\n");
13192 goto out_disable_pci_dev;
13193 }
13194
13195
13196 error = lpfc_sli4_driver_resource_setup(phba);
13197 if (error) {
13198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13199 "1412 Failed to set up driver resource.\n");
13200 goto out_unset_pci_mem_s4;
13201 }
13202
13203 INIT_LIST_HEAD(&phba->active_rrq_list);
13204 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
13205
13206
13207 error = lpfc_setup_driver_resource_phase2(phba);
13208 if (error) {
13209 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13210 "1414 Failed to set up driver resource.\n");
13211 goto out_unset_driver_resource_s4;
13212 }
13213
13214
13215 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13216
13217
13218 cfg_mode = phba->cfg_use_msi;
13219
13220
13221 phba->pport = NULL;
13222 lpfc_stop_port(phba);
13223
13224
13225 lpfc_cpu_map_array_init(phba);
13226
13227
13228 lpfc_hba_eq_hdl_array_init(phba);
13229
13230
13231 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
13232 if (intr_mode == LPFC_INTR_ERROR) {
13233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13234 "0426 Failed to enable interrupt.\n");
13235 error = -ENODEV;
13236 goto out_unset_driver_resource;
13237 }
13238
13239 if (phba->intr_type != MSIX) {
13240 phba->cfg_irq_chann = 1;
13241 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13242 if (phba->nvmet_support)
13243 phba->cfg_nvmet_mrq = 1;
13244 }
13245 }
13246 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
13247
13248
13249 error = lpfc_create_shost(phba);
13250 if (error) {
13251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13252 "1415 Failed to create scsi host.\n");
13253 goto out_disable_intr;
13254 }
13255 vport = phba->pport;
13256 shost = lpfc_shost_from_vport(vport);
13257
13258
13259 error = lpfc_alloc_sysfs_attr(vport);
13260 if (error) {
13261 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13262 "1416 Failed to allocate sysfs attr\n");
13263 goto out_destroy_shost;
13264 }
13265
13266
13267 if (lpfc_sli4_hba_setup(phba)) {
13268 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13269 "1421 Failed to set up hba\n");
13270 error = -ENODEV;
13271 goto out_free_sysfs_attr;
13272 }
13273
13274
13275 phba->intr_mode = intr_mode;
13276 lpfc_log_intr_mode(phba, intr_mode);
13277
13278
13279 lpfc_post_init_setup(phba);
13280
13281
13282
13283
13284 if (phba->nvmet_support == 0) {
13285 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13286
13287
13288
13289
13290
13291 error = lpfc_nvme_create_localport(vport);
13292 if (error) {
13293 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13294 "6004 NVME registration "
13295 "failed, error x%x\n",
13296 error);
13297 }
13298 }
13299 }
13300
13301
13302 if (phba->cfg_request_firmware_upgrade)
13303 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
13304
13305
13306 lpfc_create_static_vport(phba);
13307
13308
13309 lpfc_sli4_ras_setup(phba);
13310
13311 INIT_LIST_HEAD(&phba->poll_list);
13312 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
13313 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
13314
13315 return 0;
13316
13317out_free_sysfs_attr:
13318 lpfc_free_sysfs_attr(vport);
13319out_destroy_shost:
13320 lpfc_destroy_shost(phba);
13321out_disable_intr:
13322 lpfc_sli4_disable_intr(phba);
13323out_unset_driver_resource:
13324 lpfc_unset_driver_resource_phase2(phba);
13325out_unset_driver_resource_s4:
13326 lpfc_sli4_driver_resource_unset(phba);
13327out_unset_pci_mem_s4:
13328 lpfc_sli4_pci_mem_unset(phba);
13329out_disable_pci_dev:
13330 lpfc_disable_pci_dev(phba);
13331 if (shost)
13332 scsi_host_put(shost);
13333out_free_phba:
13334 lpfc_hba_free(phba);
13335 return error;
13336}
13337
13338
13339
13340
13341
13342
13343
13344
13345
13346
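/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg dev from PCI subsystem (SLI-4)
 * @pdev: pointer to PCI device
 *
 * This routine detaches a device with SLI-4 interface spec from the PCI
 * subsystem, performing all the necessary cleanup (vport teardown, NVME
 * port destruction, SCSI host removal, queue, interrupt and memory release)
 * for the HBA to be removed properly.
 **/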
13347static void
13348lpfc_pci_remove_one_s4(struct pci_dev *pdev)
13349{
13350 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13351 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13352 struct lpfc_vport **vports;
13353 struct lpfc_hba *phba = vport->phba;
13354 int i;
13355
13356
13357 spin_lock_irq(&phba->hbalock);
13358 vport->load_flag |= FC_UNLOADING;
13359 spin_unlock_irq(&phba->hbalock);
13360
13361
13362 lpfc_free_sysfs_attr(vport);
13363
13364
13365 vports = lpfc_create_vport_work_array(phba);
13366 if (vports != NULL)
13367 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13368 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13369 continue;
13370 fc_vport_terminate(vports[i]->fc_vport);
13371 }
13372 lpfc_destroy_vport_work_array(phba, vports);
13373
13374
13375 fc_remove_host(shost);
13376 scsi_remove_host(shost);
13377
13378
13379
13380
13381 lpfc_cleanup(vport);
13382 lpfc_nvmet_destroy_targetport(phba);
13383 lpfc_nvme_destroy_localport(vport);
13384
13385
13386 if (phba->cfg_xri_rebalancing)
13387 lpfc_destroy_multixri_pools(phba);
13388
13389
13390
13391
13392
13393
13394 lpfc_debugfs_terminate(vport);
13395
13396 lpfc_stop_hba_timers(phba);
13397 spin_lock_irq(&phba->port_list_lock);
13398 list_del_init(&vport->listentry);
13399 spin_unlock_irq(&phba->port_list_lock);
13400
13401
13402
13403
13404 lpfc_io_free(phba);
13405 lpfc_free_iocb_list(phba);
13406 lpfc_sli4_hba_unset(phba);
13407
13408 lpfc_unset_driver_resource_phase2(phba);
13409 lpfc_sli4_driver_resource_unset(phba);
13410
13411
13412 lpfc_sli4_pci_mem_unset(phba);
13413
13414
13415 scsi_host_put(shost);
13416 lpfc_disable_pci_dev(phba);
13417
13418
13419 lpfc_hba_free(phba);
13420
13421 return;
13422}
13423
13424
13425
13426
13427
13428
13429
13430
13431
13432
13433
13434
13435
13436
13437
13438
13439
13440
13441
13442
13443
13444
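/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine supports system Power Management on a device with SLI-4
 * interface spec: it brings the HBA offline, stops the worker thread,
 * disables interrupts, destroys the queues and puts the PCI device into
 * PCI_D3hot.
 *
 * Return code
 *	0 - driver suspended the device
 **/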
13445static int
13446lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
13447{
13448 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13449 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13450
13451 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13452 "2843 PCI device Power Management suspend.\n");
13453
13454
13455 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13456 lpfc_offline(phba);
13457 kthread_stop(phba->worker_thread);
13458
13459
13460 lpfc_sli4_disable_intr(phba);
13461 lpfc_sli4_queue_destroy(phba);
13462
13463
13464 pci_save_state(pdev);
13465 pci_set_power_state(pdev, PCI_D3hot);
13466
13467 return 0;
13468}
13469
13470
13471
13472
13473
13474
13475
13476
13477
13478
13479
13480
13481
13482
13483
13484
13485
13486
13487
13488
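/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine supports system Power Management resume on a device with
 * SLI-4 interface spec: it restores the PCI power state, restarts the
 * worker thread, re-enables the interrupt vectors and brings the HBA back
 * online.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/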
13489static int
13490lpfc_pci_resume_one_s4(struct pci_dev *pdev)
13491{
13492 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13493 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13494 uint32_t intr_mode;
13495 int error;
13496
13497 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13498 "0292 PCI device Power Management resume.\n");
13499
13500
13501 pci_set_power_state(pdev, PCI_D0);
13502 pci_restore_state(pdev);
13503
13504
13505
13506
13507
13508 pci_save_state(pdev);
13509
13510 if (pdev->is_busmaster)
13511 pci_set_master(pdev);
13512
13513
13514 phba->worker_thread = kthread_run(lpfc_do_work, phba,
13515 "lpfc_worker_%d", phba->brd_no);
13516 if (IS_ERR(phba->worker_thread)) {
13517 error = PTR_ERR(phba->worker_thread);
13518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13519 "0293 PM resume failed to start worker "
13520 "thread: error=x%x.\n", error);
13521 return error;
13522 }
13523
13524
13525 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13526 if (intr_mode == LPFC_INTR_ERROR) {
13527 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13528 "0294 PM resume Failed to enable interrupt\n");
13529 return -EIO;
13530 } else
13531 phba->intr_mode = intr_mode;
13532
13533
13534 lpfc_sli_brdrestart(phba);
13535 lpfc_online(phba);
13536
13537
13538 lpfc_log_intr_mode(phba, phba->intr_mode);
13539
13540 return 0;
13541}
13542
13543
13544
13545
13546
13547
13548
13549
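/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the SLI-4 device for PCI slot recovery by aborting
 * all outstanding I/Os on the FCP rings.
 **/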
13550static void
13551lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
13552{
13553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13554 "2828 PCI channel I/O abort preparing for recovery\n");
13555
13556
13557
13558
13559 lpfc_sli_abort_fcp_rings(phba);
13560}
13561
13562
13563
13564
13565
13566
13567
13568
13569
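/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the SLI-4 device for PCI slot reset: it blocks
 * management I/O, flushes the I/O rings, stops the HBA timers, disables
 * interrupts, destroys the queues and disables the PCI device.
 **/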
13570static void
13571lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
13572{
13573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13574 "2826 PCI channel disable preparing for reset\n");
13575
13576
13577 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
13578
13579
13580 lpfc_scsi_dev_block(phba);
13581
13582
13583 lpfc_sli_flush_io_rings(phba);
13584
13585
13586 lpfc_stop_hba_timers(phba);
13587
13588
13589 lpfc_sli4_disable_intr(phba);
13590 lpfc_sli4_queue_destroy(phba);
13591 pci_disable_device(phba->pcidev);
13592}
13593
13594
13595
13596
13597
13598
13599
13600
13601
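/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the SLI-4 device for permanently disabling the PCI
 * slot: it blocks SCSI transport traffic, stops the HBA timers and flushes
 * the pending I/O rings.
 **/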
13602static void
13603lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
13604{
13605 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13606 "2827 PCI channel permanent disable for failure\n");
13607
13608
13609 lpfc_scsi_dev_block(phba);
13610
13611
13612 lpfc_stop_hba_timers(phba);
13613
13614
13615 lpfc_sli_flush_io_rings(phba);
13616}
13617
13618
13619
13620
13621
13622
13623
13624
13625
13626
13627
13628
13629
13630
13631
13632
13633
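/**
 * lpfc_io_error_detected_s4 - Method for handling SLI-4 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * Called by the PCI subsystem after a PCI bus error affecting this SLI-4
 * device has been detected. The routine quiesces the device as appropriate
 * for the reported channel state.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered without reset
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/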
13634static pci_ers_result_t
13635lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13636{
13637 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13638 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13639
13640 switch (state) {
13641 case pci_channel_io_normal:
13642
13643 lpfc_sli4_prep_dev_for_recover(phba);
13644 return PCI_ERS_RESULT_CAN_RECOVER;
13645 case pci_channel_io_frozen:
13646
13647 lpfc_sli4_prep_dev_for_reset(phba);
13648 return PCI_ERS_RESULT_NEED_RESET;
13649 case pci_channel_io_perm_failure:
13650
13651 lpfc_sli4_prep_dev_for_perm_failure(phba);
13652 return PCI_ERS_RESULT_DISCONNECT;
13653 default:
13654
13655 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13656 "2825 Unknown PCI error state: x%x\n", state);
13657 lpfc_sli4_prep_dev_for_reset(phba);
13658 return PCI_ERS_RESULT_NEED_RESET;
13659 }
13660}
13661
13662
13663
13664
13665
13666
13667
13668
13669
13670
13671
13672
13673
13674
13675
13676
13677
13678
13679
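/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * Called by the PCI subsystem after the PCI bus has been reset. The routine
 * re-enables the PCI device, restores its saved state and re-enables the
 * device interrupt; the remainder of the recovery is completed in
 * lpfc_io_resume_s4().
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/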
13680static pci_ers_result_t
13681lpfc_io_slot_reset_s4(struct pci_dev *pdev)
13682{
13683 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13684 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13685 struct lpfc_sli *psli = &phba->sli;
13686 uint32_t intr_mode;
13687
13688 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13689 if (pci_enable_device_mem(pdev)) {
13690 printk(KERN_ERR "lpfc: Cannot re-enable "
13691 "PCI device after reset.\n");
13692 return PCI_ERS_RESULT_DISCONNECT;
13693 }
13694
13695 pci_restore_state(pdev);
13696
13697
13698
13699
13700
13701 pci_save_state(pdev);
13702
13703 if (pdev->is_busmaster)
13704 pci_set_master(pdev);
13705
13706 spin_lock_irq(&phba->hbalock);
13707 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13708 spin_unlock_irq(&phba->hbalock);
13709
13710
13711 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13712 if (intr_mode == LPFC_INTR_ERROR) {
13713 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13714 "2824 Cannot re-enable interrupt after "
13715 "slot reset.\n");
13716 return PCI_ERS_RESULT_DISCONNECT;
13717 } else
13718 phba->intr_mode = intr_mode;
13719
13720
13721 lpfc_log_intr_mode(phba, phba->intr_mode);
13722
13723 return PCI_ERS_RESULT_RECOVERED;
13724}
13725
13726
13727
13728
13729
13730
13731
13732
13733
13734
13735
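/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * Called when the kernel error recovery determines that normal PCI
 * operation can resume. If the port did not undergo a function reset, the
 * routine brings the HBA offline, restarts the board and brings it back
 * online.
 **/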
13736static void
13737lpfc_io_resume_s4(struct pci_dev *pdev)
13738{
13739 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13740 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13741
13742
	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
13748 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13749
13750 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13751 lpfc_offline(phba);
13752 lpfc_sli_brdrestart(phba);
13753
13754 lpfc_online(phba);
13755 }
13756}
13757
13758
13759
13760
13761
13762
13763
13764
13765
13766
13767
13768
13769
13770
13771
13772
13773
13774
13775
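/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is registered with the kernel's PCI subsystem. It reads the
 * SLI interface register and dispatches the probe to the SLI-3 or SLI-4
 * specific routine.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/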
13776static int
13777lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13778{
13779 int rc;
13780 struct lpfc_sli_intf intf;
13781
13782 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13783 return -ENODEV;
13784
13785 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13786 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13787 rc = lpfc_pci_probe_one_s4(pdev, pid);
13788 else
13789 rc = lpfc_pci_probe_one_s3(pdev, pid);
13790
13791 return rc;
13792}
13793
13794
13795
13796
13797
13798
13799
13800
13801
13802
13803
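/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine dispatches the device removal to the SLI-3 or SLI-4
 * specific routine based on the PCI device group of the HBA.
 **/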
13804static void
13805lpfc_pci_remove_one(struct pci_dev *pdev)
13806{
13807 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13808 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13809
13810 switch (phba->pci_dev_grp) {
13811 case LPFC_PCI_DEV_LP:
13812 lpfc_pci_remove_one_s3(pdev);
13813 break;
13814 case LPFC_PCI_DEV_OC:
13815 lpfc_pci_remove_one_s4(pdev);
13816 break;
13817 default:
13818 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13819 "1424 Invalid PCI device group: 0x%x\n",
13820 phba->pci_dev_grp);
13821 break;
13822 }
13823 return;
13824}
13825
13826
13827
13828
13829
13830
13831
13832
13833
13834
13835
13836
13837
13838
13839
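/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine dispatches the PM suspend to the SLI-3 or SLI-4 specific
 * routine based on the PCI device group of the HBA.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/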
13840static int
13841lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
13842{
13843 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13844 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13845 int rc = -ENODEV;
13846
13847 switch (phba->pci_dev_grp) {
13848 case LPFC_PCI_DEV_LP:
13849 rc = lpfc_pci_suspend_one_s3(pdev, msg);
13850 break;
13851 case LPFC_PCI_DEV_OC:
13852 rc = lpfc_pci_suspend_one_s4(pdev, msg);
13853 break;
13854 default:
13855 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13856 "1425 Invalid PCI device group: 0x%x\n",
13857 phba->pci_dev_grp);
13858 break;
13859 }
13860 return rc;
13861}
13862
13863
13864
13865
13866
13867
13868
13869
13870
13871
13872
13873
13874
13875
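/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine dispatches the PM resume to the SLI-3 or SLI-4 specific
 * routine based on the PCI device group of the HBA.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/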
13876static int
13877lpfc_pci_resume_one(struct pci_dev *pdev)
13878{
13879 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13880 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13881 int rc = -ENODEV;
13882
13883 switch (phba->pci_dev_grp) {
13884 case LPFC_PCI_DEV_LP:
13885 rc = lpfc_pci_resume_one_s3(pdev);
13886 break;
13887 case LPFC_PCI_DEV_OC:
13888 rc = lpfc_pci_resume_one_s4(pdev);
13889 break;
13890 default:
13891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13892 "1426 Invalid PCI device group: 0x%x\n",
13893 phba->pci_dev_grp);
13894 break;
13895 }
13896 return rc;
13897}
13898
13899
13900
13901
13902
13903
13904
13905
13906
13907
13908
13909
13910
13911
13912
13913
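/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device
 * @state: the current PCI connection state
 *
 * This routine dispatches PCI I/O error handling to the SLI-3 or SLI-4
 * specific routine based on the PCI device group of the HBA.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/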
13914static pci_ers_result_t
13915lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13916{
13917 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13918 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13919 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13920
13921 switch (phba->pci_dev_grp) {
13922 case LPFC_PCI_DEV_LP:
13923 rc = lpfc_io_error_detected_s3(pdev, state);
13924 break;
13925 case LPFC_PCI_DEV_OC:
13926 rc = lpfc_io_error_detected_s4(pdev, state);
13927 break;
13928 default:
13929 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13930 "1427 Invalid PCI device group: 0x%x\n",
13931 phba->pci_dev_grp);
13932 break;
13933 }
13934 return rc;
13935}
13936
13937
13938
13939
13940
13941
13942
13943
13944
13945
13946
13947
13948
13949
13950
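/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device
 *
 * This routine dispatches the PCI slot reset handling to the SLI-3 or
 * SLI-4 specific routine based on the PCI device group of the HBA.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/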
13951static pci_ers_result_t
13952lpfc_io_slot_reset(struct pci_dev *pdev)
13953{
13954 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13955 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13956 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13957
13958 switch (phba->pci_dev_grp) {
13959 case LPFC_PCI_DEV_LP:
13960 rc = lpfc_io_slot_reset_s3(pdev);
13961 break;
13962 case LPFC_PCI_DEV_OC:
13963 rc = lpfc_io_slot_reset_s4(pdev);
13964 break;
13965 default:
13966 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13967 "1428 Invalid PCI device group: 0x%x\n",
13968 phba->pci_dev_grp);
13969 break;
13970 }
13971 return rc;
13972}
13973
13974
13975
13976
13977
13978
13979
13980
13981
13982
13983
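/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine dispatches the PCI I/O resume handling to the SLI-3 or
 * SLI-4 specific routine based on the PCI device group of the HBA.
 **/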
13984static void
13985lpfc_io_resume(struct pci_dev *pdev)
13986{
13987 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13988 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13989
13990 switch (phba->pci_dev_grp) {
13991 case LPFC_PCI_DEV_LP:
13992 lpfc_io_resume_s3(pdev);
13993 break;
13994 case LPFC_PCI_DEV_OC:
13995 lpfc_io_resume_s4(pdev);
13996 break;
13997 default:
13998 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13999 "1429 Invalid PCI device group: 0x%x\n",
14000 phba->pci_dev_grp);
14001 break;
14002 }
14003 return;
14004}
14005
14006
14007
14008
14009
14010
14011
14012
14013
14014
14015
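/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * When Optimized Access Storage (OAS/XLane) is enabled by configuration,
 * this routine checks whether the adapter reports OAS support and either
 * enables cfg_fof or releases the device data memory pool.
 **/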
14016static void
14017lpfc_sli4_oas_verify(struct lpfc_hba *phba)
14018{
14019
14020 if (!phba->cfg_EnableXLane)
14021 return;
14022
14023 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
14024 phba->cfg_fof = 1;
14025 } else {
14026 phba->cfg_fof = 0;
14027 mempool_destroy(phba->device_data_mem_pool);
14028 phba->device_data_mem_pool = NULL;
14029 }
14030
14031 return;
14032}
14033
14034
14035
14036
14037
14038
14039
14040
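/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether RAS firmware logging is supported by the
 * adapter (Lancer G6/G7) and enables it when the configured PCI function
 * and buffer size permit.
 **/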
14041void
14042lpfc_sli4_ras_init(struct lpfc_hba *phba)
14043{
14044 switch (phba->pcidev->device) {
14045 case PCI_DEVICE_ID_LANCER_G6_FC:
14046 case PCI_DEVICE_ID_LANCER_G7_FC:
14047 phba->ras_fwlog.ras_hwsupport = true;
14048 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
14049 phba->cfg_ras_fwlog_buffsize)
14050 phba->ras_fwlog.ras_enabled = true;
14051 else
14052 phba->ras_fwlog.ras_enabled = false;
14053 break;
14054 default:
14055 phba->ras_fwlog.ras_hwsupport = false;
14056 }
14057}
14058
14059
14060MODULE_DEVICE_TABLE(pci, lpfc_id_table);
14061
14062static const struct pci_error_handlers lpfc_err_handler = {
14063 .error_detected = lpfc_io_error_detected,
14064 .slot_reset = lpfc_io_slot_reset,
14065 .resume = lpfc_io_resume,
14066};
14067
14068static struct pci_driver lpfc_driver = {
14069 .name = LPFC_DRIVER_NAME,
14070 .id_table = lpfc_id_table,
14071 .probe = lpfc_pci_probe_one,
14072 .remove = lpfc_pci_remove_one,
14073 .shutdown = lpfc_pci_remove_one,
14074 .suspend = lpfc_pci_suspend_one,
14075 .resume = lpfc_pci_resume_one,
14076 .err_handler = &lpfc_err_handler,
14077};
14078
14079static const struct file_operations lpfc_mgmt_fop = {
14080 .owner = THIS_MODULE,
14081};
14082
14083static struct miscdevice lpfc_mgmt_dev = {
14084 .minor = MISC_DYNAMIC_MINOR,
14085 .name = "lpfcmgmt",
14086 .fops = &lpfc_mgmt_fop,
14087};
14088
14089
14090
14091
14092
14093
14094
14095
14096
14097
14098
14099
14100
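/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is invoked when the lpfc module is loaded into the kernel.
 * It registers the lpfcmgmt misc device, attaches the FC transport
 * templates, sets up the CPU hotplug state and registers the PCI driver.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC transport attach failed
 *	all others - failed
 **/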
14101static int __init
14102lpfc_init(void)
14103{
14104 int error = 0;
14105
14106 pr_info(LPFC_MODULE_DESC "\n");
14107 pr_info(LPFC_COPYRIGHT "\n");
14108
14109 error = misc_register(&lpfc_mgmt_dev);
14110 if (error)
14111 printk(KERN_ERR "Could not register lpfcmgmt device, "
14112 "misc_register returned with status %d", error);
14113
14114 error = -ENOMEM;
14115 lpfc_transport_functions.vport_create = lpfc_vport_create;
14116 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
14117 lpfc_transport_template =
14118 fc_attach_transport(&lpfc_transport_functions);
14119 if (lpfc_transport_template == NULL)
14120 goto unregister;
14121 lpfc_vport_transport_template =
14122 fc_attach_transport(&lpfc_vport_transport_functions);
14123 if (lpfc_vport_transport_template == NULL) {
14124 fc_release_transport(lpfc_transport_template);
14125 goto unregister;
14126 }
14127 lpfc_nvme_cmd_template();
14128 lpfc_nvmet_cmd_template();
14129
14130
14131 lpfc_present_cpu = num_present_cpus();
14132
14133 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
14134 "lpfc/sli4:online",
14135 lpfc_cpu_online, lpfc_cpu_offline);
14136 if (error < 0)
14137 goto cpuhp_failure;
14138 lpfc_cpuhp_state = error;
14139
14140 error = pci_register_driver(&lpfc_driver);
14141 if (error)
14142 goto unwind;
14143
14144 return error;
14145
14146unwind:
14147 cpuhp_remove_multi_state(lpfc_cpuhp_state);
14148cpuhp_failure:
14149 fc_release_transport(lpfc_transport_template);
14150 fc_release_transport(lpfc_vport_transport_template);
14151unregister:
14152 misc_deregister(&lpfc_mgmt_dev);
14153
14154 return error;
14155}
14156
14157void lpfc_dmp_dbg(struct lpfc_hba *phba)
14158{
14159 unsigned int start_idx;
14160 unsigned int dbg_cnt;
14161 unsigned int temp_idx;
14162 int i;
14163 int j = 0;
14164 unsigned long rem_nsec;
14165
14166 if (phba->cfg_log_verbose)
14167 return;
14168
14169 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
14170 return;
14171
14172 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
14173 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
14174 temp_idx = start_idx;
14175 if (dbg_cnt >= DBG_LOG_SZ) {
14176 dbg_cnt = DBG_LOG_SZ;
14177 temp_idx -= 1;
14178 } else {
14179 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
14180 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
14181 } else {
14182 if (start_idx < dbg_cnt)
14183 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
14184 else
14185 start_idx -= dbg_cnt;
14186 }
14187 }
14188 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
14189 start_idx, temp_idx, dbg_cnt);
14190
14191 for (i = 0; i < dbg_cnt; i++) {
14192 if ((start_idx + i) < DBG_LOG_SZ)
14193 temp_idx = (start_idx + i) % DBG_LOG_SZ;
14194 else
14195 temp_idx = j++;
14196 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
14197 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
14198 temp_idx,
14199 (unsigned long)phba->dbg_log[temp_idx].t_ns,
14200 rem_nsec / 1000,
14201 phba->dbg_log[temp_idx].log);
14202 }
14203 atomic_set(&phba->dbg_log_cnt, 0);
14204 atomic_set(&phba->dbg_log_dmping, 0);
14205}
14206
14207__printf(2, 3)
14208void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
14209{
14210 unsigned int idx;
14211 va_list args;
14212 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
14213 struct va_format vaf;
14214
14215
14216 va_start(args, fmt);
14217 if (unlikely(dbg_dmping)) {
14218 vaf.fmt = fmt;
14219 vaf.va = &args;
14220 dev_info(&phba->pcidev->dev, "%pV", &vaf);
14221 va_end(args);
14222 return;
14223 }
14224 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
14225 DBG_LOG_SZ;
14226
14227 atomic_inc(&phba->dbg_log_cnt);
14228
14229 vscnprintf(phba->dbg_log[idx].log,
14230 sizeof(phba->dbg_log[idx].log), fmt, args);
14231 va_end(args);
14232
14233 phba->dbg_log[idx].t_ns = local_clock();
14234}
14235
14236
14237
14238
14239
14240
14241
14242
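/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * It unregisters the misc device and PCI driver, removes the CPU hotplug
 * state, releases the FC transport templates and destroys the HBA index
 * IDR.
 **/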
14243static void __exit
14244lpfc_exit(void)
14245{
14246 misc_deregister(&lpfc_mgmt_dev);
14247 pci_unregister_driver(&lpfc_driver);
14248 cpuhp_remove_multi_state(lpfc_cpuhp_state);
14249 fc_release_transport(lpfc_transport_template);
14250 fc_release_transport(lpfc_vport_transport_template);
14251 idr_destroy(&lpfc_hba_index);
14252}
14253
14254module_init(lpfc_init);
14255module_exit(lpfc_exit);
14256MODULE_LICENSE("GPL");
14257MODULE_DESCRIPTION(LPFC_MODULE_DESC);
14258MODULE_AUTHOR("Broadcom");
14259MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
14260