1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/idr.h>
28#include <linux/interrupt.h>
29#include <linux/module.h>
30#include <linux/kthread.h>
31#include <linux/pci.h>
32#include <linux/spinlock.h>
33#include <linux/ctype.h>
34#include <linux/aer.h>
35#include <linux/slab.h>
36#include <linux/firmware.h>
37#include <linux/miscdevice.h>
38#include <linux/percpu.h>
39#include <linux/msi.h>
40#include <linux/irq.h>
41#include <linux/bitops.h>
42#include <linux/crash_dump.h>
43#include <linux/cpu.h>
44#include <linux/cpuhotplug.h>
45
46#include <scsi/scsi.h>
47#include <scsi/scsi_device.h>
48#include <scsi/scsi_host.h>
49#include <scsi/scsi_transport_fc.h>
50#include <scsi/scsi_tcq.h>
51#include <scsi/fc/fc_fs.h>
52
53#include "lpfc_hw4.h"
54#include "lpfc_hw.h"
55#include "lpfc_sli.h"
56#include "lpfc_sli4.h"
57#include "lpfc_nl.h"
58#include "lpfc_disc.h"
59#include "lpfc.h"
60#include "lpfc_scsi.h"
61#include "lpfc_nvme.h"
62#include "lpfc_logmsg.h"
63#include "lpfc_crtn.h"
64#include "lpfc_vport.h"
65#include "lpfc_version.h"
66#include "lpfc_ids.h"
67
/* Dynamic cpuhp state slot used by this driver's CPU hotplug callbacks */
static enum cpuhp_state lpfc_cpuhp_state;

/* Count of CPUs present when the driver initialized */
static uint32_t lpfc_present_cpu;

/* Forward declarations for file-local helpers defined later in this file */
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);

/* scsi_transport_fc templates shared by all HBAs (set up elsewhere in file) */
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
/* IDR presumably used to hand out a unique index per HBA instance —
 * confirm against users later in the file.
 */
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine performs initialization before the CONFIG_PORT mailbox
 * command is issued: on light-pulse HBAs it writes the license key via a
 * READ_NVPARM mailbox and records the WWNN/WWPN; it then reads the adapter
 * revision with READ_REV and collects/parses the Vital Product Data (VPD).
 *
 * Return codes
 *   0 - success.
 *   -ENOMEM - mailbox allocation failed.
 *   -ERESTART - a mailbox command failed; caller should reset and retry.
 *   -EINVAL - SLI-3 selected but no v3 READ_REV response.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		/* Byte-swap the license key once per module lifetime */
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		/* Send the license key to the adapter via READ_NVPARM */
		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		/* Save the adapter-reported node and port names */
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which may already be set.
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The READ_REV response must carry revision data (rr set);
	 * otherwise treat the adapter as failed and request a restart.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* SLI-3 mode requires a v3 READ_REV response */
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save the revision information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* Feature levels below 9 get the vport-teardown option flagged
	 * (NOTE(review): the reason is not visible in this file — confirm).
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Dump the VPD region in chunks and accumulate it for parsing */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}

		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		/* Clamp the copy to the remaining space in the VPD buffer */
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
274
275
276
277
278
279
280
281
282
283
284
285static void
286lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
287{
288 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
289 phba->temp_sensor_support = 1;
290 else
291 phba->temp_sensor_support = 0;
292 mempool_free(pmboxq, phba->mbox_mem_pool);
293 return;
294}
295
296
297
298
299
300
301
302
303
304
305
/**
 * lpfc_dump_wakeup_param_cmpl - dump-wakeup-parameters mailbox completion
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * On success, decodes word 7 of the mailbox response through struct prog_id
 * and formats phba->OptionROMVersion from it. The mailbox is released back
 * to the pool on every path.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character lookup for the distribution field (values 0-3) */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 of the response carries the packed program id */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the last 4 characters of the OptionROM version */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	/* dist 3 with num 0 is formatted without the trailing
	 * distribution character and number.
	 */
	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
339
340
341
342
343
344
345
346
347
348
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Reconciles the vport's cached fc_nodename/fc_portname with the names in
 * the just-read service parameters (fc_sparam), honoring user-configured
 * soft WWNs and fabric-assigned WWPNs (FA-WWPN key in the vendor version
 * field), and latches FAWWPN_PARAM_CHG when the effective WWPN changed.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If a soft WWNN/WWPN is configured, overwrite the names in the
	 * service parameters with it.
	 */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the cached node name is empty or a soft WWNN exists, adopt the
	 * service-parameter name; otherwise push the cached name back into
	 * the service parameters.
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the cached port name is set but differs from the one just read,
	 * record that the WWPN parameter changed.
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
		memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	/* Adopt the service-parameter WWPN when the cached one is empty, a
	 * soft WWPN exists, the vendor FA-WWPN key matches, or FAWWPN_SET is
	 * already latched; re-latch FAWWPN_SET only on a fresh key match.
	 */
	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
397
398
399
400
401
402
403
404
405
406
407
408
409
410
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Runs after the CONFIG_PORT mailbox command has completed: reads the
 * service parameters and READ_CONFIG data, derives the serial number from
 * the node name, configures MSI-X if in use, enables host interrupts,
 * starts the ELS/heartbeat/error-attention timers, optionally brings the
 * link up, and issues the async-event-enable and Option ROM version
 * mailbox commands.
 *
 * Return codes
 *   0 - success.
 *   -ENOMEM - mailbox allocation failed.
 *   -EIO - a mailbox command or register read failed.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Read the port's service parameters */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		/* free the dma buffer attached to the mailbox, then the
		 * mailbox itself
		 */
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* Cache the service parameters and derive the WWNs */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Publish the names through the fc transport */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use the low 6 bytes of the node
	 * name as the serial: each nibble becomes '0'-'9' or 'a'-'f'.
	 */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Clamp HBA queue depth to what the adapter advertises (max_xri) */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3359 HBA queue depth changed from %d to %d\n",
			phba->cfg_hba_queue_depth,
			mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on EXTRA and FCP rings if their cmd ring exists */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings (SLI3 posts differently) */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	/* FCP ring polling with interrupts disabled: mask ring-0 interrupt */
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * Thin wrapper that brings up the link using the topology currently
 * configured in phba->cfg_topology.
 *
 * Return: result of lpfc_hba_init_link_fc_topology() (0 on success,
 * negative errno on failure).
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * Issues an INIT_LINK mailbox command with the requested topology and the
 * configured link speed. A configured speed the adapter does not support
 * (per the link-mode table in phba->lmt) is reset to auto-detect first.
 * On issue failure with SLI3 or older, host interrupts are disabled and
 * pending attentions cleared before reporting the error.
 *
 * Return codes
 *   0 - success.
 *   -ENOMEM - mailbox allocation failed.
 *   -EIO - INIT_LINK could not be issued.
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	/* Validate the configured speed against the adapter's capabilities */
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		/* MBX_BUSY in NOWAIT mode means the SLI layer owns the mbox */
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783static int
784lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
785{
786 LPFC_MBOXQ_t *pmb;
787 int rc;
788
789 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
790 if (!pmb) {
791 phba->link_state = LPFC_HBA_ERROR;
792 return -ENOMEM;
793 }
794
795 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
796 "0491 Adapter Link is disabled.\n");
797 lpfc_down_link(phba, pmb);
798 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
799 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
800 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
801 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
802 "2522 Adapter failed to issue DOWN_LINK"
803 " mbox command rc 0x%x\n", rc);
804
805 mempool_free(pmb, phba->mbox_mem_pool);
806 return -EIO;
807 }
808 if (flag == MBX_POLL)
809 mempool_free(pmb, phba->mbox_mem_pool);
810
811 return 0;
812}
813
814
815
816
817
818
819
820
821
822
823
824
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * On SLI3 or older, disables host interrupts by clearing the host control
 * register. Then cleans up discovery resources: for just the physical port
 * when the driver is unloading, otherwise for every active vport.
 *
 * Return: always 0.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		/* Walk the reference-counted vport work array */
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
849
850
851
852
853
854
855
856
857
858
859
860
861
862
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free iocbq entries
 * @phba: pointer to lpfc HBA data structure.
 *
 * Clears the HBA_SP_QUEUE_EVT flag and drains the slow-path queue-event
 * list, releasing each pending event: completed-WQE events return their
 * iocbq to the SLI layer; receive events free the attached DMA buffer.
 * The list head is removed under hbalock one entry at a time.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
895
896
897
898
899
900
901
902
903
904
905
906
907static void
908lpfc_hba_free_post_buf(struct lpfc_hba *phba)
909{
910 struct lpfc_sli *psli = &phba->sli;
911 struct lpfc_sli_ring *pring;
912 struct lpfc_dmabuf *mp, *next_mp;
913 LIST_HEAD(buflist);
914 int count;
915
916 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
917 lpfc_sli_hbqbuf_free_all(phba);
918 else {
919
920 pring = &psli->sli3_ring[LPFC_ELS_RING];
921 spin_lock_irq(&phba->hbalock);
922 list_splice_init(&pring->postbufq, &buflist);
923 spin_unlock_irq(&phba->hbalock);
924
925 count = 0;
926 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
927 list_del(&mp->list);
928 count++;
929 lpfc_mbuf_free(phba, mp->virt, mp->phys);
930 kfree(mp);
931 }
932
933 spin_lock_irq(&phba->hbalock);
934 pring->postbufq_cnt -= count;
935 spin_unlock_irq(&phba->hbalock);
936 }
937}
938
939
940
941
942
943
944
945
946
947
948
/**
 * lpfc_hba_clean_txcmplq - Clean up the txcmplq
 * @phba: pointer to lpfc HBA data structure.
 *
 * Removes every command waiting for completion on all rings, asks the SLI
 * layer to abort each ring's outstanding iocbs, and cancels the collected
 * iocbs with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED. SLI3 walks the sli3
 * rings under hbalock; SLI4 walks each WQ's ring under its ring_lock and
 * clears LPFC_IO_ON_TXCMPLQ on each iocb first.
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point the HBA is reset or down; nothing on
			 * the txcmplq will ever complete on its own, so fail
			 * every entry locally.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninit after HBA reset (SLI3 path)
 * @phba: pointer to lpfc HBA data structure.
 *
 * Frees all posted receive buffers and cleans the command-completion
 * queues after an SLI3 HBA reset.
 *
 * Return: always 0.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninit after HBA reset (SLI4 path)
 * @phba: pointer to lpfc HBA data structure.
 *
 * After an SLI4 HBA reset: frees HBQ buffers, cleans the txcmplq, moves
 * any ELS sgls stranded on the aborted list back to the free list (marked
 * SGL_FREED), returns aborted SCSI/NVMe io buffers to the per-hdwq put
 * lists, reposts aborted NVMET context buffers, and drains the slow-path
 * event queue.
 *
 * Return: number of aborted io buffers returned to the put lists.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* After a reset, aborted exchanges will never complete, so any sgl
	 * still on the aborted list can be recycled. sgl_list_lock protects
	 * both the abts and free ELS sgl lists.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* Return aborted io buffers to the put lists. Lock order here is
	 * hbalock, then the per-queue abts list lock, then the per-queue
	 * put list lock.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		/* Reset each buffer's state before returning it */
		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Repost aborted NVMET context buffers, clearing their abort state */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * Dispatches to the SLI3 or SLI4 bring-down-post handler through the
 * per-HBA function pointer bound at setup time.
 *
 * Return: whatever the bound routine returns (0 for the SLI3 path; the
 * count of aborted io buffers for the SLI4 path).
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135static void
1136lpfc_hb_timeout(struct timer_list *t)
1137{
1138 struct lpfc_hba *phba;
1139 uint32_t tmo_posted;
1140 unsigned long iflag;
1141
1142 phba = from_timer(phba, t, hb_tmofunc);
1143
1144
1145 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1146 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1147 if (!tmo_posted)
1148 phba->pport->work_port_events |= WORKER_HB_TMO;
1149 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1150
1151
1152 if (!tmo_posted)
1153 lpfc_worker_wake_up(phba);
1154 return;
1155}
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * Under the work_port_lock, sets HBA_RRQ_ACTIVE unless the driver is
 * unloading (in which case the flag is cleared), then wakes the worker
 * thread to process the RRQ list. Note load_flag is deliberately re-read
 * outside the lock for the wake-up decision.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Clears the heartbeat in-progress and timeout flags, releases the
 * mailbox, and — unless the port is offline, errored, or the driver is
 * unloading — re-arms the heartbeat timer for the next interval.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}
1222
1223
1224
1225
1226
1227
1228
1229
1230
/*
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * Periodically samples per-CPU idle time and, based on how busy each CPU
 * that owns an io_cq has been over the last interval, selects that CQ's
 * polling mode: LPFC_QUEUE_WORK when the CPU had little idle time (< 15%),
 * LPFC_IRQ_POLL otherwise. Requeues itself every LPFC_IDLE_STAT_DELAY ms
 * unless the driver is unloading. Skipped (but still requeued) when the
 * HBA is errored, offline, or congestion management is active.
 */
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* Compute this interval's idle and wall-clock deltas from
		 * the cumulative values returned by get_cpu_idle_time().
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		/* idle_percent first holds the busy percentage, then is
		 * inverted to the actual idle percentage.
		 */
		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		/* Busy CPU (under 15% idle): defer CQ work to a workqueue;
		 * otherwise poll from IRQ context.
		 */
		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
1292
/*
 * lpfc_hb_eq_delay_work - periodically adjust EQ interrupt coalescing.
 *
 * For each EQ that was recently active (q_mode set or HBA_EQ_DELAY_CHK
 * latched), marks its last CPU as needing a delay. Then, per present CPU,
 * derives a new microsecond delay from the interrupt count accumulated in
 * the per-CPU eq_info (icnt), capped at LPFC_MAX_AUTO_EQ_DELAY, and
 * applies it to every EQ on that CPU's list; EQs whose last_cpu moved are
 * migrated to the correct per-CPU list instead. Requeues itself every
 * LPFC_EQ_DELAY_MSECS unless auto_imax is off or the driver is unloading.
 */
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	/* Per-CPU flags: nonzero means that CPU's EQs should be delayed */
	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			/* Scale delay by interrupt count, capped at max */
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			/* EQ migrated to another CPU: move it to that CPU's
			 * list rather than reprogramming it here.
			 */
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
1358
1359
1360
1361
1362
1363
1364
1365
1366static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1367{
1368 u32 i;
1369 u32 hwq_count;
1370
1371 hwq_count = phba->cfg_hdw_queue;
1372 for (i = 0; i < hwq_count; i++) {
1373
1374 lpfc_adjust_pvt_pool_count(phba, i);
1375
1376
1377 lpfc_adjust_high_watermark(phba, i);
1378
1379#ifdef LPFC_MXP_STAT
1380
1381 lpfc_snapshot_mxp(phba, i);
1382#endif
1383 }
1384}
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394int
1395lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1396{
1397 LPFC_MBOXQ_t *pmboxq;
1398 int retval;
1399
1400
1401 if (phba->hba_flag & HBA_HBEAT_INP)
1402 return 0;
1403
1404 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1405 if (!pmboxq)
1406 return -ENOMEM;
1407
1408 lpfc_heart_beat(phba, pmboxq);
1409 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1410 pmboxq->vport = phba->pport;
1411 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1412
1413 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1414 mempool_free(pmboxq, phba->mbox_mem_pool);
1415 return -ENXIO;
1416 }
1417 phba->hba_flag |= HBA_HBEAT_INP;
1418
1419 return 0;
1420}
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432void
1433lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1434{
1435 if (phba->cfg_enable_hba_heartbeat)
1436 return;
1437 phba->hba_flag |= HBA_HBEAT_TMO;
1438}
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456void
1457lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1458{
1459 struct lpfc_vport **vports;
1460 struct lpfc_dmabuf *buf_ptr;
1461 int retval = 0;
1462 int i, tmo;
1463 struct lpfc_sli *psli = &phba->sli;
1464 LIST_HEAD(completions);
1465
1466 if (phba->cfg_xri_rebalancing) {
1467
1468 lpfc_hb_mxp_handler(phba);
1469 }
1470
1471 vports = lpfc_create_vport_work_array(phba);
1472 if (vports != NULL)
1473 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1474 lpfc_rcv_seq_check_edtov(vports[i]);
1475 lpfc_fdmi_change_check(vports[i]);
1476 }
1477 lpfc_destroy_vport_work_array(phba, vports);
1478
1479 if ((phba->link_state == LPFC_HBA_ERROR) ||
1480 (phba->pport->load_flag & FC_UNLOADING) ||
1481 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1482 return;
1483
1484 if (phba->elsbuf_cnt &&
1485 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1486 spin_lock_irq(&phba->hbalock);
1487 list_splice_init(&phba->elsbuf, &completions);
1488 phba->elsbuf_cnt = 0;
1489 phba->elsbuf_prev_cnt = 0;
1490 spin_unlock_irq(&phba->hbalock);
1491
1492 while (!list_empty(&completions)) {
1493 list_remove_head(&completions, buf_ptr,
1494 struct lpfc_dmabuf, list);
1495 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1496 kfree(buf_ptr);
1497 }
1498 }
1499 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1500
1501
1502 if (phba->cfg_enable_hba_heartbeat) {
1503
1504 spin_lock_irq(&phba->pport->work_port_lock);
1505 if (time_after(phba->last_completion_time +
1506 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1507 jiffies)) {
1508 spin_unlock_irq(&phba->pport->work_port_lock);
1509 if (phba->hba_flag & HBA_HBEAT_INP)
1510 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1511 else
1512 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1513 goto out;
1514 }
1515 spin_unlock_irq(&phba->pport->work_port_lock);
1516
1517
1518 if (phba->hba_flag & HBA_HBEAT_INP) {
1519
1520
1521
1522
1523
1524 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1525 "0459 Adapter heartbeat still outstanding: "
1526 "last compl time was %d ms.\n",
1527 jiffies_to_msecs(jiffies
1528 - phba->last_completion_time));
1529 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1530 } else {
1531 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1532 (list_empty(&psli->mboxq))) {
1533
1534 retval = lpfc_issue_hb_mbox(phba);
1535 if (retval) {
1536 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1537 goto out;
1538 }
1539 phba->skipped_hb = 0;
1540 } else if (time_before_eq(phba->last_completion_time,
1541 phba->skipped_hb)) {
1542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1543 "2857 Last completion time not "
1544 " updated in %d ms\n",
1545 jiffies_to_msecs(jiffies
1546 - phba->last_completion_time));
1547 } else
1548 phba->skipped_hb = jiffies;
1549
1550 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1551 goto out;
1552 }
1553 } else {
1554
1555 if (phba->hba_flag & HBA_HBEAT_TMO) {
1556 retval = lpfc_issue_hb_mbox(phba);
1557 if (retval)
1558 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1559 else
1560 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1561 goto out;
1562 }
1563 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1564 }
1565out:
1566 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1567}
1568
1569
1570
1571
1572
1573
1574
1575
/**
 * lpfc_offline_eratt - Bring the HBA offline on an SLI3 error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the port offline, resets the board, and leaves the HBA in the
 * LPFC_HBA_ERROR state.  The call sequence (offline prep, offline,
 * reset barrier, board reset, down-post, board-ready check) is order
 * dependent; do not reorder.
 */
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Mark the SLI layer inactive before tearing the port down. */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	/* Board reset is performed under hbalock. */
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	/* Leave the port in the error state; recovery happens elsewhere. */
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
1597
1598
1599
1600
1601
1602
1603
1604
/**
 * lpfc_sli4_offline_eratt - Bring an SLI4 port offline on error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Marks the HBA errored, flushes the I/O rings, and takes the port
 * offline.  Unlike the SLI3 variant, no board reset is attempted here.
 */
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	/* Set the error state under hbalock so readers observe it atomically. */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
/**
 * lpfc_handle_deferred_eratt - Handle a deferred (HS_FFER1) error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the port offline and then polls the host status register until
 * the HS_FFER1 bit clears, the register becomes unreadable, or the
 * driver starts unloading.  Finally clears DEFER_ERATT and records the
 * latest work status words from SLIM.
 */
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Abort outstanding FCP commands before taking the port offline. */
	lpfc_sli_abort_fcp_rings(phba);

	/* Take the port offline, waiting for mailbox commands (MBX_WAIT). */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Poll (100 ms steps) until the adapter leaves the FFER1 state. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			/* Register read failed: treat as an unplug. */
			phba->work_hs = UNPLUG_ERR ;
			break;
		}
		/* Driver unload interrupts the wait. */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/* If the status cleared and we are not unloading, restore the
	 * original status minus HS_FFER1 so the caller still sees the
	 * underlying error.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	/* Capture the latest work status words from SLIM. */
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
1697
1698static void
1699lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1700{
1701 struct lpfc_board_event_header board_event;
1702 struct Scsi_Host *shost;
1703
1704 board_event.event_type = FC_REG_BOARD_EVENT;
1705 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1706 shost = lpfc_shost_from_vport(phba->pport);
1707 fc_host_post_vendor_event(shost, fc_get_event_number(),
1708 sizeof(board_event),
1709 (char *) &board_event,
1710 LPFC_NL_VENDOR_ID);
1711}
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
/**
 * lpfc_handle_eratt_s3 - SLI3 error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Dispatches on the work host status (phba->work_hs):
 *  - HS_FFER6/HS_FFER8: restart the board and bring the port back online;
 *  - HS_CRIT_TEMP: post a temperature event and take the port offline;
 *  - otherwise: post a dump event and take the port offline.
 * Does nothing when the PCI channel is offline or HBA reset is disabled.
 */
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return. */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application. */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/* Abort outstanding FCP commands before the restart. */
		lpfc_sli_abort_fcp_rings(phba);

		/* Restart the board and attempt to bring it back online. */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		/* Critical temperature: report it and take the port offline. */
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* Unknown/unhandled status: report a dump event and take
		 * the port offline.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
/**
 * lpfc_sli4_port_sta_fn_reset - Recover an SLI4 port via function reset
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT, passed to offline prep.
 * @en_rn_msg: when true, log the "Reset Needed" recovery message.
 *
 * Waits for the port status register to indicate readiness (if_type >= 2),
 * fails any active mailbox command in the no-wait case, takes the port
 * offline, restarts the board, re-enables interrupts at the previous
 * mode, and brings the port back online.
 *
 * Return: 0 on success; -EIO or the underlying failure code otherwise.
 */
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/* Wait for the device status register before proceeding;
		 * bail out with the wait's error code on failure.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt to recover the port */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* In the no-wait case, complete any active mailbox command with
	 * MBX_NOT_FINISHED so it does not block the offline transition.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt at the previously used mode */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
1918
1919
1920
1921
1922
1923
1924
1925
/**
 * lpfc_handle_eratt_s4 - SLI4 error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handles an SLI4 error attention per interface type:
 *  - IF_TYPE_0: checks for a recoverable unrecoverable-error (UE) via the
 *    port semaphore register and, when the firmware reports port ready,
 *    attempts a function reset; otherwise takes the port offline.
 *  - IF_TYPE_2/6: decodes the port status/error registers, handles
 *    over-temperature, logs the down reason, and attempts a function
 *    reset when HBA reset is enabled.
 * On unrecovered errors, posts a dump event to the management layer.
 */
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli4_offline_eratt(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* Consider a PCI bus read failure as the device being removed. */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");

		/* Poll (1 s steps, up to ue_to_sr ms) for the port semaphore
		 * to report a recoverable UE.
		 */
		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1 sec between polls. */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE: wait (up to 20 s) for the firmware to
		 * report port ready, then reset the device.
		 */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* Consider a PCI bus read failure as the device being removed. */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* Over-temperature: post the event and go offline. */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		/* Decode and log the reason the port went down. */
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			/* Firmware update: suppress the reset-needed message. */
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return. */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Attempt a function reset to recover the port. */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* Don't report a dump event after a forced debug dump. */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* Reset failed: unrecoverable error. */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3152 Unrecoverable error\n");
		phba->link_state = LPFC_HBA_ERROR;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application. */
	lpfc_board_errevt_to_mgmt(phba);

	/* Post a dump event to the FC transport. */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118void
2119lpfc_handle_eratt(struct lpfc_hba *phba)
2120{
2121 (*phba->lpfc_handle_eratt)(phba);
2122}
2123
2124
2125
2126
2127
2128
2129
2130
/**
 * lpfc_handle_latt - Handle a link attention event (SLI3)
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a READ_TOPOLOGY mailbox command (completed by
 * lpfc_mbx_cmpl_read_topology) and clears the link attention bit in the
 * HA register.  On any failure, the goto ladder releases resources in
 * reverse order of acquisition, re-enables link attention interrupts,
 * brings the link down, and puts the HBA in the error state.
 */
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;	/* non-zero values mark which step failed (1..4) */

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
/**
 * lpfc_parse_vpd - Parse the adapter's Vital Product Data
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the raw VPD byte buffer.
 * @len: length in bytes of the VPD buffer.
 *
 * Walks the VPD resource tags (0x82/0x91 descriptors are skipped, 0x90
 * is the VPD-R data section, 0x78 terminates — standard PCI VPD tag
 * values) and copies the SN, V1..V4 keyword values into
 * phba->SerialNumber, ModelDesc, ModelName, ProgramType and Port,
 * setting the matching phba->vpd_flag bits.
 *
 * Return: 0 when @vpd is NULL, otherwise 1.
 */
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			/* Descriptor tags: skip over the 16-bit length. */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			/* VPD-R section: parse keyword/value pairs. */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			/* Clamp the section length to the buffer. */
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				/* V1: model description string. */
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				/* V2: model name string. */
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				/* V3: program type string. */
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				/* V4: port string; skipped when the SLI4
				 * port name comes from the firmware
				 * (pport_name_sta == LPFC_SLI4_PPNAME_GET).
				 */
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				/* Unknown keyword: skip its value. */
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
		}
		finished = 0;
		break;
		case 0x78:
			/* End tag: stop parsing. */
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
/**
 * lpfc_get_hba_model_desc - Retrieve the HBA model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: buffer for the model name (filled only when currently empty;
 *       written with at most 79 bytes).
 * @descp: buffer for the model description (filled only when currently
 *         empty; written with at most 255 bytes).
 *
 * Maps the PCI device ID to a model name/bus/function triple and formats
 * @mdp and @descp from it plus the link-speed capability bits in
 * phba->lmt.  Returns immediately when both buffers are already
 * populated.
 */
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;		/* 1 = FCoE/Ethernet speed suffix "GE" */
	int oneConnect = 0;	/* 1 = OneConnect adapter description format */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	/* Both outputs already populated: nothing to do. */
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	/* Highest supported link speed, from the link-max-speed bits. */
	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		/* BIU revision distinguishes LP7000 from LP7000E. */
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		/* JEDEC ID distinguishes the 2 Gb part. */
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe",
				"Obsolete, Unsupported FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7_FC:
		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7P_FC:
		m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of buffers to be posted.
 *
 * Allocates IOCBs and ELS-sized DMA buffers and posts them to the ring as
 * CMD_QUE_RING_BUF64_CN commands, packing up to two buffer descriptors per
 * IOCB.  Any shortfall (allocation or issue failure) is remembered in
 * pring->missbufcnt so a later call retries the remainder.
 *
 * Return: number of buffers NOT posted (0 on complete success).
 */
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	/* Include buffers still owed from previous attempts. */
	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			/* kfree(NULL) is a no-op, covers both failure modes */
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate a second buffer if more than one is still needed */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		/* Describe buffer 1 in the first BDE */
		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			/* Describe buffer 2 in the second BDE */
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			/* Undo the cnt accounting and free everything */
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		/* Track the posted buffers so they can be reclaimed later */
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754static int
2755lpfc_post_rcv_buf(struct lpfc_hba *phba)
2756{
2757 struct lpfc_sli *psli = &phba->sli;
2758
2759
2760 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2761
2762
2763 return 0;
2764}
2765
2766#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2767
2768
2769
2770
2771
2772
2773
2774
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * Loads the five standard SHA-1 initial hash values (FIPS 180 H0..H4)
 * into the supplied result array.
 */
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	static const uint32_t sha1_iv[5] = {
		0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
	};
	int i;

	for (i = 0; i < 5; i++)
		HashResultPointer[i] = sha1_iv[i];
}
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to the initial/result hash table (5 words).
 * @HashWorkingPointer: pointer to the 80-word working schedule; words
 * 0..15 must be populated by the caller, 16..79 are derived here.
 *
 * One SHA-1 compression: expand the message schedule, then run the
 * 80-round mixing loop and add the result back into the hash state.
 */
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	uint32_t a, b, c, d, e;
	uint32_t f, k, tmp;
	int round;

	/* Message schedule expansion: words 16..79 */
	for (round = 16; round <= 79; round++)
		HashWorkingPointer[round] =
		    S(1, HashWorkingPointer[round - 3] ^
		      HashWorkingPointer[round - 8] ^
		      HashWorkingPointer[round - 14] ^
		      HashWorkingPointer[round - 16]);

	a = HashResultPointer[0];
	b = HashResultPointer[1];
	c = HashResultPointer[2];
	d = HashResultPointer[3];
	e = HashResultPointer[4];

	for (round = 0; round <= 79; round++) {
		/* Round function and constant change every 20 rounds */
		if (round < 20) {
			f = (b & c) | ((~b) & d);
			k = 0x5A827999;
		} else if (round < 40) {
			f = b ^ c ^ d;
			k = 0x6ED9EBA1;
		} else if (round < 60) {
			f = (b & c) | (b & d) | (c & d);
			k = 0x8F1BBCDC;
		} else {
			f = b ^ c ^ d;
			k = 0xCA62C1D6;
		}
		tmp = f + k + S(5, a) + e + HashWorkingPointer[round];
		e = d;
		d = c;
		c = S(30, b);
		b = a;
		a = tmp;
	}

	HashResultPointer[0] += a;
	HashResultPointer[1] += b;
	HashResultPointer[2] += c;
	HashResultPointer[3] += d;
	HashResultPointer[4] += e;
}
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number.
 * @HashWorking: pointer to the entry of the working hash word.
 *
 * Folds one challenge word into the working hash word in place (XOR).
 */
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking ^= *RandomChallenge;
}
2857
2858
2859
2860
2861
2862
2863
2864
2865void
2866lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2867{
2868 int t;
2869 uint32_t *HashWorking;
2870 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2871
2872 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2873 if (!HashWorking)
2874 return;
2875
2876 HashWorking[0] = HashWorking[78] = *pwwnn++;
2877 HashWorking[1] = HashWorking[79] = *pwwnn;
2878
2879 for (t = 0; t < 7; t++)
2880 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2881
2882 lpfc_sha_init(hbainit);
2883 lpfc_sha_iterate(hbainit, HashWorking);
2884 kfree(HashWorking);
2885}
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Drops references / drives the discovery state machine for every node on
 * the vport so each ndlp can be released, then waits (up to roughly 30
 * seconds: 3000 iterations of msleep(10)) for the node list to drain
 * before cleaning up the vport's RRQs.
 */
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	/* Clean up VMID resources */
	if (lpfc_is_vmid_enabled(phba))
		lpfc_vmid_vport_cleanup(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just drop the reference for vport fabric nodes */
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_DID == Fabric_Cntl_DID &&
		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Unmapped fabric nodes get a DEVICE_RECOVERY event so the
		 * state machine can release their resources.
		 */
		if (ndlp->nlp_type & NLP_FABRIC &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);

		/* Nodes not registered with a transport can be removed now */
		if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RM);
	}

	/* At this point all ndlps should be released. Poll until the list
	 * empties; after ~30s dump the stragglers and give up.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						 LOG_TRACE_EVENT,
						 "0282 did:x%x ndlp:x%px "
						 "refcnt:%d xflags x%x nflag x%x\n",
						 ndlp->nlp_DID, (void *)ndlp,
						 kref_read(&ndlp->kref),
						 ndlp->fc4_xpt_flags,
						 ndlp->nlp_flag);
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974void
2975lpfc_stop_vport_timers(struct lpfc_vport *vport)
2976{
2977 del_timer_sync(&vport->els_tmofunc);
2978 del_timer_sync(&vport->delayed_disc_tmo);
2979 lpfc_can_disctmo(vport);
2980 return;
2981}
2982
2983
2984
2985
2986
2987
2988
2989
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Lock-free helper; the visible caller
 * (lpfc_sli4_stop_fcf_redisc_wait_timer) invokes it under hbalock.
 * Clears FCF_REDISC_PEND before deleting the timer so the pending state
 * is already gone by the time the timer is stopped.
 */
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009void
3010lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3011{
3012 spin_lock_irq(&phba->hbalock);
3013 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3014
3015 spin_unlock_irq(&phba->hbalock);
3016 return;
3017 }
3018 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3019
3020 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3021 spin_unlock_irq(&phba->hbalock);
3022}
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032void
3033lpfc_cmf_stop(struct lpfc_hba *phba)
3034{
3035 int cpu;
3036 struct lpfc_cgn_stat *cgs;
3037
3038
3039 if (!phba->sli4_hba.pc_sli4_params.cmf)
3040 return;
3041
3042 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3043 "6221 Stop CMF / Cancel Timer\n");
3044
3045
3046 hrtimer_cancel(&phba->cmf_timer);
3047
3048
3049 atomic_set(&phba->cmf_busy, 0);
3050 for_each_present_cpu(cpu) {
3051 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3052 atomic64_set(&cgs->total_bytes, 0);
3053 atomic64_set(&cgs->rcv_bytes, 0);
3054 atomic_set(&cgs->rx_io_cnt, 0);
3055 atomic64_set(&cgs->rx_latency, 0);
3056 }
3057 atomic_set(&phba->cmf_bw_wait, 0);
3058
3059
3060 queue_work(phba->wq, &phba->unblock_request_work);
3061}
3062
/**
 * lpfc_get_max_line_rate - Compute the maximum link byte rate
 * @phba: pointer to lpfc hba data structure.
 *
 * Converts the port speed reported by lpfc_sli_port_speed_get() into a
 * byte-per-second figure (rate * 1024 * 1024 / 10 — the /10 presumably
 * accounts for line encoding overhead; confirm against firmware docs).
 *
 * Return: maximum line rate in bytes.
 */
static inline uint64_t
lpfc_get_max_line_rate(struct lpfc_hba *phba)
{
	uint64_t rate = lpfc_sli_port_speed_get(phba);

	/*
	 * Keep the multiplication in 64 bits: the previous cast to
	 * unsigned long (32 bits on some platforms) could overflow for
	 * multi-gigabit link speeds.
	 */
	return (rate * 1024 * 1024) / 10;
}
3070
3071void
3072lpfc_cmf_signal_init(struct lpfc_hba *phba)
3073{
3074 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3075 "6223 Signal CMF init\n");
3076
3077
3078 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3079 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3080 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3081 phba->cmf_interval_rate, 1000);
3082 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3083
3084
3085 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3086}
3087
3088
3089
3090
3091
3092
3093
3094
/**
 * lpfc_cmf_start - Start CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * Resets congestion-management state (congestion buffer, event counters,
 * per-CPU byte/latency statistics), issues the initial CMF sync via
 * lpfc_cmf_signal_init(), and arms the cmf hrtimer for the first
 * monitoring interval.  No-op when CMF is unsupported or configured off.
 */
void
lpfc_cmf_start(struct lpfc_hba *phba)
{
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* We only do something if CMF is enabled */
	if (!phba->sli4_hba.pc_sli4_params.cmf ||
	    phba->cmf_active_mode == LPFC_CFG_OFF)
		return;

	/* Reinitialize congestion buffer info */
	lpfc_init_congestion_buf(phba);

	/* Zero all congestion event counters */
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);

	atomic_set(&phba->cmf_busy, 0);
	/* Zero the per-CPU congestion statistics */
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		atomic64_set(&cgs->total_bytes, 0);
		atomic64_set(&cgs->rcv_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
		atomic64_set(&cgs->rx_latency, 0);
	}
	phba->cmf_latency.tv_sec = 0;
	phba->cmf_latency.tv_nsec = 0;

	lpfc_cmf_signal_init(phba);

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6222 Start CMF / Timer\n");

	phba->cmf_timer_cnt = 0;
	/* LPFC_CMF_INTERVAL scaled by 1e6 — presumably ms to ns; confirm */
	hrtimer_start(&phba->cmf_timer,
		      ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
		      HRTIMER_MODE_REL);
	/* Timestamp taken after arming the timer, per original ordering */
	ktime_get_real_ts64(&phba->cmf_latency);

	atomic_set(&phba->cmf_bw_wait, 0);
	atomic_set(&phba->cmf_stop_io, 0);
}
3140
3141
3142
3143
3144
3145
3146
3147
3148void
3149lpfc_stop_hba_timers(struct lpfc_hba *phba)
3150{
3151 if (phba->pport)
3152 lpfc_stop_vport_timers(phba->pport);
3153 cancel_delayed_work_sync(&phba->eq_delay_work);
3154 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3155 del_timer_sync(&phba->sli.mbox_tmo);
3156 del_timer_sync(&phba->fabric_block_timer);
3157 del_timer_sync(&phba->eratt_poll);
3158 del_timer_sync(&phba->hb_tmofunc);
3159 if (phba->sli_rev == LPFC_SLI_REV4) {
3160 del_timer_sync(&phba->rrq_tmr);
3161 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3162 }
3163 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3164
3165 switch (phba->pci_dev_grp) {
3166 case LPFC_PCI_DEV_LP:
3167
3168 del_timer_sync(&phba->fcp_poll_timer);
3169 break;
3170 case LPFC_PCI_DEV_OC:
3171
3172 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3173 break;
3174 default:
3175 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3176 "0297 Invalid device group (x%x)\n",
3177 phba->pci_dev_grp);
3178 break;
3179 }
3180 return;
3181}
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
/**
 * lpfc_block_mgmt_io - Mark a hba's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_NO_WAIT or LPFC_MBX_WAIT.
 *
 * Sets LPFC_BLOCK_MGMT_IO so no new external management commands are
 * accepted.  With LPFC_MBX_WAIT, additionally polls every 2ms until any
 * active mailbox command completes, bounded by that command's timeout
 * (or LPFC_MBOX_TMO when none is active).
 */
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	/* Default deadline; tightened below if a mailbox is in flight */
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		/* Snapshot the command code for the diagnostic below */
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240void
3241lpfc_sli4_node_prep(struct lpfc_hba *phba)
3242{
3243 struct lpfc_nodelist *ndlp, *next_ndlp;
3244 struct lpfc_vport **vports;
3245 int i, rpi;
3246
3247 if (phba->sli_rev != LPFC_SLI_REV4)
3248 return;
3249
3250 vports = lpfc_create_vport_work_array(phba);
3251 if (vports == NULL)
3252 return;
3253
3254 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3255 if (vports[i]->load_flag & FC_UNLOADING)
3256 continue;
3257
3258 list_for_each_entry_safe(ndlp, next_ndlp,
3259 &vports[i]->fc_nodes,
3260 nlp_listp) {
3261 rpi = lpfc_sli4_alloc_rpi(phba);
3262 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3263
3264 continue;
3265 }
3266 ndlp->nlp_rpi = rpi;
3267 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3268 LOG_NODE | LOG_DISCOVERY,
3269 "0009 Assign RPI x%x to ndlp x%px "
3270 "DID:x%06x flg:x%x\n",
3271 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3272 ndlp->nlp_flag);
3273 }
3274 }
3275 lpfc_destroy_vport_work_array(phba, vports);
3276}
3277
3278
3279
3280
3281
3282
3283
3284
3285static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3286{
3287 struct lpfc_sli4_hdw_queue *qp;
3288 struct lpfc_io_buf *lpfc_ncmd;
3289 struct lpfc_io_buf *lpfc_ncmd_next;
3290 struct lpfc_epd_pool *epd_pool;
3291 unsigned long iflag;
3292
3293 epd_pool = &phba->epd_pool;
3294 qp = &phba->sli4_hba.hdwq[0];
3295
3296 spin_lock_init(&epd_pool->lock);
3297 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3298 spin_lock(&epd_pool->lock);
3299 INIT_LIST_HEAD(&epd_pool->list);
3300 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3301 &qp->lpfc_io_buf_list_put, list) {
3302 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3303 lpfc_ncmd->expedite = true;
3304 qp->put_io_bufs--;
3305 epd_pool->count++;
3306 if (epd_pool->count >= XRI_BATCH)
3307 break;
3308 }
3309 spin_unlock(&epd_pool->lock);
3310 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3311}
3312
3313
3314
3315
3316
3317
3318
3319
3320static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3321{
3322 struct lpfc_sli4_hdw_queue *qp;
3323 struct lpfc_io_buf *lpfc_ncmd;
3324 struct lpfc_io_buf *lpfc_ncmd_next;
3325 struct lpfc_epd_pool *epd_pool;
3326 unsigned long iflag;
3327
3328 epd_pool = &phba->epd_pool;
3329 qp = &phba->sli4_hba.hdwq[0];
3330
3331 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3332 spin_lock(&epd_pool->lock);
3333 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3334 &epd_pool->list, list) {
3335 list_move_tail(&lpfc_ncmd->list,
3336 &qp->lpfc_io_buf_list_put);
3337 lpfc_ncmd->flags = false;
3338 qp->put_io_bufs++;
3339 epd_pool->count--;
3340 }
3341 spin_unlock(&epd_pool->lock);
3342 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3343}
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3354{
3355 u32 i, j;
3356 u32 hwq_count;
3357 u32 count_per_hwq;
3358 struct lpfc_io_buf *lpfc_ncmd;
3359 struct lpfc_io_buf *lpfc_ncmd_next;
3360 unsigned long iflag;
3361 struct lpfc_sli4_hdw_queue *qp;
3362 struct lpfc_multixri_pool *multixri_pool;
3363 struct lpfc_pbl_pool *pbl_pool;
3364 struct lpfc_pvt_pool *pvt_pool;
3365
3366 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3367 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3368 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3369 phba->sli4_hba.io_xri_cnt);
3370
3371 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3372 lpfc_create_expedite_pool(phba);
3373
3374 hwq_count = phba->cfg_hdw_queue;
3375 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3376
3377 for (i = 0; i < hwq_count; i++) {
3378 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3379
3380 if (!multixri_pool) {
3381 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3382 "1238 Failed to allocate memory for "
3383 "multixri_pool\n");
3384
3385 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3386 lpfc_destroy_expedite_pool(phba);
3387
3388 j = 0;
3389 while (j < i) {
3390 qp = &phba->sli4_hba.hdwq[j];
3391 kfree(qp->p_multixri_pool);
3392 j++;
3393 }
3394 phba->cfg_xri_rebalancing = 0;
3395 return;
3396 }
3397
3398 qp = &phba->sli4_hba.hdwq[i];
3399 qp->p_multixri_pool = multixri_pool;
3400
3401 multixri_pool->xri_limit = count_per_hwq;
3402 multixri_pool->rrb_next_hwqid = i;
3403
3404
3405 pbl_pool = &multixri_pool->pbl_pool;
3406 spin_lock_init(&pbl_pool->lock);
3407 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3408 spin_lock(&pbl_pool->lock);
3409 INIT_LIST_HEAD(&pbl_pool->list);
3410 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3411 &qp->lpfc_io_buf_list_put, list) {
3412 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3413 qp->put_io_bufs--;
3414 pbl_pool->count++;
3415 }
3416 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3417 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3418 pbl_pool->count, i);
3419 spin_unlock(&pbl_pool->lock);
3420 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3421
3422
3423 pvt_pool = &multixri_pool->pvt_pool;
3424 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3425 pvt_pool->low_watermark = XRI_BATCH;
3426 spin_lock_init(&pvt_pool->lock);
3427 spin_lock_irqsave(&pvt_pool->lock, iflag);
3428 INIT_LIST_HEAD(&pvt_pool->list);
3429 pvt_pool->count = 0;
3430 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3431 }
3432}
3433
3434
3435
3436
3437
3438
3439
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * Moves every IO buffer held in each hardware queue's public (pbl) and
 * private (pvt) pools back to that queue's put list, then frees the
 * multixri_pool itself.  The expedite pool is destroyed first when NVMe
 * is enabled, and in-flight IO is flushed unless the port is unloading.
 */
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_destroy_expedite_pool(phba);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_sli_flush_io_rings(phba);

	hwq_count = phba->cfg_hdw_queue;

	for (i = 0; i < hwq_count; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		multixri_pool = qp->p_multixri_pool;
		if (!multixri_pool)
			continue;

		/* Detach the pool before draining it */
		qp->p_multixri_pool = NULL;

		/* Lock order: put-list lock, then pbl/pvt pool locks */
		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

		/* Deal with public free xri pool */
		pbl_pool = &multixri_pool->pbl_pool;
		spin_lock(&pbl_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
				pbl_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pbl_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pbl_pool->count--;
		}

		INIT_LIST_HEAD(&pbl_pool->list);
		pbl_pool->count = 0;

		spin_unlock(&pbl_pool->lock);

		/* Deal with private free xri pool */
		pvt_pool = &multixri_pool->pvt_pool;
		spin_lock(&pvt_pool->lock);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
				pvt_pool->count, i);

		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			pvt_pool->count--;
		}

		INIT_LIST_HEAD(&pvt_pool->list);
		pvt_pool->count = 0;

		spin_unlock(&pvt_pool->lock);
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

		kfree(multixri_pool);
	}
}
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
/**
 * lpfc_online - Initialize and bring an HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * Brings a previously-offlined adapter back online: blocks management IO,
 * reinitializes the SLI interface (SLI3 or SLI4 path), recreates the NVMe
 * localport when applicable, clears FC_OFFLINE_MODE on every vport, and
 * re-registers the xri-rebalancing pools and the CPU hotplug callback.
 *
 * Return: 0 on success or if the port was already online, 1 on failure.
 */
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i, error = 0;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);

		/* Reestablish the NVMe localport; the prior offline
		 * is presumed to have destroyed it — TODO confirm.
		 */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
		    !phba->nvmet_support) {
			error = lpfc_nvme_create_localport(phba->pport);
			if (error)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6132 NVME restore reg failed "
					"on nvmei error x%x\n", error);
		}
	} else {
		lpfc_sli_queue_init(phba);
		if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	/* Clear offline mode on every vport; vports other than the
	 * physical port get their VPI reset when the VPI map was cleared.
	 */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->cfg_xri_rebalancing)
		lpfc_create_multixri_pools(phba);

	lpfc_cpuhp_add(phba);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619void
3620lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3621{
3622 unsigned long iflag;
3623
3624 spin_lock_irqsave(&phba->hbalock, iflag);
3625 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3626 spin_unlock_irqrestore(&phba->hbalock, iflag);
3627}
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
/**
 * lpfc_offline_prep - Prepare an HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_NO_WAIT or LPFC_MBX_WAIT for the mailbox shutdown.
 *
 * Blocks management IO, takes the link down, and for every vport clears
 * VPI/VFI registration state, unregisters each node's RPI (returning the
 * RPI to the SLI4 pool), and drives fabric nodes through recovery and,
 * when unregistered from both transports, removal.  Finishes by shutting
 * down the mailbox system and flushing the driver workqueue.
 */
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			shost = lpfc_shost_from_vport(vports[i]);
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				/* ADISC is no longer applicable */
				spin_lock_irq(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(&ndlp->lock);

				lpfc_unreg_rpi(vports[i], ndlp);
				/*
				 * Whenever an SLI4 port goes offline, free
				 * the RPI.  A new RPI will be assigned when
				 * the port comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4) {
					lpfc_printf_vlog(vports[i], KERN_INFO,
						 LOG_NODE | LOG_DISCOVERY,
						 "0011 Free RPI x%x on "
						 "ndlp: x%px did x%x\n",
						 ndlp->nlp_rpi, ndlp,
						 ndlp->nlp_DID);
					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
				}

				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);

					/* Remove the node unless it is still
					 * registered with a transport; the
					 * transport teardown will remove it.
					 */
					if (!(ndlp->fc4_xpt_flags &
					      (NVME_XPT_REGD | SCSI_XPT_REGD)))
						lpfc_disc_state_machine
							(vports[i], ndlp,
							 NULL,
							 NLP_EVT_DEVICE_RM);
				}
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
3719
3720
3721
3722
3723
3724
3725
3726
3727
/**
 * lpfc_offline - Bring an HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the port and the NVMe target/local ports, stops all vport
 * timers, tears down the SLI rings, marks every vport FC_OFFLINE_MODE,
 * removes the CPU hotplug callback, and destroys the multi-XRI pools.
 * No-op when the port is already offline.
 */
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);

	/* Tear down the local and target port registrations.  The
	 * nvme transports need to cleanup.
	 */
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(phba->pport);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup.  The HBA is offline
	   now.  */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	/* Mark every vport offline and clear its pending work events */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
	 * in hba_unset
	 */
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		__lpfc_cpuhp_remove(phba);

	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);
}
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788static void
3789lpfc_scsi_free(struct lpfc_hba *phba)
3790{
3791 struct lpfc_io_buf *sb, *sb_next;
3792
3793 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3794 return;
3795
3796 spin_lock_irq(&phba->hbalock);
3797
3798
3799
3800 spin_lock(&phba->scsi_buf_list_put_lock);
3801 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3802 list) {
3803 list_del(&sb->list);
3804 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3805 sb->dma_handle);
3806 kfree(sb);
3807 phba->total_scsi_bufs--;
3808 }
3809 spin_unlock(&phba->scsi_buf_list_put_lock);
3810
3811 spin_lock(&phba->scsi_buf_list_get_lock);
3812 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3813 list) {
3814 list_del(&sb->list);
3815 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3816 sb->dma_handle);
3817 kfree(sb);
3818 phba->total_scsi_bufs--;
3819 }
3820 spin_unlock(&phba->scsi_buf_list_get_lock);
3821 spin_unlock_irq(&phba->hbalock);
3822}
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832void
3833lpfc_io_free(struct lpfc_hba *phba)
3834{
3835 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3836 struct lpfc_sli4_hdw_queue *qp;
3837 int idx;
3838
3839 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3840 qp = &phba->sli4_hba.hdwq[idx];
3841
3842 spin_lock(&qp->io_buf_list_put_lock);
3843 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3844 &qp->lpfc_io_buf_list_put,
3845 list) {
3846 list_del(&lpfc_ncmd->list);
3847 qp->put_io_bufs--;
3848 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3849 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3850 if (phba->cfg_xpsgl && !phba->nvmet_support)
3851 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3852 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3853 kfree(lpfc_ncmd);
3854 qp->total_io_bufs--;
3855 }
3856 spin_unlock(&qp->io_buf_list_put_lock);
3857
3858 spin_lock(&qp->io_buf_list_get_lock);
3859 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3860 &qp->lpfc_io_buf_list_get,
3861 list) {
3862 list_del(&lpfc_ncmd->list);
3863 qp->get_io_bufs--;
3864 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3865 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3866 if (phba->cfg_xpsgl && !phba->nvmet_support)
3867 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3868 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3869 kfree(lpfc_ncmd);
3870 qp->total_io_bufs--;
3871 }
3872 spin_unlock(&qp->io_buf_list_get_lock);
3873 }
3874}
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Recomputes the required ELS xri-sgl count, grows or shrinks the ELS
 * sgl list accordingly, and then reassigns an xri to every sgl entry on
 * the list.  On any allocation failure the whole ELS sgl list is freed.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	LIST_HEAD(els_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			/* stage on a local list, spliced in under lock below */
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrinked */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				__lpfc_mbuf_free(phba, sglq_entry->virt,
						 sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	return rc;
}
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	/* Resize the driver's NVMET sgl (XRI) pool so it consumes every XRI
	 * left over after the ELS XRIs are carved out of max_xri.  Grows or
	 * shrinks phba->sli4_hba.lpfc_nvmet_sgl_list to match, then assigns
	 * a fresh xritag to every entry on the resulting list.
	 *
	 * Return: 0 on success; -ENOMEM on allocation/xri exhaustion, in
	 * which case all NVMET sgls are released via the error path.
	 */
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);	/* local staging list, built unlocked */
	int rc;

	/* ELS gets its XRIs first; NVMET receives the remainder */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* Pool grew: allocate only the additional entries */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		/* Build new entries on the private list; no lock needed yet */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6303 Failure to allocate an "
						"NVMET sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			/* DMA-able buffer backing this sgl */
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
		}
		/* Publish the new entries; hbalock nests outside
		 * sgl_list_lock here.  NOTE(review): the ELS sgl path takes
		 * sgl_list_lock alone — confirm the asymmetry is intended.
		 */
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
		/* Pool shrank: release the surplus entries */
		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
		/* Free the first xri_cnt entries, buffer then descriptor */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&nvmet_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
						    sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		/* Put the survivors back on the driver list */
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

	/* Re-assign xritags to every entry now on the driver list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6307 Failed to allocate xri for "
					"NVMET sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	/* On any failure, drop the whole NVMET sgl pool */
	lpfc_free_nvmet_sgl_list(phba);
	return rc;
}
4118
int
lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
{
	/* Drain every IO buffer from all hardware queues' get/put lists and
	 * move them onto @cbuf in ascending sli4_xritag order.
	 *
	 * Return: the total number of buffers taken off the hdw queues
	 * (even if the sorted transfer terminates early).
	 */
	LIST_HEAD(blist);
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_io_buf *iobufp, *prev_iobufp;
	int idx, cnt, xri, inserted;

	cnt = 0;
	/* Phase 1: empty every hdw queue into the local blist, holding the
	 * per-queue get lock (irq) outside the put lock.
	 */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		spin_lock_irq(&qp->io_buf_list_get_lock);
		spin_lock(&qp->io_buf_list_put_lock);

		/* Take everything; reset counters to reflect empty queues */
		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
		list_splice(&qp->lpfc_io_buf_list_put, &blist);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
		cnt += qp->get_io_bufs + qp->put_io_bufs;
		qp->get_io_bufs = 0;
		qp->put_io_bufs = 0;
		qp->total_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock_irq(&qp->io_buf_list_get_lock);
	}

	/* Phase 2: insertion-sort the drained buffers into @cbuf by XRI.
	 * O(n^2), but runs only on slow (reconfiguration) paths.
	 */
	for (idx = 0; idx < cnt; idx++) {
		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
		if (!lpfc_cmd)
			return cnt;	/* blist ran short of the counters */
		if (idx == 0) {
			list_add_tail(&lpfc_cmd->list, cbuf);
			continue;
		}
		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
		inserted = 0;
		prev_iobufp = NULL;
		/* Find the first entry with a larger xritag */
		list_for_each_entry(iobufp, cbuf, list) {
			if (xri < iobufp->cur_iocbq.sli4_xritag) {
				if (prev_iobufp)
					list_add(&lpfc_cmd->list,
						 &prev_iobufp->list);
				else
					list_add(&lpfc_cmd->list, cbuf);
				inserted = 1;
				break;
			}
			prev_iobufp = iobufp;
		}
		if (!inserted)
			list_add_tail(&lpfc_cmd->list, cbuf);
	}
	return cnt;
}
4180
4181int
4182lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4183{
4184 struct lpfc_sli4_hdw_queue *qp;
4185 struct lpfc_io_buf *lpfc_cmd;
4186 int idx, cnt;
4187
4188 qp = phba->sli4_hba.hdwq;
4189 cnt = 0;
4190 while (!list_empty(cbuf)) {
4191 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4192 list_remove_head(cbuf, lpfc_cmd,
4193 struct lpfc_io_buf, list);
4194 if (!lpfc_cmd)
4195 return cnt;
4196 cnt++;
4197 qp = &phba->sli4_hba.hdwq[idx];
4198 lpfc_cmd->hdwq_no = idx;
4199 lpfc_cmd->hdwq = qp;
4200 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4201 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4202 spin_lock(&qp->io_buf_list_put_lock);
4203 list_add_tail(&lpfc_cmd->list,
4204 &qp->lpfc_io_buf_list_put);
4205 qp->put_io_bufs++;
4206 qp->total_io_bufs++;
4207 spin_unlock(&qp->io_buf_list_put_lock);
4208 }
4209 }
4210 return cnt;
4211}
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	/* Rebalance the SCSI/NVME IO buffer (XRI) pool after a configuration
	 * change: flush all buffers off the hdw queues, trim any excess over
	 * the new maximum, re-tag survivors with fresh XRIs, and redistribute
	 * them back across the hdw queues.
	 *
	 * Return: 0 on success; -ENOMEM if XRI assignment fails (all IO
	 * buffers are released on the error path).
	 */
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc, cnt;

	/* IO buffers get whatever XRIs remain after the ELS carve-out */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.io_xri_max = io_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated XRI sgl count:%d, "
			"maximum XRI count:%d\n",
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max);

	/* Pull every IO buffer onto io_sgl_list, sorted by XRI */
	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
		/* Over the new max: free the surplus buffers */
		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
			     phba->sli4_hba.io_xri_max;

		for (i = 0; i < io_xri_cnt; i++) {
			list_remove_head(&io_sgl_list, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}

	/* Re-tag the remaining buffers with newly allocated XRIs.
	 * io_xri_cnt is reset to the flushed total (cnt), which may still
	 * include buffers freed above until the re-tag loop completes.
	 */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	phba->sli4_hba.io_xri_cnt = cnt;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &io_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	/* Hand the re-tagged buffers back to the hdw queues */
	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	lpfc_io_free(phba);
	return rc;
}
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
int
lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
{
	/* Allocate up to @num_to_alloc new IO buffers (descriptor + DMA sgl
	 * buffer + XRI + iotag each) and post their sgls to the port.
	 * Stops early, without failing, on the first allocation error.
	 *
	 * Return: the number of buffers actually posted to the port.
	 */
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(post_nblist);
	LIST_HEAD(nvme_nblist);

	/* Restart the running count; incremented per successful buffer */
	phba->sli4_hba.io_xri_cnt = 0;
	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;

		/* DMA-able buffer that will hold the sgl for this IO */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						  GFP_KERNEL,
						  &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}

		if (phba->cfg_xpsgl && !phba->nvmet_support) {
			INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
		} else {
			/* T10-DIF (BlockGuard) requires the sgl buffer not
			 * to straddle a SLI4 page boundary; reject a
			 * misaligned pool allocation.
			 */
			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    (((unsigned long)(lpfc_ncmd->data) &
			    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"3369 Memory alignment err: "
						"addr=%lx\n",
						(unsigned long)lpfc_ncmd->data);
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
				break;
			}
		}

		INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);

		/* Bind an XRI to this buffer */
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq = &lpfc_ncmd->cur_iocbq;

		/* Bind an iotag; on failure return the XRI taken above */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6121 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		pwqeq->sli4_lxritag = lxri;
		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		pwqeq->context1 = lpfc_ncmd;

		/* The data buffer doubles as the sgl for this IO */
		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
		spin_lock_init(&lpfc_ncmd->buf_lock);

		/* Queue for batch posting below */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		phba->sli4_hba.io_xri_cnt++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocate %d out of %d requested new NVME "
			"buffers\n", bcnt, num_to_alloc);

	/* Post all prepared sgls to the port in one batch */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_sli4_post_io_sgl_list(
				phba, &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
4411
static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	/* Read the adapter's WWPN from NVRAM via a polled READ_NV mailbox
	 * command.
	 *
	 * Return: the WWPN in host-usable byte order, or (uint64_t)-1 if
	 * the mailbox cannot be allocated or the command fails.
	 */
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;

	/* Issue READ_NV synchronously (polled, no interrupt context) */
	lpfc_read_nv(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6019 Mailbox failed , mbxCmd x%x "
				"READ_NV, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t) -1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
	/* wwn contains the raw (big-endian wire order) name bytes */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		return be64_to_cpu(wwn);
	else
		/* SLI2/SLI3 store the name word-swapped; rotate to fix */
		return rol64(wwn, 32);
}
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
static int
lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	/* Allocate per-vport VMID resources (vmid array, priority-range
	 * bitmap, hash table) when VMID support is enabled.
	 *
	 * Return: 0 on success or when VMID is disabled; -ENOMEM on
	 * allocation failure (partially allocated resources are freed,
	 * though vport->vmid is left dangling — callers must not free it
	 * again on this path).
	 */
	/* VMID is not supported on SLI3; force the config knobs off */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		phba->cfg_vmid_app_header = 0;
		phba->cfg_vmid_priority_tagging = 0;
	}

	if (lpfc_is_vmid_enabled(phba)) {
		vport->vmid =
		    kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
			    GFP_KERNEL);
		if (!vport->vmid)
			return -ENOMEM;

		rwlock_init(&vport->vmid_lock);

		/* Seed vport VMID parameters from the hba configuration */
		vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
		vport->vmid_inactivity_timeout =
		    phba->cfg_vmid_inactivity_timeout;
		vport->max_vmid = phba->cfg_max_vmid;
		vport->cur_vmid_cnt = 0;

		vport->vmid_priority_range = bitmap_zalloc
			(LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);

		if (!vport->vmid_priority_range) {
			kfree(vport->vmid);
			return -ENOMEM;
		}

		hash_init(vport->hash_table);
	}
	return 0;
}
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512struct lpfc_vport *
4513lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4514{
4515 struct lpfc_vport *vport;
4516 struct Scsi_Host *shost = NULL;
4517 struct scsi_host_template *template;
4518 int error = 0;
4519 int i;
4520 uint64_t wwn;
4521 bool use_no_reset_hba = false;
4522 int rc;
4523
4524 if (lpfc_no_hba_reset_cnt) {
4525 if (phba->sli_rev < LPFC_SLI_REV4 &&
4526 dev == &phba->pcidev->dev) {
4527
4528 lpfc_sli_brdrestart(phba);
4529 rc = lpfc_sli_chipset_init(phba);
4530 if (rc)
4531 return NULL;
4532 }
4533 wwn = lpfc_get_wwpn(phba);
4534 }
4535
4536 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4537 if (wwn == lpfc_no_hba_reset[i]) {
4538 lpfc_printf_log(phba, KERN_ERR,
4539 LOG_TRACE_EVENT,
4540 "6020 Setting use_no_reset port=%llx\n",
4541 wwn);
4542 use_no_reset_hba = true;
4543 break;
4544 }
4545 }
4546
4547
4548 if (dev == &phba->pcidev->dev) {
4549 template = &phba->port_template;
4550
4551 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4552
4553 memcpy(template, &lpfc_template, sizeof(*template));
4554
4555 if (use_no_reset_hba)
4556
4557 template->eh_host_reset_handler = NULL;
4558
4559
4560 memcpy(&phba->vport_template, &lpfc_template,
4561 sizeof(*template));
4562 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4563 phba->vport_template.eh_bus_reset_handler = NULL;
4564 phba->vport_template.eh_host_reset_handler = NULL;
4565 phba->vport_template.vendor_id = 0;
4566
4567
4568 if (phba->sli_rev == LPFC_SLI_REV4) {
4569 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4570 phba->vport_template.sg_tablesize =
4571 phba->cfg_scsi_seg_cnt;
4572 } else {
4573 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4574 phba->vport_template.sg_tablesize =
4575 phba->cfg_sg_seg_cnt;
4576 }
4577
4578 } else {
4579
4580 memcpy(template, &lpfc_template_nvme,
4581 sizeof(*template));
4582 }
4583 } else {
4584 template = &phba->vport_template;
4585 }
4586
4587 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4588 if (!shost)
4589 goto out;
4590
4591 vport = (struct lpfc_vport *) shost->hostdata;
4592 vport->phba = phba;
4593 vport->load_flag |= FC_LOADING;
4594 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4595 vport->fc_rscn_flush = 0;
4596 lpfc_get_vport_cfgparam(vport);
4597
4598
4599 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4600
4601 shost->unique_id = instance;
4602 shost->max_id = LPFC_MAX_TARGET;
4603 shost->max_lun = vport->cfg_max_luns;
4604 shost->this_id = -1;
4605 shost->max_cmd_len = 16;
4606
4607 if (phba->sli_rev == LPFC_SLI_REV4) {
4608 if (!phba->cfg_fcp_mq_threshold ||
4609 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4610 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4611
4612 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4613 phba->cfg_fcp_mq_threshold);
4614
4615 shost->dma_boundary =
4616 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4617
4618 if (phba->cfg_xpsgl && !phba->nvmet_support)
4619 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4620 else
4621 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4622 } else
4623
4624
4625
4626 shost->nr_hw_queues = 1;
4627
4628
4629
4630
4631
4632
4633 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4634 if (dev != &phba->pcidev->dev) {
4635 shost->transportt = lpfc_vport_transport_template;
4636 vport->port_type = LPFC_NPIV_PORT;
4637 } else {
4638 shost->transportt = lpfc_transport_template;
4639 vport->port_type = LPFC_PHYSICAL_PORT;
4640 }
4641
4642 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4643 "9081 CreatePort TMPLATE type %x TBLsize %d "
4644 "SEGcnt %d/%d\n",
4645 vport->port_type, shost->sg_tablesize,
4646 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4647
4648
4649 rc = lpfc_vmid_res_alloc(phba, vport);
4650
4651 if (rc)
4652 goto out;
4653
4654
4655 INIT_LIST_HEAD(&vport->fc_nodes);
4656 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4657 spin_lock_init(&vport->work_port_lock);
4658
4659 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4660
4661 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4662
4663 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4664
4665 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4666 lpfc_setup_bg(phba, shost);
4667
4668 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4669 if (error)
4670 goto out_put_shost;
4671
4672 spin_lock_irq(&phba->port_list_lock);
4673 list_add_tail(&vport->listentry, &phba->port_list);
4674 spin_unlock_irq(&phba->port_list_lock);
4675 return vport;
4676
4677out_put_shost:
4678 kfree(vport->vmid);
4679 bitmap_free(vport->vmid_priority_range);
4680 scsi_host_put(shost);
4681out:
4682 return NULL;
4683}
4684
4685
4686
4687
4688
4689
4690
4691
void
destroy_port(struct lpfc_vport *vport)
{
	/* Tear down a vport created by lpfc_create_port(): unregister from
	 * the FC transport and SCSI midlayer, unlink from the hba's port
	 * list, then clean up the vport's discovery state.
	 */
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_cleanup(vport);
	return;
}
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720int
4721lpfc_get_instance(void)
4722{
4723 int ret;
4724
4725 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4726 return ret < 0 ? -1 : ret;
4727}
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
/* SCSI midlayer scan_finished callback: decide whether the initial bus
 * scan may complete.  @time is jiffies elapsed since the scan started.
 * Returns 1 when scanning should stop, 0 to keep waiting.  The ordering
 * of the checks below is significant — timeouts and unload short-circuit
 * the readiness tests.
 */
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	/* Driver unloading: stop the scan immediately */
	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	/* Hard cap: never scan longer than 30 seconds */
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	/* Give up waiting for a link after 15 seconds of link-down */
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	/* Keep waiting (stat stays 0) while discovery is in progress */
	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	/* Allow a brief grace period for targets to appear */
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	/* Fully ready: let the scan complete */
	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
4787
4788static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4789{
4790 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4791 struct lpfc_hba *phba = vport->phba;
4792
4793 fc_host_supported_speeds(shost) = 0;
4794
4795
4796
4797
4798 if (phba->hba_flag & HBA_FCOE_MODE)
4799 return;
4800
4801 if (phba->lmt & LMT_256Gb)
4802 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4803 if (phba->lmt & LMT_128Gb)
4804 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4805 if (phba->lmt & LMT_64Gb)
4806 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4807 if (phba->lmt & LMT_32Gb)
4808 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4809 if (phba->lmt & LMT_16Gb)
4810 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4811 if (phba->lmt & LMT_10Gb)
4812 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4813 if (phba->lmt & LMT_8Gb)
4814 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4815 if (phba->lmt & LMT_4Gb)
4816 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4817 if (phba->lmt & LMT_2Gb)
4818 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4819 if (phba->lmt & LMT_1Gb)
4820 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4821}
4822
4823
4824
4825
4826
4827
4828
4829
/* Initialize the FC transport attributes of @shost from the vport's
 * login parameters, and clear FC_LOADING to mark the port usable.
 */
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	/* Names come from the vport's registered WWNN/WWPN */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	/* Supported FC-4 types: bit 2 = basic FC, bit 7 = FCP/NVMe word
	 * (bitmap layout per the FC transport's fc4s convention)
	 */
	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	lpfc_host_supported_speeds_set(shost);

	/* Max frame size from the service parameters' BB receive size */
	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* Active FC-4 types mirror the supported set */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
4869
4870
4871
4872
4873
4874
4875
4876
4877
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* SLI-3 stop-port: silence the chip, then quiesce the driver. */
	/* Disable all host-attention interrupt sources */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush the MMIO write */
	/* Acknowledge/clear any pending host-attention bits */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush the MMIO write */

	/* Stop driver timers and cancel pending port work */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
4892
4893
4894
4895
4896
4897
4898
4899
4900
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* SLI-4 stop-port: quiesce driver timers and pending port work,
	 * then mark interrupts disabled (SLI-4 has no SLI-3-style HC/HA
	 * registers to poke here).
	 */
	lpfc_stop_hba_timers(phba);
	/* pport may not exist yet if init failed early */
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
4910
4911
4912
4913
4914
4915
4916
4917
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	/* Dispatch to the SLI-rev-specific stop routine (s3 or s4),
	 * then drain any outstanding deferred work.
	 */
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
4926
4927
4928
4929
4930
4931
4932
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	/* Arm the FCF rediscovery quiescent-period timer and flag the FCF
	 * state so that rediscovery is pending but no FCF is considered
	 * available until the timer fires.
	 */
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));

	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Current FCF is no longer usable or scanned */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Rediscovery will run when the wait timer expires */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}
4947
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	/* Timer callback for the FCF rediscovery quiescent period: convert
	 * the pending-rediscovery flag into a rediscovery event and wake
	 * the worker thread to process it.
	 */
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Bail if rediscovery was cancelled before the timer fired */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Pending -> event: hand off to the worker thread */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
static void
lpfc_vmid_poll(struct timer_list *t)
{
	/* Periodic VMID housekeeping timer: schedule worker-thread actions
	 * for priority-tag (QFPA) issuance and inactive-VMID cleanup, then
	 * re-arm itself for the next interval.
	 */
	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
	u32 wake_up = 0;

	/* Priority tagging enabled: ask the worker to check/issue QFPA */
	if (phba->pport->vmid_priority_tagging) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
	}

	/* Inactivity timeout configured, or a full deregister requested:
	 * ask the worker to sweep inactive VMIDs
	 */
	if (phba->pport->vmid_inactivity_timeout ||
	    phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
	}

	if (wake_up)
		lpfc_worker_wake_up(phba);

	/* Re-arm for the next poll interval */
	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
							LPFC_VMID_TIMER));
}
5015
5016
5017
5018
5019
5020
5021
5022
5023static void
5024lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5025 struct lpfc_acqe_link *acqe_link)
5026{
5027 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5028 case LPFC_ASYNC_LINK_FAULT_NONE:
5029 case LPFC_ASYNC_LINK_FAULT_LOCAL:
5030 case LPFC_ASYNC_LINK_FAULT_REMOTE:
5031 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5032 break;
5033 default:
5034 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5035 "0398 Unknown link fault code: x%x\n",
5036 bf_get(lpfc_acqe_link_fault, acqe_link));
5037 break;
5038 }
5039}
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051static uint8_t
5052lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5053 struct lpfc_acqe_link *acqe_link)
5054{
5055 uint8_t att_type;
5056
5057 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5058 case LPFC_ASYNC_LINK_STATUS_DOWN:
5059 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5060 att_type = LPFC_ATT_LINK_DOWN;
5061 break;
5062 case LPFC_ASYNC_LINK_STATUS_UP:
5063
5064 att_type = LPFC_ATT_RESERVED;
5065 break;
5066 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5067 att_type = LPFC_ATT_LINK_UP;
5068 break;
5069 default:
5070 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5071 "0399 Invalid link attention type: x%x\n",
5072 bf_get(lpfc_acqe_link_status, acqe_link));
5073 att_type = LPFC_ATT_RESERVED;
5074 break;
5075 }
5076 return att_type;
5077}
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087uint32_t
5088lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5089{
5090 uint32_t link_speed;
5091
5092 if (!lpfc_is_link_up(phba))
5093 return 0;
5094
5095 if (phba->sli_rev <= LPFC_SLI_REV3) {
5096 switch (phba->fc_linkspeed) {
5097 case LPFC_LINK_SPEED_1GHZ:
5098 link_speed = 1000;
5099 break;
5100 case LPFC_LINK_SPEED_2GHZ:
5101 link_speed = 2000;
5102 break;
5103 case LPFC_LINK_SPEED_4GHZ:
5104 link_speed = 4000;
5105 break;
5106 case LPFC_LINK_SPEED_8GHZ:
5107 link_speed = 8000;
5108 break;
5109 case LPFC_LINK_SPEED_10GHZ:
5110 link_speed = 10000;
5111 break;
5112 case LPFC_LINK_SPEED_16GHZ:
5113 link_speed = 16000;
5114 break;
5115 default:
5116 link_speed = 0;
5117 }
5118 } else {
5119 if (phba->sli4_hba.link_state.logical_speed)
5120 link_speed =
5121 phba->sli4_hba.link_state.logical_speed;
5122 else
5123 link_speed = phba->sli4_hba.link_state.speed;
5124 }
5125 return link_speed;
5126}
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139static uint32_t
5140lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5141 uint8_t speed_code)
5142{
5143 uint32_t port_speed;
5144
5145 switch (evt_code) {
5146 case LPFC_TRAILER_CODE_LINK:
5147 switch (speed_code) {
5148 case LPFC_ASYNC_LINK_SPEED_ZERO:
5149 port_speed = 0;
5150 break;
5151 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5152 port_speed = 10;
5153 break;
5154 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5155 port_speed = 100;
5156 break;
5157 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5158 port_speed = 1000;
5159 break;
5160 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5161 port_speed = 10000;
5162 break;
5163 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5164 port_speed = 20000;
5165 break;
5166 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5167 port_speed = 25000;
5168 break;
5169 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5170 port_speed = 40000;
5171 break;
5172 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5173 port_speed = 100000;
5174 break;
5175 default:
5176 port_speed = 0;
5177 }
5178 break;
5179 case LPFC_TRAILER_CODE_FC:
5180 switch (speed_code) {
5181 case LPFC_FC_LA_SPEED_UNKNOWN:
5182 port_speed = 0;
5183 break;
5184 case LPFC_FC_LA_SPEED_1G:
5185 port_speed = 1000;
5186 break;
5187 case LPFC_FC_LA_SPEED_2G:
5188 port_speed = 2000;
5189 break;
5190 case LPFC_FC_LA_SPEED_4G:
5191 port_speed = 4000;
5192 break;
5193 case LPFC_FC_LA_SPEED_8G:
5194 port_speed = 8000;
5195 break;
5196 case LPFC_FC_LA_SPEED_10G:
5197 port_speed = 10000;
5198 break;
5199 case LPFC_FC_LA_SPEED_16G:
5200 port_speed = 16000;
5201 break;
5202 case LPFC_FC_LA_SPEED_32G:
5203 port_speed = 32000;
5204 break;
5205 case LPFC_FC_LA_SPEED_64G:
5206 port_speed = 64000;
5207 break;
5208 case LPFC_FC_LA_SPEED_128G:
5209 port_speed = 128000;
5210 break;
5211 case LPFC_FC_LA_SPEED_256G:
5212 port_speed = 256000;
5213 break;
5214 default:
5215 port_speed = 0;
5216 }
5217 break;
5218 default:
5219 port_speed = 0;
5220 }
5221 return port_speed;
5222}
5223
5224
5225
5226
5227
5228
5229
5230
5231static void
5232lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5233 struct lpfc_acqe_link *acqe_link)
5234{
5235 struct lpfc_dmabuf *mp;
5236 LPFC_MBOXQ_t *pmb;
5237 MAILBOX_t *mb;
5238 struct lpfc_mbx_read_top *la;
5239 uint8_t att_type;
5240 int rc;
5241
5242 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5243 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5244 return;
5245 phba->fcoe_eventtag = acqe_link->event_tag;
5246 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5247 if (!pmb) {
5248 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5249 "0395 The mboxq allocation failed\n");
5250 return;
5251 }
5252 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5253 if (!mp) {
5254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5255 "0396 The lpfc_dmabuf allocation failed\n");
5256 goto out_free_pmb;
5257 }
5258 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5259 if (!mp->virt) {
5260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5261 "0397 The mbuf allocation failed\n");
5262 goto out_free_dmabuf;
5263 }
5264
5265
5266 lpfc_els_flush_all_cmd(phba);
5267
5268
5269 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5270
5271
5272 phba->sli.slistat.link_event++;
5273
5274
5275 lpfc_read_topology(phba, pmb, mp);
5276 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5277 pmb->vport = phba->pport;
5278
5279
5280 phba->sli4_hba.link_state.speed =
5281 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5282 bf_get(lpfc_acqe_link_speed, acqe_link));
5283 phba->sli4_hba.link_state.duplex =
5284 bf_get(lpfc_acqe_link_duplex, acqe_link);
5285 phba->sli4_hba.link_state.status =
5286 bf_get(lpfc_acqe_link_status, acqe_link);
5287 phba->sli4_hba.link_state.type =
5288 bf_get(lpfc_acqe_link_type, acqe_link);
5289 phba->sli4_hba.link_state.number =
5290 bf_get(lpfc_acqe_link_number, acqe_link);
5291 phba->sli4_hba.link_state.fault =
5292 bf_get(lpfc_acqe_link_fault, acqe_link);
5293 phba->sli4_hba.link_state.logical_speed =
5294 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5295
5296 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5297 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5298 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5299 "Logical speed:%dMbps Fault:%d\n",
5300 phba->sli4_hba.link_state.speed,
5301 phba->sli4_hba.link_state.topology,
5302 phba->sli4_hba.link_state.status,
5303 phba->sli4_hba.link_state.type,
5304 phba->sli4_hba.link_state.number,
5305 phba->sli4_hba.link_state.logical_speed,
5306 phba->sli4_hba.link_state.fault);
5307
5308
5309
5310
5311 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5312 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5313 if (rc == MBX_NOT_FINISHED)
5314 goto out_free_dmabuf;
5315 return;
5316 }
5317
5318
5319
5320
5321
5322
5323 mb = &pmb->u.mb;
5324 mb->mbxStatus = MBX_SUCCESS;
5325
5326
5327 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5328
5329
5330 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5331 la->eventTag = acqe_link->event_tag;
5332 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5333 bf_set(lpfc_mbx_read_top_link_spd, la,
5334 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5335
5336
5337 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5338 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5339 bf_set(lpfc_mbx_read_top_il, la, 0);
5340 bf_set(lpfc_mbx_read_top_pb, la, 0);
5341 bf_set(lpfc_mbx_read_top_fa, la, 0);
5342 bf_set(lpfc_mbx_read_top_mm, la, 0);
5343
5344
5345 lpfc_mbx_cmpl_read_topology(phba, pmb);
5346
5347 return;
5348
5349out_free_dmabuf:
5350 kfree(mp);
5351out_free_pmb:
5352 mempool_free(pmb, phba->mbox_mem_pool);
5353}
5354
5355
5356
5357
5358
5359
5360
5361
5362
5363
5364
5365
5366static uint8_t
5367lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5368{
5369 uint8_t port_speed;
5370
5371 switch (speed_code) {
5372 case LPFC_FC_LA_SPEED_1G:
5373 port_speed = LPFC_LINK_SPEED_1GHZ;
5374 break;
5375 case LPFC_FC_LA_SPEED_2G:
5376 port_speed = LPFC_LINK_SPEED_2GHZ;
5377 break;
5378 case LPFC_FC_LA_SPEED_4G:
5379 port_speed = LPFC_LINK_SPEED_4GHZ;
5380 break;
5381 case LPFC_FC_LA_SPEED_8G:
5382 port_speed = LPFC_LINK_SPEED_8GHZ;
5383 break;
5384 case LPFC_FC_LA_SPEED_16G:
5385 port_speed = LPFC_LINK_SPEED_16GHZ;
5386 break;
5387 case LPFC_FC_LA_SPEED_32G:
5388 port_speed = LPFC_LINK_SPEED_32GHZ;
5389 break;
5390 case LPFC_FC_LA_SPEED_64G:
5391 port_speed = LPFC_LINK_SPEED_64GHZ;
5392 break;
5393 case LPFC_FC_LA_SPEED_128G:
5394 port_speed = LPFC_LINK_SPEED_128GHZ;
5395 break;
5396 case LPFC_FC_LA_SPEED_256G:
5397 port_speed = LPFC_LINK_SPEED_256GHZ;
5398 break;
5399 default:
5400 port_speed = 0;
5401 break;
5402 }
5403
5404 return port_speed;
5405}
5406
5407void
5408lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5409{
5410 struct rxtable_entry *entry;
5411 int cnt = 0, head, tail, last, start;
5412
5413 head = atomic_read(&phba->rxtable_idx_head);
5414 tail = atomic_read(&phba->rxtable_idx_tail);
5415 if (!phba->rxtable || head == tail) {
5416 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5417 "4411 Rxtable is empty\n");
5418 return;
5419 }
5420 last = tail;
5421 start = head;
5422
5423
5424 while (start != last) {
5425 if (start)
5426 start--;
5427 else
5428 start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5429 entry = &phba->rxtable[start];
5430 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5431 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5432 "Lat %lld ASz %lld Info %02d BWUtil %d "
5433 "Int %d slot %d\n",
5434 cnt, entry->max_bytes_per_interval,
5435 entry->total_bytes, entry->rcv_bytes,
5436 entry->avg_io_latency, entry->avg_io_size,
5437 entry->cmf_info, entry->timer_utilization,
5438 entry->timer_interval, start);
5439 cnt++;
5440 if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
5441 return;
5442 }
5443}
5444
5445
5446
5447
5448
5449
5450
5451
5452void
5453lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5454{
5455 struct lpfc_cgn_info *cp;
5456 struct tm broken;
5457 struct timespec64 cur_time;
5458 u32 cnt;
5459 u16 value;
5460
5461
5462 if (!phba->cgn_i)
5463 return;
5464 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5465 ktime_get_real_ts64(&cur_time);
5466 time64_to_tm(cur_time.tv_sec, 0, &broken);
5467
5468
5469 switch (dtag) {
5470 case ELS_DTAG_LNK_INTEGRITY:
5471 cnt = le32_to_cpu(cp->link_integ_notification);
5472 cnt++;
5473 cp->link_integ_notification = cpu_to_le32(cnt);
5474
5475 cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5476 cp->cgn_stat_lnk_day = broken.tm_mday;
5477 cp->cgn_stat_lnk_year = broken.tm_year - 100;
5478 cp->cgn_stat_lnk_hour = broken.tm_hour;
5479 cp->cgn_stat_lnk_min = broken.tm_min;
5480 cp->cgn_stat_lnk_sec = broken.tm_sec;
5481 break;
5482 case ELS_DTAG_DELIVERY:
5483 cnt = le32_to_cpu(cp->delivery_notification);
5484 cnt++;
5485 cp->delivery_notification = cpu_to_le32(cnt);
5486
5487 cp->cgn_stat_del_month = broken.tm_mon + 1;
5488 cp->cgn_stat_del_day = broken.tm_mday;
5489 cp->cgn_stat_del_year = broken.tm_year - 100;
5490 cp->cgn_stat_del_hour = broken.tm_hour;
5491 cp->cgn_stat_del_min = broken.tm_min;
5492 cp->cgn_stat_del_sec = broken.tm_sec;
5493 break;
5494 case ELS_DTAG_PEER_CONGEST:
5495 cnt = le32_to_cpu(cp->cgn_peer_notification);
5496 cnt++;
5497 cp->cgn_peer_notification = cpu_to_le32(cnt);
5498
5499 cp->cgn_stat_peer_month = broken.tm_mon + 1;
5500 cp->cgn_stat_peer_day = broken.tm_mday;
5501 cp->cgn_stat_peer_year = broken.tm_year - 100;
5502 cp->cgn_stat_peer_hour = broken.tm_hour;
5503 cp->cgn_stat_peer_min = broken.tm_min;
5504 cp->cgn_stat_peer_sec = broken.tm_sec;
5505 break;
5506 case ELS_DTAG_CONGESTION:
5507 cnt = le32_to_cpu(cp->cgn_notification);
5508 cnt++;
5509 cp->cgn_notification = cpu_to_le32(cnt);
5510
5511 cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5512 cp->cgn_stat_cgn_day = broken.tm_mday;
5513 cp->cgn_stat_cgn_year = broken.tm_year - 100;
5514 cp->cgn_stat_cgn_hour = broken.tm_hour;
5515 cp->cgn_stat_cgn_min = broken.tm_min;
5516 cp->cgn_stat_cgn_sec = broken.tm_sec;
5517 }
5518 if (phba->cgn_fpin_frequency &&
5519 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5520 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5521 cp->cgn_stat_npm = value;
5522 }
5523 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5524 LPFC_CGN_CRC32_SEED);
5525 cp->cgn_info_crc = cpu_to_le32(value);
5526}
5527
5528
5529
5530
5531
5532
5533
5534
5535
5536
5537
/**
 * lpfc_cgn_save_evt_cnt - Roll the last minute of congestion statistics into
 *	the registered congestion info buffer.
 * @phba: pointer to lpfc hba data structure.
 *
 * Called from the CMF timer path. Once per minute (gated by
 * cgn_evt_timestamp) this snapshots and zeroes the driver's per-minute
 * congestion counters into the minute slots of the shared buffer, and on
 * the hour/day boundaries aggregates the minute/hour slots into the hour/day
 * slots. Finishes by refreshing the warn/alarm frequencies and the buffer
 * CRC. All multi-byte fields in the shared buffer are stored little-endian.
 */
static void
lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
{
	struct lpfc_cgn_info *cp;
	struct tm broken;
	struct timespec64 cur_time;
	uint32_t i, index;
	uint16_t value, mvalue;
	uint64_t bps;
	uint32_t mbps;
	uint32_t dvalue, wvalue, lvalue, avalue;
	uint64_t latsum;
	__le16 *ptr;
	__le32 *lptr;
	__le16 *mptr;

	/* Make sure we have a congestion info buffer registered */
	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	/* Only do the minute rollup once the minute interval has expired */
	if (time_before(jiffies, phba->cgn_evt_timestamp))
		return;
	phba->cgn_evt_timestamp = jiffies +
			msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
	phba->cgn_evt_minute++;

	/* We should get to this point in the routine on 1 minute intervals */

	ktime_get_real_ts64(&cur_time);
	time64_to_tm(cur_time.tv_sec, 0, &broken);

	if (phba->cgn_fpin_frequency &&
	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}

	/* Snapshot and reset the accumulated latency event counters */
	lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
	latsum = atomic64_read(&phba->cgn_latency_evt);
	atomic_set(&phba->cgn_latency_evt_cnt, 0);
	atomic64_set(&phba->cgn_latency_evt, 0);

	/* Convert the 512-byte block count accumulated over the last minute
	 * into an MB/s figure for the bandwidth slot.
	 */
	bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
	phba->rx_block_cnt = 0;
	mvalue = bps / (1024 * 1024); /* bytes/sec -> MB/sec */

	/* Refresh the current CMF mode/levels in the shared buffer */
	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;

	/* Record the configured LUN queue depth */
	value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
	cp->cgn_lunq = cpu_to_le16(value);

	/* Advance the minute ring index, wrapping after LPFC_MIN_HOUR
	 * entries.
	 */
	index = ++cp->cgn_index_minute;
	if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
		cp->cgn_index_minute = 0;
		index = 0;
	}

	/* Snapshot and reset the driver-detected event counter */
	dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
	atomic_set(&phba->cgn_driver_evt_cnt, 0);

	/* Fabric warning count is only meaningful when warnings are
	 * registered for via FPIN or signaling.
	 */
	wvalue = 0;
	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);

	/* Likewise, alarms only count when registered for */
	avalue = 0;
	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);

	/* Store the per-minute values into this minute's slot of the
	 * shared buffer (little-endian).
	 */
	ptr = &cp->cgn_drvr_min[index];
	value = (uint16_t)dvalue;
	*ptr = cpu_to_le16(value);

	ptr = &cp->cgn_warn_min[index];
	value = (uint16_t)wvalue;
	*ptr = cpu_to_le16(value);

	ptr = &cp->cgn_alarm_min[index];
	value = (uint16_t)avalue;
	*ptr = cpu_to_le16(value);

	lptr = &cp->cgn_latency_min[index];
	if (lvalue) {
		/* Average latency = total latency / event count */
		lvalue = (uint32_t)div_u64(latsum, lvalue);
		*lptr = cpu_to_le32(lvalue);
	} else {
		*lptr = 0;
	}

	/* Bandwidth (MB/s) for this minute */
	mptr = &cp->cgn_bw_min[index];
	*mptr = cpu_to_le16(mvalue);

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
			index, dvalue, wvalue, *lptr, mvalue, avalue);

	/* On every hour boundary, fold the minute slots into an hour slot */
	if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
		/* Advance the hour ring index, wrapping after
		 * LPFC_HOUR_DAY entries.
		 */
		index = ++cp->cgn_index_hour;
		if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
			cp->cgn_index_hour = 0;
			index = 0;
		}

		dvalue = 0;
		wvalue = 0;
		lvalue = 0;
		avalue = 0;
		mvalue = 0;
		mbps = 0;
		for (i = 0; i < LPFC_MIN_HOUR; i++) {
			dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
			wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
			lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
			mbps += le16_to_cpu(cp->cgn_bw_min[i]);
			avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
		}
		/* Latency and bandwidth are averaged; counts are summed */
		if (lvalue)
			lvalue /= LPFC_MIN_HOUR;
		if (mbps)
			mvalue = mbps / LPFC_MIN_HOUR;

		lptr = &cp->cgn_drvr_hr[index];
		*lptr = cpu_to_le32(dvalue);
		lptr = &cp->cgn_warn_hr[index];
		*lptr = cpu_to_le32(wvalue);
		lptr = &cp->cgn_latency_hr[index];
		*lptr = cpu_to_le32(lvalue);
		mptr = &cp->cgn_bw_hr[index];
		*mptr = cpu_to_le16(mvalue);
		lptr = &cp->cgn_alarm_hr[index];
		*lptr = cpu_to_le32(avalue);

		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"2419 Congestion Info - hour "
				"(%d): %d %d %d %d %d\n",
				index, dvalue, wvalue, lvalue, mvalue, avalue);
	}

	/* On every day boundary, fold the hour slots into a day slot */
	if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
		/* Advance the day ring index, wrapping after
		 * LPFC_MAX_CGN_DAYS entries.
		 */
		index = ++cp->cgn_index_day;
		if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
			cp->cgn_index_day = 0;
			index = 0;
		}

		/* After the day ring has wrapped once, slot 0 no longer
		 * corresponds to the buffer's original start time, so
		 * rewrite the recorded start time from the timestamp taken
		 * when the wrap was noted (cgn_daily_ts).
		 */
		if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
			time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);

			cp->cgn_info_month = broken.tm_mon + 1;
			cp->cgn_info_day = broken.tm_mday;
			cp->cgn_info_year = broken.tm_year - 100;
			cp->cgn_info_hour = broken.tm_hour;
			cp->cgn_info_minute = broken.tm_min;
			cp->cgn_info_second = broken.tm_sec;

			lpfc_printf_log
				(phba, KERN_INFO, LOG_CGN_MGMT,
				"2646 CGNInfo idx0 Start Time: "
				"%d/%d/%d %d:%d:%d\n",
				cp->cgn_info_day, cp->cgn_info_month,
				cp->cgn_info_year, cp->cgn_info_hour,
				cp->cgn_info_minute, cp->cgn_info_second);
		}

		dvalue = 0;
		wvalue = 0;
		lvalue = 0;
		mvalue = 0;
		mbps = 0;
		avalue = 0;
		for (i = 0; i < LPFC_HOUR_DAY; i++) {
			dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
			wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
			lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
			mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
			avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
		}
		if (lvalue)
			lvalue /= LPFC_HOUR_DAY;
		if (mbps)
			mvalue = mbps / LPFC_HOUR_DAY;

		lptr = &cp->cgn_drvr_day[index];
		*lptr = cpu_to_le32(dvalue);
		lptr = &cp->cgn_warn_day[index];
		*lptr = cpu_to_le32(wvalue);
		lptr = &cp->cgn_latency_day[index];
		*lptr = cpu_to_le32(lvalue);
		mptr = &cp->cgn_bw_day[index];
		*mptr = cpu_to_le16(mvalue);
		lptr = &cp->cgn_alarm_day[index];
		*lptr = cpu_to_le32(avalue);

		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"2420 Congestion Info - daily (%d): "
				"%d %d %d %d %d\n",
				index, dvalue, wvalue, lvalue, mvalue, avalue);

		/* When we hit the last day slot, note the wrap and record
		 * the time it happened for the idx0 start-time rewrite
		 * above.
		 */
		if (index == (LPFC_MAX_CGN_DAYS - 1)) {
			phba->hba_flag |= HBA_CGN_DAY_WRAP;
			ktime_get_real_ts64(&phba->cgn_daily_ts);
		}
	}

	/* Refresh FPIN warn/alarm frequencies if registered for */
	value = phba->cgn_fpin_frequency;
	if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
		cp->cgn_warn_freq = cpu_to_le16(value);
	if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
		cp->cgn_alarm_freq = cpu_to_le16(value);

	/* Signal frequencies override the FPIN values when signaling is
	 * registered for.
	 */
	value = phba->cgn_sig_freq;

	if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		cp->cgn_warn_freq = cpu_to_le16(value);
	if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		cp->cgn_alarm_freq = cpu_to_le16(value);

	/* Recompute the buffer CRC after all updates */
	lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
				     LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(lvalue);
}
5812
5813
5814
5815
5816
5817
5818
5819
5820
5821uint32_t
5822lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5823{
5824 struct timespec64 cmpl_time;
5825 uint32_t msec = 0;
5826
5827 ktime_get_real_ts64(&cmpl_time);
5828
5829
5830
5831
5832 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5833 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5834 NSEC_PER_MSEC;
5835 } else {
5836 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5837 msec = (cmpl_time.tv_sec -
5838 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5839 msec += ((cmpl_time.tv_nsec -
5840 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5841 } else {
5842 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5843 1) * MSEC_PER_SEC;
5844 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5845 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5846 }
5847 }
5848 return msec;
5849}
5850
5851
5852
5853
5854
5855
/**
 * lpfc_cmf_timer - hrtimer callback driving congestion management (CMF).
 * @timer: embedded hrtimer inside struct lpfc_hba.
 *
 * Runs every LPFC_CMF_INTERVAL ms (unless shortened below to align with the
 * minute boundary). Gathers the per-CPU congestion statistics accumulated
 * over the last interval, updates the bandwidth limit for the next interval,
 * records a snapshot in the rxtable ring, and kicks the minute-level
 * statistics rollup. Returns HRTIMER_NORESTART only when CMF is off or the
 * latency timestamp was never initialized.
 */
static enum hrtimer_restart
lpfc_cmf_timer(struct hrtimer *timer)
{
	struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
					     cmf_timer);
	struct rxtable_entry *entry;
	uint32_t io_cnt;
	uint32_t head, tail;
	uint32_t busy, max_read;
	uint64_t total, rcv, lat, mbpi;
	int timer_interval = LPFC_CMF_INTERVAL;
	uint32_t ms;
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* Stop the timer entirely when CMF is disabled or never started */
	if (phba->cmf_active_mode == LPFC_CFG_OFF ||
	    !phba->cmf_latency.tv_sec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6224 CMF timer exit: %d %lld\n",
				phba->cmf_active_mode,
				(uint64_t)phba->cmf_latency.tv_sec);
		return HRTIMER_NORESTART;
	}

	/* No port yet: skip the work but keep the timer running */
	if (!phba->pport)
		goto skip;

	/* Tell the I/O path the interval boundary is being processed */
	atomic_set(&phba->cmf_stop_io, 1);

	/* Measure how long the just-finished interval actually was */
	ms = lpfc_calc_cmf_latency(phba);

	/* Restart the latency clock for the next interval */
	ktime_get_real_ts64(&phba->cmf_latency);

	phba->cmf_link_byte_count =
		div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);

	/* Collect and zero the per-CPU stats for the last interval */
	total = 0;
	io_cnt = 0;
	lat = 0;
	rcv = 0;
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		total += atomic64_xchg(&cgs->total_bytes, 0);
		io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
		lat += atomic64_xchg(&cgs->rx_latency, 0);
		rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
	}

	/* In managed mode with a usable link, take the bandwidth limit from
	 * the last CMF_SYNC response and issue a new CMF_SYNC WQE; otherwise
	 * default to the full link byte count.
	 */
	if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
	    phba->link_state != LPFC_LINK_DOWN &&
	    phba->hba_flag & HBA_SETUP) {
		mbpi = phba->cmf_last_sync_bw;
		phba->cmf_last_sync_bw = 0;
		lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
	} else {
		mbpi = phba->cmf_link_byte_count;
	}
	phba->cmf_timer_cnt++;

	if (io_cnt) {
		/* Accumulate latency stats for the minute rollup */
		atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
		atomic64_add(lat, &phba->cgn_latency_evt);
	}
	busy = atomic_xchg(&phba->cmf_busy, 0);
	max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);

	/* Clamp the next interval's byte budget to the link capacity */
	if (mbpi) {
		if (mbpi > phba->cmf_link_byte_count ||
		    phba->cmf_active_mode == LPFC_CFG_MONITOR)
			mbpi = phba->cmf_link_byte_count;

		/* Publish the new budget only when it changed */
		if (mbpi != phba->cmf_max_bytes_per_interval)
			phba->cmf_max_bytes_per_interval = mbpi;
	}

	/* Record a snapshot of this interval in the rx monitor ring.
	 * The head index is marked IN_USE while the entry is being filled.
	 */
	if (phba->rxtable) {
		head = atomic_xchg(&phba->rxtable_idx_head,
				   LPFC_RXMONITOR_TABLE_IN_USE);
		entry = &phba->rxtable[head];
		entry->total_bytes = total;
		entry->rcv_bytes = rcv;
		entry->cmf_busy = busy;
		entry->cmf_info = phba->cmf_active_info;
		if (io_cnt) {
			entry->avg_io_latency = div_u64(lat, io_cnt);
			entry->avg_io_size = div_u64(rcv, io_cnt);
		} else {
			entry->avg_io_latency = 0;
			entry->avg_io_size = 0;
		}
		entry->max_read_cnt = max_read;
		entry->io_cnt = io_cnt;
		entry->max_bytes_per_interval = mbpi;
		if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
			entry->timer_utilization = phba->cmf_last_ts;
		else
			entry->timer_utilization = ms;
		entry->timer_interval = ms;
		phba->cmf_last_ts = 0;

		/* Advance head; push tail forward if the ring is full */
		head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
		tail = atomic_read(&phba->rxtable_idx_tail);
		if (head == tail) {
			tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
			atomic_set(&phba->rxtable_idx_tail, tail);
		}
		atomic_set(&phba->rxtable_idx_head, head);
	}

	if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
		/* In monitor-only mode, just count intervals that would
		 * have exceeded the budget.
		 */
		if (mbpi && total > mbpi)
			atomic_inc(&phba->cgn_driver_evt_cnt);
	}
	phba->rx_block_cnt += div_u64(rcv, 512); /* accumulate 512-byte blocks */

	/* Roll per-minute statistics if a minute has elapsed */
	lpfc_cgn_save_evt_cnt(phba);

	/* If the next full interval would overshoot the minute boundary,
	 * shorten this timer period so the next expiry lands on it, and
	 * scale the byte budget to the shorter period.
	 */
	if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
		       phba->cgn_evt_timestamp)) {
		timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
						  jiffies);
		if (timer_interval <= 0)
			timer_interval = LPFC_CMF_INTERVAL;

		/* The byte budget is per-interval, so prorate it for the
		 * shortened interval.
		 */
		phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
						    timer_interval, 1000);
		if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
			phba->cmf_max_bytes_per_interval =
				phba->cmf_link_byte_count;
	}

	/* Wake any I/O that was blocked waiting for bandwidth */
	if (atomic_xchg(&phba->cmf_bw_wait, 0))
		queue_work(phba->wq, &phba->unblock_request_work);

	/* Interval boundary processing done */
	atomic_set(&phba->cmf_stop_io, 0);

skip:
	hrtimer_forward_now(timer,
			    ktime_set(0, timer_interval * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}
6045
/* Log helper: "Link up"/"Link down" for a configured trunk port __idx,
 * "NA" when the port is not part of the trunk configuration. Expects
 * 'acqe_fc' and 'phba' in the calling scope.
 */
#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Log helper: "YES"/"NO" fault indication for a configured trunk port
 * __idx, "NA" when not configured. Expects 'acqe_fc' and 'port_fault' in
 * the calling scope.
 */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
6054
/**
 * lpfc_update_trunk_link_status - Handle an async FC trunking event.
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async FC link-attention completion queue entry.
 *
 * Decodes the per-port trunk configuration/state/fault bits from the ACQE,
 * updates the driver's link-state and per-trunk-link records, logs the
 * result, and re-initializes congestion signaling if CMF is active.
 */
static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	/* Bitmask of trunk ports reporting a fault, and the fault code */
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;

	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
				phba,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	/* For each port configured into the trunk, record its link state
	 * and fault code (fault only when its bit is set in port_fault).
	 */
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	/* Trunk state change may affect congestion signaling */
	if (phba->cmf_active_mode != LPFC_CFG_OFF)
		lpfc_cmf_signal_init(phba);

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * NOTE(review): 'err > 0xA' presumably guards
				 * the trunk_errmsg[] bounds — confirm the
				 * array covers indices 0..0xA.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}
6124
6125
6126
6127
6128
6129
6130
6131
6132
6133
6134
6135static void
6136lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6137{
6138 struct lpfc_dmabuf *mp;
6139 LPFC_MBOXQ_t *pmb;
6140 MAILBOX_t *mb;
6141 struct lpfc_mbx_read_top *la;
6142 int rc;
6143
6144 if (bf_get(lpfc_trailer_type, acqe_fc) !=
6145 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6146 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6147 "2895 Non FC link Event detected.(%d)\n",
6148 bf_get(lpfc_trailer_type, acqe_fc));
6149 return;
6150 }
6151
6152 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6153 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6154 lpfc_update_trunk_link_status(phba, acqe_fc);
6155 return;
6156 }
6157
6158
6159 phba->sli4_hba.link_state.speed =
6160 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6161 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6162 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6163 phba->sli4_hba.link_state.topology =
6164 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6165 phba->sli4_hba.link_state.status =
6166 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6167 phba->sli4_hba.link_state.type =
6168 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6169 phba->sli4_hba.link_state.number =
6170 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6171 phba->sli4_hba.link_state.fault =
6172 bf_get(lpfc_acqe_link_fault, acqe_fc);
6173
6174 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6175 LPFC_FC_LA_TYPE_LINK_DOWN)
6176 phba->sli4_hba.link_state.logical_speed = 0;
6177 else if (!phba->sli4_hba.conf_trunk)
6178 phba->sli4_hba.link_state.logical_speed =
6179 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6180
6181 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6182 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6183 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6184 "%dMbps Fault:%d\n",
6185 phba->sli4_hba.link_state.speed,
6186 phba->sli4_hba.link_state.topology,
6187 phba->sli4_hba.link_state.status,
6188 phba->sli4_hba.link_state.type,
6189 phba->sli4_hba.link_state.number,
6190 phba->sli4_hba.link_state.logical_speed,
6191 phba->sli4_hba.link_state.fault);
6192 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6193 if (!pmb) {
6194 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6195 "2897 The mboxq allocation failed\n");
6196 return;
6197 }
6198 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6199 if (!mp) {
6200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6201 "2898 The lpfc_dmabuf allocation failed\n");
6202 goto out_free_pmb;
6203 }
6204 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
6205 if (!mp->virt) {
6206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6207 "2899 The mbuf allocation failed\n");
6208 goto out_free_dmabuf;
6209 }
6210
6211
6212 lpfc_els_flush_all_cmd(phba);
6213
6214
6215 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6216
6217
6218 phba->sli.slistat.link_event++;
6219
6220
6221 lpfc_read_topology(phba, pmb, mp);
6222 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6223 pmb->vport = phba->pport;
6224
6225 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6226 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6227
6228 switch (phba->sli4_hba.link_state.status) {
6229 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6230 phba->link_flag |= LS_MDS_LINK_DOWN;
6231 break;
6232 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6233 phba->link_flag |= LS_MDS_LOOPBACK;
6234 break;
6235 default:
6236 break;
6237 }
6238
6239
6240 mb = &pmb->u.mb;
6241 mb->mbxStatus = MBX_SUCCESS;
6242
6243
6244 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6245
6246
6247 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6248 la->eventTag = acqe_fc->event_tag;
6249
6250 if (phba->sli4_hba.link_state.status ==
6251 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6252 bf_set(lpfc_mbx_read_top_att_type, la,
6253 LPFC_FC_LA_TYPE_UNEXP_WWPN);
6254 } else {
6255 bf_set(lpfc_mbx_read_top_att_type, la,
6256 LPFC_FC_LA_TYPE_LINK_DOWN);
6257 }
6258
6259 lpfc_mbx_cmpl_read_topology(phba, pmb);
6260
6261 return;
6262 }
6263
6264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6265 if (rc == MBX_NOT_FINISHED)
6266 goto out_free_dmabuf;
6267 return;
6268
6269out_free_dmabuf:
6270 kfree(mp);
6271out_free_pmb:
6272 mempool_free(pmb, phba->mbox_mem_pool);
6273}
6274
6275
6276
6277
6278
6279
6280
6281
/**
 * lpfc_sli4_async_sli_evt - Process an SLI4 async SLI event.
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * Dispatches on the event type in the ACQE trailer: temperature events are
 * forwarded to the FC transport as vendor events, misconfigured-optics
 * events update the optic state and supported speeds, and congestion
 * signal events update the warn/alarm counters. Unrecognized types are
 * logged and ignored.
 */
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct lpfc_acqe_cgn_signal *cgn_signal;
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int rc, i, cnt;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Type:%d, Event Data: x%08x "
			"x%08x x%08x x%08x\n", evt_type,
			acqe_sli->event_data1, acqe_sli->event_data2,
			acqe_sli->reserved, acqe_sli->trailer);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* get port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		/* Over-temperature: warn and notify the transport */
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		/* Temperature back to normal: notify the transport */
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* Extract the state/operational bits for this port's
		 * link number.
		 */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		/* Propagate the refreshed speeds to every vport's shost */
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
		/* Call FW to obtain active parms */
		lpfc_sli4_cgn_parm_chg_evt(phba);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
		/* Misconfigured WWN. Reports that the SLI Port is configured
		 * to use FA-WWN, but the attached device doesn't support it.
		 */
		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
			     "2699 Misconfigured FA-WWN - Attached device does "
			     "not support FA-WWN\n");
		break;
	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
		/* EEPROM failure. No driver action is required */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			     "2518 EEPROM failure - "
			     "Event Data1: x%08x Event Data2: x%08x\n",
			     acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
		if (phba->cmf_active_mode == LPFC_CFG_OFF)
			break;
		cgn_signal = (struct lpfc_acqe_cgn_signal *)
					&acqe_sli->event_data1;
		phba->cgn_acqe_cnt++;

		cnt = bf_get(lpfc_warn_acqe, cgn_signal);
		atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
		atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);

		/* Only update the per-minute fabric counters when the
		 * corresponding signal type was registered for. An alarm
		 * in the same ACQE takes precedence over warnings.
		 */
		if (cgn_signal->alarm_cnt) {
			if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
				/* Keep track of alarm cnt for cgn_info */
				atomic_add(cgn_signal->alarm_cnt,
					   &phba->cgn_fabric_alarm_cnt);
				/* Keep track of alarm cnt for CMF_SYNC_WQE */
				atomic_add(cgn_signal->alarm_cnt,
					   &phba->cgn_sync_alarm_cnt);
			}
		} else if (cnt) {
			/* signal action needs to be taken */
			if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
			    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
				/* Keep track of warning cnt for cgn_info */
				atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
				/* Keep track of warning cnt for CMF_SYNC_WQE */
				atomic_add(cnt, &phba->cgn_sync_warn_cnt);
			}
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Unrecognized SLI event, type: 0x%x",
				evt_type);
		break;
	}
}
6513
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523
6524static struct lpfc_nodelist *
6525lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6526{
6527 struct lpfc_nodelist *ndlp;
6528 struct Scsi_Host *shost;
6529 struct lpfc_hba *phba;
6530
6531 if (!vport)
6532 return NULL;
6533 phba = vport->phba;
6534 if (!phba)
6535 return NULL;
6536 ndlp = lpfc_findnode_did(vport, Fabric_DID);
6537 if (!ndlp) {
6538
6539 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6540 if (!ndlp)
6541 return 0;
6542
6543 ndlp->nlp_type |= NLP_FABRIC;
6544
6545 lpfc_enqueue_node(vport, ndlp);
6546 }
6547 if ((phba->pport->port_state < LPFC_FLOGI) &&
6548 (phba->pport->port_state != LPFC_VPORT_FAILED))
6549 return NULL;
6550
6551 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6552 && (vport->port_state != LPFC_VPORT_FAILED))
6553 return NULL;
6554 shost = lpfc_shost_from_vport(vport);
6555 if (!shost)
6556 return NULL;
6557 lpfc_linkdown_port(vport);
6558 lpfc_cleanup_pending_mbox(vport);
6559 spin_lock_irq(shost->host_lock);
6560 vport->fc_flag |= FC_VPORT_CVL_RCVD;
6561 spin_unlock_irq(shost->host_lock);
6562
6563 return ndlp;
6564}
6565
6566
6567
6568
6569
6570
6571
6572
6573static void
6574lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6575{
6576 struct lpfc_vport **vports;
6577 int i;
6578
6579 vports = lpfc_create_vport_work_array(phba);
6580 if (vports)
6581 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6582 lpfc_sli4_perform_vport_cvl(vports[i]);
6583 lpfc_destroy_vport_work_array(phba, vports);
6584}
6585
6586
6587
6588
6589
6590
6591
6592
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FIP completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FIP events: new/modified
 * FCF records, FCF table full, FCF dead, and Clear Virtual Link (CVL).
 * Depending on driver FCF discovery state it may start an FCF table scan
 * or a fast FCF failover (rediscovery).
 */
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index,
				acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_TRACE_EVENT,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort: fail over by treating this as a
			 * link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL on all
			 * vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR,
				LOG_TRACE_EVENT,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if the vport is marked for deletion.
		 * If we are here first, then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(&ndlp->lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, request the port to rediscover the
			 * entire FCF table for a fast recovery from the
			 * possibility that the current FCF is no longer
			 * valid -- if we are not already in the FCF
			 * failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_TRACE_EVENT,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort: retry on the currently
				 * registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
6836
6837
6838
6839
6840
6841
6842
6843
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * Records the event tag and logs that DCBX asynchronous events are
 * not yet handled by the driver.
 */
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
6853
6854
6855
6856
6857
6858
6859
6860
6861
6862
6863static void
6864lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6865 struct lpfc_acqe_grp5 *acqe_grp5)
6866{
6867 uint16_t prev_ll_spd;
6868
6869 phba->fc_eventTag = acqe_grp5->event_tag;
6870 phba->fcoe_eventtag = acqe_grp5->event_tag;
6871 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
6872 phba->sli4_hba.link_state.logical_speed =
6873 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
6874 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6875 "2789 GRP5 Async Event: Updating logical link speed "
6876 "from %dMbps to %dMbps\n", prev_ll_spd,
6877 phba->sli4_hba.link_state.logical_speed);
6878}
6879
6880
6881
6882
6883
6884
6885
6886
6887static void
6888lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
6889{
6890 if (!phba->cgn_i)
6891 return;
6892 lpfc_init_congestion_stat(phba);
6893}
6894
6895
6896
6897
6898
6899
6900
6901
6902
6903static void
6904lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
6905{
6906 spin_lock_irq(&phba->hbalock);
6907
6908 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
6909 LPFC_CFG_MONITOR)) {
6910 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
6911 "6225 CMF mode param out of range: %d\n",
6912 p_cfg_param->cgn_param_mode);
6913 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
6914 }
6915
6916 spin_unlock_irq(&phba->hbalock);
6917}
6918
6919
6920
6921
6922
6923
6924
6925
6926
6927
6928
6929
6930
6931
/**
 * lpfc_cgn_params_parse - Process a FW congestion parameter change event
 * @phba: pointer to lpfc hba data structure.
 * @p_cgn_param: pointer to FW provided congestion parameter buffer.
 * @len: the size of p_cgn_param in bytes (unused here; validated by caller).
 *
 * Validates the magic number of the FW-provided parameter buffer, then
 * range-checks and copies the parameters into driver storage (and into
 * the registered congestion info buffer, recomputing its CRC).  Finally
 * it drives the CMF mode transition implied by the old vs. new mode
 * (starting/stopping CMF, reissuing EDC, unblocking I/O, etc.).
 */
static void
lpfc_cgn_params_parse(struct lpfc_hba *phba,
		      struct lpfc_cgn_param *p_cgn_param, uint32_t len)
{
	struct lpfc_cgn_info *cp;
	uint32_t crc, oldmode;

	/* Make sure the FW has encoded the correct magic number to
	 * validate the congestion parameter in FW memory.
	 */
	if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
				"4668 FW cgn parm buffer data: "
				"magic 0x%x version %d mode %d "
				"level0 %d level1 %d "
				"level2 %d byte13 %d "
				"byte14 %d byte15 %d "
				"byte11 %d byte12 %d activeMode %d\n",
				p_cgn_param->cgn_param_magic,
				p_cgn_param->cgn_param_version,
				p_cgn_param->cgn_param_mode,
				p_cgn_param->cgn_param_level0,
				p_cgn_param->cgn_param_level1,
				p_cgn_param->cgn_param_level2,
				p_cgn_param->byte13,
				p_cgn_param->byte14,
				p_cgn_param->byte15,
				p_cgn_param->byte11,
				p_cgn_param->byte12,
				phba->cmf_active_mode);

		oldmode = phba->cmf_active_mode;

		/* Any parameters out of range are corrected to defaults
		 * by this routine.  No need to fail.
		 */
		lpfc_cgn_params_val(phba, p_cgn_param);

		/* Parameters are verified, move them into driver storage */
		spin_lock_irq(&phba->hbalock);
		memcpy(&phba->cgn_p, p_cgn_param,
		       sizeof(struct lpfc_cgn_param));

		/* Update parameters in congestion info buffer now */
		if (phba->cgn_i) {
			cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
			cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
			cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
			cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
			cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
			/* Re-seal the buffer with a fresh CRC */
			crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
						  LPFC_CGN_CRC32_SEED);
			cp->cgn_info_crc = cpu_to_le32(crc);
		}
		spin_unlock_irq(&phba->hbalock);

		phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;

		/* Drive the mode transition: old mode -> new mode */
		switch (oldmode) {
		case LPFC_CFG_OFF:
			if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
				/* Turning CMF on */
				lpfc_cmf_start(phba);

				if (phba->link_state >= LPFC_LINK_UP) {
					phba->cgn_reg_fpin =
						phba->cgn_init_reg_fpin;
					phba->cgn_reg_signal =
						phba->cgn_init_reg_signal;
					lpfc_issue_els_edc(phba->pport, 0);
				}
			}
			break;
		case LPFC_CFG_MANAGED:
			switch (phba->cgn_p.cgn_param_mode) {
			case LPFC_CFG_OFF:
				/* Turning CMF off */
				lpfc_cmf_stop(phba);
				if (phba->link_state >= LPFC_LINK_UP)
					lpfc_issue_els_edc(phba->pport, 0);
				break;
			case LPFC_CFG_MONITOR:
				lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
						"4661 Switch from MANAGED to "
						"`MONITOR mode\n");
				phba->cmf_max_bytes_per_interval =
					phba->cmf_link_byte_count;

				/* Resume blocked IO - unblock on workqueue */
				queue_work(phba->wq,
					   &phba->unblock_request_work);
				break;
			}
			break;
		case LPFC_CFG_MONITOR:
			switch (phba->cgn_p.cgn_param_mode) {
			case LPFC_CFG_OFF:
				/* Turning CMF off */
				lpfc_cmf_stop(phba);
				if (phba->link_state >= LPFC_LINK_UP)
					lpfc_issue_els_edc(phba->pport, 0);
				break;
			case LPFC_CFG_MANAGED:
				lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
						"4662 Switch from MONITOR to "
						"MANAGED mode\n");
				lpfc_cmf_signal_init(phba);
				break;
			}
			break;
		}
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
				"4669 FW cgn parm buf wrong magic 0x%x "
				"version %d\n", p_cgn_param->cgn_param_magic,
				p_cgn_param->cgn_param_version);
	}
}
7050
7051
7052
7053
7054
7055
7056
7057
7058
7059
7060
7061
7062
7063
7064int
7065lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7066{
7067 int ret = 0;
7068 struct lpfc_cgn_param *p_cgn_param = NULL;
7069 u32 *pdata = NULL;
7070 u32 len = 0;
7071
7072
7073 len = sizeof(struct lpfc_cgn_param);
7074 pdata = kzalloc(len, GFP_KERNEL);
7075 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7076 pdata, len);
7077
7078
7079
7080
7081 if (!ret) {
7082 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7083 "4670 CGN RD OBJ returns no data\n");
7084 goto rd_obj_err;
7085 } else if (ret < 0) {
7086
7087 goto rd_obj_err;
7088 }
7089
7090 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7091 "6234 READ CGN PARAMS Successful %d\n", len);
7092
7093
7094
7095
7096
7097 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7098 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7099
7100 rd_obj_err:
7101 kfree(pdata);
7102 return ret;
7103}
7104
7105
7106
7107
7108
7109
7110
7111
7112
7113
7114
7115
7116
7117
7118
7119
7120
7121
7122
7123static int
7124lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7125{
7126 int ret = 0;
7127
7128 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7129 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7130 "4664 Cgn Evt when E2E off. Drop event\n");
7131 return -EACCES;
7132 }
7133
7134
7135
7136
7137
7138 ret = lpfc_sli4_cgn_params_read(phba);
7139 if (ret < 0) {
7140 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7141 "4667 Error reading Cgn Params (%d)\n",
7142 ret);
7143 } else if (!ret) {
7144 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7145 "4673 CGN Event empty object.\n");
7146 }
7147 return ret;
7148}
7149
7150
7151
7152
7153
7154
7155
7156
/**
 * lpfc_sli4_async_event_proc - Process the SLI4 asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * Worker-thread routine that drains the deferred asynchronous event
 * queue (sp_asynce_work_queue) and dispatches each entry to the
 * handler matching its trailer code.  The list lock is dropped while
 * each event is processed and reacquired before the next dequeue.
 */
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the async event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		/* Drop the list lock while processing this event */
		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
				       iflags);

		/* Process the asynchronous event by trailer code */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		case LPFC_TRAILER_CODE_CMSTAT:
			lpfc_sli4_async_cmstat_evt(phba);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}

		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}
7216
7217
7218
7219
7220
7221
7222
7223
7224void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7225{
7226 int rc;
7227
7228 spin_lock_irq(&phba->hbalock);
7229
7230 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7231
7232 phba->fcf.failover_rec.flag = 0;
7233
7234 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7235 spin_unlock_irq(&phba->hbalock);
7236
7237
7238 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7239 "2777 Start post-quiescent FCF table scan\n");
7240 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7241 if (rc)
7242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7243 "2747 Issue FCF scan read FCF mailbox "
7244 "command failed 0x%x\n", rc);
7245}
7246
7247
7248
7249
7250
7251
7252
7253
7254
7255
7256
7257int
7258lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7259{
7260 int rc;
7261
7262
7263 phba->pci_dev_grp = dev_grp;
7264
7265
7266 if (dev_grp == LPFC_PCI_DEV_OC)
7267 phba->sli_rev = LPFC_SLI_REV4;
7268
7269
7270 rc = lpfc_init_api_table_setup(phba, dev_grp);
7271 if (rc)
7272 return -ENODEV;
7273
7274 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7275 if (rc)
7276 return -ENODEV;
7277
7278 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7279 if (rc)
7280 return -ENODEV;
7281
7282 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7283 if (rc)
7284 return -ENODEV;
7285
7286 return 0;
7287}
7288
7289
7290
7291
7292
7293
7294
7295
7296
7297static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7298{
7299 switch (intr_mode) {
7300 case 0:
7301 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7302 "0470 Enable INTx interrupt mode.\n");
7303 break;
7304 case 1:
7305 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7306 "0481 Enabled MSI interrupt mode.\n");
7307 break;
7308 case 2:
7309 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7310 "0480 Enabled MSI-X interrupt mode.\n");
7311 break;
7312 default:
7313 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7314 "0482 Illegal interrupt mode.\n");
7315 break;
7316 }
7317 return;
7318}
7319
7320
7321
7322
7323
7324
7325
7326
7327
7328
7329
7330
7331static int
7332lpfc_enable_pci_dev(struct lpfc_hba *phba)
7333{
7334 struct pci_dev *pdev;
7335
7336
7337 if (!phba->pcidev)
7338 goto out_error;
7339 else
7340 pdev = phba->pcidev;
7341
7342 if (pci_enable_device_mem(pdev))
7343 goto out_error;
7344
7345 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7346 goto out_disable_device;
7347
7348 pci_set_master(pdev);
7349 pci_try_set_mwi(pdev);
7350 pci_save_state(pdev);
7351
7352
7353 if (pci_is_pcie(pdev))
7354 pdev->needs_freset = 1;
7355
7356 return 0;
7357
7358out_disable_device:
7359 pci_disable_device(pdev);
7360out_error:
7361 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7362 "1401 Failed to enable pci device\n");
7363 return -ENODEV;
7364}
7365
7366
7367
7368
7369
7370
7371
7372
7373static void
7374lpfc_disable_pci_dev(struct lpfc_hba *phba)
7375{
7376 struct pci_dev *pdev;
7377
7378
7379 if (!phba->pcidev)
7380 return;
7381 else
7382 pdev = phba->pcidev;
7383
7384 pci_release_mem_regions(pdev);
7385 pci_disable_device(pdev);
7386
7387 return;
7388}
7389
7390
7391
7392
7393
7394
7395
7396
7397
7398
/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device: it brings the HBA
 * offline, performs a board restart, brings it back online, and
 * unblocks management I/O.  If HBA resets are disabled by config, it
 * only marks the HBA errored.
 */
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}

	/* If not LPFC_SLI_ACTIVE, don't wait for mailbox; flush all IO */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	} else {
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_sli_flush_io_rings(phba);
	}
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}
7420
7421
7422
7423
7424
7425
7426
7427
7428
7429
7430
7431uint16_t
7432lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7433{
7434 struct pci_dev *pdev = phba->pcidev;
7435 uint16_t nr_virtfn;
7436 int pos;
7437
7438 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7439 if (pos == 0)
7440 return 0;
7441
7442 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7443 return nr_virtfn;
7444}
7445
7446
7447
7448
7449
7450
7451
7452
7453
7454
7455
7456
7457int
7458lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7459{
7460 struct pci_dev *pdev = phba->pcidev;
7461 uint16_t max_nr_vfn;
7462 int rc;
7463
7464 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7465 if (nr_vfn > max_nr_vfn) {
7466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7467 "3057 Requested vfs (%d) greater than "
7468 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7469 return -EINVAL;
7470 }
7471
7472 rc = pci_enable_sriov(pdev, nr_vfn);
7473 if (rc) {
7474 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7475 "2806 Failed to enable sriov on this device "
7476 "with vfn number nr_vf:%d, rc:%d\n",
7477 nr_vfn, rc);
7478 } else
7479 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7480 "2807 Successful enable sriov on this device "
7481 "with vfn number nr_vf:%d\n", nr_vfn);
7482 return rc;
7483}
7484
7485static void
7486lpfc_unblock_requests_work(struct work_struct *work)
7487{
7488 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7489 unblock_request_work);
7490
7491 lpfc_unblock_requests(phba);
7492}
7493
7494
7495
7496
7497
7498
7499
7500
7501
7502
7503
7504
/**
 * lpfc_setup_driver_resource_phase1 - Phase1 driver internal resource setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the driver internal resources common to all HBA device types:
 * locks, lists, wait queues, timers, and deferred work items.
 *
 * Return codes
 *   0 - successful (currently the only outcome)
 */
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	atomic_set(&phba->dbg_log_idx, 0);
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize port_list spinlock and list */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
			  lpfc_idle_stat_delay_work);
	INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
	return 0;
}
7572
7573
7574
7575
7576
7577
7578
7579
7580
7581
7582
7583
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the driver internal resources specific to supporting an SLI-3
 * HBA: timers, host-attention mask, ring structures, scatter-gather
 * sizing, memory pools, and optional SR-IOV virtual functions.
 *
 * Return codes
 *   0       - successful
 *   -ENODEV - phase-1 setup failed
 *   -ENOMEM - memory/pool allocation failed
 */
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block in
		 * the IO), just allocate enough BDEs to accommodate the
		 * max and limit lpfc_sg_seg_cnt to minimize the risk of
		 * running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O holds the FCP cmnd, the
		 * FCP rsp, and a BDE for each plus 2 reserved entries.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	phba->lpfc_sg_dma_buf_pool =
		dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;

	phba->lpfc_cmd_rsp_buf_pool =
		dma_pool_create("lpfc_cmd_rsp_buf_pool",
				&phba->pcidev->dev,
				sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp),
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
}
7729
7730
7731
7732
7733
7734
7735
7736
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases the driver internal resources set up for an SLI-3 HBA.
 */
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated with this HBA */
	lpfc_mem_free_all(phba);
}
7745
7746
7747
7748
7749
7750
7751
7752
7753
7754
7755
7756
7757static int
7758lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7759{
7760 LPFC_MBOXQ_t *mboxq;
7761 MAILBOX_t *mb;
7762 int rc, i, max_buf_size;
7763 int longs;
7764 int extra;
7765 uint64_t wwn;
7766 u32 if_type;
7767 u32 if_fam;
7768
7769 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7770 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7771 phba->sli4_hba.curr_disp_cpu = 0;
7772
7773
7774 lpfc_get_cfgparam(phba);
7775
7776
7777 rc = lpfc_setup_driver_resource_phase1(phba);
7778 if (rc)
7779 return -ENODEV;
7780
7781
7782 rc = lpfc_sli4_post_status_check(phba);
7783 if (rc)
7784 return -ENODEV;
7785
7786
7787
7788
7789 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7790
7791
7792
7793
7794
7795 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7796
7797
7798 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7799
7800
7801 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7802 phba->cmf_timer.function = lpfc_cmf_timer;
7803
7804
7805
7806
7807
7808 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7809 sizeof(struct lpfc_mbox_ext_buf_ctx));
7810 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7811
7812 phba->max_vpi = LPFC_MAX_VPI;
7813
7814
7815 phba->max_vports = 0;
7816
7817
7818 phba->valid_vlan = 0;
7819 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7820 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7821 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7822
7823
7824
7825
7826
7827
7828
7829
7830 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7831 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7832 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7833
7834
7835 if (lpfc_is_vmid_enabled(phba))
7836 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
7837
7838
7839
7840
7841
7842 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
7843 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
7844
7845 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7846
7847 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
7848 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7849 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
7850 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
7851 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
7852 }
7853
7854
7855 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
7856 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
7857 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
7858 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
7859
7860
7861
7862
7863
7864
7865 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
7866
7867 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
7868
7869 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
7870
7871 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
7872
7873 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
7874
7875
7876 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
7877 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
7878 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
7879 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
7880
7881
7882
7883
7884 INIT_LIST_HEAD(&phba->sli.mboxq);
7885 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
7886
7887
7888 phba->sli4_hba.lnk_info.optic_state = 0xff;
7889
7890
7891 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
7892 if (rc)
7893 return -ENOMEM;
7894
7895
7896 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
7897 LPFC_SLI_INTF_IF_TYPE_2) {
7898 rc = lpfc_pci_function_reset(phba);
7899 if (unlikely(rc)) {
7900 rc = -ENODEV;
7901 goto out_free_mem;
7902 }
7903 phba->temp_sensor_support = 1;
7904 }
7905
7906
7907 rc = lpfc_create_bootstrap_mbox(phba);
7908 if (unlikely(rc))
7909 goto out_free_mem;
7910
7911
7912 rc = lpfc_setup_endian_order(phba);
7913 if (unlikely(rc))
7914 goto out_free_bsmbx;
7915
7916
7917 rc = lpfc_sli4_read_config(phba);
7918 if (unlikely(rc))
7919 goto out_free_bsmbx;
7920 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
7921 if (unlikely(rc))
7922 goto out_free_bsmbx;
7923
7924
7925 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7926 LPFC_SLI_INTF_IF_TYPE_0) {
7927 rc = lpfc_pci_function_reset(phba);
7928 if (unlikely(rc))
7929 goto out_free_bsmbx;
7930 }
7931
7932 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7933 GFP_KERNEL);
7934 if (!mboxq) {
7935 rc = -ENOMEM;
7936 goto out_free_bsmbx;
7937 }
7938
7939
7940 phba->nvmet_support = 0;
7941 if (lpfc_enable_nvmet_cnt) {
7942
7943
7944 lpfc_read_nv(phba, mboxq);
7945 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7946 if (rc != MBX_SUCCESS) {
7947 lpfc_printf_log(phba, KERN_ERR,
7948 LOG_TRACE_EVENT,
7949 "6016 Mailbox failed , mbxCmd x%x "
7950 "READ_NV, mbxStatus x%x\n",
7951 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7952 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
7953 mempool_free(mboxq, phba->mbox_mem_pool);
7954 rc = -EIO;
7955 goto out_free_bsmbx;
7956 }
7957 mb = &mboxq->u.mb;
7958 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
7959 sizeof(uint64_t));
7960 wwn = cpu_to_be64(wwn);
7961 phba->sli4_hba.wwnn.u.name = wwn;
7962 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
7963 sizeof(uint64_t));
7964
7965 wwn = cpu_to_be64(wwn);
7966 phba->sli4_hba.wwpn.u.name = wwn;
7967
7968
7969 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
7970 if (wwn == lpfc_enable_nvmet[i]) {
7971#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
7972 if (lpfc_nvmet_mem_alloc(phba))
7973 break;
7974
7975 phba->nvmet_support = 1;
7976
7977 lpfc_printf_log(phba, KERN_ERR,
7978 LOG_TRACE_EVENT,
7979 "6017 NVME Target %016llx\n",
7980 wwn);
7981#else
7982 lpfc_printf_log(phba, KERN_ERR,
7983 LOG_TRACE_EVENT,
7984 "6021 Can't enable NVME Target."
7985 " NVME_TARGET_FC infrastructure"
7986 " is not in kernel\n");
7987#endif
7988
7989 phba->cfg_xri_rebalancing = 0;
7990 if (phba->irq_chann_mode == NHT_MODE) {
7991 phba->cfg_irq_chann =
7992 phba->sli4_hba.num_present_cpu;
7993 phba->cfg_hdw_queue =
7994 phba->sli4_hba.num_present_cpu;
7995 phba->irq_chann_mode = NORMAL_MODE;
7996 }
7997 break;
7998 }
7999 }
8000 }
8001
8002 lpfc_nvme_mod_param_dep(phba);
8003
8004
8005
8006
8007
8008
8009 rc = lpfc_get_sli4_parameters(phba, mboxq);
8010 if (rc) {
8011 if_type = bf_get(lpfc_sli_intf_if_type,
8012 &phba->sli4_hba.sli_intf);
8013 if_fam = bf_get(lpfc_sli_intf_sli_family,
8014 &phba->sli4_hba.sli_intf);
8015 if (phba->sli4_hba.extents_in_use &&
8016 phba->sli4_hba.rpi_hdrs_in_use) {
8017 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8018 "2999 Unsupported SLI4 Parameters "
8019 "Extents and RPI headers enabled.\n");
8020 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8021 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8022 mempool_free(mboxq, phba->mbox_mem_pool);
8023 rc = -EIO;
8024 goto out_free_bsmbx;
8025 }
8026 }
8027 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8028 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8029 mempool_free(mboxq, phba->mbox_mem_pool);
8030 rc = -EIO;
8031 goto out_free_bsmbx;
8032 }
8033 }
8034
8035
8036
8037
8038
8039 extra = 2;
8040 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8041 extra++;
8042
8043
8044
8045
8046
8047
8048 max_buf_size = (2 * SLI4_PAGE_SIZE);
8049
8050
8051
8052
8053
8054 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8055
8056
8057
8058
8059
8060
8061
8062
8063
8064
8065
8066 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8067 sizeof(struct fcp_rsp) + max_buf_size;
8068
8069
8070 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8071
8072
8073
8074
8075
8076 if (phba->cfg_enable_bg &&
8077 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8078 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8079 else
8080 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8081
8082 } else {
8083
8084
8085
8086
8087
8088 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8089 sizeof(struct fcp_rsp) +
8090 ((phba->cfg_sg_seg_cnt + extra) *
8091 sizeof(struct sli4_sge));
8092
8093
8094 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8095 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8096
8097
8098
8099
8100
8101 }
8102
8103 if (phba->cfg_xpsgl && !phba->nvmet_support)
8104 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8105 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8106 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8107 else
8108 phba->cfg_sg_dma_buf_size =
8109 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8110
8111 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8112 sizeof(struct sli4_sge);
8113
8114
8115 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8116 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8117 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8118 "6300 Reducing NVME sg segment "
8119 "cnt to %d\n",
8120 LPFC_MAX_NVME_SEG_CNT);
8121 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8122 } else
8123 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8124 }
8125
8126 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8127 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8128 "total:%d scsi:%d nvme:%d\n",
8129 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8130 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8131 phba->cfg_nvme_seg_cnt);
8132
8133 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8134 i = phba->cfg_sg_dma_buf_size;
8135 else
8136 i = SLI4_PAGE_SIZE;
8137
8138 phba->lpfc_sg_dma_buf_pool =
8139 dma_pool_create("lpfc_sg_dma_buf_pool",
8140 &phba->pcidev->dev,
8141 phba->cfg_sg_dma_buf_size,
8142 i, 0);
8143 if (!phba->lpfc_sg_dma_buf_pool)
8144 goto out_free_bsmbx;
8145
8146 phba->lpfc_cmd_rsp_buf_pool =
8147 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8148 &phba->pcidev->dev,
8149 sizeof(struct fcp_cmnd) +
8150 sizeof(struct fcp_rsp),
8151 i, 0);
8152 if (!phba->lpfc_cmd_rsp_buf_pool)
8153 goto out_free_sg_dma_buf;
8154
8155 mempool_free(mboxq, phba->mbox_mem_pool);
8156
8157
8158 lpfc_sli4_oas_verify(phba);
8159
8160
8161 lpfc_sli4_ras_init(phba);
8162
8163
8164 rc = lpfc_sli4_queue_verify(phba);
8165 if (rc)
8166 goto out_free_cmd_rsp_buf;
8167
8168
8169 rc = lpfc_sli4_cq_event_pool_create(phba);
8170 if (rc)
8171 goto out_free_cmd_rsp_buf;
8172
8173
8174 lpfc_init_sgl_list(phba);
8175
8176
8177 rc = lpfc_init_active_sgl_array(phba);
8178 if (rc) {
8179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8180 "1430 Failed to initialize sgl list.\n");
8181 goto out_destroy_cq_event_pool;
8182 }
8183 rc = lpfc_sli4_init_rpi_hdrs(phba);
8184 if (rc) {
8185 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8186 "1432 Failed to initialize rpi headers.\n");
8187 goto out_free_active_sgl;
8188 }
8189
8190
8191 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8192 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8193 GFP_KERNEL);
8194 if (!phba->fcf.fcf_rr_bmask) {
8195 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8196 "2759 Failed allocate memory for FCF round "
8197 "robin failover bmask\n");
8198 rc = -ENOMEM;
8199 goto out_remove_rpi_hdrs;
8200 }
8201
8202 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8203 sizeof(struct lpfc_hba_eq_hdl),
8204 GFP_KERNEL);
8205 if (!phba->sli4_hba.hba_eq_hdl) {
8206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8207 "2572 Failed allocate memory for "
8208 "fast-path per-EQ handle array\n");
8209 rc = -ENOMEM;
8210 goto out_free_fcf_rr_bmask;
8211 }
8212
8213 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8214 sizeof(struct lpfc_vector_map_info),
8215 GFP_KERNEL);
8216 if (!phba->sli4_hba.cpu_map) {
8217 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8218 "3327 Failed allocate memory for msi-x "
8219 "interrupt vector mapping\n");
8220 rc = -ENOMEM;
8221 goto out_free_hba_eq_hdl;
8222 }
8223
8224 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8225 if (!phba->sli4_hba.eq_info) {
8226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8227 "3321 Failed allocation for per_cpu stats\n");
8228 rc = -ENOMEM;
8229 goto out_free_hba_cpu_map;
8230 }
8231
8232 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8233 sizeof(*phba->sli4_hba.idle_stat),
8234 GFP_KERNEL);
8235 if (!phba->sli4_hba.idle_stat) {
8236 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8237 "3390 Failed allocation for idle_stat\n");
8238 rc = -ENOMEM;
8239 goto out_free_hba_eq_info;
8240 }
8241
8242#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8243 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8244 if (!phba->sli4_hba.c_stat) {
8245 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8246 "3332 Failed allocating per cpu hdwq stats\n");
8247 rc = -ENOMEM;
8248 goto out_free_hba_idle_stat;
8249 }
8250#endif
8251
8252 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8253 if (!phba->cmf_stat) {
8254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8255 "3331 Failed allocating per cpu cgn stats\n");
8256 rc = -ENOMEM;
8257 goto out_free_hba_hdwq_info;
8258 }
8259
8260
8261
8262
8263
8264 if (phba->cfg_sriov_nr_virtfn > 0) {
8265 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8266 phba->cfg_sriov_nr_virtfn);
8267 if (rc) {
8268 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8269 "3020 Requested number of SR-IOV "
8270 "virtual functions (%d) is not "
8271 "supported\n",
8272 phba->cfg_sriov_nr_virtfn);
8273 phba->cfg_sriov_nr_virtfn = 0;
8274 }
8275 }
8276
8277 return 0;
8278
8279out_free_hba_hdwq_info:
8280#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8281 free_percpu(phba->sli4_hba.c_stat);
8282out_free_hba_idle_stat:
8283#endif
8284 kfree(phba->sli4_hba.idle_stat);
8285out_free_hba_eq_info:
8286 free_percpu(phba->sli4_hba.eq_info);
8287out_free_hba_cpu_map:
8288 kfree(phba->sli4_hba.cpu_map);
8289out_free_hba_eq_hdl:
8290 kfree(phba->sli4_hba.hba_eq_hdl);
8291out_free_fcf_rr_bmask:
8292 kfree(phba->fcf.fcf_rr_bmask);
8293out_remove_rpi_hdrs:
8294 lpfc_sli4_remove_rpi_hdrs(phba);
8295out_free_active_sgl:
8296 lpfc_free_active_sgl(phba);
8297out_destroy_cq_event_pool:
8298 lpfc_sli4_cq_event_pool_destroy(phba);
8299out_free_cmd_rsp_buf:
8300 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8301 phba->lpfc_cmd_rsp_buf_pool = NULL;
8302out_free_sg_dma_buf:
8303 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8304 phba->lpfc_sg_dma_buf_pool = NULL;
8305out_free_bsmbx:
8306 lpfc_destroy_bootstrap_mbox(phba);
8307out_free_mem:
8308 lpfc_mem_free(phba);
8309 return rc;
8310}
8311
8312
8313
8314
8315
8316
8317
8318
/**
 * lpfc_sli4_driver_resource_unset - Unset driver internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * Release the SLI4-specific driver resources allocated during setup,
 * roughly in the reverse order of their allocation: per-CPU statistics,
 * CPU map, EQ handle array, rpi headers/resources, FCF round-robin
 * bitmask, sgl lists, CQ event pool, resource identifiers, bootstrap
 * mailbox, memory pools, and finally the FCF connection record list.
 */
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Free per-CPU interrupt/congestion statistics. */
	free_percpu(phba->sli4_hba.eq_info);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	free_percpu(phba->sli4_hba.c_stat);
#endif
	free_percpu(phba->cmf_stat);
	kfree(phba->sli4_hba.idle_stat);

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_possible_cpu = 0;
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;
	cpumask_clear(&phba->sli4_hba.irq_aff_mask);

	/* Free memory allocated for fast-path per-EQ handle array */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers and remaining rpi resources. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the active sgl array and the ELS/NVMET sgl lists. */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Release outstanding CQ events, then destroy the event pool. */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bootstrap mailbox region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI layer memory pools. */
	lpfc_mem_free_all(phba);

	/* Free the current FCF connection record table. */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}
8375
8376
8377
8378
8379
8380
8381
8382
8383
8384
8385
8386int
8387lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8388{
8389 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8390 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8391 phba->lpfc_selective_reset = lpfc_selective_reset;
8392 switch (dev_grp) {
8393 case LPFC_PCI_DEV_LP:
8394 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8395 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8396 phba->lpfc_stop_port = lpfc_stop_port_s3;
8397 break;
8398 case LPFC_PCI_DEV_OC:
8399 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8400 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8401 phba->lpfc_stop_port = lpfc_stop_port_s4;
8402 break;
8403 default:
8404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8405 "1431 Invalid HBA PCI-device group: 0x%x\n",
8406 dev_grp);
8407 return -ENODEV;
8408 }
8409 return 0;
8410}
8411
8412
8413
8414
8415
8416
8417
8418
8419
8420
8421
8422
8423static int
8424lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8425{
8426 int error;
8427
8428
8429 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8430 "lpfc_worker_%d", phba->brd_no);
8431 if (IS_ERR(phba->worker_thread)) {
8432 error = PTR_ERR(phba->worker_thread);
8433 return error;
8434 }
8435
8436 return 0;
8437}
8438
8439
8440
8441
8442
8443
8444
8445
8446
8447static void
8448lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8449{
8450 if (phba->wq) {
8451 flush_workqueue(phba->wq);
8452 destroy_workqueue(phba->wq);
8453 phba->wq = NULL;
8454 }
8455
8456
8457 if (phba->worker_thread)
8458 kthread_stop(phba->worker_thread);
8459}
8460
8461
8462
8463
8464
8465
8466
8467void
8468lpfc_free_iocb_list(struct lpfc_hba *phba)
8469{
8470 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8471
8472 spin_lock_irq(&phba->hbalock);
8473 list_for_each_entry_safe(iocbq_entry, iocbq_next,
8474 &phba->lpfc_iocb_list, list) {
8475 list_del(&iocbq_entry->list);
8476 kfree(iocbq_entry);
8477 phba->total_iocbq_bufs--;
8478 }
8479 spin_unlock_irq(&phba->hbalock);
8480
8481 return;
8482}
8483
8484
8485
8486
8487
8488
8489
8490
8491
8492
8493
8494
8495
/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbs
 *
 * Allocate @iocb_count iocb objects, assign each a driver iotag, and
 * place them on phba->lpfc_iocb_list. On any failure the entire list
 * built so far is torn down via lpfc_free_iocb_list().
 *
 * Returns 0 on success, -ENOMEM on allocation or iotag failure.
 */
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, iocb_count);
			goto out_free_iocbq;
		}

		/* An iotag of zero means the tag allocation failed. */
		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		/* List insertion requires the hba lock. */
		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
8537
8538
8539
8540
8541
8542
8543
8544
8545void
8546lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8547{
8548 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8549
8550 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8551 list_del(&sglq_entry->list);
8552 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8553 kfree(sglq_entry);
8554 }
8555}
8556
8557
8558
8559
8560
8561
8562
8563static void
8564lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8565{
8566 LIST_HEAD(sglq_list);
8567
8568
8569 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8570 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8571 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8572
8573
8574 lpfc_free_sgl_list(phba, &sglq_list);
8575}
8576
8577
8578
8579
8580
8581
8582
/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * Detach the nvmet sgl list from the adapter, free every entry
 * (returning each DMA buffer via lpfc_nvmet_buf_free()), and reset
 * the nvmet xri count.
 */
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from the driver list. Lock order here
	 * is hbalock (outer, irq-disabling) then sgl_list_lock (inner).
	 */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the detached sgl list off the locks. */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}
8609
8610
8611
8612
8613
8614
8615
8616
8617static int
8618lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8619{
8620 int size;
8621 size = sizeof(struct lpfc_sglq *);
8622 size *= phba->sli4_hba.max_cfg_param.max_xri;
8623
8624 phba->sli4_hba.lpfc_sglq_active_list =
8625 kzalloc(size, GFP_KERNEL);
8626 if (!phba->sli4_hba.lpfc_sglq_active_list)
8627 return -ENOMEM;
8628 return 0;
8629}
8630
8631
8632
8633
8634
8635
8636
8637
8638
/**
 * lpfc_free_active_sgl - Free the array tracking active XRI sglqs.
 * @phba: pointer to lpfc hba data structure.
 *
 * Release the sglq pointer array allocated by
 * lpfc_init_active_sgl_array(). Safe if the array was never allocated
 * (kfree(NULL) is a no-op).
 */
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}
8644
8645
8646
8647
8648
8649
8650
8651
8652
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * Initialize the driver's sgl bookkeeping: empty list heads for the
 * ELS/NVMET sgl and abort lists, and zeroed XRI counts. No sgls are
 * actually allocated here.
 */
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize the sglq lists per host. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl bookkeeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* io xri-buffer bookkeeping */
	phba->sli4_hba.io_xri_cnt = 0;
}
8668
8669
8670
8671
8672
8673
8674
8675
8676
8677
8678
8679
8680
8681
8682
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * Initialize the rpi header list and, when the port requires driver-
 * supplied rpi headers (rpi_hdrs_in_use), create and post the first
 * header page.
 *
 * Return codes
 *	0 - successful (or rpi headers not in use)
 *	-EIO - resource extents in use, incompatible with rpi headers
 *	-ENODEV - rpi header creation/post failed
 */
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	/* rpi headers and resource extents are mutually exclusive. */
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}
8705
8706
8707
8708
8709
8710
8711
8712
8713
8714
8715
8716
8717
8718
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate a DMA-coherent region of LPFC_HDR_TEMPLATE_SIZE bytes to
 * hold rpi context headers, record it in an lpfc_rpi_hdr, and add that
 * to the adapter's rpi header list. The physical address must be
 * aligned to LPFC_HDR_TEMPLATE_SIZE or the region is rejected.
 *
 * Returns the new lpfc_rpi_hdr on success, or NULL when rpi headers
 * are not in use, extents are in use, the rpi limit has been reached,
 * or an allocation fails.
 */
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting rpi headers is not
	 * required, and extents preclude header-based rpi management.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI for this header block. next_rpi is
	 * read under the lock; the header is published later, also under
	 * the lock.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached the full RPI range: nothing left to allocate. */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * Allocate the dma buffer container first, then the coherent
	 * header region itself.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	/* The port requires template-size alignment of the region. */
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
8798
8799
8800
8801
8802
8803
8804
8805
8806
8807
8808void
8809lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8810{
8811 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8812
8813 if (!phba->sli4_hba.rpi_hdrs_in_use)
8814 goto exit;
8815
8816 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8817 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8818 list_del(&rpi_hdr->list);
8819 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8820 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8821 kfree(rpi_hdr->dmabuf);
8822 kfree(rpi_hdr);
8823 }
8824 exit:
8825
8826 phba->sli4_hba.next_rpi = 0;
8827}
8828
8829
8830
8831
8832
8833
8834
8835
8836
8837
8838
8839
8840
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * Allocate and minimally initialize an lpfc_hba: bind the PCI device,
 * assign a board number from the driver instance allocator, set the
 * error-attention poll interval, and initialize CT event bookkeeping.
 *
 * Return codes
 *      pointer to phba - successful
 *      NULL - error
 */
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to the pci device */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	/* NOTE(review): if brd_no is declared unsigned in lpfc.h this
	 * "< 0" check can never fire — confirm brd_no's type.
	 */
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}
8869
8870
8871
8872
8873
8874
8875
8876
8877static void
8878lpfc_hba_free(struct lpfc_hba *phba)
8879{
8880 if (phba->sli_rev == LPFC_SLI_REV4)
8881 kfree(phba->sli4_hba.hdwq);
8882
8883
8884 idr_remove(&lpfc_hba_index, phba->brd_no);
8885
8886
8887 kfree(phba->sli.sli3_ring);
8888 phba->sli.sli3_ring = NULL;
8889
8890 kfree(phba);
8891 return;
8892}
8893
8894
8895
8896
8897
8898
8899
8900
8901
8902
8903
8904
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * Set default FC timeout values, create the physical port (vport) and
 * its SCSI host, record it as driver private data, and configure the
 * FDMI/SmartSAN attribute masks.
 *
 * Return codes
 *      0 - successful
 *      -ENODEV - port creation failed
 */
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;

	/* Initialize HBA FC timeout defaults */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* NVMET: only NVME FC4 type on the physical port. */
		phba->targetport = NULL;
		phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
				"6076 NVME Target Found\n");
	}

	lpfc_debugfs_initialize(vport);

	/* Put reference to SCSI host in driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * FDMI is allowed by default; enable the version-2 attribute
	 * masks when SmartSAN or FDMI support is configured.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}
8954
8955
8956
8957
8958
8959
8960
8961
/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * Destroy the physical port (and thereby its SCSI host) created by
 * lpfc_create_shost().
 */
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}
8972
8973
8974
8975
8976
8977
8978
8979
8980
/**
 * lpfc_setup_bg - Setup BlockGuard (T10-DIF) with the SCSI layer.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the SCSI host to register protection capabilities on.
 *
 * Sanitize the configured protection mask and guard type down to the
 * values this driver supports, then register the result with the SCSI
 * midlayer. If sanitization leaves nothing enabled, log the rejected
 * original values and register nothing.
 */
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		/* Keep the originals so changes can be reported. */
		old_mask = phba->cfg_prot_mask;
		old_guard = phba->cfg_prot_guard;

		/* Only allow supported values */
		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
			SHOST_DIX_TYPE0_PROTECTION |
			SHOST_DIX_TYPE1_PROTECTION);
		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
			SHOST_DIX_GUARD_CRC);

		/* DIX Type 1 alone implies DIF Type 1 as well. */
		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
			if ((old_mask != phba->cfg_prot_mask) ||
				(old_guard != phba->cfg_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					phba->cfg_prot_mask,
					phba->cfg_prot_guard);

			scsi_host_set_prot(shost, phba->cfg_prot_mask);
			scsi_host_set_guard(shost, phba->cfg_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}
}
9024
9025
9026
9027
9028
9029
9030
9031
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * After device initialization: fetch the model name/description, adjust
 * the SCSI host queue depth, initialize host attributes, start the poll
 * timer when FCP ring interrupts are disabled, and post the adapter
 * arrival event to the FC transport.
 */
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue. The headroom of 10 is kept for
	 * driver-internal commands — TODO confirm against callers.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
		       "0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
9067
9068
9069
9070
9071
9072
9073
9074
9075
9076
9077
9078
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * Set up the SLI-3 device's PCI memory space: configure the DMA mask
 * (64-bit with 32-bit fallback), ioremap BAR0 (SLIM) and BAR2 (control
 * registers), allocate the SLIM2 and HBQ coherent DMA regions, carve the
 * HBQ region into per-queue buffers, and record the register addresses.
 * Resources are unwound in reverse order on failure.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 */
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size: prefer 64-bit, fall back to 32. */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = -ENODEV;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
					       &phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	/* Carve the SLIM2 region into its mailbox/PCB/IOCB sections. */
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	/* Partition the HBQ region into per-queue buffer lists. */
	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	/* Record the SLIM and control register addresses. */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
9175
9176
9177
9178
9179
9180
9181
9182
9183static void
9184lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9185{
9186 struct pci_dev *pdev;
9187
9188
9189 if (!phba->pcidev)
9190 return;
9191 else
9192 pdev = phba->pcidev;
9193
9194
9195 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9196 phba->hbqslimp.virt, phba->hbqslimp.phys);
9197 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9198 phba->slim2p.virt, phba->slim2p.phys);
9199
9200
9201 iounmap(phba->ctrl_regs_memmap_p);
9202 iounmap(phba->slim_memmap_p);
9203
9204 return;
9205}
9206
9207
9208
9209
9210
9211
9212
9213
9214
9215
/**
 * lpfc_sli4_post_status_check - Perform the SLI4 device POST status check.
 * @phba: pointer to lpfc hba data structure.
 *
 * Poll the port status register (SEMaphore) for up to ~30 seconds
 * (3000 x 10ms) waiting for LPFC_POST_STAGE_PORT_READY. On failure the
 * semaphore contents are logged; on success the device information is
 * logged and the per-interface-type unrecoverable-error registers are
 * checked.
 *
 * Returns 0 when POST succeeded with no unrecoverable error, -ENODEV
 * otherwise.
 */
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if any errors are found. The
		 * register layout differs by interface type.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			/* Unmasked UE bits indicate a fatal POST error. */
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks.  The port status should be clean,
			 * unless an error-needing-reinit (RN) is flagged.
			 */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
9345
9346
9347
9348
9349
9350
9351
9352
9353
9354static void
9355lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9356{
9357 switch (if_type) {
9358 case LPFC_SLI_INTF_IF_TYPE_0:
9359 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9360 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9361 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9362 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9363 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9364 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9365 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9366 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9367 phba->sli4_hba.SLIINTFregaddr =
9368 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9369 break;
9370 case LPFC_SLI_INTF_IF_TYPE_2:
9371 phba->sli4_hba.u.if_type2.EQDregaddr =
9372 phba->sli4_hba.conf_regs_memmap_p +
9373 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9374 phba->sli4_hba.u.if_type2.ERR1regaddr =
9375 phba->sli4_hba.conf_regs_memmap_p +
9376 LPFC_CTL_PORT_ER1_OFFSET;
9377 phba->sli4_hba.u.if_type2.ERR2regaddr =
9378 phba->sli4_hba.conf_regs_memmap_p +
9379 LPFC_CTL_PORT_ER2_OFFSET;
9380 phba->sli4_hba.u.if_type2.CTRLregaddr =
9381 phba->sli4_hba.conf_regs_memmap_p +
9382 LPFC_CTL_PORT_CTL_OFFSET;
9383 phba->sli4_hba.u.if_type2.STATUSregaddr =
9384 phba->sli4_hba.conf_regs_memmap_p +
9385 LPFC_CTL_PORT_STA_OFFSET;
9386 phba->sli4_hba.SLIINTFregaddr =
9387 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9388 phba->sli4_hba.PSMPHRregaddr =
9389 phba->sli4_hba.conf_regs_memmap_p +
9390 LPFC_CTL_PORT_SEM_OFFSET;
9391 phba->sli4_hba.RQDBregaddr =
9392 phba->sli4_hba.conf_regs_memmap_p +
9393 LPFC_ULP0_RQ_DOORBELL;
9394 phba->sli4_hba.WQDBregaddr =
9395 phba->sli4_hba.conf_regs_memmap_p +
9396 LPFC_ULP0_WQ_DOORBELL;
9397 phba->sli4_hba.CQDBregaddr =
9398 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9399 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9400 phba->sli4_hba.MQDBregaddr =
9401 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9402 phba->sli4_hba.BMBXregaddr =
9403 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9404 break;
9405 case LPFC_SLI_INTF_IF_TYPE_6:
9406 phba->sli4_hba.u.if_type2.EQDregaddr =
9407 phba->sli4_hba.conf_regs_memmap_p +
9408 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9409 phba->sli4_hba.u.if_type2.ERR1regaddr =
9410 phba->sli4_hba.conf_regs_memmap_p +
9411 LPFC_CTL_PORT_ER1_OFFSET;
9412 phba->sli4_hba.u.if_type2.ERR2regaddr =
9413 phba->sli4_hba.conf_regs_memmap_p +
9414 LPFC_CTL_PORT_ER2_OFFSET;
9415 phba->sli4_hba.u.if_type2.CTRLregaddr =
9416 phba->sli4_hba.conf_regs_memmap_p +
9417 LPFC_CTL_PORT_CTL_OFFSET;
9418 phba->sli4_hba.u.if_type2.STATUSregaddr =
9419 phba->sli4_hba.conf_regs_memmap_p +
9420 LPFC_CTL_PORT_STA_OFFSET;
9421 phba->sli4_hba.PSMPHRregaddr =
9422 phba->sli4_hba.conf_regs_memmap_p +
9423 LPFC_CTL_PORT_SEM_OFFSET;
9424 phba->sli4_hba.BMBXregaddr =
9425 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9426 break;
9427 case LPFC_SLI_INTF_IF_TYPE_1:
9428 default:
9429 dev_printk(KERN_ERR, &phba->pcidev->dev,
9430 "FATAL - unsupported SLI4 interface type - %d\n",
9431 if_type);
9432 break;
9433 }
9434}
9435
9436
9437
9438
9439
9440
9441
9442
9443static void
9444lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9445{
9446 switch (if_type) {
9447 case LPFC_SLI_INTF_IF_TYPE_0:
9448 phba->sli4_hba.PSMPHRregaddr =
9449 phba->sli4_hba.ctrl_regs_memmap_p +
9450 LPFC_SLIPORT_IF0_SMPHR;
9451 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9452 LPFC_HST_ISR0;
9453 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9454 LPFC_HST_IMR0;
9455 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9456 LPFC_HST_ISCR0;
9457 break;
9458 case LPFC_SLI_INTF_IF_TYPE_6:
9459 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9460 LPFC_IF6_RQ_DOORBELL;
9461 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9462 LPFC_IF6_WQ_DOORBELL;
9463 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9464 LPFC_IF6_CQ_DOORBELL;
9465 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9466 LPFC_IF6_EQ_DOORBELL;
9467 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9468 LPFC_IF6_MQ_DOORBELL;
9469 break;
9470 case LPFC_SLI_INTF_IF_TYPE_2:
9471 case LPFC_SLI_INTF_IF_TYPE_1:
9472 default:
9473 dev_err(&phba->pcidev->dev,
9474 "FATAL - unsupported SLI4 interface type - %d\n",
9475 if_type);
9476 break;
9477 }
9478}
9479
9480
9481
9482
9483
9484
9485
9486
9487
9488
9489
9490static int
9491lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9492{
9493 if (vf > LPFC_VIR_FUNC_MAX)
9494 return -ENODEV;
9495
9496 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9497 vf * LPFC_VFR_PAGE_SIZE +
9498 LPFC_ULP0_RQ_DOORBELL);
9499 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9500 vf * LPFC_VFR_PAGE_SIZE +
9501 LPFC_ULP0_WQ_DOORBELL);
9502 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9503 vf * LPFC_VFR_PAGE_SIZE +
9504 LPFC_EQCQ_DOORBELL);
9505 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9506 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9507 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9508 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9509 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9510 return 0;
9511}
9512
9513
9514
9515
9516
9517
9518
9519
9520
9521
9522
9523
9524
9525
9526
9527
/**
 * lpfc_create_bootstrap_mbox - Set up the bootstrap mailbox (BMBX) buffer.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates the DMA-coherent memory that backs the SLI4 bootstrap mailbox
 * and precomputes the two doorbell address words (addr_hi/addr_lo) that
 * are later written to the BMBX register to hand the mailbox physical
 * address to the port.
 *
 * Return: 0 on success, -ENOMEM if either allocation fails.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Over-allocate by (LPFC_ALIGN_16_BYTE - 1) bytes so the mailbox
	 * can be realigned to a 16-byte boundary below regardless of where
	 * dma_alloc_coherent happens to place it.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Save the raw allocation, then derive the 16-byte-aligned virtual
	 * (avirt) and physical (aphys) addresses actually used for the
	 * bootstrap mailbox.  PTR_ALIGN/ALIGN round up, which is what the
	 * extra bytes allocated above allow for.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Split the aligned physical address into the two 30-bit fragments
	 * the BMBX doorbell expects: addr_hi carries physical address bits
	 * 63:34, addr_lo carries bits 33:4 (bits 3:0 are zero thanks to
	 * the 16-byte alignment).  Each fragment is shifted left by 2 and
	 * OR'ed with the hi/lo BIT1 marker so the port can distinguish
	 * which half is being written.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
9587
9588
9589
9590
9591
9592
9593
9594
9595
9596
9597
9598
9599static void
9600lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9601{
9602 dma_free_coherent(&phba->pcidev->dev,
9603 phba->sli4_hba.bmbx.bmbx_size,
9604 phba->sli4_hba.bmbx.dmabuf->virt,
9605 phba->sli4_hba.bmbx.dmabuf->phys);
9606
9607 kfree(phba->sli4_hba.bmbx.dmabuf);
9608 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9609}
9610
/*
 * Human-readable names for the FLAGS_TOPOLOGY_MODE_* values, indexed by
 * phba->cfg_topology; used only in log messages below.
 */
static const char * const lpfc_topo_to_str[] = {
	"Loop then P2P",
	"Loopback",
	"P2P Only",
	"Unsupported",
	"Loop Only",
	"Unsupported",
	"P2P then Loop",
};

/* Encodings of the READ_CONFIG pt (persistent topology) field */
#define	LINK_FLAGS_DEF	0x0
#define	LINK_FLAGS_P2P	0x1
#define	LINK_FLAGS_LOOP	0x2
9624
9625
9626
9627
9628
9629
9630
9631
9632
9633
/**
 * lpfc_map_topology - Apply firmware persistent-topology to cfg_topology.
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to the READ_CONFIG mailbox response.
 *
 * Reads the persistent-topology fields (ptv, tf, pt) returned by
 * READ_CONFIG.  If the firmware reports a valid persistent topology
 * (ptv set), overrides the driver's cfg_topology from it and sets
 * HBA_PERSISTENT_TOPO; otherwise the module-parameter value is kept.
 * G6-family / if_type 6 parts use a different tf/pt encoding than
 * earlier ASICs.
 **/
static void
lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
{
	u8 ptv, tf, pt;

	/* ptv: persistent-topology valid; tf/pt: topology selector bits */
	ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
	tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
	pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
			ptv, tf, pt);
	if (!ptv) {
		/* FW gave no persistent topology: keep the driver value */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2019 FW does not support persistent topology "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
		return;
	}

	/* Tentatively trust the FW value; may be cleared again below */
	phba->hba_flag |= HBA_PERSISTENT_TOPO;

	/* G6 family / if_type 6 parts interpret tf/pt differently */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_6) ||
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_FAMILY_G6)) {
		if (!tf) {
			/* pt selects loop vs point-to-point directly */
			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
					? FLAGS_TOPOLOGY_MODE_LOOP
					: FLAGS_TOPOLOGY_MODE_PT_PT);
		} else {
			/* tf set is not valid here: fall back to driver
			 * parameter (flag cleared -> "2021" log below)
			 */
			phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
		}
	} else {
		if (tf) {
			/* tf set: failover topology; pt picks which mode
			 * is tried first (presumably "topology failover"
			 * per the bit name -- TODO confirm in SLI4 spec)
			 */
			phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
					      FLAGS_TOPOLOGY_MODE_LOOP_PT);
		} else {
			phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
					? FLAGS_TOPOLOGY_MODE_PT_PT
					: FLAGS_TOPOLOGY_MODE_LOOP);
		}
	}
	if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2020 Using persistent topology value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2021 Invalid topology values from FW "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	}
}
9690
9691
9692
9693
9694
9695
9696
9697
9698
9699
9700
9701
9702
9703
9704
/**
 * lpfc_sli4_read_config - Get SLI4 port resource configuration.
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a READ_CONFIG mailbox command (polled) and caches the returned
 * resource limits in @phba: XRI/VPI/RPI/VFI/FCFI counts and bases, queue
 * (EQ/CQ/WQ/RQ) counts, link type/number, BB-credit support, congestion
 * signal/FPIN capability and persistent topology.  Reduces the driver's
 * cfg_irq_chann/cfg_hdw_queue when firmware queue resources are scarcer
 * than the configured values, records any factory-forced link speed, and
 * for if_type >= 2 issues GET_FUNCTION_CONFIG to learn this function's
 * PF/VF numbers.
 *
 * Return: 0 on success, -ENOMEM on mailbox allocation failure, -EIO if
 * the READ_CONFIG mailbox command fails.  GET_FUNCTION_CONFIG failures
 * are logged but do not change the return value.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type, qmin;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2012 Mailbox failed , mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		/* If the link descriptor is valid, cache link type/number */
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		/* BB credit recovery support: save the default params word */
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		phba->sli4_hba.conf_trunk =
			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		/* Limit XRIs in a kdump kernel to keep memory use small */
		if (is_kdump_kernel() &&
		    phba->sli4_hba.max_cfg_param.max_xri > 512)
			phba->sli4_hba.max_cfg_param.max_xri = 512;
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* Cap VPIs at the driver's vport limit */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		/* One VPI is consumed by the physical port itself */
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;

		/*
		 * Establish congestion-management defaults (FPIN on both
		 * warn+alarm, no E2E signals), then upgrade to signal-based
		 * congestion notification below if the FW advertises wcs/acs
		 * and the module parameter requests it.
		 */
		phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
		phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;

		if (lpfc_use_cgn_signal) {
			if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
				/* Warning signals supported: use signal for
				 * warnings, FPIN only for alarms
				 */
				phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
				phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
			}
			if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
				/* Alarm signals are only usable together
				 * with warning signals; otherwise revert
				 * to the pure-FPIN defaults.
				 */
				if (phba->cgn_reg_signal !=
				    EDC_CG_SIG_WARN_ONLY) {
					/* acs without wcs: treat as invalid */
					phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
					phba->cgn_reg_signal =
						EDC_CG_SIG_NOTSUPPORTED;
				} else {
					phba->cgn_reg_signal =
						EDC_CG_SIG_WARN_ALARM;
					phba->cgn_reg_fpin =
						LPFC_CGN_FPIN_NONE;
				}
			}
		}

		/* Remember the initial registration values for later use */
		phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
		phba->cgn_init_reg_signal = phba->cgn_reg_signal;

		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
				phba->cgn_reg_signal, phba->cgn_reg_fpin);

		lpfc_map_topology(phba, rd_config);
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq,
				phba->lmt);

		/*
		 * qmin = the smallest of the FW's WQ/CQ/EQ limits; the
		 * driver cannot use more IRQ channels or hardware queues
		 * than that.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;

		/*
		 * Reserve 4 queue slots for the slow path before handing
		 * the rest to the fast path.  NOTE(review): presumably
		 * these are MBOX, ELS and NVME-LS plus one spare -- confirm
		 * against the queue-create code.
		 */
		qmin -= 4;

		/* Shrink the user-configured channel counts to fit */
		if ((phba->cfg_irq_chann > qmin) ||
		    (phba->cfg_hdw_queue > qmin)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2005 Reducing Queues - "
					"FW resource limitation: "
					"WQ %d CQ %d EQ %d: min %d: "
					"IRQ %d HDWQ %d\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->sli4_hba.max_cfg_param.max_eq,
					qmin, phba->cfg_irq_chann,
					phba->cfg_hdw_queue);

			if (phba->cfg_irq_chann > qmin)
				phba->cfg_irq_chann = qmin;
			if (phba->cfg_hdw_queue > qmin)
				phba->cfg_hdw_queue = qmin;
		}
	}

	if (rc)
		goto read_cfg_out;

	/* Honor a factory-configured (forced) link speed, if any */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case 0xffff:
				/* 0xffff encodes "auto" */
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri: leave room for ELS */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	/* GET_FUNCTION_CONFIG is only supported on if_type >= 2 */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* Reuse pmb for the GET_FUNCTION_CONFIG mailbox command */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		/* Logged only; rc (READ_CONFIG result) is still returned */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* Walk the resource descriptors for the FC/FCoE entry */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	/* V0 descriptors report a reserved length; substitute the real one */
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
10043
10044
10045
10046
10047
10048
10049
10050
10051
10052
10053
10054
10055
10056
10057static int
10058lpfc_setup_endian_order(struct lpfc_hba *phba)
10059{
10060 LPFC_MBOXQ_t *mboxq;
10061 uint32_t if_type, rc = 0;
10062 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10063 HOST_ENDIAN_HIGH_WORD1};
10064
10065 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10066 switch (if_type) {
10067 case LPFC_SLI_INTF_IF_TYPE_0:
10068 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10069 GFP_KERNEL);
10070 if (!mboxq) {
10071 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10072 "0492 Unable to allocate memory for "
10073 "issuing SLI_CONFIG_SPECIAL mailbox "
10074 "command\n");
10075 return -ENOMEM;
10076 }
10077
10078
10079
10080
10081
10082 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10083 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10084 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10085 if (rc != MBX_SUCCESS) {
10086 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10087 "0493 SLI_CONFIG_SPECIAL mailbox "
10088 "failed with status x%x\n",
10089 rc);
10090 rc = -EIO;
10091 }
10092 mempool_free(mboxq, phba->mbox_mem_pool);
10093 break;
10094 case LPFC_SLI_INTF_IF_TYPE_6:
10095 case LPFC_SLI_INTF_IF_TYPE_2:
10096 case LPFC_SLI_INTF_IF_TYPE_1:
10097 default:
10098 break;
10099 }
10100 return rc;
10101}
10102
10103
10104
10105
10106
10107
10108
10109
10110
10111
10112
10113
10114
10115
10116static int
10117lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10118{
10119
10120
10121
10122
10123
10124 if (phba->nvmet_support) {
10125 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10126 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10127 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10128 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10129 }
10130
10131 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10132 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10133 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10134 phba->cfg_nvmet_mrq);
10135
10136
10137 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10138 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10139
10140
10141 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10142 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10143 return 0;
10144}
10145
10146static int
10147lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10148{
10149 struct lpfc_queue *qdesc;
10150 u32 wqesize;
10151 int cpu;
10152
10153 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10154
10155 if (phba->enab_exp_wqcq_pages)
10156
10157 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10158 phba->sli4_hba.cq_esize,
10159 LPFC_CQE_EXP_COUNT, cpu);
10160
10161 else
10162 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10163 phba->sli4_hba.cq_esize,
10164 phba->sli4_hba.cq_ecount, cpu);
10165 if (!qdesc) {
10166 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10167 "0499 Failed allocate fast-path IO CQ (%d)\n",
10168 idx);
10169 return 1;
10170 }
10171 qdesc->qe_valid = 1;
10172 qdesc->hdwq = idx;
10173 qdesc->chann = cpu;
10174 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10175
10176
10177 if (phba->enab_exp_wqcq_pages) {
10178
10179 wqesize = (phba->fcp_embed_io) ?
10180 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10181 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10182 wqesize,
10183 LPFC_WQE_EXP_COUNT, cpu);
10184 } else
10185 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10186 phba->sli4_hba.wq_esize,
10187 phba->sli4_hba.wq_ecount, cpu);
10188
10189 if (!qdesc) {
10190 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10191 "0503 Failed allocate fast-path IO WQ (%d)\n",
10192 idx);
10193 return 1;
10194 }
10195 qdesc->hdwq = idx;
10196 qdesc->chann = cpu;
10197 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10198 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10199 return 0;
10200}
10201
10202
10203
10204
10205
10206
10207
10208
10209
10210
10211
10212
10213
10214
10215
10216int
10217lpfc_sli4_queue_create(struct lpfc_hba *phba)
10218{
10219 struct lpfc_queue *qdesc;
10220 int idx, cpu, eqcpu;
10221 struct lpfc_sli4_hdw_queue *qp;
10222 struct lpfc_vector_map_info *cpup;
10223 struct lpfc_vector_map_info *eqcpup;
10224 struct lpfc_eq_intr_info *eqi;
10225
10226
10227
10228
10229
10230 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10231 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10232 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10233 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10234 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10235 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10236 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10237 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10238 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10239 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10240
10241 if (!phba->sli4_hba.hdwq) {
10242 phba->sli4_hba.hdwq = kcalloc(
10243 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10244 GFP_KERNEL);
10245 if (!phba->sli4_hba.hdwq) {
10246 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10247 "6427 Failed allocate memory for "
10248 "fast-path Hardware Queue array\n");
10249 goto out_error;
10250 }
10251
10252 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10253 qp = &phba->sli4_hba.hdwq[idx];
10254 spin_lock_init(&qp->io_buf_list_get_lock);
10255 spin_lock_init(&qp->io_buf_list_put_lock);
10256 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10257 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10258 qp->get_io_bufs = 0;
10259 qp->put_io_bufs = 0;
10260 qp->total_io_bufs = 0;
10261 spin_lock_init(&qp->abts_io_buf_list_lock);
10262 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10263 qp->abts_scsi_io_bufs = 0;
10264 qp->abts_nvme_io_bufs = 0;
10265 INIT_LIST_HEAD(&qp->sgl_list);
10266 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10267 spin_lock_init(&qp->hdwq_lock);
10268 }
10269 }
10270
10271 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10272 if (phba->nvmet_support) {
10273 phba->sli4_hba.nvmet_cqset = kcalloc(
10274 phba->cfg_nvmet_mrq,
10275 sizeof(struct lpfc_queue *),
10276 GFP_KERNEL);
10277 if (!phba->sli4_hba.nvmet_cqset) {
10278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10279 "3121 Fail allocate memory for "
10280 "fast-path CQ set array\n");
10281 goto out_error;
10282 }
10283 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10284 phba->cfg_nvmet_mrq,
10285 sizeof(struct lpfc_queue *),
10286 GFP_KERNEL);
10287 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10289 "3122 Fail allocate memory for "
10290 "fast-path RQ set hdr array\n");
10291 goto out_error;
10292 }
10293 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10294 phba->cfg_nvmet_mrq,
10295 sizeof(struct lpfc_queue *),
10296 GFP_KERNEL);
10297 if (!phba->sli4_hba.nvmet_mrq_data) {
10298 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10299 "3124 Fail allocate memory for "
10300 "fast-path RQ set data array\n");
10301 goto out_error;
10302 }
10303 }
10304 }
10305
10306 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10307
10308
10309 for_each_present_cpu(cpu) {
10310
10311
10312
10313
10314 cpup = &phba->sli4_hba.cpu_map[cpu];
10315 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10316 continue;
10317
10318
10319 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10320
10321
10322 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10323 phba->sli4_hba.eq_esize,
10324 phba->sli4_hba.eq_ecount, cpu);
10325 if (!qdesc) {
10326 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10327 "0497 Failed allocate EQ (%d)\n",
10328 cpup->hdwq);
10329 goto out_error;
10330 }
10331 qdesc->qe_valid = 1;
10332 qdesc->hdwq = cpup->hdwq;
10333 qdesc->chann = cpu;
10334 qdesc->last_cpu = qdesc->chann;
10335
10336
10337 qp->hba_eq = qdesc;
10338
10339 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10340 list_add(&qdesc->cpu_list, &eqi->list);
10341 }
10342
10343
10344
10345
10346 for_each_present_cpu(cpu) {
10347 cpup = &phba->sli4_hba.cpu_map[cpu];
10348
10349
10350 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10351 continue;
10352
10353
10354 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10355 if (qp->hba_eq)
10356 continue;
10357
10358
10359 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10360 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10361 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10362 }
10363
10364
10365 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10366 if (lpfc_alloc_io_wq_cq(phba, idx))
10367 goto out_error;
10368 }
10369
10370 if (phba->nvmet_support) {
10371 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10372 cpu = lpfc_find_cpu_handle(phba, idx,
10373 LPFC_FIND_BY_HDWQ);
10374 qdesc = lpfc_sli4_queue_alloc(phba,
10375 LPFC_DEFAULT_PAGE_SIZE,
10376 phba->sli4_hba.cq_esize,
10377 phba->sli4_hba.cq_ecount,
10378 cpu);
10379 if (!qdesc) {
10380 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10381 "3142 Failed allocate NVME "
10382 "CQ Set (%d)\n", idx);
10383 goto out_error;
10384 }
10385 qdesc->qe_valid = 1;
10386 qdesc->hdwq = idx;
10387 qdesc->chann = cpu;
10388 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10389 }
10390 }
10391
10392
10393
10394
10395
10396 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10397
10398 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10399 phba->sli4_hba.cq_esize,
10400 phba->sli4_hba.cq_ecount, cpu);
10401 if (!qdesc) {
10402 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10403 "0500 Failed allocate slow-path mailbox CQ\n");
10404 goto out_error;
10405 }
10406 qdesc->qe_valid = 1;
10407 phba->sli4_hba.mbx_cq = qdesc;
10408
10409
10410 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10411 phba->sli4_hba.cq_esize,
10412 phba->sli4_hba.cq_ecount, cpu);
10413 if (!qdesc) {
10414 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10415 "0501 Failed allocate slow-path ELS CQ\n");
10416 goto out_error;
10417 }
10418 qdesc->qe_valid = 1;
10419 qdesc->chann = cpu;
10420 phba->sli4_hba.els_cq = qdesc;
10421
10422
10423
10424
10425
10426
10427
10428
10429 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10430 phba->sli4_hba.mq_esize,
10431 phba->sli4_hba.mq_ecount, cpu);
10432 if (!qdesc) {
10433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10434 "0505 Failed allocate slow-path MQ\n");
10435 goto out_error;
10436 }
10437 qdesc->chann = cpu;
10438 phba->sli4_hba.mbx_wq = qdesc;
10439
10440
10441
10442
10443
10444
10445 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10446 phba->sli4_hba.wq_esize,
10447 phba->sli4_hba.wq_ecount, cpu);
10448 if (!qdesc) {
10449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10450 "0504 Failed allocate slow-path ELS WQ\n");
10451 goto out_error;
10452 }
10453 qdesc->chann = cpu;
10454 phba->sli4_hba.els_wq = qdesc;
10455 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10456
10457 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10458
10459 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10460 phba->sli4_hba.cq_esize,
10461 phba->sli4_hba.cq_ecount, cpu);
10462 if (!qdesc) {
10463 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10464 "6079 Failed allocate NVME LS CQ\n");
10465 goto out_error;
10466 }
10467 qdesc->chann = cpu;
10468 qdesc->qe_valid = 1;
10469 phba->sli4_hba.nvmels_cq = qdesc;
10470
10471
10472 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10473 phba->sli4_hba.wq_esize,
10474 phba->sli4_hba.wq_ecount, cpu);
10475 if (!qdesc) {
10476 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10477 "6080 Failed allocate NVME LS WQ\n");
10478 goto out_error;
10479 }
10480 qdesc->chann = cpu;
10481 phba->sli4_hba.nvmels_wq = qdesc;
10482 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10483 }
10484
10485
10486
10487
10488
10489
10490 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10491 phba->sli4_hba.rq_esize,
10492 phba->sli4_hba.rq_ecount, cpu);
10493 if (!qdesc) {
10494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10495 "0506 Failed allocate receive HRQ\n");
10496 goto out_error;
10497 }
10498 phba->sli4_hba.hdr_rq = qdesc;
10499
10500
10501 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10502 phba->sli4_hba.rq_esize,
10503 phba->sli4_hba.rq_ecount, cpu);
10504 if (!qdesc) {
10505 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10506 "0507 Failed allocate receive DRQ\n");
10507 goto out_error;
10508 }
10509 phba->sli4_hba.dat_rq = qdesc;
10510
10511 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10512 phba->nvmet_support) {
10513 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10514 cpu = lpfc_find_cpu_handle(phba, idx,
10515 LPFC_FIND_BY_HDWQ);
10516
10517 qdesc = lpfc_sli4_queue_alloc(phba,
10518 LPFC_DEFAULT_PAGE_SIZE,
10519 phba->sli4_hba.rq_esize,
10520 LPFC_NVMET_RQE_DEF_COUNT,
10521 cpu);
10522 if (!qdesc) {
10523 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10524 "3146 Failed allocate "
10525 "receive HRQ\n");
10526 goto out_error;
10527 }
10528 qdesc->hdwq = idx;
10529 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10530
10531
10532 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10533 GFP_KERNEL,
10534 cpu_to_node(cpu));
10535 if (qdesc->rqbp == NULL) {
10536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10537 "6131 Failed allocate "
10538 "Header RQBP\n");
10539 goto out_error;
10540 }
10541
10542
10543 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10544
10545
10546 qdesc = lpfc_sli4_queue_alloc(phba,
10547 LPFC_DEFAULT_PAGE_SIZE,
10548 phba->sli4_hba.rq_esize,
10549 LPFC_NVMET_RQE_DEF_COUNT,
10550 cpu);
10551 if (!qdesc) {
10552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10553 "3156 Failed allocate "
10554 "receive DRQ\n");
10555 goto out_error;
10556 }
10557 qdesc->hdwq = idx;
10558 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10559 }
10560 }
10561
10562
10563 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10564 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10565 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10566 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10567 }
10568 }
10569
10570
10571 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10572 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10573 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10574 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10575 }
10576 }
10577
10578 return 0;
10579
10580out_error:
10581 lpfc_sli4_queue_destroy(phba);
10582 return -ENOMEM;
10583}
10584
10585static inline void
10586__lpfc_sli4_release_queue(struct lpfc_queue **qp)
10587{
10588 if (*qp != NULL) {
10589 lpfc_sli4_queue_free(*qp);
10590 *qp = NULL;
10591 }
10592}
10593
10594static inline void
10595lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10596{
10597 int idx;
10598
10599 if (*qs == NULL)
10600 return;
10601
10602 for (idx = 0; idx < max; idx++)
10603 __lpfc_sli4_release_queue(&(*qs)[idx]);
10604
10605 kfree(*qs);
10606 *qs = NULL;
10607}
10608
10609static inline void
10610lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10611{
10612 struct lpfc_sli4_hdw_queue *hdwq;
10613 struct lpfc_queue *eq;
10614 uint32_t idx;
10615
10616 hdwq = phba->sli4_hba.hdwq;
10617
10618
10619 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10620
10621 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10622 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10623 hdwq[idx].hba_eq = NULL;
10624 hdwq[idx].io_cq = NULL;
10625 hdwq[idx].io_wq = NULL;
10626 if (phba->cfg_xpsgl && !phba->nvmet_support)
10627 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10628 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10629 }
10630
10631 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10632
10633 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10634 lpfc_sli4_queue_free(eq);
10635 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10636 }
10637}
10638
10639
10640
10641
10642
10643
10644
10645
10646
10647
10648
10649
10650
/**
 * lpfc_sli4_queue_destroy - Release all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the host memory of every queue allocated for SLI4 operation:
 * the per-hdwq IO CQs/WQs and EQs, the NVMET queue sets (target mode),
 * the slow-path MBX/ELS/NVME-LS WQ/CQ pairs and the unsolicited
 * receive queue pair.  Only host memory is released here; destroying
 * the queues on the port is lpfc_sli4_queue_unset()'s job.
 */
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	/*
	 * Gate against concurrent queue walkers: advertise that a free is
	 * in progress (FREE_INIT), then wait for any path that raised
	 * FREE_WAIT to drain before tearing the queue memory down.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		/* Drop the lock while sleeping so the waiter can clear it */
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli4_cleanup_poll_list(phba);

	/* Release HBA eqs and the fast-path CQ/WQ pairs */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue pair */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on the WQ tracking list has been freed above */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Done with freeing the queues */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
	spin_unlock_irq(&phba->hbalock);
}
10714
10715int
10716lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10717{
10718 struct lpfc_rqb *rqbp;
10719 struct lpfc_dmabuf *h_buf;
10720 struct rqb_dmabuf *rqb_buffer;
10721
10722 rqbp = rq->rqbp;
10723 while (!list_empty(&rqbp->rqb_buffer_list)) {
10724 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10725 struct lpfc_dmabuf, list);
10726
10727 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10728 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10729 rqbp->buffer_count--;
10730 }
10731 return 1;
10732}
10733
/**
 * lpfc_create_wq_cq - Create a WQ (or MQ) and its parent CQ on the port
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue the new CQ is bound to.
 * @cq: completion queue to create.
 * @wq: work queue to create (treated as an MQ when @qtype is LPFC_MBOX).
 * @cq_map: optional out-parameter; receives the new CQ id (non-MBOX only).
 * @qidx: queue index, used for log messages.
 * @qtype: queue subtype (LPFC_MBOX, LPFC_ELS, LPFC_NVME_LS, LPFC_IO, ...).
 *
 * Creates @cq on @eq first, then @wq on @cq.  For the mailbox path the
 * CQ is created as an MCQ and the "wq" is created with lpfc_mq_create();
 * for every other subtype a WCQ and a real WQ are created and the WQ's
 * ring is cross-linked with the CQ for fast-path completion handling.
 *
 * Return: 0 on success, -ENOMEM when any of the three queues was never
 * allocated, otherwise the nonzero status of the failing create call.
 * On CQ-created/WQ-failed, the CQ is left for the caller to tear down.
 */
static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		  struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
		  int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6085 Fast-path %s (%d) not allocated\n",
				((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* create the Cq first */
	rc = lpfc_cq_create(phba, cq, eq,
			(qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6086 Failed setup of CQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
		return rc;
	}

	if (qtype != LPFC_MBOX) {
		/* Setup cq_map for fast lookup */
		if (cq_map)
			*cq_map = cq->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
			qidx, cq->queue_id, qidx, eq->queue_id);

		/* create the wq */
		rc = lpfc_wq_create(phba, wq, cq, qtype);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		/* Cross-link the WQ's ring with the CQ for completions */
		pring = wq->pring;
		pring->sli.sli4.wqp = (void *)wq;
		cq->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
	} else {
		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0539 Failed setup of slow-path MQ: "
					"rc = 0x%x\n", rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);
	}

	return 0;
}
10804
10805
10806
10807
10808
10809
10810
10811
10812static void
10813lpfc_setup_cq_lookup(struct lpfc_hba *phba)
10814{
10815 struct lpfc_queue *eq, *childq;
10816 int qidx;
10817
10818 memset(phba->sli4_hba.cq_lookup, 0,
10819 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
10820
10821 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10822
10823 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10824 if (!eq)
10825 continue;
10826
10827 list_for_each_entry(childq, &eq->child_list, list) {
10828 if (childq->queue_id > phba->sli4_hba.cq_max)
10829 continue;
10830 if (childq->subtype == LPFC_IO)
10831 phba->sli4_hba.cq_lookup[childq->queue_id] =
10832 childq;
10833 }
10834 }
10835}
10836
10837
10838
10839
10840
10841
10842
10843
10844
10845
10846
10847
10848
10849int
10850lpfc_sli4_queue_setup(struct lpfc_hba *phba)
10851{
10852 uint32_t shdr_status, shdr_add_status;
10853 union lpfc_sli4_cfg_shdr *shdr;
10854 struct lpfc_vector_map_info *cpup;
10855 struct lpfc_sli4_hdw_queue *qp;
10856 LPFC_MBOXQ_t *mboxq;
10857 int qidx, cpu;
10858 uint32_t length, usdelay;
10859 int rc = -ENOMEM;
10860
10861
10862 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10863 if (!mboxq) {
10864 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10865 "3249 Unable to allocate memory for "
10866 "QUERY_FW_CFG mailbox command\n");
10867 return -ENOMEM;
10868 }
10869 length = (sizeof(struct lpfc_mbx_query_fw_config) -
10870 sizeof(struct lpfc_sli4_cfg_mhdr));
10871 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10872 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
10873 length, LPFC_SLI4_MBX_EMBED);
10874
10875 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10876
10877 shdr = (union lpfc_sli4_cfg_shdr *)
10878 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10879 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10880 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10881 if (shdr_status || shdr_add_status || rc) {
10882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10883 "3250 QUERY_FW_CFG mailbox failed with status "
10884 "x%x add_status x%x, mbx status x%x\n",
10885 shdr_status, shdr_add_status, rc);
10886 mempool_free(mboxq, phba->mbox_mem_pool);
10887 rc = -ENXIO;
10888 goto out_error;
10889 }
10890
10891 phba->sli4_hba.fw_func_mode =
10892 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
10893 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
10894 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
10895 phba->sli4_hba.physical_port =
10896 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
10897 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10898 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
10899 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
10900 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
10901
10902 mempool_free(mboxq, phba->mbox_mem_pool);
10903
10904
10905
10906
10907 qp = phba->sli4_hba.hdwq;
10908
10909
10910 if (!qp) {
10911 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10912 "3147 Fast-path EQs not allocated\n");
10913 rc = -ENOMEM;
10914 goto out_error;
10915 }
10916
10917
10918 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10919
10920 for_each_present_cpu(cpu) {
10921 cpup = &phba->sli4_hba.cpu_map[cpu];
10922
10923
10924
10925
10926 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10927 continue;
10928 if (qidx != cpup->eq)
10929 continue;
10930
10931
10932 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
10933 phba->cfg_fcp_imax);
10934 if (rc) {
10935 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10936 "0523 Failed setup of fast-path"
10937 " EQ (%d), rc = 0x%x\n",
10938 cpup->eq, (uint32_t)rc);
10939 goto out_destroy;
10940 }
10941
10942
10943 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
10944 qp[cpup->hdwq].hba_eq;
10945
10946 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10947 "2584 HBA EQ setup: queue[%d]-id=%d\n",
10948 cpup->eq,
10949 qp[cpup->hdwq].hba_eq->queue_id);
10950 }
10951 }
10952
10953
10954 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
10955 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
10956 cpup = &phba->sli4_hba.cpu_map[cpu];
10957
10958
10959 rc = lpfc_create_wq_cq(phba,
10960 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
10961 qp[qidx].io_cq,
10962 qp[qidx].io_wq,
10963 &phba->sli4_hba.hdwq[qidx].io_cq_map,
10964 qidx,
10965 LPFC_IO);
10966 if (rc) {
10967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10968 "0535 Failed to setup fastpath "
10969 "IO WQ/CQ (%d), rc = 0x%x\n",
10970 qidx, (uint32_t)rc);
10971 goto out_destroy;
10972 }
10973 }
10974
10975
10976
10977
10978
10979
10980
10981 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
10982 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10983 "0528 %s not allocated\n",
10984 phba->sli4_hba.mbx_cq ?
10985 "Mailbox WQ" : "Mailbox CQ");
10986 rc = -ENOMEM;
10987 goto out_destroy;
10988 }
10989
10990 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
10991 phba->sli4_hba.mbx_cq,
10992 phba->sli4_hba.mbx_wq,
10993 NULL, 0, LPFC_MBOX);
10994 if (rc) {
10995 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10996 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
10997 (uint32_t)rc);
10998 goto out_destroy;
10999 }
11000 if (phba->nvmet_support) {
11001 if (!phba->sli4_hba.nvmet_cqset) {
11002 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11003 "3165 Fast-path NVME CQ Set "
11004 "array not allocated\n");
11005 rc = -ENOMEM;
11006 goto out_destroy;
11007 }
11008 if (phba->cfg_nvmet_mrq > 1) {
11009 rc = lpfc_cq_create_set(phba,
11010 phba->sli4_hba.nvmet_cqset,
11011 qp,
11012 LPFC_WCQ, LPFC_NVMET);
11013 if (rc) {
11014 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11015 "3164 Failed setup of NVME CQ "
11016 "Set, rc = 0x%x\n",
11017 (uint32_t)rc);
11018 goto out_destroy;
11019 }
11020 } else {
11021
11022 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11023 qp[0].hba_eq,
11024 LPFC_WCQ, LPFC_NVMET);
11025 if (rc) {
11026 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11027 "6089 Failed setup NVMET CQ: "
11028 "rc = 0x%x\n", (uint32_t)rc);
11029 goto out_destroy;
11030 }
11031 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11032
11033 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11034 "6090 NVMET CQ setup: cq-id=%d, "
11035 "parent eq-id=%d\n",
11036 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11037 qp[0].hba_eq->queue_id);
11038 }
11039 }
11040
11041
11042 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11043 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11044 "0530 ELS %s not allocated\n",
11045 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11046 rc = -ENOMEM;
11047 goto out_destroy;
11048 }
11049 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11050 phba->sli4_hba.els_cq,
11051 phba->sli4_hba.els_wq,
11052 NULL, 0, LPFC_ELS);
11053 if (rc) {
11054 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11055 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11056 (uint32_t)rc);
11057 goto out_destroy;
11058 }
11059 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11060 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11061 phba->sli4_hba.els_wq->queue_id,
11062 phba->sli4_hba.els_cq->queue_id);
11063
11064 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11065
11066 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11067 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11068 "6091 LS %s not allocated\n",
11069 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11070 rc = -ENOMEM;
11071 goto out_destroy;
11072 }
11073 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11074 phba->sli4_hba.nvmels_cq,
11075 phba->sli4_hba.nvmels_wq,
11076 NULL, 0, LPFC_NVME_LS);
11077 if (rc) {
11078 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11079 "0526 Failed setup of NVVME LS WQ/CQ: "
11080 "rc = 0x%x\n", (uint32_t)rc);
11081 goto out_destroy;
11082 }
11083
11084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11085 "6096 ELS WQ setup: wq-id=%d, "
11086 "parent cq-id=%d\n",
11087 phba->sli4_hba.nvmels_wq->queue_id,
11088 phba->sli4_hba.nvmels_cq->queue_id);
11089 }
11090
11091
11092
11093
11094 if (phba->nvmet_support) {
11095 if ((!phba->sli4_hba.nvmet_cqset) ||
11096 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11097 (!phba->sli4_hba.nvmet_mrq_data)) {
11098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11099 "6130 MRQ CQ Queues not "
11100 "allocated\n");
11101 rc = -ENOMEM;
11102 goto out_destroy;
11103 }
11104 if (phba->cfg_nvmet_mrq > 1) {
11105 rc = lpfc_mrq_create(phba,
11106 phba->sli4_hba.nvmet_mrq_hdr,
11107 phba->sli4_hba.nvmet_mrq_data,
11108 phba->sli4_hba.nvmet_cqset,
11109 LPFC_NVMET);
11110 if (rc) {
11111 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11112 "6098 Failed setup of NVMET "
11113 "MRQ: rc = 0x%x\n",
11114 (uint32_t)rc);
11115 goto out_destroy;
11116 }
11117
11118 } else {
11119 rc = lpfc_rq_create(phba,
11120 phba->sli4_hba.nvmet_mrq_hdr[0],
11121 phba->sli4_hba.nvmet_mrq_data[0],
11122 phba->sli4_hba.nvmet_cqset[0],
11123 LPFC_NVMET);
11124 if (rc) {
11125 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11126 "6057 Failed setup of NVMET "
11127 "Receive Queue: rc = 0x%x\n",
11128 (uint32_t)rc);
11129 goto out_destroy;
11130 }
11131
11132 lpfc_printf_log(
11133 phba, KERN_INFO, LOG_INIT,
11134 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11135 "dat-rq-id=%d parent cq-id=%d\n",
11136 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11137 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11138 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11139
11140 }
11141 }
11142
11143 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11144 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11145 "0540 Receive Queue not allocated\n");
11146 rc = -ENOMEM;
11147 goto out_destroy;
11148 }
11149
11150 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11151 phba->sli4_hba.els_cq, LPFC_USOL);
11152 if (rc) {
11153 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11154 "0541 Failed setup of Receive Queue: "
11155 "rc = 0x%x\n", (uint32_t)rc);
11156 goto out_destroy;
11157 }
11158
11159 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11160 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11161 "parent cq-id=%d\n",
11162 phba->sli4_hba.hdr_rq->queue_id,
11163 phba->sli4_hba.dat_rq->queue_id,
11164 phba->sli4_hba.els_cq->queue_id);
11165
11166 if (phba->cfg_fcp_imax)
11167 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11168 else
11169 usdelay = 0;
11170
11171 for (qidx = 0; qidx < phba->cfg_irq_chann;
11172 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11173 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11174 usdelay);
11175
11176 if (phba->sli4_hba.cq_max) {
11177 kfree(phba->sli4_hba.cq_lookup);
11178 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11179 sizeof(struct lpfc_queue *), GFP_KERNEL);
11180 if (!phba->sli4_hba.cq_lookup) {
11181 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11182 "0549 Failed setup of CQ Lookup table: "
11183 "size 0x%x\n", phba->sli4_hba.cq_max);
11184 rc = -ENOMEM;
11185 goto out_destroy;
11186 }
11187 lpfc_setup_cq_lookup(phba);
11188 }
11189 return 0;
11190
11191out_destroy:
11192 lpfc_sli4_queue_unset(phba);
11193out_error:
11194 return rc;
11195}
11196
11197
11198
11199
11200
11201
11202
11203
11204
11205
11206
11207
11208
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues on the port
 * @phba: pointer to lpfc hba data structure.
 *
 * Destroys on the port every queue lpfc_sli4_queue_setup() created.
 * Ordering matters: work/receive queues are destroyed before their
 * parent completion queues, and the fast-path CQ/WQ pairs before the
 * event queues they are bound to.  The CQ lookup table is freed last.
 */
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;
	int qidx;

	/* Unset mailbox command work queue */
	if (phba->sli4_hba.mbx_wq)
		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Unset NVME LS work queue */
	if (phba->sli4_hba.nvmels_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);

	/* Unset ELS work queue */
	if (phba->sli4_hba.els_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);

	/* Unset unsolicited receive queue pair */
	if (phba->sli4_hba.hdr_rq)
		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
				phba->sli4_hba.dat_rq);

	/* Unset mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);

	/* Unset ELS complete queue */
	if (phba->sli4_hba.els_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);

	/* Unset NVME LS complete queue */
	if (phba->sli4_hba.nvmels_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);

	if (phba->nvmet_support) {
		/* Unset NVMET MRQ queue pairs */
		if (phba->sli4_hba.nvmet_mrq_hdr) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_rq_destroy(
					phba,
					phba->sli4_hba.nvmet_mrq_hdr[qidx],
					phba->sli4_hba.nvmet_mrq_data[qidx]);
		}

		/* Unset NVMET CQ Set complete queues */
		if (phba->sli4_hba.nvmet_cqset) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_cq_destroy(
					phba, phba->sli4_hba.nvmet_cqset[qidx]);
		}
	}

	/* Unset fast-path SLI4 queues */
	if (phba->sli4_hba.hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			/* Destroy the CQ/WQ of each Hardware Queue */
			qp = &phba->sli4_hba.hdwq[qidx];
			lpfc_wq_destroy(phba, qp->io_wq);
			lpfc_cq_destroy(phba, qp->io_cq);
		}
		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			/* Destroy the EQ for that IRQ vector */
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			lpfc_eq_destroy(phba, eq);
		}
	}

	kfree(phba->sli4_hba.cq_lookup);
	phba->sli4_hba.cq_lookup = NULL;
	phba->sli4_hba.cq_max = 0;
}
11284
11285
11286
11287
11288
11289
11290
11291
11292
11293
11294
11295
11296
11297
11298
11299
11300
11301static int
11302lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11303{
11304 struct lpfc_cq_event *cq_event;
11305 int i;
11306
11307 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11308 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11309 if (!cq_event)
11310 goto out_pool_create_fail;
11311 list_add_tail(&cq_event->list,
11312 &phba->sli4_hba.sp_cqe_event_pool);
11313 }
11314 return 0;
11315
11316out_pool_create_fail:
11317 lpfc_sli4_cq_event_pool_destroy(phba);
11318 return -ENOMEM;
11319}
11320
11321
11322
11323
11324
11325
11326
11327
11328
11329
11330
11331static void
11332lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11333{
11334 struct lpfc_cq_event *cq_event, *next_cq_event;
11335
11336 list_for_each_entry_safe(cq_event, next_cq_event,
11337 &phba->sli4_hba.sp_cqe_event_pool, list) {
11338 list_del(&cq_event->list);
11339 kfree(cq_event);
11340 }
11341}
11342
11343
11344
11345
11346
11347
11348
11349
11350
11351
11352
11353struct lpfc_cq_event *
11354__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11355{
11356 struct lpfc_cq_event *cq_event = NULL;
11357
11358 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11359 struct lpfc_cq_event, list);
11360 return cq_event;
11361}
11362
11363
11364
11365
11366
11367
11368
11369
11370
11371
11372
11373struct lpfc_cq_event *
11374lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11375{
11376 struct lpfc_cq_event *cq_event;
11377 unsigned long iflags;
11378
11379 spin_lock_irqsave(&phba->hbalock, iflags);
11380 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11381 spin_unlock_irqrestore(&phba->hbalock, iflags);
11382 return cq_event;
11383}
11384
11385
11386
11387
11388
11389
11390
11391
11392
/**
 * __lpfc_sli4_cq_event_release - Return a cq event to the free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be released.
 *
 * Lock-free variant: the caller must hold phba->hbalock (see
 * lpfc_sli4_cq_event_release()).
 */
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}
11399
11400
11401
11402
11403
11404
11405
11406
11407
11408void
11409lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11410 struct lpfc_cq_event *cq_event)
11411{
11412 unsigned long iflags;
11413 spin_lock_irqsave(&phba->hbalock, iflags);
11414 __lpfc_sli4_cq_event_release(phba, cq_event);
11415 spin_unlock_irqrestore(&phba->hbalock, iflags);
11416}
11417
11418
11419
11420
11421
11422
11423
11424
/**
 * lpfc_sli4_cq_event_release_all - Return all pending cq events to free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Moves every pending entry from the two slow-path work queues (ELS
 * XRI aborted events and asynchronous events) back to the free event
 * pool.  Each work queue is spliced onto a private list under its own
 * lock first, so the pool lock (hbalock) is never nested inside the
 * work-queue locks.
 */
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cq_event_list);
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from the pending WCQE lists */

	/* Get all the pending ELS XRI aborted events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);

	/* Get all the pending asynchronous events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	/* Return the gathered events to the free pool, one at a time */
	while (!list_empty(&cq_event_list)) {
		list_remove_head(&cq_event_list, cq_event,
				 struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
11452
11453
11454
11455
11456
11457
11458
11459
11460
11461
11462
11463
11464
/**
 * lpfc_pci_function_reset - Reset the PCI function before device init
 * @phba: pointer to lpfc hba data structure.
 *
 * For if_type 0 the reset is requested with the SLI_FUNCTION_RESET
 * mailbox command.  For if_type 2/6 it is driven through the SLIPORT
 * control register: the status register is polled for RDY (up to
 * 1500 * 20ms = ~30s), INIT_PORT is written, and RDY is polled again.
 * if_type 1 and unknown types are a no-op.
 *
 * Return: 0 on success, -ENOMEM on mailbox allocation failure, -ENXIO
 * on mailbox error, -ENODEV when the port never becomes ready (or a
 * register read fails).
 */
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk;
	uint32_t port_reset = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
wait:
		/*
		 * Poll the Port Status Register and wait for RDY for
		 * up to 30 seconds (1500 iterations * 20ms).  If the
		 * port doesn't go RDY in that time, fail the port.
		 */
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}

		if (!port_reset) {
			/*
			 * Reset the port now: write INIT_PORT to the
			 * SLIPORT control register, then loop back to
			 * poll for RDY a second time.
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush the write with a config-space read */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			/* Reset Needed still set after reset: give up */
			rc = -ENODEV;
			goto out;
		}
		break;

	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3317 HBA not functional: IP Reset Failed "
				"try: echo fw_reset > board_mode\n");
		rc = -ENODEV;
	}

	return rc;
}
11584
11585
11586
11587
11588
11589
11590
11591
11592
11593
11594
11595
11596static int
11597lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11598{
11599 struct pci_dev *pdev = phba->pcidev;
11600 unsigned long bar0map_len, bar1map_len, bar2map_len;
11601 int error;
11602 uint32_t if_type;
11603
11604 if (!pdev)
11605 return -ENODEV;
11606
11607
11608 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11609 if (error)
11610 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11611 if (error)
11612 return error;
11613
11614
11615
11616
11617
11618 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11619 &phba->sli4_hba.sli_intf.word0)) {
11620 return -ENODEV;
11621 }
11622
11623
11624 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11625 LPFC_SLI_INTF_VALID) {
11626 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11627 "2894 SLI_INTF reg contents invalid "
11628 "sli_intf reg 0x%x\n",
11629 phba->sli4_hba.sli_intf.word0);
11630 return -ENODEV;
11631 }
11632
11633 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11634
11635
11636
11637
11638
11639
11640 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11641 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11642 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11643
11644
11645
11646
11647
11648 phba->sli4_hba.conf_regs_memmap_p =
11649 ioremap(phba->pci_bar0_map, bar0map_len);
11650 if (!phba->sli4_hba.conf_regs_memmap_p) {
11651 dev_printk(KERN_ERR, &pdev->dev,
11652 "ioremap failed for SLI4 PCI config "
11653 "registers.\n");
11654 return -ENODEV;
11655 }
11656 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11657
11658 lpfc_sli4_bar0_register_memmap(phba, if_type);
11659 } else {
11660 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11661 bar0map_len = pci_resource_len(pdev, 1);
11662 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11663 dev_printk(KERN_ERR, &pdev->dev,
11664 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11665 return -ENODEV;
11666 }
11667 phba->sli4_hba.conf_regs_memmap_p =
11668 ioremap(phba->pci_bar0_map, bar0map_len);
11669 if (!phba->sli4_hba.conf_regs_memmap_p) {
11670 dev_printk(KERN_ERR, &pdev->dev,
11671 "ioremap failed for SLI4 PCI config "
11672 "registers.\n");
11673 return -ENODEV;
11674 }
11675 lpfc_sli4_bar0_register_memmap(phba, if_type);
11676 }
11677
11678 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11679 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11680
11681
11682
11683
11684 phba->pci_bar1_map = pci_resource_start(pdev,
11685 PCI_64BIT_BAR2);
11686 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11687 phba->sli4_hba.ctrl_regs_memmap_p =
11688 ioremap(phba->pci_bar1_map,
11689 bar1map_len);
11690 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11691 dev_err(&pdev->dev,
11692 "ioremap failed for SLI4 HBA "
11693 "control registers.\n");
11694 error = -ENOMEM;
11695 goto out_iounmap_conf;
11696 }
11697 phba->pci_bar2_memmap_p =
11698 phba->sli4_hba.ctrl_regs_memmap_p;
11699 lpfc_sli4_bar1_register_memmap(phba, if_type);
11700 } else {
11701 error = -ENOMEM;
11702 goto out_iounmap_conf;
11703 }
11704 }
11705
11706 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11707 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11708
11709
11710
11711
11712 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11713 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11714 phba->sli4_hba.drbl_regs_memmap_p =
11715 ioremap(phba->pci_bar1_map, bar1map_len);
11716 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11717 dev_err(&pdev->dev,
11718 "ioremap failed for SLI4 HBA doorbell registers.\n");
11719 error = -ENOMEM;
11720 goto out_iounmap_conf;
11721 }
11722 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11723 lpfc_sli4_bar1_register_memmap(phba, if_type);
11724 }
11725
11726 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11727 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11728
11729
11730
11731
11732 phba->pci_bar2_map = pci_resource_start(pdev,
11733 PCI_64BIT_BAR4);
11734 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11735 phba->sli4_hba.drbl_regs_memmap_p =
11736 ioremap(phba->pci_bar2_map,
11737 bar2map_len);
11738 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11739 dev_err(&pdev->dev,
11740 "ioremap failed for SLI4 HBA"
11741 " doorbell registers.\n");
11742 error = -ENOMEM;
11743 goto out_iounmap_ctrl;
11744 }
11745 phba->pci_bar4_memmap_p =
11746 phba->sli4_hba.drbl_regs_memmap_p;
11747 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11748 if (error)
11749 goto out_iounmap_all;
11750 } else {
11751 error = -ENOMEM;
11752 goto out_iounmap_all;
11753 }
11754 }
11755
11756 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11757 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11758
11759
11760
11761
11762 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11763 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11764 phba->sli4_hba.dpp_regs_memmap_p =
11765 ioremap(phba->pci_bar2_map, bar2map_len);
11766 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11767 dev_err(&pdev->dev,
11768 "ioremap failed for SLI4 HBA dpp registers.\n");
11769 error = -ENOMEM;
11770 goto out_iounmap_ctrl;
11771 }
11772 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11773 }
11774
11775
11776 switch (if_type) {
11777 case LPFC_SLI_INTF_IF_TYPE_0:
11778 case LPFC_SLI_INTF_IF_TYPE_2:
11779 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11780 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
11781 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
11782 break;
11783 case LPFC_SLI_INTF_IF_TYPE_6:
11784 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
11785 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
11786 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
11787 break;
11788 default:
11789 break;
11790 }
11791
11792 return 0;
11793
11794out_iounmap_all:
11795 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11796out_iounmap_ctrl:
11797 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11798out_iounmap_conf:
11799 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11800
11801 return error;
11802}
11803
11804
11805
11806
11807
11808
11809
11810
11811static void
11812lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
11813{
11814 uint32_t if_type;
11815 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11816
11817 switch (if_type) {
11818 case LPFC_SLI_INTF_IF_TYPE_0:
11819 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11820 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11821 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11822 break;
11823 case LPFC_SLI_INTF_IF_TYPE_2:
11824 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11825 break;
11826 case LPFC_SLI_INTF_IF_TYPE_6:
11827 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11828 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11829 if (phba->sli4_hba.dpp_regs_memmap_p)
11830 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
11831 break;
11832 case LPFC_SLI_INTF_IF_TYPE_1:
11833 default:
11834 dev_printk(KERN_ERR, &phba->pcidev->dev,
11835 "FATAL - unsupported SLI4 interface type - %d\n",
11836 if_type);
11837 break;
11838 }
11839}
11840
11841
11842
11843
11844
11845
11846
11847
11848
11849
11850
11851
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on an SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates exactly LPFC_MSIX_VECTORS MSI-X vectors, attaches the
 * slow-path handler to vector 0 and the fast-path handler to vector 1,
 * then issues the MBOX_CONFIG_MSI mailbox command so the firmware routes
 * attention conditions to the right message.
 *
 * Return: 0 on success.  On failure, a negative errno or mailbox status,
 * with all partially acquired resources released via the goto-cleanup
 * labels below (each label undoes one acquisition step, in reverse order).
 */
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors (min == max, all or nothing) */
	rc = pci_alloc_irq_vectors(phba->pcidev,
			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}

	/* vector-0 is associated with the slow-path (mbox/link event)
	 * interrupt handler
	 */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated with the fast-path (I/O) handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);

	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/* Tell the firmware how attention conditions map to the two
	 * MSI-X messages via the CONFIG_MSI mailbox command.
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Success: free the mailbox command and return 0 (rc == MBX_SUCCESS
	 * path falls through with rc from lpfc_sli_issue_mbox)
	 */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* free memory allocated for the mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the fast-path irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the slow-path irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* unconfigure the MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
11941
11942
11943
11944
11945
11946
11947
11948
11949
11950
11951
11952
11953
11954
11955
11956static int
11957lpfc_sli_enable_msi(struct lpfc_hba *phba)
11958{
11959 int rc;
11960
11961 rc = pci_enable_msi(phba->pcidev);
11962 if (!rc)
11963 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11964 "0462 PCI enable MSI mode success.\n");
11965 else {
11966 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11967 "0471 PCI enable MSI mode failed (%d)\n", rc);
11968 return rc;
11969 }
11970
11971 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
11972 0, LPFC_DRIVER_NAME, phba);
11973 if (rc) {
11974 pci_disable_msi(phba->pcidev);
11975 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11976 "0478 MSI request_irq failed (%d)\n", rc);
11977 }
11978 return rc;
11979}
11980
11981
11982
11983
11984
11985
11986
11987
11988
11989
11990
11991
11992
11993
11994
11995
11996
11997
11998static uint32_t
11999lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12000{
12001 uint32_t intr_mode = LPFC_INTR_ERROR;
12002 int retval;
12003
12004
12005 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12006 if (retval)
12007 return intr_mode;
12008 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12009
12010 if (cfg_mode == 2) {
12011
12012 retval = lpfc_sli_enable_msix(phba);
12013 if (!retval) {
12014
12015 phba->intr_type = MSIX;
12016 intr_mode = 2;
12017 }
12018 }
12019
12020
12021 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12022 retval = lpfc_sli_enable_msi(phba);
12023 if (!retval) {
12024
12025 phba->intr_type = MSI;
12026 intr_mode = 1;
12027 }
12028 }
12029
12030
12031 if (phba->intr_type == NONE) {
12032 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12033 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12034 if (!retval) {
12035
12036 phba->intr_type = INTx;
12037 intr_mode = 0;
12038 }
12039 }
12040 return intr_mode;
12041}
12042
12043
12044
12045
12046
12047
12048
12049
12050
12051
12052static void
12053lpfc_sli_disable_intr(struct lpfc_hba *phba)
12054{
12055 int nr_irqs, i;
12056
12057 if (phba->intr_type == MSIX)
12058 nr_irqs = LPFC_MSIX_VECTORS;
12059 else
12060 nr_irqs = 1;
12061
12062 for (i = 0; i < nr_irqs; i++)
12063 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12064 pci_free_irq_vectors(phba->pcidev);
12065
12066
12067 phba->intr_type = NONE;
12068 phba->sli.slistat.sli_intr = 0;
12069}
12070
12071
12072
12073
12074
12075
12076
12077
12078
12079static uint16_t
12080lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12081{
12082 struct lpfc_vector_map_info *cpup;
12083 int cpu;
12084
12085
12086 for_each_present_cpu(cpu) {
12087 cpup = &phba->sli4_hba.cpu_map[cpu];
12088
12089
12090
12091
12092
12093 if ((match == LPFC_FIND_BY_EQ) &&
12094 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12095 (cpup->eq == id))
12096 return cpu;
12097
12098
12099 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12100 return cpu;
12101 }
12102 return 0;
12103}
12104
12105#ifdef CONFIG_X86
12106
12107
12108
12109
12110
12111
12112
12113static int
12114lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12115 uint16_t phys_id, uint16_t core_id)
12116{
12117 struct lpfc_vector_map_info *cpup;
12118 int idx;
12119
12120 for_each_present_cpu(idx) {
12121 cpup = &phba->sli4_hba.cpu_map[idx];
12122
12123 if ((cpup->phys_id == phys_id) &&
12124 (cpup->core_id == core_id) &&
12125 (cpu != idx))
12126 return 1;
12127 }
12128 return 0;
12129}
12130#endif
12131
12132
12133
12134
12135
12136
12137
12138
12139
12140
/**
 * lpfc_assign_eq_map_info - Record an EQ assignment in a CPU's map entry
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: index of the EQ being assigned to @cpu.
 * @flag: flag bits (e.g. LPFC_CPU_FIRST_IRQ) to OR into the map entry.
 * @cpu: the CPU receiving the assignment.
 *
 * Updates phba->sli4_hba.cpu_map[@cpu] with the EQ index and flag, and
 * logs the resulting affinity.
 */
static inline void
lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
			unsigned int cpu)
{
	struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);

	cpup->eq = eqidx;
	cpup->flag |= flag;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
			cpu, eqhdl->irq, cpup->eq, cpup->flag);
}
12155
12156
12157
12158
12159
12160
12161
12162static void
12163lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12164{
12165 struct lpfc_vector_map_info *cpup;
12166 struct lpfc_eq_intr_info *eqi;
12167 int cpu;
12168
12169 for_each_possible_cpu(cpu) {
12170 cpup = &phba->sli4_hba.cpu_map[cpu];
12171 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12172 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12173 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12174 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12175 cpup->flag = 0;
12176 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12177 INIT_LIST_HEAD(&eqi->list);
12178 eqi->icnt = 0;
12179 }
12180}
12181
12182
12183
12184
12185
12186
12187
12188static void
12189lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12190{
12191 struct lpfc_hba_eq_hdl *eqhdl;
12192 int i;
12193
12194 for (i = 0; i < phba->cfg_irq_chann; i++) {
12195 eqhdl = lpfc_get_eq_hdl(i);
12196 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12197 eqhdl->phba = phba;
12198 }
12199}
12200
12201
12202
12203
12204
12205
12206
12207
12208
12209
12210
12211static void
12212lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12213{
12214 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12215 int max_phys_id, min_phys_id;
12216 int max_core_id, min_core_id;
12217 struct lpfc_vector_map_info *cpup;
12218 struct lpfc_vector_map_info *new_cpup;
12219#ifdef CONFIG_X86
12220 struct cpuinfo_x86 *cpuinfo;
12221#endif
12222#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12223 struct lpfc_hdwq_stat *c_stat;
12224#endif
12225
12226 max_phys_id = 0;
12227 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12228 max_core_id = 0;
12229 min_core_id = LPFC_VECTOR_MAP_EMPTY;
12230
12231
12232 for_each_present_cpu(cpu) {
12233 cpup = &phba->sli4_hba.cpu_map[cpu];
12234#ifdef CONFIG_X86
12235 cpuinfo = &cpu_data(cpu);
12236 cpup->phys_id = cpuinfo->phys_proc_id;
12237 cpup->core_id = cpuinfo->cpu_core_id;
12238 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12239 cpup->flag |= LPFC_CPU_MAP_HYPER;
12240#else
12241
12242 cpup->phys_id = 0;
12243 cpup->core_id = cpu;
12244#endif
12245
12246 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12247 "3328 CPU %d physid %d coreid %d flag x%x\n",
12248 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12249
12250 if (cpup->phys_id > max_phys_id)
12251 max_phys_id = cpup->phys_id;
12252 if (cpup->phys_id < min_phys_id)
12253 min_phys_id = cpup->phys_id;
12254
12255 if (cpup->core_id > max_core_id)
12256 max_core_id = cpup->core_id;
12257 if (cpup->core_id < min_core_id)
12258 min_core_id = cpup->core_id;
12259 }
12260
12261
12262
12263
12264
12265
12266 first_cpu = cpumask_first(cpu_present_mask);
12267 start_cpu = first_cpu;
12268
12269 for_each_present_cpu(cpu) {
12270 cpup = &phba->sli4_hba.cpu_map[cpu];
12271
12272
12273 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12274
12275 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12276
12277
12278
12279
12280
12281
12282 new_cpu = start_cpu;
12283 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12284 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12285 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12286 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12287 (new_cpup->phys_id == cpup->phys_id))
12288 goto found_same;
12289 new_cpu = cpumask_next(
12290 new_cpu, cpu_present_mask);
12291 if (new_cpu == nr_cpumask_bits)
12292 new_cpu = first_cpu;
12293 }
12294
12295 continue;
12296found_same:
12297
12298 cpup->eq = new_cpup->eq;
12299
12300
12301
12302
12303
12304 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12305 if (start_cpu == nr_cpumask_bits)
12306 start_cpu = first_cpu;
12307
12308 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12309 "3337 Set Affinity: CPU %d "
12310 "eq %d from peer cpu %d same "
12311 "phys_id (%d)\n",
12312 cpu, cpup->eq, new_cpu,
12313 cpup->phys_id);
12314 }
12315 }
12316
12317
12318 start_cpu = first_cpu;
12319
12320 for_each_present_cpu(cpu) {
12321 cpup = &phba->sli4_hba.cpu_map[cpu];
12322
12323
12324 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12325
12326 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12327
12328
12329
12330
12331
12332
12333 new_cpu = start_cpu;
12334 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12335 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12336 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12337 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12338 goto found_any;
12339 new_cpu = cpumask_next(
12340 new_cpu, cpu_present_mask);
12341 if (new_cpu == nr_cpumask_bits)
12342 new_cpu = first_cpu;
12343 }
12344
12345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12346 "3339 Set Affinity: CPU %d "
12347 "eq %d UNASSIGNED\n",
12348 cpup->hdwq, cpup->eq);
12349 continue;
12350found_any:
12351
12352 cpup->eq = new_cpup->eq;
12353
12354
12355
12356
12357
12358 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12359 if (start_cpu == nr_cpumask_bits)
12360 start_cpu = first_cpu;
12361
12362 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12363 "3338 Set Affinity: CPU %d "
12364 "eq %d from peer cpu %d (%d/%d)\n",
12365 cpu, cpup->eq, new_cpu,
12366 new_cpup->phys_id, new_cpup->core_id);
12367 }
12368 }
12369
12370
12371
12372
12373 idx = 0;
12374 for_each_present_cpu(cpu) {
12375 cpup = &phba->sli4_hba.cpu_map[cpu];
12376
12377
12378 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12379 continue;
12380
12381
12382 cpup->hdwq = idx;
12383 idx++;
12384 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12385 "3333 Set Affinity: CPU %d (phys %d core %d): "
12386 "hdwq %d eq %d flg x%x\n",
12387 cpu, cpup->phys_id, cpup->core_id,
12388 cpup->hdwq, cpup->eq, cpup->flag);
12389 }
12390
12391
12392
12393
12394
12395
12396
12397
12398 next_idx = idx;
12399 start_cpu = 0;
12400 idx = 0;
12401 for_each_present_cpu(cpu) {
12402 cpup = &phba->sli4_hba.cpu_map[cpu];
12403
12404
12405 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12406 continue;
12407
12408
12409
12410
12411
12412 if (next_idx < phba->cfg_hdw_queue) {
12413 cpup->hdwq = next_idx;
12414 next_idx++;
12415 continue;
12416 }
12417
12418
12419
12420
12421
12422
12423 new_cpu = start_cpu;
12424 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12425 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12426 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12427 new_cpup->phys_id == cpup->phys_id &&
12428 new_cpup->core_id == cpup->core_id) {
12429 goto found_hdwq;
12430 }
12431 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12432 if (new_cpu == nr_cpumask_bits)
12433 new_cpu = first_cpu;
12434 }
12435
12436
12437
12438
12439 new_cpu = start_cpu;
12440 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12441 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12442 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12443 new_cpup->phys_id == cpup->phys_id)
12444 goto found_hdwq;
12445
12446 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12447 if (new_cpu == nr_cpumask_bits)
12448 new_cpu = first_cpu;
12449 }
12450
12451
12452 cpup->hdwq = idx % phba->cfg_hdw_queue;
12453 idx++;
12454 goto logit;
12455 found_hdwq:
12456
12457 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12458 if (start_cpu == nr_cpumask_bits)
12459 start_cpu = first_cpu;
12460 cpup->hdwq = new_cpup->hdwq;
12461 logit:
12462 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12463 "3335 Set Affinity: CPU %d (phys %d core %d): "
12464 "hdwq %d eq %d flg x%x\n",
12465 cpu, cpup->phys_id, cpup->core_id,
12466 cpup->hdwq, cpup->eq, cpup->flag);
12467 }
12468
12469
12470
12471
12472
12473 idx = 0;
12474 for_each_possible_cpu(cpu) {
12475 cpup = &phba->sli4_hba.cpu_map[cpu];
12476#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12477 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12478 c_stat->hdwq_no = cpup->hdwq;
12479#endif
12480 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12481 continue;
12482
12483 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12484#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12485 c_stat->hdwq_no = cpup->hdwq;
12486#endif
12487 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12488 "3340 Set Affinity: not present "
12489 "CPU %d hdwq %d\n",
12490 cpu, cpup->hdwq);
12491 }
12492
12493
12494
12495
12496 return;
12497}
12498
12499
12500
12501
12502
12503
12504
12505
12506static int
12507lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12508 struct list_head *eqlist)
12509{
12510 const struct cpumask *maskp;
12511 struct lpfc_queue *eq;
12512 struct cpumask *tmp;
12513 u16 idx;
12514
12515 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12516 if (!tmp)
12517 return -ENOMEM;
12518
12519 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12520 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12521 if (!maskp)
12522 continue;
12523
12524
12525
12526
12527
12528 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12529 continue;
12530
12531
12532
12533
12534
12535
12536 cpumask_and(tmp, maskp, cpu_online_mask);
12537 if (cpumask_weight(tmp) > 1)
12538 continue;
12539
12540
12541
12542
12543
12544
12545 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12546 list_add(&eq->_poll_list, eqlist);
12547 }
12548 kfree(tmp);
12549 return 0;
12550}
12551
/* Unconditionally detach this HBA from the CPU hotplug state machine
 * and stop the EQ poll heartbeat timer.  SLI4 only; earlier revisions
 * never register with hotplug.
 */
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
					    &phba->cpuhp);
	/* Wait out any concurrent RCU read-side sections (e.g. the
	 * rcu_read_lock region in lpfc_cpuhp_add() that may re-arm the
	 * poll timer) before tearing the timer down for good.
	 */
	synchronize_rcu();
	del_timer_sync(&phba->cpuhp_poll_timer);
}
12566
/* Detach the HBA from CPU hotplug unless the port is offline.
 * NOTE(review): skipping in FC_OFFLINE_MODE presumably means the
 * instance is not registered while offline - confirm against callers.
 */
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	__lpfc_cpuhp_remove(phba);
}
12574
/* Register this HBA with the CPU hotplug state machine (SLI4 only) and,
 * if any EQs are currently on the software-poll list, (re)arm the poll
 * heartbeat timer.  The rcu_read_lock section pairs with the
 * synchronize_rcu() in __lpfc_cpuhp_remove().
 */
static void lpfc_cpuhp_add(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	rcu_read_lock();

	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();

	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
					 &phba->cpuhp);
}
12591
/* Common early-out checks for the cpu online/offline callbacks.
 *
 * Returns true when the caller should bail out immediately with
 * *retval: -EAGAIN while the driver is unloading (ask the hotplug core
 * to retry later), or 0 for pre-SLI4 HBAs which do no EQ polling.
 * Returns false when normal hotplug handling should proceed.
 */
static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
{
	if (phba->pport->load_flag & FC_UNLOADING) {
		*retval = -EAGAIN;
		return true;
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		*retval = 0;
		return true;
	}

	/* proceed with the hotplug callback */
	return false;
}
12607
12608
12609
12610
12611
12612
12613
/**
 * lpfc_irq_set_aff - Pin an EQ handle's irq to a single CPU
 * @eqhdl: EQ handle whose irq is being pinned.
 * @cpu: the CPU to pin the irq to.
 *
 * Rebuilds the handle's affinity mask to contain only @cpu, marks the
 * irq IRQ_NO_BALANCING so irqbalance leaves it alone, and applies the
 * mask as the affinity hint.
 */
static inline void
lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
{
	cpumask_clear(&eqhdl->aff_mask);
	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
	irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
}
12622
12623
12624
12625
12626
12627
/**
 * lpfc_irq_clear_aff - Release an EQ handle's irq affinity pinning
 * @eqhdl: EQ handle whose irq pinning is being removed.
 *
 * Clears the handle's affinity mask and re-enables irq balancing,
 * undoing lpfc_irq_set_aff().
 */
static inline void
lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
	cpumask_clear(&eqhdl->aff_mask);
	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
}
12634
12635
12636
12637
12638
12639
12640
12641
12642
12643
12644
12645
12646
12647
12648
12649
12650
/**
 * lpfc_irq_rebalance - Rebalance pinned irqs across a CPU hotplug event
 * @phba: pointer to lpfc hba data structure.
 * @cpu: the CPU going offline or coming online.
 * @offline: true for a CPU-offline event, false for online.
 *
 * Only acts in non-NORMAL irq channel modes (where irqs were pinned to
 * a restricted affinity mask), and only when @cpu is in that mask and
 * actually owns an irq (LPFC_CPU_FIRST_IRQ).  On offline, irqs pinned
 * to @cpu are migrated to the next online CPU in the mask; if none
 * remains, all pinning is dropped so irqbalance can take over.  On
 * online, @cpu reclaims its original EQ's irq.
 */
static void
lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
{
	struct lpfc_vector_map_info *cpup;
	struct cpumask *aff_mask;
	unsigned int cpu_select, cpu_next, idx;
	const struct cpumask *orig_mask;

	if (phba->irq_chann_mode == NORMAL_MODE)
		return;

	orig_mask = &phba->sli4_hba.irq_aff_mask;

	if (!cpumask_test_cpu(cpu, orig_mask))
		return;

	cpup = &phba->sli4_hba.cpu_map[cpu];

	/* Nothing pinned to this CPU; nothing to migrate */
	if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
		return;

	if (offline) {
		/* Find the next online CPU in the original mask, wrapping
		 * around and skipping @cpu itself.
		 */
		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);

		/* Found another online CPU to take over */
		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
			/* Re-pin every vector currently pinned to the
			 * offlining CPU onto the selected CPU.
			 */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				aff_mask = lpfc_get_aff_mask(idx);

				if (cpumask_test_cpu(cpu, aff_mask))
					lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
							 cpu_select);
			}
		} else {
			/* No online CPU left in the mask: drop all pinning
			 * and let the kernel balance irqs freely.
			 */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++)
				lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
		}
	} else {
		/* CPU came back online: re-pin its EQ's irq to it */
		lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
	}
}
12700
/* CPU hotplug offline callback.  Migrates pinned irqs away from the
 * dying CPU, then switches any EQ whose last online CPU this was over
 * to software polling so its completions are not lost.
 *
 * Returns 0 on success, -EAGAIN while unloading, or -ENOMEM from
 * lpfc_cpuhp_get_eq().
 */
static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	LIST_HEAD(eqlist);
	int retval;

	if (!phba) {
		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
		return 0;
	}

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, true);

	retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
	if (retval)
		return retval;

	/* start polling on these eq's */
	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
		list_del_init(&eq->_poll_list);
		lpfc_sli4_start_polling(eq);
	}

	return 0;
}
12730
/* CPU hotplug online callback.  Restores irq pinning for the returning
 * CPU and stops software polling on any EQ whose hdwq maps back to it,
 * since hardware interrupts can be serviced there again.
 *
 * Returns 0 on success or -EAGAIN while unloading.
 */
static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	unsigned int n;
	int retval;

	if (!phba) {
		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
		return 0;
	}

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, false);

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
		n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
		if (n == cpu)
			lpfc_sli4_stop_polling(eq);
	}

	return 0;
}
12756
12757
12758
12759
12760
12761
12762
12763
12764
12765
12766
12767
12768
12769
12770
12771
12772
12773
12774
12775
12776
12777
12778
12779
12780
12781
12782
12783
12784
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode on an SLI4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates up to cfg_irq_chann MSI-X vectors (capped to the restricted
 * affinity mask's weight in non-NORMAL irq channel modes), requests one
 * irq per vector, and records each vector's first CPU in the cpu_map.
 * If fewer vectors were granted than configured, cfg_irq_chann is
 * reduced to match.
 *
 * Return: 0 on success; a negative error code on failure, with all
 * requested irqs freed and the vectors released.
 */
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;
	char *name;
	const struct cpumask *aff_mask = NULL;
	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_hba_eq_hdl *eqhdl;
	const struct cpumask *maskp;
	unsigned int flags = PCI_IRQ_MSIX;

	/* Set up MSI-X multi-message vectors */
	vectors = phba->cfg_irq_chann;

	if (phba->irq_chann_mode != NORMAL_MODE)
		aff_mask = &phba->sli4_hba.irq_aff_mask;

	if (aff_mask) {
		/* Restricted mode: one vector per CPU in the mask at most.
		 * cpu iterates over the whole mask (online or not);
		 * cpu_select tracks only online CPUs for affinity pinning.
		 */
		cpu_cnt = cpumask_weight(aff_mask);
		vectors = min(phba->cfg_irq_chann, cpu_cnt);

		cpu = cpumask_first(aff_mask);
		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
	} else {
		/* Let the kernel spread vector affinity automatically */
		flags |= PCI_IRQ_AFFINITY;
	}

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	/* rc is the number of vectors actually granted */
	vectors = rc;

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		name = eqhdl->handler_name;
		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
			 LPFC_DRIVER_HANDLER_NAME"%d", index);

		eqhdl->idx = index;
		rc = request_irq(pci_irq_vector(phba->pcidev, index),
			 &lpfc_sli4_hba_intr_handler, 0,
			 name, eqhdl);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}

		eqhdl->irq = pci_irq_vector(phba->pcidev, index);

		if (aff_mask) {
			/* Pin the vector to the next online CPU if any */
			if (cpu_select < nr_cpu_ids)
				lpfc_irq_set_aff(eqhdl, cpu_select);

			/* Record this EQ's owning CPU in the cpu_map */
			lpfc_assign_eq_map_info(phba, index,
						LPFC_CPU_FIRST_IRQ,
						cpu);

			/* Advance to the next CPU in the mask (any state) */
			cpu = cpumask_next(cpu, aff_mask);

			/* And find the next online CPU for pinning */
			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
		} else if (vectors == 1) {
			/* Single vector: owned by the first present CPU */
			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
						cpu);
		} else {
			maskp = pci_irq_get_affinity(phba->pcidev, index);

			/* Pick the first present CPU in this vector's
			 * kernel-assigned affinity set whose EQ slot is
			 * still empty, and mark it FIRST_IRQ.
			 */
			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
				cpup = &phba->sli4_hba.cpu_map[cpu];

				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
					continue;
				lpfc_assign_eq_map_info(phba, index,
							LPFC_CPU_FIRST_IRQ,
							cpu);
				break;
			}
		}
	}

	if (vectors != phba->cfg_irq_chann) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->cfg_irq_chann, vectors);
		if (phba->cfg_irq_chann > vectors)
			phba->cfg_irq_chann = vectors;
	}

	return rc;

cfg_fail_out:
	/* free the irqs already requested, newest first */
	for (--index; index >= 0; index--) {
		eqhdl = lpfc_get_eq_hdl(index);
		lpfc_irq_clear_aff(eqhdl);
		irq_set_affinity_hint(eqhdl->irq, NULL);
		free_irq(eqhdl->irq, eqhdl);
	}

	/* Unconfigure the MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
12919
12920
12921
12922
12923
12924
12925
12926
12927
12928
12929
12930
12931
12932
12933
12934static int
12935lpfc_sli4_enable_msi(struct lpfc_hba *phba)
12936{
12937 int rc, index;
12938 unsigned int cpu;
12939 struct lpfc_hba_eq_hdl *eqhdl;
12940
12941 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
12942 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
12943 if (rc > 0)
12944 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12945 "0487 PCI enable MSI mode success.\n");
12946 else {
12947 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12948 "0488 PCI enable MSI mode failed (%d)\n", rc);
12949 return rc ? rc : -1;
12950 }
12951
12952 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
12953 0, LPFC_DRIVER_NAME, phba);
12954 if (rc) {
12955 pci_free_irq_vectors(phba->pcidev);
12956 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12957 "0490 MSI request_irq failed (%d)\n", rc);
12958 return rc;
12959 }
12960
12961 eqhdl = lpfc_get_eq_hdl(0);
12962 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
12963
12964 cpu = cpumask_first(cpu_present_mask);
12965 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
12966
12967 for (index = 0; index < phba->cfg_irq_chann; index++) {
12968 eqhdl = lpfc_get_eq_hdl(index);
12969 eqhdl->idx = index;
12970 }
12971
12972 return 0;
12973}
12974
12975
12976
12977
12978
12979
12980
12981
12982
12983
12984
12985
12986
12987
12988
12989
12990
12991
12992static uint32_t
12993lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12994{
12995 uint32_t intr_mode = LPFC_INTR_ERROR;
12996 int retval, idx;
12997
12998 if (cfg_mode == 2) {
12999
13000 retval = 0;
13001 if (!retval) {
13002
13003 retval = lpfc_sli4_enable_msix(phba);
13004 if (!retval) {
13005
13006 phba->intr_type = MSIX;
13007 intr_mode = 2;
13008 }
13009 }
13010 }
13011
13012
13013 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13014 retval = lpfc_sli4_enable_msi(phba);
13015 if (!retval) {
13016
13017 phba->intr_type = MSI;
13018 intr_mode = 1;
13019 }
13020 }
13021
13022
13023 if (phba->intr_type == NONE) {
13024 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13025 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13026 if (!retval) {
13027 struct lpfc_hba_eq_hdl *eqhdl;
13028 unsigned int cpu;
13029
13030
13031 phba->intr_type = INTx;
13032 intr_mode = 0;
13033
13034 eqhdl = lpfc_get_eq_hdl(0);
13035 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13036
13037 cpu = cpumask_first(cpu_present_mask);
13038 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13039 cpu);
13040 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13041 eqhdl = lpfc_get_eq_hdl(idx);
13042 eqhdl->idx = idx;
13043 }
13044 }
13045 }
13046 return intr_mode;
13047}
13048
13049
13050
13051
13052
13053
13054
13055
13056
13057
/**
 * lpfc_sli4_disable_intr - Disable device interrupt on an SLI4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases whatever the enable path acquired: for MSI-X, clears each
 * vector's affinity pinning and hint and frees its irq (the eqhdl was
 * the request_irq cookie); for MSI/INTx, frees the single irq requested
 * against phba.  Then frees the vectors and resets interrupt state.
 */
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX) {
		int index;
		struct lpfc_hba_eq_hdl *eqhdl;

		/* Free up MSI-X multi-message vectors */
		for (index = 0; index < phba->cfg_irq_chann; index++) {
			eqhdl = lpfc_get_eq_hdl(index);
			lpfc_irq_clear_aff(eqhdl);
			irq_set_affinity_hint(eqhdl->irq, NULL);
			free_irq(eqhdl->irq, eqhdl);
		}
	} else {
		free_irq(phba->pcidev->irq, phba);
	}

	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management state */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
13083
13084
13085
13086
13087
13088
13089
13090
/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * Marks the physical port as unloading, then tears the SLI3 HBA down:
 * stops timers, brings the SLI layer down, restarts the board to a
 * clean state and finally disables interrupts.  Teardown order matters:
 * interrupts are released last so outstanding work can complete.
 */
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* load_flag is read by other contexts; update under host lock */
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}
13116
13117
13118
13119
13120
13121
13122
13123
13124
13125
13126
13127
13128
13129
/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for XRI exchanges to quiesce
 * @phba: pointer to lpfc hba data structure.
 *
 * Polls until the aborted ELS, IO and (when NVMe is enabled) NVMET XRI
 * lists are all empty, i.e. no exchange is still marked busy by the
 * port.  Polls at the short T1 interval initially, then at the longer
 * T2 interval with periodic "still waiting" logs once the total wait
 * exceeds LPFC_XRI_EXCH_BUSY_WAIT_TMO.  Note there is no hard timeout:
 * this loops until the lists drain.
 */
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	int idx, ccnt;
	int wait_time = 0;
	int io_xri_cmpl = 1;
	int nvmet_xri_cmpl = 1;
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Driver just aborted IOs during the hba_unset process.  Pause
	 * here to give the HBA time to complete the aborts and returns
	 * the exchanges.
	 */
	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);

	/* Wait for NVME pending IO to flush back to transport. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_nvme_wait_for_io_drain(phba);

	/* io_xri_cmpl is clear if ANY hdwq still has aborted IO bufs */
	ccnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
		if (!io_xri_cmpl)
			ccnt++;
	}
	if (ccnt)
		io_xri_cmpl = 0;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		nvmet_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	}

	/* Poll until every abort list has drained */
	while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			/* Past the soft timeout: log what is still busy and
			 * back off to the longer poll interval.
			 */
			if (!nvmet_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6424 NVMET XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!io_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6100 IO XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}

		/* Re-sample all three completion conditions */
		ccnt = 0;
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			io_xri_cmpl = list_empty(
			    &qp->lpfc_abts_io_buf_list);
			if (!io_xri_cmpl)
				ccnt++;
		}
		if (ccnt)
			io_xri_cmpl = 0;

		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			nvmet_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		}
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	}
}
13209
13210
13211
13212
13213
13214
13215
13216
13217
13218
13219
/**
 * lpfc_sli4_hba_unset - Unset the SLI-4 hba
 * @phba: Pointer to HBA context object.
 *
 * Tears down an SLI-4 HBA function in strict order: stop timers, block
 * and flush mailbox traffic, abort outstanding IOCBs, wait for busy XRI
 * exchanges to drain, remove the CPU hotplug callback, disable
 * interrupts, disable SR-IOV, stop the worker thread and firmware
 * logging, unset/destroy the SLI-4 queues, and finally issue a PCI
 * function reset.  No lock is required from the caller.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	hrtimer_cancel(&phba->cmf_timer);

	if (phba->pport)
		phba->sli4_hba.intr_enable = 0;

	/* Block any new asynchronous mailbox commands, then wait (up to
	 * LPFC_ACTIVE_MBOX_WAIT_CNT * 10ms) for an in-flight mailbox
	 * command to finish.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}

	/* Timed out: forcibly complete the active mailbox command */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* per-phba callback de-registration for hotplug event */
	if (phba->pport)
		lpfc_cpuhp_remove(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Disable FW logging to host memory */
	lpfc_ras_stop_fwlog(phba);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
	lpfc_sli4_queue_unset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Free RAS DMA memory */
	if (phba->ras_fwlog.ras_enabled)
		lpfc_sli4_ras_dma_free(phba);

	/* Stop the SLI4 device port */
	if (phba->pport)
		phba->pport->work_port_events = 0;
}
13299
13300static uint32_t
13301lpfc_cgn_crc32(uint32_t crc, u8 byte)
13302{
13303 uint32_t msb = 0;
13304 uint32_t bit;
13305
13306 for (bit = 0; bit < 8; bit++) {
13307 msb = (crc >> 31) & 1;
13308 crc <<= 1;
13309
13310 if (msb ^ (byte & 1)) {
13311 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13312 crc |= 1;
13313 }
13314 byte >>= 1;
13315 }
13316 return crc;
13317}
13318
/**
 * lpfc_cgn_reverse_bits - Mirror the bits of a 32-bit word
 * @wd: word to reverse.
 *
 * Return: @wd with bit 0 moved to bit 31, bit 1 to bit 30, and so on.
 **/
static uint32_t
lpfc_cgn_reverse_bits(uint32_t wd)
{
	uint32_t rev = 0;
	int bit;

	/* Place each successive low bit of wd one position further down */
	for (bit = 31; bit >= 0; bit--) {
		rev |= (wd & 1u) << bit;
		wd >>= 1;
	}
	return rev;
}
13331
13332
13333
13334
13335
/**
 * lpfc_cgn_calc_crc32 - Compute the CRC over a congestion buffer
 * @ptr: buffer to checksum.
 * @byteLen: number of bytes in @ptr.
 * @crc: CRC seed value.
 *
 * Feeds every byte of the buffer through lpfc_cgn_crc32(), then
 * bit-reverses and complements the accumulator to form the final CRC.
 *
 * Return: the finished CRC value.
 **/
uint32_t
lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
{
	uint8_t *data = (uint8_t *)ptr;
	uint8_t *end = data + byteLen;

	while (data < end)
		crc = lpfc_cgn_crc32(crc, *data++);

	return ~lpfc_cgn_reverse_bits(crc);
}
13349
/**
 * lpfc_init_congestion_buf - Initialize the congestion info buffer
 * @phba: Pointer to HBA context object.
 *
 * Zeroes the driver's congestion event counters and re-initializes the
 * congestion info buffer (phba->cgn_i): size/version, the currently
 * configured congestion mode and levels, the start timestamp, the
 * per-LUN queue depth and the initial FPIN warn/alarm frequencies.
 * The buffer CRC is recomputed last.  A no-op when no congestion
 * buffer has been allocated.
 **/
void
lpfc_init_congestion_buf(struct lpfc_hba *phba)
{
	struct lpfc_cgn_info *cp;
	struct timespec64 cmpl_time;
	struct tm broken;
	uint16_t size;
	uint32_t crc;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6235 INIT Congestion Buffer %p\n", phba->cgn_i);

	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	/* Reset all driver-side congestion event counters */
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);

	atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
	atomic64_set(&phba->cgn_acqe_stat.warn, 0);
	atomic_set(&phba->cgn_driver_evt_cnt, 0);
	atomic_set(&phba->cgn_latency_evt_cnt, 0);
	atomic64_set(&phba->cgn_latency_evt, 0);
	phba->cgn_evt_minute = 0;
	phba->hba_flag &= ~HBA_CGN_DAY_WRAP;

	/* Fill the data area with 0xff, then set size and version */
	memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
	cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
	cp->cgn_info_version = LPFC_CGN_INFO_V3;

	/* cgn parameters: copy the currently configured mode/levels */
	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;

	/* Record the wall-clock start time of this buffer */
	ktime_get_real_ts64(&cmpl_time);
	time64_to_tm(cmpl_time.tv_sec, 0, &broken);

	cp->cgn_info_month = broken.tm_mon + 1;
	cp->cgn_info_day = broken.tm_mday;
	cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
	cp->cgn_info_hour = broken.tm_hour;
	cp->cgn_info_minute = broken.tm_min;
	cp->cgn_info_second = broken.tm_sec;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
			"2643 CGNInfo Init: Start Time "
			"%d/%d/%d %d:%d:%d\n",
			cp->cgn_info_day, cp->cgn_info_month,
			cp->cgn_info_year, cp->cgn_info_hour,
			cp->cgn_info_minute, cp->cgn_info_second);

	/* Record the configured LUN queue depth of the physical port */
	if (phba->pport) {
		size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
		cp->cgn_lunq = cpu_to_le16(size);
	}

	/* Initial FPIN warn/alarm signal frequencies */
	cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
	cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
	/* Recompute the CRC over the whole info block, last */
	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(crc);

	/* Next congestion event time, one timer interval from now */
	phba->cgn_evt_timestamp = jiffies +
		msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
}
13422
/**
 * lpfc_init_congestion_stat - Reset the congestion statistics area
 * @phba: Pointer to HBA context object.
 *
 * Clears the statistics portion of the congestion info buffer, records
 * the wall-clock time the statistics window started, and recomputes the
 * buffer CRC.  A no-op when no congestion buffer has been allocated.
 **/
void
lpfc_init_congestion_stat(struct lpfc_hba *phba)
{
	struct lpfc_cgn_info *cp;
	struct timespec64 cmpl_time;
	struct tm broken;
	uint32_t crc;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6236 INIT Congestion Stat %p\n", phba->cgn_i);

	if (!phba->cgn_i)
		return;

	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
	/* Zero only the statistics region, starting at cgn_stat_npm */
	memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);

	ktime_get_real_ts64(&cmpl_time);
	time64_to_tm(cmpl_time.tv_sec, 0, &broken);

	cp->cgn_stat_month = broken.tm_mon + 1;
	cp->cgn_stat_day = broken.tm_mday;
	cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
	cp->cgn_stat_hour = broken.tm_hour;
	cp->cgn_stat_minute = broken.tm_min;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
			"2647 CGNstat Init: Start Time "
			"%d/%d/%d %d:%d\n",
			cp->cgn_stat_day, cp->cgn_stat_month,
			cp->cgn_stat_year, cp->cgn_stat_hour,
			cp->cgn_stat_minute);

	/* CRC covers the whole info block, not just the stat region */
	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(crc);
}
13459
13460
13461
13462
13463
13464
/**
 * __lpfc_reg_congestion_buf - Register/deregister the congestion buffer
 * @phba: Pointer to HBA context object.
 * @reg: non-zero to register the buffer with the firmware, zero to
 *       deregister it.
 *
 * Issues a REG_CONGESTION_BUF mailbox command (polled) carrying the DMA
 * address and size of the driver's congestion info buffer.
 *
 * Return: 0 on success; -ENXIO when no buffer is allocated or the
 * mailbox command fails; -ENOMEM when a mailbox cannot be allocated.
 **/
static int
__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
{
	struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	LPFC_MBOXQ_t *mboxq;
	int length, rc;

	if (!phba->cgn_i)
		return -ENXIO;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2641 REG_CONGESTION_BUF mbox allocation fail: "
				"HBA state x%x reg %d\n",
				phba->pport->port_state, reg);
		return -ENOMEM;
	}

	/* Build the embedded REG_CONGESTION_BUF SLI4 config command */
	length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
		sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
			 LPFC_SLI4_MBX_EMBED);
	reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
	bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
	/* buf_cnt 1 registers the buffer, 0 deregisters it */
	if (reg > 0)
		bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
	else
		bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
	reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
	reg_congestion_buf->addr_lo =
		putPaddrLow(phba->cgn_i->phys);
	reg_congestion_buf->addr_hi =
		putPaddrHigh(phba->cgn_i->phys);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	/* Check both the mailbox return code and the cfg shdr status */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &shdr->response);
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2642 REG_CONGESTION_BUF mailbox "
				"failed with status x%x add_status x%x,"
				" mbx status x%x reg %d\n",
				shdr_status, shdr_add_status, rc, reg);
		return -ENXIO;
	}
	return 0;
}
13520
/**
 * lpfc_unreg_congestion_buf - Deregister the congestion buffer
 * @phba: Pointer to HBA context object.
 *
 * Stops the congestion management framework, then asks the firmware to
 * drop its registration of the congestion info buffer.
 *
 * Return: 0 on success, negative errno otherwise.
 **/
int
lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
{
	/* CMF must be stopped before the buffer goes away */
	lpfc_cmf_stop(phba);

	return __lpfc_reg_congestion_buf(phba, 0);
}
13527
/**
 * lpfc_reg_congestion_buf - Register the congestion buffer
 * @phba: Pointer to HBA context object.
 *
 * Registers the driver's congestion info buffer with the firmware.
 *
 * Return: 0 on success, negative errno otherwise.
 **/
int
lpfc_reg_congestion_buf(struct lpfc_hba *phba)
{
	return __lpfc_reg_congestion_buf(phba, 1);
}
13533
13534
13535
13536
13537
13538
13539
13540
13541
13542
13543
13544
13545
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailbox object to use.
 *
 * Issues a GET_SLI4_PARAMETERS mailbox command, decodes the returned
 * capability bits into phba->sli4_hba.pc_sli4_params and derives
 * several driver configuration settings from them (NVME support,
 * PBDE, suppress-response, EQ delay register, embedded FCP IO,
 * expanded WQ/CQ page sizes, MDS diagnostics, SLER).
 *
 * Return: 0 on success; the mailbox return code on mailbox failure;
 * -ENODEV when firmware lacks NVME support and FCP is not enabled
 * either.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	bool exp_wqcq_pages = true;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/* By default, the driver assumes the SLI4 port requires RPI
	 * header postings.  The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	/* Poll when interrupts are not yet enabled, else wait */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	/* Decode the response into the driver's parameter cache */
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
					   mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Check for Extended Pre-Registered SGL support */
	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);

	/* NVME support requires both the nvme and xib firmware bits */
	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
	      bf_get(cfg_xib, mbx_sli4_parameters));

	if (rc) {
		/* Save this to indicate the Firmware supports NVME */
		sli4_params->nvme = 1;

		/* Firmware supports NVME, but the driver may not */
		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
					"6133 Disabling NVME support: "
					"FC4 type not supported: x%x\n",
					phba->cfg_enable_fc4_type);
			goto fcponly;
		}
	} else {
		/* No firmware NVME support, disable it in the driver */
		sli4_params->nvme = 0;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
					"6101 Disabling NVME support: Not "
					"supported by firmware (%d %d) x%x\n",
					bf_get(cfg_nvme, mbx_sli4_parameters),
					bf_get(cfg_xib, mbx_sli4_parameters),
					phba->cfg_enable_fc4_type);
fcponly:
			phba->nvmet_support = 0;
			phba->cfg_nvmet_mrq = 0;
			phba->cfg_nvme_seg_cnt = 0;

			/* If no FCP support either, there is nothing left */
			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
				return -ENODEV;
			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		}
	}

	/* If the NVME FC4 type is still enabled, scale the sg segment
	 * count to the NVME maximum.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;

	/* Enable embedded Payload BDE if support is indicated */
	if (bf_get(cfg_pbde, mbx_sli4_parameters))
		phba->cfg_enable_pbde = 1;
	else
		phba->cfg_enable_pbde = 0;

	/* Suppress-response is only honored when requested by the
	 * user (cfg_suppress_rsp), supported (cfg_xib) and not
	 * explicitly disallowed by the firmware (cfg_nosr).
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/* Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, sli4_params->nvme,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	/* LNCR A0 hardware does not support expanded WQ/CQ pages */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;
	/* Check if the adapter supports MDS Diagnostics, needed for
	 * the loopback test.
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	/* Check if the adapter supports NVME SLER (sequence level
	 * error recovery).
	 */
	if (bf_get(cfg_nsler, mbx_sli4_parameters))
		phba->nsler = 1;
	else
		phba->nsler = 0;

	return 0;
}
13734
13735
13736
13737
13738
13739
13740
13741
13742
13743
13744
13745
13746
13747
13748
13749
13750
13751
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * Invoked from the PCI subsystem when an Emulex HBA with SLI-3
 * interface spec is presented.  Allocates the HBA data structure, sets
 * up PCI memory space, driver resources, the SCSI host, and then brings
 * the device up through a configure-interrupt / set-up-HBA retry loop,
 * stepping down the interrupt mode when the active-interrupt test
 * fails.  On any failure, previously completed setup steps are undone
 * via the goto-unwind chain below.
 *
 * Return: 0 on success, a negative errno value on error.
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
13902
13903
13904
13905
13906
13907
13908
13909
13910
13911
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * Invoked from the PCI subsystem when an Emulex SLI-3 HBA is removed
 * from the PCI subsystem.  Terminates all vports, removes the SCSI
 * host, brings the HBA down, and releases all driver, DMA and PCI
 * resources in the reverse order of probe.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Clean up all nodes, fast-path io and then logout */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disable all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
14001
14002
14003
14004
14005
14006
14007
14008
14009
14010
14011
14012
14013
14014
14015
14016
14017
14018
14019
14020
14021
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * Invoked from the kernel's PCI subsystem to support system Power
 * Management (PM) for a device with SLI-3 interface spec.  Brings the
 * device offline, stops the worker thread, and disables interrupts.
 *
 * Return: 0 always.
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	return 0;
}
14041
14042
14043
14044
14045
14046
14047
14048
14049
14050
14051
14052
14053
14054
14055
14056
14057
14058
14059
14060
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * Invoked from the kernel's PCI subsystem to support system Power
 * Management (PM) for a device with SLI-3 interface spec.  Restarts the
 * worker thread, re-enables the previously used interrupt mode,
 * restarts the board and brings the device back online.
 *
 * Return: 0 on success, an error code on failure.
 **/
static int __maybe_unused
lpfc_pci_resume_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
14101
14102
14103
14104
14105
14106
14107
14108
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI-3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-3 device for PCI slot recovery.  Aborts
 * all the outstanding FCP commands so their completions come back
 * before the recovery proceeds.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
14121
14122
14123
14124
14125
14126
14127
14128
14129
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI-3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-3 device for PCI slot reset: blocks
 * management IO, blocks SCSI devices, flushes the IO rings, stops
 * timers, disables interrupts, and disables the PCI device so no new
 * IO reaches the hardware during the reset.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
14152
14153
14154
14155
14156
14157
14158
14159
14160
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI-3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * Called to prepare the SLI-3 device for PCI slot permanently disabled
 * state: blocks SCSI devices, stops HBA timers and flushes all
 * outstanding IO — the device will not come back.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_io_rings(phba);
}
14175
14176
14177
14178
14179
14180
14181
14182
14183
14184
14185
14186
14187
14188
14189
14190
14191
14192
14193
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * Called from the PCI subsystem for I/O error handling to a device
 * with SLI-3 interface spec.  Prepares the device according to the
 * reported channel state and tells the PCI error-recovery core what
 * to do next.
 *
 * Return:
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
14221
14222
14223
14224
14225
14226
14227
14228
14229
14230
14231
14232
14233
14234
14235
14236
14237
14238
14239
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * Called from the PCI subsystem after the PCI bus has been reset to
 * restart the PCI card from scratch: re-enables the device, restores
 * and re-saves PCI state, re-enables the previous interrupt mode, and
 * takes the HBA offline/restarts the board so the resume step can
 * bring it back up.
 *
 * Return:
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
14290
14291
14292
14293
14294
14295
14296
14297
14298
14299
14300
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that its OK to resume
 * normal PCI operation after PCI bus error recovery.  Simply brings
 * the device back online.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}
14310
14311
14312
14313
14314
14315
14316
14317int
14318lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14319{
14320 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14321
14322 if (phba->sli_rev == LPFC_SLI_REV4) {
14323 if (max_xri <= 100)
14324 return 10;
14325 else if (max_xri <= 256)
14326 return 25;
14327 else if (max_xri <= 512)
14328 return 50;
14329 else if (max_xri <= 1024)
14330 return 100;
14331 else if (max_xri <= 1536)
14332 return 150;
14333 else if (max_xri <= 2048)
14334 return 200;
14335 else
14336 return 250;
14337 } else
14338 return 0;
14339}
14340
14341
14342
14343
14344
14345
14346
14347int
14348lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14349{
14350 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14351
14352 if (phba->nvmet_support)
14353 max_xri += LPFC_NVMET_BUF_POST;
14354 return max_xri;
14355}
14356
14357
/**
 * lpfc_log_write_firmware_error - Classify and log a firmware write failure
 * @phba: pointer to lpfc hba data structure.
 * @offset: add status returned by the failed write (also used as the
 *          not-supported sentinel ADD_STATUS_FW_NOT_SUPPORTED).
 * @magic_number: magic number from the firmware image header.
 * @ftype: file type from the firmware image header.
 * @fid: file id from the firmware image header.
 * @fsize: file size from the firmware image header.
 * @fw: firmware image being written.
 *
 * Maps a failed firmware download to an errno and logs the reason:
 * image/HBA-family mismatch (-EINVAL), downloads prohibited by system
 * configuration (-EACCES), or a generic download failure (-EIO).
 *
 * Return: -EINVAL, -EACCES or -EIO as described above.
 **/
static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	int rc;
	u8 sli_family;

	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
	/* Three cases:
	 * 1) Firmware is not supported on this HBA (wrong magic number
	 *    for the SLI family, or explicit not-supported status).
	 * 2) Firmware download has been prohibited by a system config.
	 * 3) Any other firmware update failure.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
	     magic_number != MAGIC_NUMBER_G7) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
	     magic_number != MAGIC_NUMBER_G7P)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}
14407
14408
14409
14410
14411
14412
14413
/**
 * lpfc_write_firmware - flash a downloaded firmware image to the adapter
 * @fw: firmware image from the firmware loader (NULL if the load failed).
 * @context: opaque pointer; actually the struct lpfc_hba being updated.
 *
 * Invoked directly (RUN_FW_UPGRADE) or as the request_firmware_nowait()
 * callback (INT_FW_UPGRADE).  Compares the image revision with the running
 * firmware revision and, when they differ, streams the image to the adapter
 * in SLI4_PAGE_SIZE chunks through a pool of DMA buffers via
 * lpfc_wr_object().  Always consumes (releases) @fw.
 */
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* @fw can be NULL when the no-wait firmware request failed */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	/* Pull identifying fields out of the big-endian image header */
	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	/* Only flash when the image revision differs from the running one */
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		/* Pre-allocate the DMA buffer pool used for the download */
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		/* Refill the buffer pool from the image and write it out;
		 * lpfc_wr_object() advances @offset on each pass.
		 */
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					/* Final partial page of the image */
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				rc = lpfc_log_write_firmware_error(phba, offset,
								   magic_number,
								   ftype,
								   fid,
								   fsize,
								   fw);
				goto release_out;
			}
		}
		/* Success: report the number of bytes written */
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	/* Tear down the DMA buffer pool */
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	if (rc < 0)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3024 Firmware update success: size %d.\n", rc);
}
14512
14513
14514
14515
14516
14517
14518
14519
14520
14521int
14522lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14523{
14524 uint8_t file_name[ELX_MODEL_NAME_SIZE];
14525 int ret;
14526 const struct firmware *fw;
14527
14528
14529 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14530 LPFC_SLI_INTF_IF_TYPE_2)
14531 return -EPERM;
14532
14533 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
14534
14535 if (fw_upgrade == INT_FW_UPGRADE) {
14536 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14537 file_name, &phba->pcidev->dev,
14538 GFP_KERNEL, (void *)phba,
14539 lpfc_write_firmware);
14540 } else if (fw_upgrade == RUN_FW_UPGRADE) {
14541 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14542 if (!ret)
14543 lpfc_write_firmware(fw, (void *)phba);
14544 } else {
14545 ret = -EINVAL;
14546 }
14547
14548 return ret;
14549}
14550
14551
14552
14553
14554
14555
14556
14557
14558
14559
14560
14561
14562
14563
14564
14565
14566
14567
14568
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to register SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * Allocates the HBA structure, enables the PCI function, maps its memory
 * space, sets up SLI-4 driver resources, enables interrupts, creates the
 * SCSI host, brings the SLI-4 port up and optionally registers an NVME
 * local port and kicks off a firmware upgrade.  On failure, unwinds in
 * reverse order via the goto ladder at the bottom.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	INIT_LIST_HEAD(&phba->poll_list);

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values of Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);

	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Without MSI-X, fall back to a single interrupt channel (and a
	 * single NVMET MRQ when in target mode).
	 */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA and bring the port up */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Register an NVME local port when NVME is enabled and this is an
	 * initiator (non-nvmet) function.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* A localport-create failure is logged but does not
			 * fail the probe.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* Check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created */
	lpfc_create_static_vport(phba);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		/* NOTE(review): on the out_destroy_shost path
		 * lpfc_destroy_shost() has already run; confirm this extra
		 * scsi_host_put() does not drop the host reference twice.
		 */
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
14744
14745
14746
14747
14748
14749
14750
14751
14752
14753
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * Mirror of lpfc_pci_probe_one_s4(): marks the physical port as unloading,
 * terminates vports, removes the SCSI host, tears down NVME(T) ports,
 * queues and driver resources, and finally frees the HBA structure.
 */
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);
	if (phba->cgn_i)
		lpfc_unreg_congestion_buf(phba);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port; the nvme and nvmet
	 * ports are destroyed afterwards to release transport memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/* Tear down debugfs, stop timers and unlink the vport from the
	 * HBA's port list before releasing SLI-4 resources.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}
14831
14832
14833
14834
14835
14836
14837
14838
14839
14840
14841
14842
14843
14844
14845
14846
14847
14848
14849
14850
14851
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * Quiesces the port for system suspend: brings it offline, stops the
 * worker thread, then disables interrupts and destroys the SLI-4 queues.
 *
 * Return: 0 always.
 */
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}
14872
14873
14874
14875
14876
14877
14878
14879
14880
14881
14882
14883
14884
14885
14886
14887
14888
14889
14890
14891
14892static int __maybe_unused
14893lpfc_pci_resume_one_s4(struct device *dev_d)
14894{
14895 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14896 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14897 uint32_t intr_mode;
14898 int error;
14899
14900 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14901 "0292 PCI device Power Management resume.\n");
14902
14903
14904 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14905 "lpfc_worker_%d", phba->brd_no);
14906 if (IS_ERR(phba->worker_thread)) {
14907 error = PTR_ERR(phba->worker_thread);
14908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14909 "0293 PM resume failed to start worker "
14910 "thread: error=x%x.\n", error);
14911 return error;
14912 }
14913
14914
14915 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
14916 if (intr_mode == LPFC_INTR_ERROR) {
14917 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14918 "0294 PM resume Failed to enable interrupt\n");
14919 return -EIO;
14920 } else
14921 phba->intr_mode = intr_mode;
14922
14923
14924 lpfc_sli_brdrestart(phba);
14925 lpfc_online(phba);
14926
14927
14928 lpfc_log_intr_mode(phba, phba->intr_mode);
14929
14930 return 0;
14931}
14932
14933
14934
14935
14936
14937
14938
14939
/**
 * lpfc_sli4_prep_dev_for_recover - prepare SLI-4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked from the io_error_detected handler when the PCI channel is still
 * usable (pci_channel_io_normal).
 */
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2828 PCI channel I/O abort preparing for recovery\n");

	/* There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq so the SCSI mid-layer retries them after recovery.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
14951
14952
14953
14954
14955
14956
14957
14958
14959
/**
 * lpfc_sli4_prep_dev_for_reset - prepare SLI-4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked when the PCI channel is frozen: blocks management and SCSI I/O,
 * flushes the I/O rings, stops timers, then releases interrupts/queues and
 * disables the PCI function ahead of the coming slot reset.
 */
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
14983
14984
14985
14986
14987
14988
14989
14990
14991
/**
 * lpfc_sli4_prep_dev_for_perm_failure - prepare SLI-4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked when the PCI channel has permanently failed: blocks SCSI I/O,
 * stops timers and flushes the I/O rings; no reset will follow.
 */
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}
15007
15008
15009
15010
15011
15012
15013
15014
15015
15016
15017
15018
15019
15020
15021
15022
15023
15024static pci_ers_result_t
15025lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15026{
15027 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15028 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15029
15030 switch (state) {
15031 case pci_channel_io_normal:
15032
15033 lpfc_sli4_prep_dev_for_recover(phba);
15034 return PCI_ERS_RESULT_CAN_RECOVER;
15035 case pci_channel_io_frozen:
15036
15037 lpfc_sli4_prep_dev_for_reset(phba);
15038 return PCI_ERS_RESULT_NEED_RESET;
15039 case pci_channel_io_perm_failure:
15040
15041 lpfc_sli4_prep_dev_for_perm_failure(phba);
15042 return PCI_ERS_RESULT_DISCONNECT;
15043 default:
15044
15045 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15046 "2825 Unknown PCI error state: x%x\n", state);
15047 lpfc_sli4_prep_dev_for_reset(phba);
15048 return PCI_ERS_RESULT_NEED_RESET;
15049 }
15050}
15051
15052
15053
15054
15055
15056
15057
15058
15059
15060
15061
15062
15063
15064
15065
15066
15067
15068
15069
/**
 * lpfc_io_slot_reset_s4 - SLI-4 handler run after a PCI slot reset
 * @pdev: pointer to PCI device.
 *
 * Re-enables the device, restores and re-saves PCI config space, clears
 * LPFC_SLI_ACTIVE so a full restart follows, and re-enables interrupts in
 * the previously used mode.
 *
 * Return: PCI_ERS_RESULT_RECOVERED on success, PCI_ERS_RESULT_DISCONNECT
 * if the device or its interrupts cannot be re-enabled.
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/* pci_restore_state() clears the device's saved_state flag, so the
	 * restored state must be saved again for a later restore.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Clear SLI_ACTIVE so the port is fully restarted on resume */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
15115
15116
15117
15118
15119
15120
15121
15122
15123
15124
15125
15126static void
15127lpfc_io_resume_s4(struct pci_dev *pdev)
15128{
15129 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15130 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15131
15132
15133
15134
15135
15136
15137
15138 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15139
15140 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15141 lpfc_offline(phba);
15142 lpfc_sli_brdrestart(phba);
15143
15144 lpfc_online(phba);
15145 }
15146}
15147
15148
15149
15150
15151
15152
15153
15154
15155
15156
15157
15158
15159
15160
15161
15162
15163
15164
15165
15166static int
15167lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15168{
15169 int rc;
15170 struct lpfc_sli_intf intf;
15171
15172 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15173 return -ENODEV;
15174
15175 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15176 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15177 rc = lpfc_pci_probe_one_s4(pdev, pid);
15178 else
15179 rc = lpfc_pci_probe_one_s3(pdev, pid);
15180
15181 return rc;
15182}
15183
15184
15185
15186
15187
15188
15189
15190
15191
15192
15193
15194static void
15195lpfc_pci_remove_one(struct pci_dev *pdev)
15196{
15197 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15198 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15199
15200 switch (phba->pci_dev_grp) {
15201 case LPFC_PCI_DEV_LP:
15202 lpfc_pci_remove_one_s3(pdev);
15203 break;
15204 case LPFC_PCI_DEV_OC:
15205 lpfc_pci_remove_one_s4(pdev);
15206 break;
15207 default:
15208 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15209 "1424 Invalid PCI device group: 0x%x\n",
15210 phba->pci_dev_grp);
15211 break;
15212 }
15213 return;
15214}
15215
15216
15217
15218
15219
15220
15221
15222
15223
15224
15225
15226
15227
15228
15229static int __maybe_unused
15230lpfc_pci_suspend_one(struct device *dev)
15231{
15232 struct Scsi_Host *shost = dev_get_drvdata(dev);
15233 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15234 int rc = -ENODEV;
15235
15236 switch (phba->pci_dev_grp) {
15237 case LPFC_PCI_DEV_LP:
15238 rc = lpfc_pci_suspend_one_s3(dev);
15239 break;
15240 case LPFC_PCI_DEV_OC:
15241 rc = lpfc_pci_suspend_one_s4(dev);
15242 break;
15243 default:
15244 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15245 "1425 Invalid PCI device group: 0x%x\n",
15246 phba->pci_dev_grp);
15247 break;
15248 }
15249 return rc;
15250}
15251
15252
15253
15254
15255
15256
15257
15258
15259
15260
15261
15262
15263
15264
15265static int __maybe_unused
15266lpfc_pci_resume_one(struct device *dev)
15267{
15268 struct Scsi_Host *shost = dev_get_drvdata(dev);
15269 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15270 int rc = -ENODEV;
15271
15272 switch (phba->pci_dev_grp) {
15273 case LPFC_PCI_DEV_LP:
15274 rc = lpfc_pci_resume_one_s3(dev);
15275 break;
15276 case LPFC_PCI_DEV_OC:
15277 rc = lpfc_pci_resume_one_s4(dev);
15278 break;
15279 default:
15280 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15281 "1426 Invalid PCI device group: 0x%x\n",
15282 phba->pci_dev_grp);
15283 break;
15284 }
15285 return rc;
15286}
15287
15288
15289
15290
15291
15292
15293
15294
15295
15296
15297
15298
15299
15300
15301
15302
15303static pci_ers_result_t
15304lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15305{
15306 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15307 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15308 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15309
15310 switch (phba->pci_dev_grp) {
15311 case LPFC_PCI_DEV_LP:
15312 rc = lpfc_io_error_detected_s3(pdev, state);
15313 break;
15314 case LPFC_PCI_DEV_OC:
15315 rc = lpfc_io_error_detected_s4(pdev, state);
15316 break;
15317 default:
15318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15319 "1427 Invalid PCI device group: 0x%x\n",
15320 phba->pci_dev_grp);
15321 break;
15322 }
15323 return rc;
15324}
15325
15326
15327
15328
15329
15330
15331
15332
15333
15334
15335
15336
15337
15338
15339
15340static pci_ers_result_t
15341lpfc_io_slot_reset(struct pci_dev *pdev)
15342{
15343 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15344 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15345 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15346
15347 switch (phba->pci_dev_grp) {
15348 case LPFC_PCI_DEV_LP:
15349 rc = lpfc_io_slot_reset_s3(pdev);
15350 break;
15351 case LPFC_PCI_DEV_OC:
15352 rc = lpfc_io_slot_reset_s4(pdev);
15353 break;
15354 default:
15355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15356 "1428 Invalid PCI device group: 0x%x\n",
15357 phba->pci_dev_grp);
15358 break;
15359 }
15360 return rc;
15361}
15362
15363
15364
15365
15366
15367
15368
15369
15370
15371
15372
15373static void
15374lpfc_io_resume(struct pci_dev *pdev)
15375{
15376 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15378
15379 switch (phba->pci_dev_grp) {
15380 case LPFC_PCI_DEV_LP:
15381 lpfc_io_resume_s3(pdev);
15382 break;
15383 case LPFC_PCI_DEV_OC:
15384 lpfc_io_resume_s4(pdev);
15385 break;
15386 default:
15387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15388 "1429 Invalid PCI device group: 0x%x\n",
15389 phba->pci_dev_grp);
15390 break;
15391 }
15392 return;
15393}
15394
15395
15396
15397
15398
15399
15400
15401
15402
15403
15404
15405static void
15406lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15407{
15408
15409 if (!phba->cfg_EnableXLane)
15410 return;
15411
15412 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15413 phba->cfg_fof = 1;
15414 } else {
15415 phba->cfg_fof = 0;
15416 mempool_destroy(phba->device_data_mem_pool);
15417 phba->device_data_mem_pool = NULL;
15418 }
15419
15420 return;
15421}
15422
15423
15424
15425
15426
15427
15428
15429
15430void
15431lpfc_sli4_ras_init(struct lpfc_hba *phba)
15432{
15433
15434 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15435 LPFC_SLI_INTF_IF_TYPE_6) ||
15436 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15437 LPFC_SLI_INTF_FAMILY_G6)) {
15438 phba->ras_fwlog.ras_hwsupport = true;
15439 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15440 phba->cfg_ras_fwlog_buffsize)
15441 phba->ras_fwlog.ras_enabled = true;
15442 else
15443 phba->ras_fwlog.ras_enabled = false;
15444 } else {
15445 phba->ras_fwlog.ras_hwsupport = false;
15446 }
15447}
15448
15449
/* Export the PCI ID table for module autoloading */
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

/* PCI AER/EEH error recovery callbacks */
static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

/* Power-management (system suspend/resume) operations */
static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
			 lpfc_pci_suspend_one,
			 lpfc_pci_resume_one);

/* PCI driver glue; .shutdown reuses the remove path */
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.driver.pm	= &lpfc_pci_pm_ops_one,
	.err_handler    = &lpfc_err_handler,
};

/* /dev/lpfcmgmt: no file operations beyond open/close semantics */
static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
15481
15482
15483
15484
15485
15486
15487
15488
15489
15490
15491
15492
15493
/**
 * lpfc_init - lpfc module initialization routine
 *
 * Registers the lpfcmgmt misc device, attaches the FC transport templates
 * (physical and vport), initializes the WQE/NVMET command templates,
 * installs a CPU hotplug multi-state callback and finally registers the
 * PCI driver.  Failures unwind in reverse order via the goto ladder.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	pr_info(LPFC_MODULE_DESC "\n");
	pr_info(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		/* NOTE(review): this failure is treated as non-fatal — init
		 * continues without the lpfcmgmt device, and the later error
		 * paths still call misc_deregister(); confirm that is safe
		 * when registration never happened.
		 */
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	error = -ENOMEM;
	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
	lpfc_wqe_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

	/* cpuhp_setup_state_multi() returns a dynamic state number (>= 0)
	 * on success, which is stashed for instance add/remove calls.
	 */
	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);

	return error;
}
15549
/**
 * lpfc_dmp_dbg - dump the driver's in-memory debug log ring to the console
 * @phba: pointer to lpfc hba data structure.
 *
 * Skips the dump entirely when verbose logging is enabled on the physical
 * port or any vport (the messages were already emitted as they occurred).
 * Uses dbg_log_dmping as a one-shot guard so concurrent dumpers do not
 * interleave, replays up to DBG_LOG_SZ buffered entries, then resets the
 * counters.
 */
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
	unsigned int start_idx;
	unsigned int dbg_cnt;
	unsigned int temp_idx;
	int i;
	int j = 0;
	unsigned long rem_nsec, iflags;
	bool log_verbose = false;
	struct lpfc_vport *port_iterator;

	/* Don't dump messages if we explicitly set log_verbose for the
	 * physical port or any vport.
	 */
	if (phba->cfg_log_verbose)
		return;

	spin_lock_irqsave(&phba->port_list_lock, iflags);
	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
		if (port_iterator->load_flag & FC_UNLOADING)
			continue;
		/* Hold a host reference while peeking at the vport config */
		if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
			if (port_iterator->cfg_log_verbose)
				log_verbose = true;

			scsi_host_put(lpfc_shost_from_vport(port_iterator));

			if (log_verbose) {
				spin_unlock_irqrestore(&phba->port_list_lock,
						       iflags);
				return;
			}
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, iflags);

	/* Only one dumper at a time */
	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;

	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
	if (!dbg_cnt)
		goto out;
	temp_idx = start_idx;
	if (dbg_cnt >= DBG_LOG_SZ) {
		/* Ring has wrapped: dump the whole buffer.  NOTE(review):
		 * temp_idx here is start_idx - 1 and may underflow to a
		 * large value when start_idx is 0; it is only used in the
		 * "start %d end %d" banner below — confirm intended.
		 */
		dbg_cnt = DBG_LOG_SZ;
		temp_idx -= 1;
	} else {
		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
		} else {
			/* Walk start_idx back to the oldest buffered entry,
			 * wrapping around the ring when necessary.
			 */
			if (start_idx < dbg_cnt)
				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
			else
				start_idx -= dbg_cnt;
		}
	}
	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
		 start_idx, temp_idx, dbg_cnt);

	for (i = 0; i < dbg_cnt; i++) {
		if ((start_idx + i) < DBG_LOG_SZ)
			temp_idx = (start_idx + i) % DBG_LOG_SZ;
		else
			temp_idx = j++;
		/* do_div() leaves the quotient (seconds) in t_ns and
		 * returns the remainder in nanoseconds.
		 */
		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
	}
out:
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}
15626
/**
 * lpfc_dbg_print - store a driver log message in the in-memory debug ring
 * @phba: pointer to lpfc hba data structure.
 * @fmt: printf-style format string.
 *
 * While a ring dump is in progress (dbg_log_dmping set) the message is
 * sent straight to the console instead, so it is not lost and the ring
 * indices stay untouched.  Otherwise the formatted message and a
 * local_clock() timestamp are stored at the next ring slot.
 */
__printf(2, 3)
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
	struct va_format vaf;

	va_start(args, fmt);
	if (unlikely(dbg_dmping)) {
		/* Bypass the ring while it is being dumped */
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
		va_end(args);
		return;
	}
	/* Claim the next slot; dbg_log_idx grows monotonically and is
	 * reduced modulo DBG_LOG_SZ to index the ring.
	 */
	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
		DBG_LOG_SZ;

	atomic_inc(&phba->dbg_log_cnt);

	vscnprintf(phba->dbg_log[idx].log,
		   sizeof(phba->dbg_log[idx].log), fmt, args);
	va_end(args);

	phba->dbg_log[idx].t_ns = local_clock();
}
15655
15656
15657
15658
15659
15660
15661
15662
/**
 * lpfc_exit - lpfc module removal routine
 *
 * Unwinds lpfc_init(): deregisters the misc device and PCI driver,
 * removes the CPU hotplug state, releases both FC transport templates
 * and destroys the HBA index IDR.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}
15673
/* Module entry/exit points and metadata */
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
15680